data
dict
{ "proceeding": { "id": "1tmhi3ly74c", "title": "2020 25th International Conference on Pattern Recognition (ICPR)", "acronym": "icpr", "groupId": "1000545", "volume": "0", "displayVolume": "0", "year": "2021", "__typename": "ProceedingType" }, "article": { "id": "1tmiobOt10A", "doi": "10.1109/ICPR48806.2021.9412616", "title": "Separation of Aleatoric and Epistemic Uncertainty in Deterministic Deep Neural Networks", "normalizedTitle": "Separation of Aleatoric and Epistemic Uncertainty in Deterministic Deep Neural Networks", "abstract": "Despite the success of deep neural networks (DNN) in many applications, their ability to model uncertainty is still significantly limited. For example, in safety-critical applications such as autonomous driving, it is crucial to obtain a prediction that reflects different types of uncertainty to address life-threatening situations appropriately. In such cases, it is essential to be aware of the risk (i.e., aleatoric uncertainty) and the reliability (i.e., epistemic uncertainty) that comes with a prediction. We present AE-DNN, a model allowing the separation of aleatoric and epistemic uncertainty while maintaining a proper generalization capability. AE-DNN is based on deterministic DNN, which can determine the respective uncertainty measures in a single forward pass. In analyses with synthetic and image data, we show that our method improves the modeling of epistemic uncertainty while providing an intuitively understandable separation of risk and reliability.", "abstracts": [ { "abstractType": "Regular", "content": "Despite the success of deep neural networks (DNN) in many applications, their ability to model uncertainty is still significantly limited. For example, in safety-critical applications such as autonomous driving, it is crucial to obtain a prediction that reflects different types of uncertainty to address life-threatening situations appropriately. 
In such cases, it is essential to be aware of the risk (i.e., aleatoric uncertainty) and the reliability (i.e., epistemic uncertainty) that comes with a prediction. We present AE-DNN, a model allowing the separation of aleatoric and epistemic uncertainty while maintaining a proper generalization capability. AE-DNN is based on deterministic DNN, which can determine the respective uncertainty measures in a single forward pass. In analyses with synthetic and image data, we show that our method improves the modeling of epistemic uncertainty while providing an intuitively understandable separation of risk and reliability.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "Despite the success of deep neural networks (DNN) in many applications, their ability to model uncertainty is still significantly limited. For example, in safety-critical applications such as autonomous driving, it is crucial to obtain a prediction that reflects different types of uncertainty to address life-threatening situations appropriately. In such cases, it is essential to be aware of the risk (i.e., aleatoric uncertainty) and the reliability (i.e., epistemic uncertainty) that comes with a prediction. We present AE-DNN, a model allowing the separation of aleatoric and epistemic uncertainty while maintaining a proper generalization capability. AE-DNN is based on deterministic DNN, which can determine the respective uncertainty measures in a single forward pass. 
In analyses with synthetic and image data, we show that our method improves the modeling of epistemic uncertainty while providing an intuitively understandable separation of risk and reliability.", "fno": "09412616", "keywords": [ "Data Analysis", "Deep Learning Artificial Intelligence", "Generalisation Artificial Intelligence", "Uncertainty Handling", "Life Threatening Situations", "Aleatoric Uncertainty", "Epistemic Uncertainty", "AE DNN", "Deterministic DNN", "Uncertainty Measures", "Deterministic Deep Neural Networks", "Safety Critical Applications", "Reliability", "Generalization Capability", "Synthetic Data Analysis", "Image Data Analysis", "Analytical Models", "Uncertainty", "Neural Networks", "Measurement Uncertainty", "Data Models", "Pattern Recognition", "Reliability" ], "authors": [ { "affiliation": "University of Kassel,Intelligent Embedded Systems,Germany", "fullName": "Denis Huseljic", "givenName": "Denis", "surname": "Huseljic", "__typename": "ArticleAuthorType" }, { "affiliation": "University of Kassel,Intelligent Embedded Systems,Germany", "fullName": "Bernhard Sick", "givenName": "Bernhard", "surname": "Sick", "__typename": "ArticleAuthorType" }, { "affiliation": "University of Kassel,Intelligent Embedded Systems,Germany", "fullName": "Marek Herde", "givenName": "Marek", "surname": "Herde", "__typename": "ArticleAuthorType" }, { "affiliation": "University of Kassel,Intelligent Embedded Systems,Germany", "fullName": "Daniel Kottke", "givenName": "Daniel", "surname": "Kottke", "__typename": "ArticleAuthorType" } ], "idPrefix": "icpr", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "2021-01-01T00:00:00", "pubType": "proceedings", "pages": "9172-9179", "year": "2021", "issn": "1051-4651", "isbn": "978-1-7281-8808-9", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "09413310", "articleId": "1tmjMPuE6li", "__typename": 
"AdjacentArticleType" }, "next": { "fno": "09412889", "articleId": "1tmhUkfj9iE", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/isuma/1990/2107/0/00151232", "title": "Epistemic background problems of uncertainty", "doi": null, "abstractUrl": "/proceedings-article/isuma/1990/00151232/12OmNyUWR6x", "parentPublication": { "id": "proceedings/isuma/1990/2107/0", "title": "Proceedings First International Symposium on Uncertainty Modeling and Analysis", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icdm/2021/2398/0/239800b174", "title": "Detecting and Mitigating Test-time Failure Risks via Model-agnostic Uncertainty Learning", "doi": null, "abstractUrl": "/proceedings-article/icdm/2021/239800b174/1Aqxkq3wPMA", "parentPublication": { "id": "proceedings/icdm/2021/2398/0", "title": "2021 IEEE International Conference on Data Mining (ICDM)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/ta/2022/04/09826787", "title": "Dimensional Affect Uncertainty Modelling for Apparent Personality Recognition", "doi": null, "abstractUrl": "/journal/ta/2022/04/09826787/1EWSpbXdtqo", "parentPublication": { "id": "trans/ta", "title": "IEEE Transactions on Affective Computing", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cacml/2022/8290/0/829000a804", "title": "Uncertainty Estimation for Efficient Monocular Depth Perception", "doi": null, "abstractUrl": "/proceedings-article/cacml/2022/829000a804/1FY1rtZD0ME", "parentPublication": { "id": "proceedings/cacml/2022/8290/0", "title": "2022 Asia Conference on Algorithms, Computing and Machine Learning (CACML)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvprw/2022/8739/0/873900b508", "title": "A Deeper Look into Aleatoric and Epistemic Uncertainty 
Disentanglement", "doi": null, "abstractUrl": "/proceedings-article/cvprw/2022/873900b508/1G56BW7QpEs", "parentPublication": { "id": "proceedings/cvprw/2022/8739/0", "title": "2022 IEEE/CVF Conference on Computer Vision and Pattern Recognition Workshops (CVPRW)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icpr/2022/9062/0/09956231", "title": "AutoDEUQ: Automated Deep Ensemble with Uncertainty Quantification", "doi": null, "abstractUrl": "/proceedings-article/icpr/2022/09956231/1IHq9h4Xzkk", "parentPublication": { "id": "proceedings/icpr/2022/9062/0", "title": "2022 26th International Conference on Pattern Recognition (ICPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iccv/2019/4803/0/480300c931", "title": "Sampling-Free Epistemic Uncertainty Estimation Using Approximated Variance Propagation", "doi": null, "abstractUrl": "/proceedings-article/iccv/2019/480300c931/1hQqmmfFfyw", "parentPublication": { "id": "proceedings/iccv/2019/4803/0", "title": "2019 IEEE/CVF International Conference on Computer Vision (ICCV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iccvw/2019/5023/0/09022324", "title": "Characterizing Sources of Uncertainty to Proxy Calibration and Disambiguate Annotator and Data Bias", "doi": null, "abstractUrl": "/proceedings-article/iccvw/2019/09022324/1i5mCk3kctW", "parentPublication": { "id": "proceedings/iccvw/2019/5023/0", "title": "2019 IEEE/CVF International Conference on Computer Vision Workshop (ICCVW)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvprw/2020/9360/0/09150658", "title": "Evaluating Scalable Bayesian Deep Learning Methods for Robust Computer Vision", "doi": null, "abstractUrl": "/proceedings-article/cvprw/2020/09150658/1lPHjiK2I24", "parentPublication": { "id": "proceedings/cvprw/2020/9360/0", "title": "2020 
IEEE/CVF Conference on Computer Vision and Pattern Recognition Workshops (CVPRW)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ispa-bdcloud-socialcom-sustaincom/2021/3574/0/357400a826", "title": "Capture Uncertainties in Deep Neural Networks for Safe Operation of Autonomous Driving Vehicles", "doi": null, "abstractUrl": "/proceedings-article/ispa-bdcloud-socialcom-sustaincom/2021/357400a826/1zxL7N4SG6Q", "parentPublication": { "id": "proceedings/ispa-bdcloud-socialcom-sustaincom/2021/3574/0", "title": "2021 IEEE Intl Conf on Parallel & Distributed Processing with Applications, Big Data & Cloud Computing, Sustainable Computing & Communications, Social Computing & Networking (ISPA/BDCloud/SocialCom/SustainCom)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "1tuAeQeDJja", "title": "2021 IEEE Virtual Reality and 3D User Interfaces (VR)", "acronym": "vr", "groupId": "1000791", "volume": "0", "displayVolume": "0", "year": "2021", "__typename": "ProceedingType" }, "article": { "id": "1tuAQLvc5WM", "doi": "10.1109/VR50410.2021.00104", "title": "Evaluating the Potential of Glanceable AR Interfaces for Authentic Everyday Uses", "normalizedTitle": "Evaluating the Potential of Glanceable AR Interfaces for Authentic Everyday Uses", "abstract": "In the near future, augmented reality (AR) glasses are envisioned to become the next-generation personal computing platform. They could be always on and worn all day, delivering continuous and pervasive AR experiences for general-purpose everyday use cases. However, it remains unclear how we could enable unobtrusive and easy information access without distracting users, while being acceptable to use at the same time. To address this question, we implemented two prototypes based on the Glanceable AR paradigm, a promising way of managing and acquiring information through glancing at the periphery of AR head-worn displays (HWDs). We conducted two separate studies to evaluate our designs. In the first study, we obtained feedback from a large sample of participants of varied age and background about a video prototype that showcased some envisioned scenarios of using Glanceable AR for everyday tasks. In the second study, we asked participants to use a working prototype during authentic real-world activities for three days. We found that users appreciated the Glanceable AR approach. 
They found it less distracting or intrusive than existing devices in authentic everyday use cases, and would like to use the interface on a daily basis if the form factor of the AR headset was more like eyeglasses.", "abstracts": [ { "abstractType": "Regular", "content": "In the near future, augmented reality (AR) glasses are envisioned to become the next-generation personal computing platform. They could be always on and worn all day, delivering continuous and pervasive AR experiences for general-purpose everyday use cases. However, it remains unclear how we could enable unobtrusive and easy information access without distracting users, while being acceptable to use at the same time. To address this question, we implemented two prototypes based on the Glanceable AR paradigm, a promising way of managing and acquiring information through glancing at the periphery of AR head-worn displays (HWDs). We conducted two separate studies to evaluate our designs. In the first study, we obtained feedback from a large sample of participants of varied age and background about a video prototype that showcased some envisioned scenarios of using Glanceable AR for everyday tasks. In the second study, we asked participants to use a working prototype during authentic real-world activities for three days. We found that users appreciated the Glanceable AR approach. They found it less distracting or intrusive than existing devices in authentic everyday use cases, and would like to use the interface on a daily basis if the form factor of the AR headset was more like eyeglasses.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "In the near future, augmented reality (AR) glasses are envisioned to become the next-generation personal computing platform. They could be always on and worn all day, delivering continuous and pervasive AR experiences for general-purpose everyday use cases. 
However, it remains unclear how we could enable unobtrusive and easy information access without distracting users, while being acceptable to use at the same time. To address this question, we implemented two prototypes based on the Glanceable AR paradigm, a promising way of managing and acquiring information through glancing at the periphery of AR head-worn displays (HWDs). We conducted two separate studies to evaluate our designs. In the first study, we obtained feedback from a large sample of participants of varied age and background about a video prototype that showcased some envisioned scenarios of using Glanceable AR for everyday tasks. In the second study, we asked participants to use a working prototype during authentic real-world activities for three days. We found that users appreciated the Glanceable AR approach. They found it less distracting or intrusive than existing devices in authentic everyday use cases, and would like to use the interface on a daily basis if the form factor of the AR headset was more like eyeglasses.", "fno": "255600a768", "keywords": [ "Augmented Reality", "Helmet Mounted Displays", "Ubiquitous Computing", "User Interfaces", "Wearable Computers", "Glanceable AR Interfaces", "Augmented Reality Glasses", "Next Generation Personal Computing Platform", "Pervasive AR Experiences", "AR Head Worn Displays", "Information Management", "Information Acquisition", "Headphones", "Three Dimensional Displays", "Head Mounted Displays", "Prototypes", "Glass", "User Interfaces", "Task Analysis", "Human Centered Computing Mixed Augmented Reality", "Human Centered Computing User Interface Design" ], "authors": [ { "affiliation": "Center for Human-Computer Interaction, Virginia Tech,Department of Computer Science,Blacksburg,VA,USA", "fullName": "Feiyu Lu", "givenName": "Feiyu", "surname": "Lu", "__typename": "ArticleAuthorType" }, { "affiliation": "Center for Human-Computer Interaction, Virginia Tech,Department of Computer Science,Blacksburg,VA,USA", 
"fullName": "Doug A. Bowman", "givenName": "Doug A.", "surname": "Bowman", "__typename": "ArticleAuthorType" } ], "idPrefix": "vr", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "2021-03-01T00:00:00", "pubType": "proceedings", "pages": "768-777", "year": "2021", "issn": null, "isbn": "978-1-6654-1838-6", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [ { "id": "1tuAQvN0Lxm", "name": "pvr202118380-09417649s1-mm_255600a768.zip", "size": "39.9 MB", "location": "https://www.computer.org/csdl/api/v1/extra/pvr202118380-09417649s1-mm_255600a768.zip", "__typename": "WebExtraType" } ], "adjacentArticles": { "previous": { "fno": "255600a759", "articleId": "1tuAI6Ij8is", "__typename": "AdjacentArticleType" }, "next": { "fno": "255600a778", "articleId": "1tuBngWRAC4", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/vrw/2022/8402/0/840200a584", "title": "Investigating Display Position of a Head-Fixed Augmented Reality Notification for Dual-task", "doi": null, "abstractUrl": "/proceedings-article/vrw/2022/840200a584/1CJd297BiDu", "parentPublication": { "id": "proceedings/vrw/2022/8402/0", "title": "2022 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vrw/2022/8402/0/840200a876", "title": "HoloCMDS: Investigating Around Field of View Glanceable Commands Selection in AR-HMDs", "doi": null, "abstractUrl": "/proceedings-article/vrw/2022/840200a876/1CJdZ8RwdnG", "parentPublication": { "id": "proceedings/vrw/2022/8402/0", "title": "2022 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ismar-adjunct/2022/5365/0/536500a175", "title": "Towards a 
Desktop-AR Prototyping Framework: Prototyping Cross-Reality Between Desktops and Augmented Reality", "doi": null, "abstractUrl": "/proceedings-article/ismar-adjunct/2022/536500a175/1J7WgIHKkVy", "parentPublication": { "id": "proceedings/ismar-adjunct/2022/5365/0", "title": "2022 IEEE International Symposium on Mixed and Augmented Reality Adjunct (ISMAR-Adjunct)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ismar-adjunct/2022/5365/0/536500a812", "title": "AR-HMD Multitask Viewing System Concept with a Supporting Handheld Viewport for Multiple Spatially-Anchored Workspaces", "doi": null, "abstractUrl": "/proceedings-article/ismar-adjunct/2022/536500a812/1J7WvwZew9O", "parentPublication": { "id": "proceedings/ismar-adjunct/2022/5365/0", "title": "2022 IEEE International Symposium on Mixed and Augmented Reality Adjunct (ISMAR-Adjunct)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ijcime/2019/5586/0/558600a288", "title": "The Effect of 360-Degree Video Authentic Materials on EFL Learners' Listening Comprehension", "doi": null, "abstractUrl": "/proceedings-article/ijcime/2019/558600a288/1j9wyWbeGsg", "parentPublication": { "id": "proceedings/ijcime/2019/5586/0", "title": "2019 International Joint Conference on Information, Media and Engineering (IJCIME)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vr/2020/5608/0/09089433", "title": "Glanceable AR: Evaluating Information Access Methods for Head-Worn Augmented Reality", "doi": null, "abstractUrl": "/proceedings-article/vr/2020/09089433/1jIxf3ZEs0w", "parentPublication": { "id": "proceedings/vr/2020/5608/0", "title": "2020 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vrw/2020/6532/0/09090678", "title": "Occlusion Management Techniques for 
Everyday Glanceable AR Interfaces", "doi": null, "abstractUrl": "/proceedings-article/vrw/2020/09090678/1jIxt3vMkZq", "parentPublication": { "id": "proceedings/vrw/2020/6532/0", "title": "2020 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ismar/2020/8508/0/850800a665", "title": "Supporting Medical Auxiliary Work: The Central Sterile Services Department as a Challenging Environment for Augmented Reality Applications", "doi": null, "abstractUrl": "/proceedings-article/ismar/2020/850800a665/1pysyCXzE8o", "parentPublication": { "id": "proceedings/ismar/2020/8508/0", "title": "2020 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vrw/2021/4057/0/405700a717", "title": "[DC] Glanceable AR: Towards an Always-on Augmented Reality Future", "doi": null, "abstractUrl": "/proceedings-article/vrw/2021/405700a717/1tnXrUsEHYc", "parentPublication": { "id": "proceedings/vrw/2021/4057/0", "title": "2021 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ismar/2021/0158/0/015800z016", "title": "Keynote Speaker: User Experience Considerations for Everyday Augmented Reality", "doi": null, "abstractUrl": "/proceedings-article/ismar/2021/015800z016/1yeCV2T6UAE", "parentPublication": { "id": "proceedings/ismar/2021/0158/0", "title": "2021 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "1xH9IIVQB20", "title": "2021 IEEE International Conference On Artificial Intelligence Testing (AITest)", "acronym": "aitest", "groupId": "1831724", "volume": "0", "displayVolume": "0", "year": "2021", "__typename": "ProceedingType" }, "article": { "id": "1xH9KgUfKKs", "doi": "10.1109/AITEST52744.2021.00027", "title": "Prediction Surface Uncertainty Quantification in Object Detection Models for Autonomous Driving", "normalizedTitle": "Prediction Surface Uncertainty Quantification in Object Detection Models for Autonomous Driving", "abstract": "Object detection in autonomous cars is commonly based on camera images and Lidar inputs, which are often used to train prediction models such as deep artificial neural networks for decision making for object recognition, adjusting speed, etc. A mistake in such decision making can be damaging; thus, it is vital to measure the reliability of decisions made by such prediction models via uncertainty measurement. Uncertainty, in deep learning models, is often measured for classification problems. However, deep learning models in autonomous driving are often multi-output regression models. Hence, we propose a novel method called PURE (Prediction sURface uncErtainty) for measuring prediction uncertainty of such regression models. We formulate the object recognition problem as a regression model with more than one outputs for finding object locations in a 2-dimensional camera view. For evaluation, we modified three widely-applied object recognition models (i.e., YoLo, SSD300 and SSD512) and used the KITTI, Stanford Cars, Berkeley DeepDrive, and NEXET datasets. 
Results showed the statistically significant negative correlation between prediction surface uncertainty and prediction accuracy suggesting that uncertainty significantly impacts the decisions made by autonomous driving.", "abstracts": [ { "abstractType": "Regular", "content": "Object detection in autonomous cars is commonly based on camera images and Lidar inputs, which are often used to train prediction models such as deep artificial neural networks for decision making for object recognition, adjusting speed, etc. A mistake in such decision making can be damaging; thus, it is vital to measure the reliability of decisions made by such prediction models via uncertainty measurement. Uncertainty, in deep learning models, is often measured for classification problems. However, deep learning models in autonomous driving are often multi-output regression models. Hence, we propose a novel method called PURE (Prediction sURface uncErtainty) for measuring prediction uncertainty of such regression models. We formulate the object recognition problem as a regression model with more than one outputs for finding object locations in a 2-dimensional camera view. For evaluation, we modified three widely-applied object recognition models (i.e., YoLo, SSD300 and SSD512) and used the KITTI, Stanford Cars, Berkeley DeepDrive, and NEXET datasets. Results showed the statistically significant negative correlation between prediction surface uncertainty and prediction accuracy suggesting that uncertainty significantly impacts the decisions made by autonomous driving.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "Object detection in autonomous cars is commonly based on camera images and Lidar inputs, which are often used to train prediction models such as deep artificial neural networks for decision making for object recognition, adjusting speed, etc. 
A mistake in such decision making can be damaging; thus, it is vital to measure the reliability of decisions made by such prediction models via uncertainty measurement. Uncertainty, in deep learning models, is often measured for classification problems. However, deep learning models in autonomous driving are often multi-output regression models. Hence, we propose a novel method called PURE (Prediction sURface uncErtainty) for measuring prediction uncertainty of such regression models. We formulate the object recognition problem as a regression model with more than one outputs for finding object locations in a 2-dimensional camera view. For evaluation, we modified three widely-applied object recognition models (i.e., YoLo, SSD300 and SSD512) and used the KITTI, Stanford Cars, Berkeley DeepDrive, and NEXET datasets. Results showed the statistically significant negative correlation between prediction surface uncertainty and prediction accuracy suggesting that uncertainty significantly impacts the decisions made by autonomous driving.", "fno": "348100a093", "keywords": [ "Automatic Guided Vehicles", "Cameras", "Decision Making", "Deep Learning Artificial Intelligence", "Object Detection", "Regression Analysis", "Reliability", "Uncertain Systems", "Object Detection Models", "Autonomous Driving", "Autonomous Cars", "Camera Images", "Lidar Inputs", "Train Prediction Models", "Deep Artificial Neural Networks", "Decision Making", "Uncertainty Measurement", "Deep Learning Models", "Multioutput Regression Models", "Measuring Prediction Uncertainty", "Regression Model", "Object Recognition Problem", "Object Locations", "2 Dimensional Camera View", "Object Recognition Models", "Prediction Accuracy", "Prediction Surface Uncertainty Quantification", "PURE", "Deep Learning", "Uncertainty", "Measurement Uncertainty", "Decision Making", "Object Detection", "Predictive Models", "Cameras", "Uncertainty", "Deep Learning", "Object Detection", "Autonomous Driving" ], "authors": [ { 
"affiliation": "Simula Research Laboratory,Fornebu,Norway", "fullName": "Ferhat Ozgur Catak", "givenName": "Ferhat Ozgur", "surname": "Catak", "__typename": "ArticleAuthorType" }, { "affiliation": "Nanjing University of Aeronautics and Astronautics,Simula Research Laboratory", "fullName": "Tao Yue", "givenName": "Tao", "surname": "Yue", "__typename": "ArticleAuthorType" }, { "affiliation": "Simula Research Laboratory,Fornebu,Norway", "fullName": "Shaukat Ali", "givenName": "Shaukat", "surname": "Ali", "__typename": "ArticleAuthorType" } ], "idPrefix": "aitest", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "2021-08-01T00:00:00", "pubType": "proceedings", "pages": "93-100", "year": "2021", "issn": null, "isbn": "978-1-6654-3481-2", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "348100a085", "articleId": "1xH9KHAxYOs", "__typename": "AdjacentArticleType" }, "next": { "fno": "348100a101", "articleId": "1xH9L8y9JOo", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/big-data/2017/2715/0/08257906", "title": "Collective subjective logic: Scalable uncertainty-based opinion inference", "doi": null, "abstractUrl": "/proceedings-article/big-data/2017/08257906/17D45Vu1Tya", "parentPublication": { "id": "proceedings/big-data/2017/2715/0", "title": "2017 IEEE International Conference on Big Data (Big Data)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/seams/2018/5715/0/571501a051", "title": "Uncertainty Reduction in Self-Adaptive Systems", "doi": null, "abstractUrl": "/proceedings-article/seams/2018/571501a051/17D45WZZ7BR", "parentPublication": { "id": "proceedings/seams/2018/5715/0", "title": "2018 IEEE/ACM 13th International Symposium on Software Engineering for Adaptive and Self-Managing Systems (SEAMS)", 
"__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icdm/2021/2398/0/239800a886", "title": "Fair Decision-making Under Uncertainty", "doi": null, "abstractUrl": "/proceedings-article/icdm/2021/239800a886/1AqxlJEDx4s", "parentPublication": { "id": "proceedings/icdm/2021/2398/0", "title": "2021 IEEE International Conference on Data Mining (ICDM)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/wacvw/2022/5824/0/582400a241", "title": "Uncertainty Aware Proposal Segmentation for Unknown Object Detection", "doi": null, "abstractUrl": "/proceedings-article/wacvw/2022/582400a241/1B12wMYtiN2", "parentPublication": { "id": "proceedings/wacvw/2022/5824/0", "title": "2022 IEEE/CVF Winter Conference on Applications of Computer Vision Workshops (WACVW)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iccv/2021/2812/0/281200n3117", "title": "Estimating and Exploiting the Aleatoric Uncertainty in Surface Normal Estimation", "doi": null, "abstractUrl": "/proceedings-article/iccv/2021/281200n3117/1BmFBTOLb9u", "parentPublication": { "id": "proceedings/iccv/2021/2812/0", "title": "2021 IEEE/CVF International Conference on Computer Vision (ICCV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/irc/2022/7260/0/726000a181", "title": "Pedestrian Intention Anticipation with Uncertainty Based Decision for Autonomous Driving", "doi": null, "abstractUrl": "/proceedings-article/irc/2022/726000a181/1KckiSEK8RW", "parentPublication": { "id": "proceedings/irc/2022/7260/0", "title": "2022 Sixth IEEE International Conference on Robotic Computing (IRC)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/wacv/2023/9346/0/934600d910", "title": "Gradient-Based Quantification of Epistemic Uncertainty for Deep Object Detectors", "doi": null, 
"abstractUrl": "/proceedings-article/wacv/2023/934600d910/1KxVxFagDaU", "parentPublication": { "id": "proceedings/wacv/2023/9346/0", "title": "2023 IEEE/CVF Winter Conference on Applications of Computer Vision (WACV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iccv/2019/4803/0/480300a502", "title": "Gaussian YOLOv3: An Accurate and Fast Object Detector Using Localization Uncertainty for Autonomous Driving", "doi": null, "abstractUrl": "/proceedings-article/iccv/2019/480300a502/1hVlKGOjr1e", "parentPublication": { "id": "proceedings/iccv/2019/4803/0", "title": "2019 IEEE/CVF International Conference on Computer Vision (ICCV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/wacv/2020/6553/0/09093332", "title": "Uncertainty-aware Short-term Motion Prediction of Traffic Actors for Autonomous Driving", "doi": null, "abstractUrl": "/proceedings-article/wacv/2020/09093332/1jPbqTotqIU", "parentPublication": { "id": "proceedings/wacv/2020/6553/0", "title": "2020 IEEE Winter Conference on Applications of Computer Vision (WACV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/wacv/2021/0477/0/047700c483", "title": "Misclassification Risk and Uncertainty Quantification in Deep Classifiers", "doi": null, "abstractUrl": "/proceedings-article/wacv/2021/047700c483/1uqGlA3HgFW", "parentPublication": { "id": "proceedings/wacv/2021/0477/0", "title": "2021 IEEE Winter Conference on Applications of Computer Vision (WACV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "1yfxDjRGMmc", "title": "2021 IEEE International Symposium on Mixed and Augmented Reality Adjunct (ISMAR-Adjunct)", "acronym": "ismar-adjunct", "groupId": "1810084", "volume": "0", "displayVolume": "0", "year": "2021", "__typename": "ProceedingType" }, "article": { "id": "1yeQMONGc9y", "doi": "10.1109/ISMAR-Adjunct54149.2021.00032", "title": "Comparing Head and AR Glasses Pose Estimation", "normalizedTitle": "Comparing Head and AR Glasses Pose Estimation", "abstract": "In this paper, we compare AR glasses and head pose estimation performance. We train different pose estimation approaches for head pose estimation with the generated head pose labels to compare them to their AR glasses estimation accuracy. These include the state-of-art GlassPoseRN and P2P networks, as well as our novel CapsPose algorithm. We show that estimating the AR glasses pose is more accurate than the head pose in general. In a first analysis, we show the general regression performance of the models when the AR glasses and faces are both known to the network during training. We then analyze the driver generalization performance, where all glasses are known, but part of the drivers are unknown to the Neural Networks. There, the estimation of AR glasses pose again exceeds the head pose. Only in our third analysis, head pose estimation performs better than AR glasses pose estimation. In this case, a new glasses model is added, which was unknown to the Neural Network yet. In addition, we introduce a novel pose estimation network called CapsPose, which is the first network deploying Capsule Networks for 6-DoF pose estimation. We outperform the current state-of-the- art method GlassPoseRN on the HMDPose dataset by reducing the error by 46% for orientation and 51% for translation.", "abstracts": [ { "abstractType": "Regular", "content": "In this paper, we compare AR glasses and head pose estimation performance. 
We train different pose estimation approaches for head pose estimation with the generated head pose labels to compare them to their AR glasses estimation accuracy. These include the state-of-art GlassPoseRN and P2P networks, as well as our novel CapsPose algorithm. We show that estimating the AR glasses pose is more accurate than the head pose in general. In a first analysis, we show the general regression performance of the models when the AR glasses and faces are both known to the network during training. We then analyze the driver generalization performance, where all glasses are known, but part of the drivers are unknown to the Neural Networks. There, the estimation of AR glasses pose again exceeds the head pose. Only in our third analysis, head pose estimation performs better than AR glasses pose estimation. In this case, a new glasses model is added, which was unknown to the Neural Network yet. In addition, we introduce a novel pose estimation network called CapsPose, which is the first network deploying Capsule Networks for 6-DoF pose estimation. We outperform the current state-of-the- art method GlassPoseRN on the HMDPose dataset by reducing the error by 46% for orientation and 51% for translation.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "In this paper, we compare AR glasses and head pose estimation performance. We train different pose estimation approaches for head pose estimation with the generated head pose labels to compare them to their AR glasses estimation accuracy. These include the state-of-art GlassPoseRN and P2P networks, as well as our novel CapsPose algorithm. We show that estimating the AR glasses pose is more accurate than the head pose in general. In a first analysis, we show the general regression performance of the models when the AR glasses and faces are both known to the network during training. 
We then analyze the driver generalization performance, where all glasses are known, but part of the drivers are unknown to the Neural Networks. There, the estimation of AR glasses pose again exceeds the head pose. Only in our third analysis, head pose estimation performs better than AR glasses pose estimation. In this case, a new glasses model is added, which was unknown to the Neural Network yet. In addition, we introduce a novel pose estimation network called CapsPose, which is the first network deploying Capsule Networks for 6-DoF pose estimation. We outperform the current state-of-the- art method GlassPoseRN on the HMDPose dataset by reducing the error by 46% for orientation and 51% for translation.", "fno": "129800a109", "keywords": [ "Augmented Reality", "Neural Nets", "Peer To Peer Computing", "Pose Estimation", "Regression Analysis", "General Regression Performance", "Driver Generalization Performance", "Neural Network", "Glass Pose RN", "AR Glasses Pose Estimation", "Caps Pose Algorithm", "P 2 P Networks", "Head Pose Estimation", "Training", "Analytical Models", "Head", "Pose Estimation", "Neural Networks", "Pipelines", "Glass", "Computing Methodologies", "Artificial Intelligence", "Computer Vision", "Tracking", "Machine Learning", "Machine Learning Approaches", "Neural Networks" ], "authors": [ { "affiliation": "TU Kaiserslautern,BMW Group Research, New Technologies, Innovations, Garching (Munich),Germany", "fullName": "Ahmet Firintepe", "givenName": "Ahmet", "surname": "Firintepe", "__typename": "ArticleAuthorType" }, { "affiliation": "BMW Group Research, New Technologies, Innovations, Garching (Munich),Germany", "fullName": "Oussema Dhaouadi", "givenName": "Oussema", "surname": "Dhaouadi", "__typename": "ArticleAuthorType" }, { "affiliation": "German Research Center for Artificial Intelligence (DFKI),Kaiserslautern,Germany", "fullName": "Alain Pagani", "givenName": "Alain", "surname": "Pagani", "__typename": "ArticleAuthorType" }, { "affiliation": "TU 
Kaiserslautern,German Research Center for Artificial Intelligence (DFKI),Germany", "fullName": "Didier Stricker", "givenName": "Didier", "surname": "Stricker", "__typename": "ArticleAuthorType" } ], "idPrefix": "ismar-adjunct", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "2021-10-01T00:00:00", "pubType": "proceedings", "pages": "109-114", "year": "2021", "issn": null, "isbn": "978-1-6654-1298-8", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "129800a106", "articleId": "1yeQDjpkbXW", "__typename": "AdjacentArticleType" }, "next": { "fno": "129800a115", "articleId": "1yeQLPBHFBe", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/motion/2002/1860/0/18600125", "title": "Comparative Study of Coarse Head Pose Estimation", "doi": null, "abstractUrl": "/proceedings-article/motion/2002/18600125/12OmNAGw13Q", "parentPublication": { "id": "proceedings/motion/2002/1860/0", "title": "Proceedings Workshop on Motion and Video Computing (MOTION 2002)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ismar/2014/6184/0/06948520", "title": "Hands free — Exploring AR glasses and their peculiarities", "doi": null, "abstractUrl": "/proceedings-article/ismar/2014/06948520/12OmNC17hVV", "parentPublication": { "id": "proceedings/ismar/2014/6184/0", "title": "2014 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/crv/2012/4683/0/4683a125", "title": "Coarse Head Pose Estimation using Image Abstraction", "doi": null, "abstractUrl": "/proceedings-article/crv/2012/4683a125/12OmNwE9ORM", "parentPublication": { "id": "proceedings/crv/2012/4683/0", "title": "2012 Ninth Conference on Computer and Robot Vision", 
"__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/amfg/2003/2010/0/20100092", "title": "Absolute Head Pose Estimation From Overhead Wide-Angle Cameras", "doi": null, "abstractUrl": "/proceedings-article/amfg/2003/20100092/12OmNyen1y9", "parentPublication": { "id": "proceedings/amfg/2003/2010/0", "title": "2003 IEEE International Workshop on Analysis and Modeling of Faces and Gestures", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvprw/2017/0733/0/0733b165", "title": "DriveAHead — A Large-Scale Driver Head Pose Dataset", "doi": null, "abstractUrl": "/proceedings-article/cvprw/2017/0733b165/12OmNzsJ7ue", "parentPublication": { "id": "proceedings/cvprw/2017/0733/0", "title": "2017 IEEE Conference on Computer Vision and Pattern Recognition Workshops (CVPRW)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icme/2018/1737/0/08486490", "title": "FI-CAP: Robust Framework to Benchmark Head Pose Estimation in Challenging Environments", "doi": null, "abstractUrl": "/proceedings-article/icme/2018/08486490/14jQfOLF2bC", "parentPublication": { "id": "proceedings/icme/2018/1737/0", "title": "2018 IEEE International Conference on Multimedia and Expo (ICME)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/fg/2021/3176/0/09666992", "title": "Relative Pose Consistency for Semi-Supervised Head Pose Estimation", "doi": null, "abstractUrl": "/proceedings-article/fg/2021/09666992/1A6BGyUQ4yk", "parentPublication": { "id": "proceedings/fg/2021/3176/0", "title": "2021 16th IEEE International Conference on Automatic Face and Gesture Recognition (FG 2021)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ismar-adjunct/2022/5365/0/536500a690", "title": "Lightweight Wearable AR System using Head-mounted Projector for Work Support", 
"doi": null, "abstractUrl": "/proceedings-article/ismar-adjunct/2022/536500a690/1J7Wqal3Fkc", "parentPublication": { "id": "proceedings/ismar-adjunct/2022/5365/0", "title": "2022 IEEE International Symposium on Mixed and Augmented Reality Adjunct (ISMAR-Adjunct)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ithings-greencom-cpscom-smartdata-cybermatics/2020/7647/0/764700a787", "title": "A survey of head pose estimation methods", "doi": null, "abstractUrl": "/proceedings-article/ithings-greencom-cpscom-smartdata-cybermatics/2020/764700a787/1pVHmSe7k7m", "parentPublication": { "id": "proceedings/ithings-greencom-cpscom-smartdata-cybermatics/2020/7647/0", "title": "2020 International Conferences on Internet of Things (iThings) and IEEE Green Computing and Communications (GreenCom) and IEEE Cyber, Physical and Social Computing (CPSCom) and IEEE Smart Data (SmartData) and IEEE Congress on Cybermatics (Cybermatics)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vrw/2021/4057/0/405700a571", "title": "A Comparison of Single and Multi-View IR image-based AR Glasses Pose Estimation Approaches", "doi": null, "abstractUrl": "/proceedings-article/vrw/2021/405700a571/1tnXdVxqFTG", "parentPublication": { "id": "proceedings/vrw/2021/4057/0", "title": "2021 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "12OmNyoiYVr", "title": "2017 IEEE Conference on Computer Vision and Pattern Recognition (CVPR)", "acronym": "cvpr", "groupId": "1000147", "volume": "0", "displayVolume": "0", "year": "2017", "__typename": "ProceedingType" }, "article": { "id": "12OmNrH1PER", "doi": "10.1109/CVPR.2017.785", "title": "ROAM: A Rich Object Appearance Model with Application to Rotoscoping", "normalizedTitle": "ROAM: A Rich Object Appearance Model with Application to Rotoscoping", "abstract": "Rotoscoping, the detailed delineation of scene elements through a video shot, is a painstaking task of tremendous importance in professional post-production pipelines. While pixel-wise segmentation techniques can help for this task, professional rotoscoping tools rely on parametric curves that offer the artists a much better interactive control on the definition, editing and manipulation of the segments of interest. Sticking to this prevalent rotoscoping paradigm, we propose a novel framework to capture and track the visual aspect of an arbitrary object in a scene, given a first closed outline of this object. This model combines a collection of local foreground/background appearance models spread along the outline, a global appearance model of the enclosed object and a set of distinctive foreground landmarks. The structure of this rich appearance model allows simple initialization, efficient iterative optimization with exact minimization at each step, and on-line adaptation in videos. We demonstrate qualitatively and quantitatively the merit of this framework through comparisons with tools based on either dynamic segmentation with a closed curve or pixel-wise binary labelling.", "abstracts": [ { "abstractType": "Regular", "content": "Rotoscoping, the detailed delineation of scene elements through a video shot, is a painstaking task of tremendous importance in professional post-production pipelines. 
While pixel-wise segmentation techniques can help for this task, professional rotoscoping tools rely on parametric curves that offer the artists a much better interactive control on the definition, editing and manipulation of the segments of interest. Sticking to this prevalent rotoscoping paradigm, we propose a novel framework to capture and track the visual aspect of an arbitrary object in a scene, given a first closed outline of this object. This model combines a collection of local foreground/background appearance models spread along the outline, a global appearance model of the enclosed object and a set of distinctive foreground landmarks. The structure of this rich appearance model allows simple initialization, efficient iterative optimization with exact minimization at each step, and on-line adaptation in videos. We demonstrate qualitatively and quantitatively the merit of this framework through comparisons with tools based on either dynamic segmentation with a closed curve or pixel-wise binary labelling.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "Rotoscoping, the detailed delineation of scene elements through a video shot, is a painstaking task of tremendous importance in professional post-production pipelines. While pixel-wise segmentation techniques can help for this task, professional rotoscoping tools rely on parametric curves that offer the artists a much better interactive control on the definition, editing and manipulation of the segments of interest. Sticking to this prevalent rotoscoping paradigm, we propose a novel framework to capture and track the visual aspect of an arbitrary object in a scene, given a first closed outline of this object. This model combines a collection of local foreground/background appearance models spread along the outline, a global appearance model of the enclosed object and a set of distinctive foreground landmarks. 
The structure of this rich appearance model allows simple initialization, efficient iterative optimization with exact minimization at each step, and on-line adaptation in videos. We demonstrate qualitatively and quantitatively the merit of this framework through comparisons with tools based on either dynamic segmentation with a closed curve or pixel-wise binary labelling.", "fno": "0457h426", "keywords": [ "Image Motion Analysis", "Image Segmentation", "Iterative Methods", "Minimisation", "Video Signal Processing", "Local Foreground Background Appearance Models", "Interactive Control", "Rich Object Appearance Model", "ROAM", "Pixel Wise Binary Labelling", "Dynamic Segmentation", "Distinctive Foreground Landmarks", "Enclosed Object", "Global Appearance Model", "Parametric Curves", "Professional Rotoscoping Tools", "Pixel Wise Segmentation Techniques", "Adaptation Models", "Tools", "Shape", "Deformable Models", "Image Color Analysis", "Strain" ], "authors": [ { "affiliation": null, "fullName": "Ondrej Miksik", "givenName": "Ondrej", "surname": "Miksik", "__typename": "ArticleAuthorType" }, { "affiliation": null, "fullName": "Juan-Manuel Pérez-Rúa", "givenName": "Juan-Manuel", "surname": "Pérez-Rúa", "__typename": "ArticleAuthorType" }, { "affiliation": null, "fullName": "Philip H. S. Torr", "givenName": "Philip H. 
S.", "surname": "Torr", "__typename": "ArticleAuthorType" }, { "affiliation": null, "fullName": "Patrick Pérez", "givenName": "Patrick", "surname": "Pérez", "__typename": "ArticleAuthorType" } ], "idPrefix": "cvpr", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "2017-07-01T00:00:00", "pubType": "proceedings", "pages": "7426-7434", "year": "2017", "issn": "1063-6919", "isbn": "978-1-5386-0457-1", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "0457h417", "articleId": "12OmNCctfko", "__typename": "AdjacentArticleType" }, "next": { "fno": "0457h435", "articleId": "12OmNzICEOm", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/iccvw/2011/0063/0/06130475", "title": "Robust object tracking via online learning of adaptive appearance manifold", "doi": null, "abstractUrl": "/proceedings-article/iccvw/2011/06130475/12OmNBPtJzB", "parentPublication": { "id": "proceedings/iccvw/2011/0063/0", "title": "2011 IEEE International Conference on Computer Vision Workshops (ICCV Workshops)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icmew/2014/4717/0/06890708", "title": "Online human tracking via superpixel-based collaborative appearance model", "doi": null, "abstractUrl": "/proceedings-article/icmew/2014/06890708/12OmNCcbE8E", "parentPublication": { "id": "proceedings/icmew/2014/4717/0", "title": "2014 IEEE International Conference on Multimedia and Expo Workshops (ICMEW)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ssiai/2014/4053/0/06806034", "title": "Spatio-temporal multimodal mean", "doi": null, "abstractUrl": "/proceedings-article/ssiai/2014/06806034/12OmNwDSdAq", "parentPublication": { "id": "proceedings/ssiai/2014/4053/0", "title": "2014 IEEE Southwest 
Symposium on Image Analysis and Interpretation (SSIAI)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/avss/2015/7632/0/07301770", "title": "Modified particle filtering using foreground separation and confidence for object tracking", "doi": null, "abstractUrl": "/proceedings-article/avss/2015/07301770/12OmNxXl5xj", "parentPublication": { "id": "proceedings/avss/2015/7632/0", "title": "2015 12th IEEE International Conference on Advanced Video and Signal Based Surveillance (AVSS)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ccbd/2016/3555/0/3555a248", "title": "A Robust Appearance Model for Object Tracking", "doi": null, "abstractUrl": "/proceedings-article/ccbd/2016/3555a248/12OmNxuXcvS", "parentPublication": { "id": "proceedings/ccbd/2016/3555/0", "title": "2016 7th International Conference on Cloud Computing and Big Data (CCBD)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icpr/2014/5209/0/5209b224", "title": "Real-Time Object Tracking with Generalized Part-Based Appearance Model and Structure-Constrained Motion Model", "doi": null, "abstractUrl": "/proceedings-article/icpr/2014/5209b224/12OmNyLA5yI", "parentPublication": { "id": "proceedings/icpr/2014/5209/0", "title": "2014 22nd International Conference on Pattern Recognition (ICPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icdh/2018/9497/0/949700a072", "title": "Adaptive Weighted Deformable Part Model for Object Detection", "doi": null, "abstractUrl": "/proceedings-article/icdh/2018/949700a072/17D45WZZ7Cl", "parentPublication": { "id": "proceedings/icdh/2018/9497/0", "title": "2018 7th International Conference on Digital Home (ICDH)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tp/2020/08/08666808", "title": "ROAM: A Rich Object Appearance 
Model with Application to Rotoscoping", "doi": null, "abstractUrl": "/journal/tp/2020/08/08666808/18mLzzeeRsk", "parentPublication": { "id": "trans/tp", "title": "IEEE Transactions on Pattern Analysis & Machine Intelligence", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/5555/01/09982378", "title": "Appearance-preserved Portrait-to-anime Translation via Proxy-guided Domain Adaptation", "doi": null, "abstractUrl": "/journal/tg/5555/01/09982378/1J2T8H9Y2Ws", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tp/2022/03/09158550", "title": "Deformable Generator Networks: Unsupervised Disentanglement of Appearance and Geometry", "doi": null, "abstractUrl": "/journal/tp/2022/03/09158550/1m1eGhbPcT6", "parentPublication": { "id": "trans/tp", "title": "IEEE Transactions on Pattern Analysis & Machine Intelligence", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "1LHcsjgTKTe", "title": "2022 2nd International Conference on Electronic Information Engineering and Computer Technology (EIECT)", "acronym": "eiect", "groupId": "10066533", "volume": "0", "displayVolume": "0", "year": "2022", "__typename": "ProceedingType" }, "article": { "id": "1LHcAvYlDxK", "doi": "10.1109/EIECT58010.2022.00017", "title": "Digital portrait model construction for telemedicine doctor authentication", "normalizedTitle": "Digital portrait model construction for telemedicine doctor authentication", "abstract": "In the remote diagnosis and treatment scenarios of telemedicine, there are various authentication elements that are not credible, and the traditional doctor authentication methods cannot realize the problem of credible authentication. The article combines conventional authentication technology and user portrait technology, and proposes a continuous authentication and abnormal behavior monitoring technology based on doctor user portraits. The article obtains the property labels by analyzing the basic attributes of doctors and the behavior labels by mining the frequent access sequences of doctors using the Prefixspan algorithm, and constructs a digital portrait of doctor users by combining the property labels and behavior labels. The experimental results show that the implementation of continuous identity authentication and abnormal behavior monitoring technology based on the digital portrait of user authentication effectively improves the authentication problem under the condition of untrustworthy authentication elements and enhances the security of the telemedicine service platform.", "abstracts": [ { "abstractType": "Regular", "content": "In the remote diagnosis and treatment scenarios of telemedicine, there are various authentication elements that are not credible, and the traditional doctor authentication methods cannot realize the problem of credible authentication. 
The article combines conventional authentication technology and user portrait technology, and proposes a continuous authentication and abnormal behavior monitoring technology based on doctor user portraits. The article obtains the property labels by analyzing the basic attributes of doctors and the behavior labels by mining the frequent access sequences of doctors using the Prefixspan algorithm, and constructs a digital portrait of doctor users by combining the property labels and behavior labels. The experimental results show that the implementation of continuous identity authentication and abnormal behavior monitoring technology based on the digital portrait of user authentication effectively improves the authentication problem under the condition of untrustworthy authentication elements and enhances the security of the telemedicine service platform.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "In the remote diagnosis and treatment scenarios of telemedicine, there are various authentication elements that are not credible, and the traditional doctor authentication methods cannot realize the problem of credible authentication. The article combines conventional authentication technology and user portrait technology, and proposes a continuous authentication and abnormal behavior monitoring technology based on doctor user portraits. The article obtains the property labels by analyzing the basic attributes of doctors and the behavior labels by mining the frequent access sequences of doctors using the Prefixspan algorithm, and constructs a digital portrait of doctor users by combining the property labels and behavior labels. 
The experimental results show that the implementation of continuous identity authentication and abnormal behavior monitoring technology based on the digital portrait of user authentication effectively improves the authentication problem under the condition of untrustworthy authentication elements and enhances the security of the telemedicine service platform.", "fno": "995600a058", "keywords": [ "Authorisation", "Computer Network Security", "Data Mining", "Message Authentication", "Telemedicine", "Authentication Problem", "Behavior Labels", "Continuous Authentication", "Continuous Identity Authentication", "Conventional Authentication Technology", "Credible Authentication", "Digital Portrait Model Construction", "Doctor User Portraits", "Doctor Users", "Property Labels", "Remote Diagnosis", "Telemedicine Doctor Authentication", "Telemedicine Service Platform", "Traditional Doctor Authentication Methods", "Untrustworthy Authentication Elements", "User Authentication", "User Portrait Technology", "Telemedicine", "Authentication", "Medical Services", "Network Security", "User Experience", "Data Models", "Behavioral Sciences", "Component", "Telemedicine", "Identity Authentication", "User Portrait", "Abnormal Behavior Monitoring" ], "authors": [ { "affiliation": "Zhengzhou University,Zhongyuan Network Security Research Institute,Zhengzhou,China", "fullName": "Junli Guo", "givenName": "Junli", "surname": "Guo", "__typename": "ArticleAuthorType" }, { "affiliation": "Zhengzhou University,School of Cyber Science and Engineering,Zhengzhou,China", "fullName": "Haohua Li", "givenName": "Haohua", "surname": "Li", "__typename": "ArticleAuthorType" } ], "idPrefix": "eiect", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "2022-10-01T00:00:00", "pubType": "proceedings", "pages": "58-64", "year": "2022", "issn": null, "isbn": "979-8-3503-9956-1", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], 
"adjacentArticles": { "previous": { "fno": "995600a054", "articleId": "1LHctaKAbgQ", "__typename": "AdjacentArticleType" }, "next": { "fno": "995600a065", "articleId": "1LHculTHmrC", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/eurmic/1997/8129/0/00617319", "title": "An inter/intranet multimedia service for telemedicine", "doi": null, "abstractUrl": "/proceedings-article/eurmic/1997/00617319/12OmNASILUH", "parentPublication": { "id": "proceedings/eurmic/1997/8129/0", "title": "EUROMICRO 97. Proceedings of the 23rd EUROMICRO Conference: New Frontiers of Information Technology (Cat. No.97TB100167)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cw/2005/2378/0/23780363", "title": "A Proposal of COMPASS (COMmunity Portrait Authentication SyStem)", "doi": null, "abstractUrl": "/proceedings-article/cw/2005/23780363/12OmNx8wTnJ", "parentPublication": { "id": "proceedings/cw/2005/2378/0", "title": "2005 International Conference on Cyberworlds (CW'05)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ncm/2009/3769/0/3769b546", "title": "The Analysis for the Auxiliary Telemedicine System QoS", "doi": null, "abstractUrl": "/proceedings-article/ncm/2009/3769b546/12OmNz61d5k", "parentPublication": { "id": "proceedings/ncm/2009/3769/0", "title": "Networked Computing and Advanced Information Management, International Conference on", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/acsat/2015/0423/0/07478751", "title": "Watermarking Algorithm for Medical Images Authentication", "doi": null, "abstractUrl": "/proceedings-article/acsat/2015/07478751/12OmNznCl1e", "parentPublication": { "id": "proceedings/acsat/2015/0423/0", "title": "2015 4th International Conference on Advanced Computer Science Applications and Technologies (ACSAT)", "__typename": 
"ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tq/5555/01/09786740", "title": "CrossBehaAuth: Cross-Scenario Behavioral Biometrics Authentication Using Keystroke Dynamics", "doi": null, "abstractUrl": "/journal/tq/5555/01/09786740/1DSuoGwfC4E", "parentPublication": { "id": "trans/tq", "title": "IEEE Transactions on Dependable and Secure Computing", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/percom-workshops/2022/1647/0/09767507", "title": "Enabling the E@syCare Telemedicine Platform with Push Notification with End-to-end Acknowledgment", "doi": null, "abstractUrl": "/proceedings-article/percom-workshops/2022/09767507/1Df8fFqxciY", "parentPublication": { "id": "proceedings/percom-workshops/2022/1647/0", "title": "2022 IEEE International Conference on Pervasive Computing and Communications Workshops and other Affiliated Events (PerCom Workshops)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/pst/2022/7398/0/09851969", "title": "LOG-OFF: A Novel Behavior Based Authentication Compromise Detection Approach", "doi": null, "abstractUrl": "/proceedings-article/pst/2022/09851969/1FWmkOYJrDa", "parentPublication": { "id": "proceedings/pst/2022/7398/0", "title": "2022 19th Annual International Conference on Privacy, Security & Trust (PST)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icisce/2019/5712/0/09107826", "title": "Construction of portrait system of listed companies based on big data", "doi": null, "abstractUrl": "/proceedings-article/icisce/2019/09107826/1koLFP1s4P6", "parentPublication": { "id": "proceedings/icisce/2019/5712/0", "title": "2019 6th International Conference on Information Science and Control Engineering (ICISCE)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cw/2021/4065/0/406500a262", "title": 
"Secure Transmission of Medical Images using Improved Hybrid Cryptosystem: Authentication, Confidentiality and Integrity", "doi": null, "abstractUrl": "/proceedings-article/cw/2021/406500a262/1yBF1St07JK", "parentPublication": { "id": "proceedings/cw/2021/4065/0", "title": "2021 International Conference on Cyberworlds (CW)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icphds/2021/2594/0/259400a096", "title": "Ideas on the construction of the telemedicine system for the gestational diabetes mellitus based on the clinical decision support system", "doi": null, "abstractUrl": "/proceedings-article/icphds/2021/259400a096/1ymIRozqeOc", "parentPublication": { "id": "proceedings/icphds/2021/2594/0", "title": "2021 International Conference on Public Health and Data Science (ICPHDS)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "1bwKL9IvjX2", "title": "2019 IEEE 43rd Annual Computer Software and Applications Conference (COMPSAC)", "acronym": "compsac", "groupId": "1000143", "volume": "1", "displayVolume": "1", "year": "2019", "__typename": "ProceedingType" }, "article": { "id": "1cYiCnOZUSQ", "doi": "10.1109/COMPSAC.2019.00058", "title": "Purchasing Behavior Analysis Based on Customer's Data Portrait Model", "normalizedTitle": "Purchasing Behavior Analysis Based on Customer's Data Portrait Model", "abstract": "This paper defines the data label, data portrait, and data portrait causal model, which proves the equivalence between the data portrait causal model and the Bayesian network model. The causal model of constructing data portraits and user behavior prediction algorithm are proposed to verify the effectiveness of the proposed model and algorithm.", "abstracts": [ { "abstractType": "Regular", "content": "This paper defines the data label, data portrait, and data portrait causal model, which proves the equivalence between the data portrait causal model and the Bayesian network model. The causal model of constructing data portraits and user behavior prediction algorithm are proposed to verify the effectiveness of the proposed model and algorithm.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "This paper defines the data label, data portrait, and data portrait causal model, which proves the equivalence between the data portrait causal model and the Bayesian network model. 
The causal model of constructing data portraits and user behavior prediction algorithm are proposed to verify the effectiveness of the proposed model and algorithm.", "fno": "260701a352", "keywords": [ "Behavioural Sciences Computing", "Belief Networks", "Consumer Behaviour", "Data Handling", "Data Models", "Purchasing", "Data Portrait Causal Model", "Bayesian Network Model", "Data Label", "Purchasing", "Customer Data Portrait Model", "Behavior Analysis", "Data Models", "Automobiles", "Adaptation Models", "Predictive Models", "Bayes Methods", "Prediction Algorithms", "Probability Distribution", "Label Structure", "Data Portrait Model", "Customers Behavior Analysis", "Bayesian Network" ], "authors": [ { "affiliation": "North China University of Technology", "fullName": "Jing Sun", "givenName": "Jing", "surname": "Sun", "__typename": "ArticleAuthorType" }, { "affiliation": "North China University of Technology", "fullName": "Huiqun Zhao", "givenName": "Huiqun", "surname": "Zhao", "__typename": "ArticleAuthorType" }, { "affiliation": "North China University of Technology", "fullName": "Sanwen Mu", "givenName": "Sanwen", "surname": "Mu", "__typename": "ArticleAuthorType" }, { "affiliation": "North China University of Technology", "fullName": "Zimu Li", "givenName": "Zimu", "surname": "Li", "__typename": "ArticleAuthorType" } ], "idPrefix": "compsac", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "2019-07-01T00:00:00", "pubType": "proceedings", "pages": "352-357", "year": "2019", "issn": "0730-3157", "isbn": "978-1-7281-2607-4", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "260701a346", "articleId": "1cYiAeo6MUw", "__typename": "AdjacentArticleType" }, "next": { "fno": "260701a358", "articleId": "1cYiBvGIQU0", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": 
"proceedings/csie/2009/3507/3/3507c006", "title": "Feature-Based Automatic Portrait Generation System", "doi": null, "abstractUrl": "/proceedings-article/csie/2009/3507c006/12OmNxEBzmz", "parentPublication": { "id": "proceedings/csie/2009/3507/3", "title": "Computer Science and Information Engineering, World Congress on", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iccv/2021/2812/0/281200a782", "title": "Neural Video Portrait Relighting in Real-time via Consistency Modeling", "doi": null, "abstractUrl": "/proceedings-article/iccv/2021/281200a782/1BmEMHlM0Du", "parentPublication": { "id": "proceedings/iccv/2021/2812/0", "title": "2021 IEEE/CVF International Conference on Computer Vision (ICCV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/bdicn/2022/8476/0/847600a156", "title": "Student Ability Portrait Construction Research Based on Big Data", "doi": null, "abstractUrl": "/proceedings-article/bdicn/2022/847600a156/1CJgd24RInC", "parentPublication": { "id": "proceedings/bdicn/2022/8476/0", "title": "2022 International Conference on Big Data, Information and Computer Network (BDICN)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/5555/01/09982378", "title": "Appearance-preserved Portrait-to-anime Translation via Proxy-guided Domain Adaptation", "doi": null, "abstractUrl": "/journal/tg/5555/01/09982378/1J2T8H9Y2Ws", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/eiect/2022/9956/0/995600a058", "title": "Digital portrait model construction for telemedicine doctor authentication", "doi": null, "abstractUrl": "/proceedings-article/eiect/2022/995600a058/1LHcAvYlDxK", "parentPublication": { "id": "proceedings/eiect/2022/9956/0", "title": "2022 2nd 
International Conference on Electronic Information Engineering and Computer Technology (EIECT)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icbar/2022/3426/0/342600a070", "title": "Construction of Resource Element Portrait Analysis Model Based on Data Center", "doi": null, "abstractUrl": "/proceedings-article/icbar/2022/342600a070/1MIhDuZY424", "parentPublication": { "id": "proceedings/icbar/2022/3426/0", "title": "2022 2nd International Conference on Big Data, Artificial Intelligence and Risk Management (ICBAR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvpr/2020/7168/0/716800n3515", "title": "PuppeteerGAN: Arbitrary Portrait Animation With Semantic-Aware Appearance Transformation", "doi": null, "abstractUrl": "/proceedings-article/cvpr/2020/716800n3515/1m3opHwOOZy", "parentPublication": { "id": "proceedings/cvpr/2020/7168/0", "title": "2020 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ickg/2020/8156/0/09194534", "title": "The Short-Term Power Consumption Forecasting Based on the Portrait of Substation Areas", "doi": null, "abstractUrl": "/proceedings-article/ickg/2020/09194534/1n2nlP1U9d6", "parentPublication": { "id": "proceedings/ickg/2020/8156/0", "title": "2020 IEEE International Conference on Knowledge Graph (ICKG)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/hpcc-dss-smartcity/2020/7649/0/764900b225", "title": "A Novel Developer Portrait Model based on Bert-Capsule Network", "doi": null, "abstractUrl": "/proceedings-article/hpcc-dss-smartcity/2020/764900b225/1t7mT1qOv3q", "parentPublication": { "id": "proceedings/hpcc-dss-smartcity/2020/7649/0", "title": "2020 IEEE 22nd International Conference on High Performance Computing and Communications; IEEE 18th 
International Conference on Smart City; IEEE 6th International Conference on Data Science and Systems (HPCC/SmartCity/DSS)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/hpcc-dss-smartcity/2020/7649/0/764900b233", "title": "A Novel SMOTE Algorithm based Portrait Model for Programmers", "doi": null, "abstractUrl": "/proceedings-article/hpcc-dss-smartcity/2020/764900b233/1t7mXeYbFAY", "parentPublication": { "id": "proceedings/hpcc-dss-smartcity/2020/7649/0", "title": "2020 IEEE 22nd International Conference on High Performance Computing and Communications; IEEE 18th International Conference on Smart City; IEEE 6th International Conference on Data Science and Systems (HPCC/SmartCity/DSS)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "1fHkkWQ0aEE", "title": "2019 International Conference on Cyberworlds (CW)", "acronym": "cw", "groupId": "1000175", "volume": "0", "displayVolume": "0", "year": "2019", "__typename": "ProceedingType" }, "article": { "id": "1fHklgptlIs", "doi": "10.1109/CW.2019.00017", "title": "Semi-Automatic Creation of an Anime-Like 3D Face Model from a Single Illustration", "normalizedTitle": "Semi-Automatic Creation of an Anime-Like 3D Face Model from a Single Illustration", "abstract": "In this paper, we propose a method for semi-automatically creating an anime-like 3D face model from a single illustration. In the proposed method, principal component analysis (PCA) is applied to existing anime-like 3D models to obtain base models for generating natural 3D models. To align the dimensions of the data and make geometric correspondence, a template model is deformed using a modified Nonrigid Iterative Closest Point (NICP) method. Then, the coefficients of the linear combination of the base models are estimated by minimizing the difference between the rendered image of the 3D model with the coefficients and the input illustration using edge-based matching. We confirmed that our method was able to generate a natural anime-like 3D face models which has similar eye and face shapes to those of the input illustration.", "abstracts": [ { "abstractType": "Regular", "content": "In this paper, we propose a method for semi-automatically creating an anime-like 3D face model from a single illustration. In the proposed method, principal component analysis (PCA) is applied to existing anime-like 3D models to obtain base models for generating natural 3D models. To align the dimensions of the data and make geometric correspondence, a template model is deformed using a modified Nonrigid Iterative Closest Point (NICP) method. 
Then, the coefficients of the linear combination of the base models are estimated by minimizing the difference between the rendered image of the 3D model with the coefficients and the input illustration using edge-based matching. We confirmed that our method was able to generate a natural anime-like 3D face models which has similar eye and face shapes to those of the input illustration.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "In this paper, we propose a method for semi-automatically creating an anime-like 3D face model from a single illustration. In the proposed method, principal component analysis (PCA) is applied to existing anime-like 3D models to obtain base models for generating natural 3D models. To align the dimensions of the data and make geometric correspondence, a template model is deformed using a modified Nonrigid Iterative Closest Point (NICP) method. Then, the coefficients of the linear combination of the base models are estimated by minimizing the difference between the rendered image of the 3D model with the coefficients and the input illustration using edge-based matching. 
We confirmed that our method was able to generate a natural anime-like 3D face models which has similar eye and face shapes to those of the input illustration.", "fno": "229700a053", "keywords": [ "Face Recognition", "Image Matching", "Image Registration", "Iterative Methods", "Principal Component Analysis", "Rendering Computer Graphics", "Solid Modelling", "Semiautomatic Creation", "Anime Like 3 D Face Model", "Single Illustration", "Existing Anime Like 3 D Models", "Base Models", "Natural 3 D Models", "Template Model", "Modified Nonrigid Iterative Closest Point Method", "Input Illustration", "Natural Anime Like 3 D Face Models", "Solid Modeling", "Three Dimensional Displays", "Face", "Deformable Models", "Training", "Principal Component Analysis", "Data Models", "3 D Modeling", "Nonrigid Registration", "Principal Component Analysis" ], "authors": [ { "affiliation": "Saitama University", "fullName": "Takayuki Niki", "givenName": "Takayuki", "surname": "Niki", "__typename": "ArticleAuthorType" }, { "affiliation": "Saitama University", "fullName": "Takashi Komuro", "givenName": "Takashi", "surname": "Komuro", "__typename": "ArticleAuthorType" } ], "idPrefix": "cw", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "2019-10-01T00:00:00", "pubType": "proceedings", "pages": "53-56", "year": "2019", "issn": null, "isbn": "978-1-7281-2297-7", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "229700a045", "articleId": "1fHkltMBEEU", "__typename": "AdjacentArticleType" }, "next": { "fno": "229700a057", "articleId": "1fHkkZzKire", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/cgi/2001/1007/0/10070343", "title": "Technical Illustration Based on Human-Like Approach", "doi": null, "abstractUrl": "/proceedings-article/cgi/2001/10070343/12OmNCm7BIM", "parentPublication": { 
"id": "proceedings/cgi/2001/1007/0", "title": "Proceedings. Computer Graphics International 2001", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iccv/2015/8391/0/8391d604", "title": "A Groupwise Multilinear Correspondence Optimization for 3D Faces", "doi": null, "abstractUrl": "/proceedings-article/iccv/2015/8391d604/12OmNwB2dYl", "parentPublication": { "id": "proceedings/iccv/2015/8391/0", "title": "2015 IEEE International Conference on Computer Vision (ICCV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/fg/2018/2335/0/233501a416", "title": "Deep Transfer Network with 3D Morphable Models for Face Recognition", "doi": null, "abstractUrl": "/proceedings-article/fg/2018/233501a416/12OmNx0RIU2", "parentPublication": { "id": "proceedings/fg/2018/2335/0", "title": "2018 13th IEEE International Conference on Automatic Face & Gesture Recognition (FG 2018)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/fg/2008/2153/0/04813333", "title": "Bilinear elastically deformable models with application to 3D face and facial expression recognition", "doi": null, "abstractUrl": "/proceedings-article/fg/2008/04813333/12OmNxX3uoG", "parentPublication": { "id": "proceedings/fg/2008/2153/0", "title": "2008 8th IEEE International Conference on Automatic Face & Gesture Recognition", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvpr/2014/5118/0/5118b789", "title": "RAPS: Robust and Efficient Automatic Construction of Person-Specific Deformable Models", "doi": null, "abstractUrl": "/proceedings-article/cvpr/2014/5118b789/12OmNxYL5av", "parentPublication": { "id": "proceedings/cvpr/2014/5118/0", "title": "2014 IEEE Conference on Computer Vision and Pattern Recognition (CVPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": 
"trans/tp/2012/12/ttp2012122341", "title": "Gender and Ethnicity Specific Generic Elastic Models from a Single 2D Image for Novel 2D Pose Face Synthesis and Recognition", "doi": null, "abstractUrl": "/journal/tp/2012/12/ttp2012122341/13rRUILtJng", "parentPublication": { "id": "trans/tp", "title": "IEEE Transactions on Pattern Analysis & Machine Intelligence", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tp/2013/02/ttp2013020381", "title": "Iterative Closest Normal Point for 3D Face Recognition", "doi": null, "abstractUrl": "/journal/tp/2013/02/ttp2013020381/13rRUx0xQ0F", "parentPublication": { "id": "trans/tp", "title": "IEEE Transactions on Pattern Analysis & Machine Intelligence", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/5555/01/09982378", "title": "Appearance-preserved Portrait-to-anime Translation via Proxy-guided Domain Adaptation", "doi": null, "abstractUrl": "/journal/tg/5555/01/09982378/1J2T8H9Y2Ws", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/nicoint/2020/8771/0/09122328", "title": "Face Stylized Modeling for Virtual Character", "doi": null, "abstractUrl": "/proceedings-article/nicoint/2020/09122328/1kRSfEpyyWI", "parentPublication": { "id": "proceedings/nicoint/2020/8771/0", "title": "2020 Nicograph International (NicoInt)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cw/2020/6497/0/649700a110", "title": "Automatic Generation of 3D Natural Anime-like Non-Player Characters with Machine Learning", "doi": null, "abstractUrl": "/proceedings-article/cw/2020/649700a110/1olHy70NCQU", "parentPublication": { "id": "proceedings/cw/2020/6497/0", "title": "2020 International Conference on Cyberworlds (CW)", "__typename": "ParentPublication" }, 
"__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "1m3n9N02qgE", "title": "2020 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)", "acronym": "cvpr", "groupId": "1000147", "volume": "0", "displayVolume": "0", "year": "2020", "__typename": "ProceedingType" }, "article": { "id": "1m3nzAe5fKE", "doi": "10.1109/CVPR42600.2020.00142", "title": "Robust 3D Self-Portraits in Seconds", "normalizedTitle": "Robust 3D Self-Portraits in Seconds", "abstract": "In this paper, we propose an efficient method for robust 3D self-portraits using a single RGBD camera. Benefiting from the proposed PIFusion and lightweight bundle adjustment algorithm, our method can generate detailed 3D self-portraits in seconds and shows the ability to handle subjects wearing extremely loose clothes. To achieve highly efficient and robust reconstruction, we propose PIFusion, which combines learning-based 3D recovery with volumetric non-rigid fusion to generate accurate sparse partial scans of the subject. Moreover, a non-rigid volumetric deformation method is proposed to continuously refine the learned shape prior. Finally, a lightweight bundle adjustment algorithm is proposed to guarantee that all the partial scans can not only ``loop'' with each other but also remain consistent with the selected live key observations. The results and experiments show that the proposed method achieves more robust and efficient 3D self-portraits compared with state-of-the-art methods.", "abstracts": [ { "abstractType": "Regular", "content": "In this paper, we propose an efficient method for robust 3D self-portraits using a single RGBD camera. Benefiting from the proposed PIFusion and lightweight bundle adjustment algorithm, our method can generate detailed 3D self-portraits in seconds and shows the ability to handle subjects wearing extremely loose clothes. 
To achieve highly efficient and robust reconstruction, we propose PIFusion, which combines learning-based 3D recovery with volumetric non-rigid fusion to generate accurate sparse partial scans of the subject. Moreover, a non-rigid volumetric deformation method is proposed to continuously refine the learned shape prior. Finally, a lightweight bundle adjustment algorithm is proposed to guarantee that all the partial scans can not only ``loop'' with each other but also remain consistent with the selected live key observations. The results and experiments show that the proposed method achieves more robust and efficient 3D self-portraits compared with state-of-the-art methods.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "In this paper, we propose an efficient method for robust 3D self-portraits using a single RGBD camera. Benefiting from the proposed PIFusion and lightweight bundle adjustment algorithm, our method can generate detailed 3D self-portraits in seconds and shows the ability to handle subjects wearing extremely loose clothes. To achieve highly efficient and robust reconstruction, we propose PIFusion, which combines learning-based 3D recovery with volumetric non-rigid fusion to generate accurate sparse partial scans of the subject. Moreover, a non-rigid volumetric deformation method is proposed to continuously refine the learned shape prior. Finally, a lightweight bundle adjustment algorithm is proposed to guarantee that all the partial scans can not only ``loop'' with each other but also remain consistent with the selected live key observations. 
The results and experiments show that the proposed method achieves more robust and efficient 3D self-portraits compared with state-of-the-art methods.", "fno": "716800b341", "keywords": [ "Cameras", "Deformation", "Image Colour Analysis", "Image Fusion", "Image Reconstruction", "Learning Artificial Intelligence", "Robust 3 D Self Portraits", "Single RGBD Camera", "PI Fusion", "Lightweight Bundle Adjustment Algorithm", "Robust Reconstruction", "Nonrigid Fusion", "Accurate Sparse Partial Scans", "Nonrigid Volumetric Deformation Method", "Learned Shape", "Learning Based 3 D Recovery", "Volumetric Nonrigid Fusion", "Learned Shape Prior", "Bundle Adjustment", "Image Reconstruction", "Robustness", "Strain", "Shape", "Deformable Models" ], "authors": [ { "affiliation": "Department of Automation, Tsinghua University, China", "fullName": "Zhe Li", "givenName": "Zhe", "surname": "Li", "__typename": "ArticleAuthorType" }, { "affiliation": "Department of Automation, Tsinghua University, China", "fullName": "Tao Yu", "givenName": "Tao", "surname": "Yu", "__typename": "ArticleAuthorType" }, { "affiliation": "Department of Automation, Tsinghua University, China", "fullName": "Chuanyu Pan", "givenName": "Chuanyu", "surname": "Pan", "__typename": "ArticleAuthorType" }, { "affiliation": "Department of Automation, Tsinghua University, China", "fullName": "Zerong Zheng", "givenName": "Zerong", "surname": "Zheng", "__typename": "ArticleAuthorType" }, { "affiliation": "Department of Automation, Tsinghua University, China; Institute for Brain and Cognitive Sciences, Tsinghua University, China", "fullName": "Yebin Liu", "givenName": "Yebin", "surname": "Liu", "__typename": "ArticleAuthorType" } ], "idPrefix": "cvpr", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "2020-06-01T00:00:00", "pubType": "proceedings", "pages": "1341-1350", "year": "2020", "issn": null, "isbn": "978-1-7281-7168-5", "notes": null, "notesType": null, 
"__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "716800b331", "articleId": "1m3ooB64tNe", "__typename": "AdjacentArticleType" }, "next": { "fno": "716800b351", "articleId": "1m3orD9TlNm", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/icpr/2004/2128/3/212830902", "title": "3D Model Reconstruction by Constrained Bundle Adjustment", "doi": null, "abstractUrl": "/proceedings-article/icpr/2004/212830902/12OmNCvLXXS", "parentPublication": { "id": "proceedings/icpr/2004/2128/3", "title": "Pattern Recognition, International Conference on", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ias/2009/3744/1/3744a273", "title": "Research on 3D Reconstruction Procedure of Marked Points for Large Workpiece Measurement", "doi": null, "abstractUrl": "/proceedings-article/ias/2009/3744a273/12OmNz3bdBQ", "parentPublication": { "id": "proceedings/ias/2009/3744/1", "title": "Information Assurance and Security, International Symposium on", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tp/2019/04/08325527", "title": "Accurate 3D Reconstruction from Small Motion Clip for Rolling Shutter Cameras", "doi": null, "abstractUrl": "/journal/tp/2019/04/08325527/13rRUxlgxXO", "parentPublication": { "id": "trans/tp", "title": "IEEE Transactions on Pattern Analysis & Machine Intelligence", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvpr/2018/6420/0/642000b876", "title": "pOSE: Pseudo Object Space Error for Initialization-Free Bundle Adjustment", "doi": null, "abstractUrl": "/proceedings-article/cvpr/2018/642000b876/17D45VVho3g", "parentPublication": { "id": "proceedings/cvpr/2018/6420/0", "title": "2018 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)", "__typename": "ParentPublication" }, "__typename": 
"RecommendedArticleType" }, { "id": "proceedings/icpr/2022/9062/0/09956128", "title": "Self-calibration of multiple-line-lasers based on coplanarity and Epipolar constraints for wide area shape scan using moving camera", "doi": null, "abstractUrl": "/proceedings-article/icpr/2022/09956128/1IHpT93z7Mc", "parentPublication": { "id": "proceedings/icpr/2022/9062/0", "title": "2022 26th International Conference on Pattern Recognition (ICPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/3dv/2019/3131/0/313100a453", "title": "Simultaneous Shape Registration and Active Stereo Shape Reconstruction using Modified Bundle Adjustment", "doi": null, "abstractUrl": "/proceedings-article/3dv/2019/313100a453/1ezRD0dXDhe", "parentPublication": { "id": "proceedings/3dv/2019/3131/0", "title": "2019 International Conference on 3D Vision (3DV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tp/2022/02/09151354", "title": "Spatiotemporal Bundle Adjustment for Dynamic 3D Human Reconstruction in the Wild", "doi": null, "abstractUrl": "/journal/tp/2022/02/09151354/1lPCkW5UbPG", "parentPublication": { "id": "trans/tp", "title": "IEEE Transactions on Pattern Analysis & Machine Intelligence", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvpr/2020/7168/0/716800g468", "title": "Learning to Dress 3D People in Generative Clothing", "doi": null, "abstractUrl": "/proceedings-article/cvpr/2020/716800g468/1m3nwUHFD68", "parentPublication": { "id": "proceedings/cvpr/2020/7168/0", "title": "2020 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/3dv/2020/8128/0/812800a652", "title": "RidgeSfM: Structure from Motion via Robust Pairwise Matching Under Depth Uncertainty", "doi": null, "abstractUrl": 
"/proceedings-article/3dv/2020/812800a652/1qyxoovaSBi", "parentPublication": { "id": "proceedings/3dv/2020/8128/0", "title": "2020 International Conference on 3D Vision (3DV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tp/2022/11/09540284", "title": "Robust and Accurate 3D Self-Portraits in Seconds", "doi": null, "abstractUrl": "/journal/tp/2022/11/09540284/1wWCcQDdEZi", "parentPublication": { "id": "trans/tp", "title": "IEEE Transactions on Pattern Analysis & Machine Intelligence", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "1t7mQaZpzb2", "title": "2020 IEEE 22nd International Conference on High Performance Computing and Communications; IEEE 18th International Conference on Smart City; IEEE 6th International Conference on Data Science and Systems (HPCC/SmartCity/DSS)", "acronym": "hpcc-dss-smartcity", "groupId": "1002461", "volume": "0", "displayVolume": "0", "year": "2020", "__typename": "ProceedingType" }, "article": { "id": "1t7mT1qOv3q", "doi": "10.1109/HPCC-SmartCity-DSS50907.2020.00158", "title": "A Novel Developer Portrait Model based on Bert-Capsule Network", "normalizedTitle": "A Novel Developer Portrait Model based on Bert-Capsule Network", "abstract": "In order to ensure code quality, it's necessary to construct portraits for developers, which could analyze their behavior to provide personalized programming suggestions. However, most of the existing developer portrait algorithms only use global features and ignore local features extracted from log texts, which leads to the lack of comprehensive personality analysis. To solve this problem, the proposed method proposes a novel developer portrait model, which could describe developers' programming styles more accurately with both global and local information extracted from texts. The proposed model firstly collects the log data produced in the process of continuous integration development. Afterwards, the proposed method proposes the personality portrait model based on BERT-Capsule network, which successfully combines global semantic features and local emotional features. 
The experimental results show that the proposed BERT-Capsule model can effectively extract the contextual information and the local emotional information of the text, thus improving classification performance of the developer portrait model.", "abstracts": [ { "abstractType": "Regular", "content": "In order to ensure code quality, it's necessary to construct portraits for developers, which could analyze their behavior to provide personalized programming suggestions. However, most of the existing developer portrait algorithms only use global features and ignore local features extracted from log texts, which leads to the lack of comprehensive personality analysis. To solve this problem, the proposed method proposes a novel developer portrait model, which could describe developers' programming styles more accurately with both global and local information extracted from texts. The proposed model firstly collects the log data produced in the process of continuous integration development. Afterwards, the proposed method proposes the personality portrait model based on BERT-Capsule network, which successfully combines global semantic features and local emotional features. The experimental results show that the proposed BERT-Capsule model can effectively extract the contextual information and the local emotional information of the text, thus improving classification performance of the developer portrait model.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "In order to ensure code quality, it's necessary to construct portraits for developers, which could analyze their behavior to provide personalized programming suggestions. However, most of the existing developer portrait algorithms only use global features and ignore local features extracted from log texts, which leads to the lack of comprehensive personality analysis. 
To solve this problem, the proposed method proposes a novel developer portrait model, which could describe developers' programming styles more accurately with both global and local information extracted from texts. The proposed model firstly collects the log data produced in the process of continuous integration development. Afterwards, the proposed method proposes the personality portrait model based on BERT-Capsule network, which successfully combines global semantic features and local emotional features. The experimental results show that the proposed BERT-Capsule model can effectively extract the contextual information and the local emotional information of the text, thus improving classification performance of the developer portrait model.", "fno": "764900b225", "keywords": [ "Data Analysis", "Emotion Recognition", "Feature Extraction", "Novel Developer Portrait Model", "Bert Capsule Network", "Portraits", "Personalized Programming Suggestions", "Existing Developer Portrait Algorithms", "Global Features", "Local Features", "Log Texts", "Comprehensive Personality Analysis", "Developers", "Global Information", "Local Information", "Continuous Integration Development", "Personality Portrait Model", "BERT Capsule Network", "Global Semantic Features", "Local Emotional Features", "BERT Capsule Model", "Local Emotional Information", "Analytical Models", "Computational Modeling", "High Performance Computing", "Conferences", "Semantics", "Programming", "Feature Extraction", "Bert Capsule Network", "Developer Portrait Model", "Global Information" ], "authors": [ { "affiliation": "Hohai University,College of Computer and Information,Nanjing,China", "fullName": "Pengyu Yu", "givenName": "Pengyu", "surname": "Yu", "__typename": "ArticleAuthorType" }, { "affiliation": "Hohai University,College of Computer and Information,Nanjing,China", "fullName": "Yirui Wu", "givenName": "Yirui", "surname": "Wu", "__typename": "ArticleAuthorType" }, { "affiliation": "Hohai 
University,College of Computer and Information,Nanjing,China", "fullName": "Benze Wu", "givenName": "Benze", "surname": "Wu", "__typename": "ArticleAuthorType" } ], "idPrefix": "hpcc-dss-smartcity", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "2020-12-01T00:00:00", "pubType": "proceedings", "pages": "1225-1232", "year": "2020", "issn": null, "isbn": "978-1-7281-7649-9", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "764900b219", "articleId": "1t7mTczZKMw", "__typename": "AdjacentArticleType" }, "next": { "fno": "764900b233", "articleId": "1t7mXeYbFAY", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/bibm/2021/0126/0/09669855", "title": "C2BERT: Cross-contrast BERT for Chinese Biomedical Sentence Representation", "doi": null, "abstractUrl": "/proceedings-article/bibm/2021/09669855/1A9VRlgss1i", "parentPublication": { "id": "proceedings/bibm/2021/0126/0", "title": "2021 IEEE International Conference on Bioinformatics and Biomedicine (BIBM)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/bibm/2021/0126/0/09669869", "title": "TL-BERT: A Novel Biomedical Relation Extraction Approach", "doi": null, "abstractUrl": "/proceedings-article/bibm/2021/09669869/1A9WkaPDGrm", "parentPublication": { "id": "proceedings/bibm/2021/0126/0", "title": "2021 IEEE International Conference on Bioinformatics and Biomedicine (BIBM)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icsc/2022/3418/0/341800a171", "title": "Estimating Context-Specific Subjective Content Descriptions using BERT", "doi": null, "abstractUrl": "/proceedings-article/icsc/2022/341800a171/1BYIsoK0cko", "parentPublication": { "id": "proceedings/icsc/2022/3418/0", "title": "2022 IEEE 16th International 
Conference on Semantic Computing (ICSC)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iucc-cit-dsci-smartcns/2021/6667/0/666700a296", "title": "BERT- BiLSTM-Caps Language Model for Screening of Children's Severe Mental Retardation", "doi": null, "abstractUrl": "/proceedings-article/iucc-cit-dsci-smartcns/2021/666700a296/1BrAJI8Iu6Q", "parentPublication": { "id": "proceedings/iucc-cit-dsci-smartcns/2021/6667/0", "title": "2021 20th International Conference on Ubiquitous Computing and Communications (IUCC/CIT/DSCI/SmartCNS)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icse/2022/9221/0/922100a946", "title": "Fast Changeset-based Bug Localization with BERT", "doi": null, "abstractUrl": "/proceedings-article/icse/2022/922100a946/1Ems4YVqjio", "parentPublication": { "id": "proceedings/icse/2022/9221/0", "title": "2022 IEEE/ACM 44th International Conference on Software Engineering (ICSE)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icftic/2022/2195/0/10075256", "title": "Analysis of E-commerce Review Text Based on Capsule Network", "doi": null, "abstractUrl": "/proceedings-article/icftic/2022/10075256/1LRl5QrVGKc", "parentPublication": { "id": "proceedings/icftic/2022/2195/0", "title": "2022 4th International Conference on Frontiers Technology of Information and Computer (ICFTIC)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/hpcc-dss-smartcity/2020/7649/0/764900b233", "title": "A Novel SMOTE Algorithm based Portrait Model for Programmers", "doi": null, "abstractUrl": "/proceedings-article/hpcc-dss-smartcity/2020/764900b233/1t7mXeYbFAY", "parentPublication": { "id": "proceedings/hpcc-dss-smartcity/2020/7649/0", "title": "2020 IEEE 22nd International Conference on High Performance Computing and Communications; IEEE 18th International Conference on 
Smart City; IEEE 6th International Conference on Data Science and Systems (HPCC/SmartCity/DSS)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/itca/2020/0378/0/037800a677", "title": "A Rapid Ear Modeling Method Based on Standard Template and Screening Algorithm for Portrait Product", "doi": null, "abstractUrl": "/proceedings-article/itca/2020/037800a677/1tpB89ki0gg", "parentPublication": { "id": "proceedings/itca/2020/0378/0", "title": "2020 2nd International Conference on Information Technology and Computer Application (ITCA)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/bigdatasecurity-hpsc-ids/2021/3927/0/392700a105", "title": "Chinese Text Classification Model Based On Bert And Capsule Network Structure", "doi": null, "abstractUrl": "/proceedings-article/bigdatasecurity-hpsc-ids/2021/392700a105/1uPzcAiN6KI", "parentPublication": { "id": "proceedings/bigdatasecurity-hpsc-ids/2021/3927/0", "title": "2021 7th IEEE Intl Conference on Big Data Security on Cloud (BigDataSecurity), IEEE Intl Conference on High Performance and Smart Computing, (HPSC) and IEEE Intl Conference on Intelligent Data and Security (IDS)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ichi/2021/0132/0/013200a507", "title": "Contrastive Representations Pre-Training for Enhanced Discharge Summary BERT", "doi": null, "abstractUrl": "/proceedings-article/ichi/2021/013200a507/1xIOO9qqC9a", "parentPublication": { "id": "proceedings/ichi/2021/0132/0", "title": "2021 IEEE 9th International Conference on Healthcare Informatics (ICHI)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "12OmNBC8AAD", "title": "2010 IEEE Virtual Reality Conference (VR)", "acronym": "vr", "groupId": "1000791", "volume": "0", "displayVolume": "0", "year": "2010", "__typename": "ProceedingType" }, "article": { "id": "12OmNAle6ku", "doi": "10.1109/VR.2010.5444812", "title": "GUD WIP: Gait-Understanding-Driven Walking-In-Place", "normalizedTitle": "GUD WIP: Gait-Understanding-Driven Walking-In-Place", "abstract": "Many Virtual Environments require walking interfaces to explore virtual worlds much larger than available real-world tracked space. We present a model for generating virtual locomotion speeds from Walking-In-Place (WIP) inputs based on walking biomechanics. By employing gait principles, our model - called Gait-Understanding-Driven Walking-In-Place (GUD WIP) - creates output speeds which better match those evident in Real Walking, and which better respond to variations in step frequency, including realistic starting and stopping. The speeds output by our implementation demonstrate considerably less within-step fluctuation than a good current WIP system - Low-Latency, Continuous-Motion (LLCM) WIP - while still remaining responsive to changes in user input. We compared resulting speeds from Real Walking, GUD WIP, and LLCM-WIP via user study: The average output speeds for Real Walking and GUD WIP respond consistently with changing step frequency - LLCM-WIP is far less consistent. GUD WIP produces output speeds that are more locally consistent (smooth) and step-frequency-to-walk-speed consistent than LLCM-WIP.", "abstracts": [ { "abstractType": "Regular", "content": "Many Virtual Environments require walking interfaces to explore virtual worlds much larger than available real-world tracked space. We present a model for generating virtual locomotion speeds from Walking-In-Place (WIP) inputs based on walking biomechanics. 
By employing gait principles, our model - called Gait-Understanding-Driven Walking-In-Place (GUD WIP) - creates output speeds which better match those evident in Real Walking, and which better respond to variations in step frequency, including realistic starting and stopping. The speeds output by our implementation demonstrate considerably less within-step fluctuation than a good current WIP system - Low-Latency, Continuous-Motion (LLCM) WIP - while still remaining responsive to changes in user input. We compared resulting speeds from Real Walking, GUD WIP, and LLCM-WIP via user study: The average output speeds for Real Walking and GUD WIP respond consistently with changing step frequency - LLCM-WIP is far less consistent. GUD WIP produces output speeds that are more locally consistent (smooth) and step-frequency-to-walk-speed consistent than LLCM-WIP.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "Many Virtual Environments require walking interfaces to explore virtual worlds much larger than available real-world tracked space. We present a model for generating virtual locomotion speeds from Walking-In-Place (WIP) inputs based on walking biomechanics. By employing gait principles, our model - called Gait-Understanding-Driven Walking-In-Place (GUD WIP) - creates output speeds which better match those evident in Real Walking, and which better respond to variations in step frequency, including realistic starting and stopping. The speeds output by our implementation demonstrate considerably less within-step fluctuation than a good current WIP system - Low-Latency, Continuous-Motion (LLCM) WIP - while still remaining responsive to changes in user input. We compared resulting speeds from Real Walking, GUD WIP, and LLCM-WIP via user study: The average output speeds for Real Walking and GUD WIP respond consistently with changing step frequency - LLCM-WIP is far less consistent. 
GUD WIP produces output speeds that are more locally consistent (smooth) and step-frequency-to-walk-speed consistent than LLCM-WIP.", "fno": "05444812", "keywords": [ "GUD WIP", "Virtual Environments", "Walking Interface", "Virtual Locomotion Speed", "Gait Understanding Driven Walking In Place", "Biomechanics", "Gait Principles", "Low Latency Continuous Motion", "Step Frequency To Walk Speed" ], "authors": [ { "affiliation": "Univ. of North Carolina at Chapel Hill, Chapel Hill, NC, USA", "fullName": "Jeremy D Wendt", "givenName": "Jeremy D", "surname": "Wendt", "__typename": "ArticleAuthorType" }, { "affiliation": "Univ. of North Carolina at Chapel Hill, Chapel Hill, NC, USA", "fullName": "Mary C Whitton", "givenName": "Mary C", "surname": "Whitton", "__typename": "ArticleAuthorType" }, { "affiliation": "Univ. of North Carolina at Chapel Hill, Chapel Hill, NC, USA", "fullName": "Frederick P Brooks", "givenName": "Frederick P", "surname": "Brooks", "__typename": "ArticleAuthorType" } ], "idPrefix": "vr", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "2010-03-01T00:00:00", "pubType": "proceedings", "pages": "51-58", "year": "2010", "issn": null, "isbn": "978-1-4244-6237-7", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "05444815", "articleId": "12OmNylsZU8", "__typename": "AdjacentArticleType" }, "next": { "fno": "05444813", "articleId": "12OmNA0vnTx", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/vr/2012/1247/0/06180876", "title": "Sensor-fusion walking-in-place interaction technique using mobile devices", "doi": null, "abstractUrl": "/proceedings-article/vr/2012/06180876/12OmNB6UI9j", "parentPublication": { "id": "proceedings/vr/2012/1247/0", "title": "Virtual Reality Conference, IEEE", "__typename": "ParentPublication" }, "__typename": 
"RecommendedArticleType" }, { "id": "proceedings/cit/2009/3836/1/3836a263", "title": "Primary Exploration on the Symmetry of Human Walking", "doi": null, "abstractUrl": "/proceedings-article/cit/2009/3836a263/12OmNC3Xhu8", "parentPublication": { "id": "proceedings/cit/2009/3836/1", "title": "2009 Ninth IEEE International Conference on Computer and Information Technology. CIT 2009", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vr/2016/0836/0/07504772", "title": "Evaluating two alternative walking in place interfaces for virtual reality gaming", "doi": null, "abstractUrl": "/proceedings-article/vr/2016/07504772/12OmNCf1Dnb", "parentPublication": { "id": "proceedings/vr/2016/0836/0", "title": "2016 IEEE Virtual Reality (VR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vr/2011/0039/0/05759437", "title": "An evaluation of navigational ability comparing Redirected Free Exploration with Distractors to Walking-in-Place and joystick locomotio interfaces", "doi": null, "abstractUrl": "/proceedings-article/vr/2011/05759437/12OmNx8OuyK", "parentPublication": { "id": "proceedings/vr/2011/0039/0", "title": "2011 IEEE Virtual Reality (VR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/3dui/2008/2047/0/04476598", "title": "LLCM-WIP: Low-Latency, Continuous-Motion Walking-in-Place", "doi": null, "abstractUrl": "/proceedings-article/3dui/2008/04476598/12OmNyQYtvN", "parentPublication": { "id": "proceedings/3dui/2008/2047/0", "title": "2008 IEEE Symposium on 3D User Interfaces", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2014/04/ttg201404569", "title": "Establishing the Range of Perceptually Natural Visual Walking Speeds for Virtual Walking-In-Place Locomotion", "doi": null, "abstractUrl": "/journal/tg/2014/04/ttg201404569/13rRUxAASTb", "parentPublication": { "id": 
"trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/5555/01/09978713", "title": "Revisiting Walking-in-Place by Introducing Step-Height Control, Elastic Input, and Pseudo-Haptic Feedback", "doi": null, "abstractUrl": "/journal/tg/5555/01/09978713/1IXUnnVaWoE", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2023/05/10049680", "title": "Assisted walking-in-place: Introducing assisted motion to walking-by-cycling in embodied virtual reality", "doi": null, "abstractUrl": "/journal/tg/2023/05/10049680/1KYolEFtr6U", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vr/2019/1377/0/08798345", "title": "Investigation of Visual Self-Representation for a Walking-in-Place Navigation System in Virtual Reality", "doi": null, "abstractUrl": "/proceedings-article/vr/2019/08798345/1cJ1hpkUgHS", "parentPublication": { "id": "proceedings/vr/2019/1377/0", "title": "2019 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vrw/2021/4057/0/405700a498", "title": "SHeF-WIP: Walking-in-Place based on Step Height and Frequency for Wider Range of Virtual Speed", "doi": null, "abstractUrl": "/proceedings-article/vrw/2021/405700a498/1tnWFlvbESk", "parentPublication": { "id": "proceedings/vrw/2021/4057/0", "title": "2021 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "12OmNAYXWAF", "title": "2016 IEEE Virtual Reality (VR)", "acronym": "vr", "groupId": "1000791", "volume": "0", "displayVolume": "0", "year": "2016", "__typename": "ProceedingType" }, "article": { "id": "12OmNCf1Dnb", "doi": "10.1109/VR.2016.7504772", "title": "Evaluating two alternative walking in place interfaces for virtual reality gaming", "normalizedTitle": "Evaluating two alternative walking in place interfaces for virtual reality gaming", "abstract": "This study investigates sliding as a walking-in-place (WIP) method for virtual reality navigation using the Wizdish. The Wizdish is a novel WIP device built for home usage. Two WIP methods, sliding and marching, were compared for naturalness, presence, and surface difference. The sliding technique used on the Wizdish was found to be significantly more disruptive during the experience compared to marching. This could be due to the size of the Wizdish, restricting the users stride, or due to a longer acclimatization time.", "abstracts": [ { "abstractType": "Regular", "content": "This study investigates sliding as a walking-in-place (WIP) method for virtual reality navigation using the Wizdish. The Wizdish is a novel WIP device built for home usage. Two WIP methods, sliding and marching, were compared for naturalness, presence, and surface difference. The sliding technique used on the Wizdish was found to be significantly more disruptive during the experience compared to marching. This could be due to the size of the Wizdish, restricting the users stride, or due to a longer acclimatization time.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "This study investigates sliding as a walking-in-place (WIP) method for virtual reality navigation using the Wizdish. The Wizdish is a novel WIP device built for home usage. Two WIP methods, sliding and marching, were compared for naturalness, presence, and surface difference. 
The sliding technique used on the Wizdish was found to be significantly more disruptive during the experience compared to marching. This could be due to the size of the Wizdish, restricting the users stride, or due to a longer acclimatization time.", "fno": "07504772", "keywords": [ "Legged Locomotion", "Friction", "Tracking", "Training", "Virtual Reality", "Footwear", "Navigation", "Virtual Environments", "3 D Navigation", "Walking" ], "authors": [ { "affiliation": "Aalborg University Copenhagen", "fullName": "Mikkel Nielsen", "givenName": "Mikkel", "surname": "Nielsen", "__typename": "ArticleAuthorType" }, { "affiliation": "Aalborg University Copenhagen", "fullName": "Christian Toft", "givenName": "Christian", "surname": "Toft", "__typename": "ArticleAuthorType" }, { "affiliation": "Aalborg University Copenhagen", "fullName": "Niels C. Nilsson", "givenName": "Niels C.", "surname": "Nilsson", "__typename": "ArticleAuthorType" }, { "affiliation": "Aalborg University Copenhagen", "fullName": "Rolf Nordahl", "givenName": "Rolf", "surname": "Nordahl", "__typename": "ArticleAuthorType" }, { "affiliation": "Aalborg University Copenhagen", "fullName": "Stefania Serafin", "givenName": "Stefania", "surname": "Serafin", "__typename": "ArticleAuthorType" } ], "idPrefix": "vr", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "2016-03-01T00:00:00", "pubType": "proceedings", "pages": "299-300", "year": "2016", "issn": "2375-5334", "isbn": "978-1-5090-0836-0", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "07504771", "articleId": "12OmNAle71v", "__typename": "AdjacentArticleType" }, "next": { "fno": "07504773", "articleId": "12OmNApcuq5", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/vr/2010/6237/0/05444812", "title": "GUD WIP: Gait-Understanding-Driven 
Walking-In-Place", "doi": null, "abstractUrl": "/proceedings-article/vr/2010/05444812/12OmNAle6ku", "parentPublication": { "id": "proceedings/vr/2010/6237/0", "title": "2010 IEEE Virtual Reality Conference (VR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/3dui/2013/6097/0/06550193", "title": "Tapping-In-Place: Increasing the naturalness of immersive walking-in-place locomotion through novel gestural input", "doi": null, "abstractUrl": "/proceedings-article/3dui/2013/06550193/12OmNAnMuyq", "parentPublication": { "id": "proceedings/3dui/2013/6097/0", "title": "2013 IEEE Symposium on 3D User Interfaces (3DUI)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vr/2015/1727/0/07223415", "title": "Impact of illusory resistance on finger walking behavior", "doi": null, "abstractUrl": "/proceedings-article/vr/2015/07223415/12OmNvA1h4N", "parentPublication": { "id": "proceedings/vr/2015/1727/0", "title": "2015 IEEE Virtual Reality (VR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2014/04/ttg201404569", "title": "Establishing the Range of Perceptually Natural Visual Walking Speeds for Virtual Walking-In-Place Locomotion", "doi": null, "abstractUrl": "/journal/tg/2014/04/ttg201404569/13rRUxAASTb", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ismar-adjunct/2018/7592/0/08699289", "title": "Walking-in-Place for VR Navigation Independent of Gaze Direction Using a Waist-Worn Inertial Measurement Unit", "doi": null, "abstractUrl": "/proceedings-article/ismar-adjunct/2018/08699289/19F1PlWtKJa", "parentPublication": { "id": "proceedings/ismar-adjunct/2018/7592/0", "title": "2018 IEEE International Symposium on Mixed and Augmented Reality Adjunct (ISMAR-Adjunct)", 
"__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/5555/01/09978713", "title": "Revisiting Walking-in-Place by Introducing Step-Height Control, Elastic Input, and Pseudo-Haptic Feedback", "doi": null, "abstractUrl": "/journal/tg/5555/01/09978713/1IXUnnVaWoE", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2023/05/10049680", "title": "Assisted walking-in-place: Introducing assisted motion to walking-by-cycling in embodied virtual reality", "doi": null, "abstractUrl": "/journal/tg/2023/05/10049680/1KYolEFtr6U", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vr/2019/1377/0/08798345", "title": "Investigation of Visual Self-Representation for a Walking-in-Place Navigation System in Virtual Reality", "doi": null, "abstractUrl": "/proceedings-article/vr/2019/08798345/1cJ1hpkUgHS", "parentPublication": { "id": "proceedings/vr/2019/1377/0", "title": "2019 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vrw/2021/4057/0/405700a498", "title": "SHeF-WIP: Walking-in-Place based on Step Height and Frequency for Wider Range of Virtual Speed", "doi": null, "abstractUrl": "/proceedings-article/vrw/2021/405700a498/1tnWFlvbESk", "parentPublication": { "id": "proceedings/vrw/2021/4057/0", "title": "2021 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vr/2021/1838/0/255600a438", "title": "Larger Step Faster Speed: Investigating Gesture-Amplitude-based Locomotion in Place 
with Different Virtual Walking Speed in Virtual Reality", "doi": null, "abstractUrl": "/proceedings-article/vr/2021/255600a438/1tuBuuWZCLe", "parentPublication": { "id": "proceedings/vr/2021/1838/0", "title": "2021 IEEE Virtual Reality and 3D User Interfaces (VR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "1tnWwqMuCzu", "title": "2021 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)", "acronym": "vrw", "groupId": "1836626", "volume": "0", "displayVolume": "0", "year": "2021", "__typename": "ProceedingType" }, "article": { "id": "1tnWFlvbESk", "doi": "10.1109/VRW52623.2021.00131", "title": "SHeF-WIP: Walking-in-Place based on Step Height and Frequency for Wider Range of Virtual Speed", "normalizedTitle": "SHeF-WIP: Walking-in-Place based on Step Height and Frequency for Wider Range of Virtual Speed", "abstract": "Walking-in-place (WIP) approaches face difficulties in reaching high locomotion speeds because of the required high step frequency, rapidly creating an awkward or risky experience for the user. In this paper, we introduce a novel WIP approach called Step-Height-and-Frequency (SHeF) WIP, which considers a second parameter, i.e., the step height, in addition to the step frequency, to better control the speed of advancement. We compared SHeF-WIP with a conventional WIP system in a user study conducted with 12 participants. Our results suggest that SHeF-WIP enabled them to reach higher virtual speeds (+80%) with more efficacy and ease.", "abstracts": [ { "abstractType": "Regular", "content": "Walking-in-place (WIP) approaches face difficulties in reaching high locomotion speeds because of the required high step frequency, rapidly creating an awkward or risky experience for the user. In this paper, we introduce a novel WIP approach called Step-Height-and-Frequency (SHeF) WIP, which considers a second parameter, i.e., the step height, in addition to the step frequency, to better control the speed of advancement. We compared SHeF-WIP with a conventional WIP system in a user study conducted with 12 participants. 
Our results suggest that SHeF-WIP enabled them to reach higher virtual speeds (+80%) with more efficacy and ease.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "Walking-in-place (WIP) approaches face difficulties in reaching high locomotion speeds because of the required high step frequency, rapidly creating an awkward or risky experience for the user. In this paper, we introduce a novel WIP approach called Step-Height-and-Frequency (SHeF) WIP, which considers a second parameter, i.e., the step height, in addition to the step frequency, to better control the speed of advancement. We compared SHeF-WIP with a conventional WIP system in a user study conducted with 12 participants. Our results suggest that SHeF-WIP enabled them to reach higher virtual speeds (+80%) with more efficacy and ease.", "fno": "405700a498", "keywords": [ "Gait Analysis", "Medical Computing", "Virtual Reality", "Step Height", "Walking In Place", "Locomotion Speeds", "Step Frequency", "Novel WIP Approach", "S He F WIP", "Conventional WIP System", "Virtual Speed", "Legged Locomotion", "Three Dimensional Displays", "Conferences", "Virtual Reality", "User Interfaces", "Frequency Control", "Faces", "Computer Graphics", "Three Dimensional Graphics And Realism", "Virtual Reality" ], "authors": [ { "affiliation": "The University of Tokyo", "fullName": "Yutaro Hirao", "givenName": "Yutaro", "surname": "Hirao", "__typename": "ArticleAuthorType" }, { "affiliation": "The University of Tokyo", "fullName": "Takuji Narumi", "givenName": "Takuji", "surname": "Narumi", "__typename": "ArticleAuthorType" }, { "affiliation": "Inria,Rennes,France", "fullName": "Ferran Argelaguet Sanz", "givenName": "Ferran", "surname": "Argelaguet Sanz", "__typename": "ArticleAuthorType" }, { "affiliation": "Inria,Rennes,France", "fullName": "Anatole Lécuyer", "givenName": "Anatole", "surname": "Lécuyer", "__typename": "ArticleAuthorType" } ], "idPrefix": "vrw", "isOpenAccess": false, "showRecommendedArticles": 
true, "showBuyMe": true, "hasPdf": true, "pubDate": "2021-03-01T00:00:00", "pubType": "proceedings", "pages": "498-499", "year": "2021", "issn": null, "isbn": "978-1-6654-4057-8", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [ { "id": "1tnWF79jc64", "name": "pvrw202140570-09419253s1-mm_405700a498.zip", "size": "34.6 MB", "location": "https://www.computer.org/csdl/api/v1/extra/pvrw202140570-09419253s1-mm_405700a498.zip", "__typename": "WebExtraType" } ], "adjacentArticles": { "previous": { "fno": "405700a496", "articleId": "1tnXzzRDZBu", "__typename": "AdjacentArticleType" }, "next": { "fno": "405700a500", "articleId": "1tnWZ1klXdS", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/vr/2010/6237/0/05444812", "title": "GUD WIP: Gait-Understanding-Driven Walking-In-Place", "doi": null, "abstractUrl": "/proceedings-article/vr/2010/05444812/12OmNAle6ku", "parentPublication": { "id": "proceedings/vr/2010/6237/0", "title": "2010 IEEE Virtual Reality Conference (VR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vr/2016/0836/0/07504772", "title": "Evaluating two alternative walking in place interfaces for virtual reality gaming", "doi": null, "abstractUrl": "/proceedings-article/vr/2016/07504772/12OmNCf1Dnb", "parentPublication": { "id": "proceedings/vr/2016/0836/0", "title": "2016 IEEE Virtual Reality (VR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/3dui/2008/2047/0/04476598", "title": "LLCM-WIP: Low-Latency, Continuous-Motion Walking-in-Place", "doi": null, "abstractUrl": "/proceedings-article/3dui/2008/04476598/12OmNyQYtvN", "parentPublication": { "id": "proceedings/3dui/2008/2047/0", "title": "2008 IEEE Symposium on 3D User Interfaces", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": 
"trans/tg/2014/04/ttg201404569", "title": "Establishing the Range of Perceptually Natural Visual Walking Speeds for Virtual Walking-In-Place Locomotion", "doi": null, "abstractUrl": "/journal/tg/2014/04/ttg201404569/13rRUxAASTb", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ismar-adjunct/2018/7592/0/08699289", "title": "Walking-in-Place for VR Navigation Independent of Gaze Direction Using a Waist-Worn Inertial Measurement Unit", "doi": null, "abstractUrl": "/proceedings-article/ismar-adjunct/2018/08699289/19F1PlWtKJa", "parentPublication": { "id": "proceedings/ismar-adjunct/2018/7592/0", "title": "2018 IEEE International Symposium on Mixed and Augmented Reality Adjunct (ISMAR-Adjunct)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/5555/01/09978713", "title": "Revisiting Walking-in-Place by Introducing Step-Height Control, Elastic Input, and Pseudo-Haptic Feedback", "doi": null, "abstractUrl": "/journal/tg/5555/01/09978713/1IXUnnVaWoE", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2023/05/10049680", "title": "Assisted walking-in-place: Introducing assisted motion to walking-by-cycling in embodied virtual reality", "doi": null, "abstractUrl": "/journal/tg/2023/05/10049680/1KYolEFtr6U", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vr/2019/1377/0/08798345", "title": "Investigation of Visual Self-Representation for a Walking-in-Place Navigation System in Virtual Reality", "doi": null, "abstractUrl": 
"/proceedings-article/vr/2019/08798345/1cJ1hpkUgHS", "parentPublication": { "id": "proceedings/vr/2019/1377/0", "title": "2019 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vr/2020/5608/0/09089561", "title": "Real Walking in Place: HEX-CORE-PROTOTYPE Omnidirectional Treadmill", "doi": null, "abstractUrl": "/proceedings-article/vr/2020/09089561/1jIxfncHjNe", "parentPublication": { "id": "proceedings/vr/2020/5608/0", "title": "2020 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vrw/2021/4057/0/405700a375", "title": "Direction change of redirected walking via a single shoe height change", "doi": null, "abstractUrl": "/proceedings-article/vrw/2021/405700a375/1tnXbVNaL9S", "parentPublication": { "id": "proceedings/vrw/2021/4057/0", "title": "2021 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "12OmNxdm4HV", "title": "Computers in Education, International Conference on", "acronym": "icce", "groupId": "1002161", "volume": "0", "displayVolume": "0", "year": "2002", "__typename": "ProceedingType" }, "article": { "id": "12OmNvT2oZw", "doi": "10.1109/CIE.2002.1186297", "title": "A Research on the Types of the Web Based Corrective Feedback", "normalizedTitle": "A Research on the Types of the Web Based Corrective Feedback", "abstract": "We have designed and realized a formation evaluation system which provide two types of corrective feedback to understand the difference in the study achievement degree and to find the best appropriate method in accordance with the types of the web-based corrective feedback at this point of time when the importance of feedback is being clearly brought out, as well as the formation evaluation, in the teaching-Iearning process. In this research work, we have designed and realized a corrective feedback which suggest similar questions repetitively' and a 'corrective feedback which suggest the results', leading to apply the two types to real lectures to verify the effects. It is expected that the two types can be used effectively in correcting students' errors and providing them with correct information in the web-based formation evaluation process.", "abstracts": [ { "abstractType": "Regular", "content": "We have designed and realized a formation evaluation system which provide two types of corrective feedback to understand the difference in the study achievement degree and to find the best appropriate method in accordance with the types of the web-based corrective feedback at this point of time when the importance of feedback is being clearly brought out, as well as the formation evaluation, in the teaching-Iearning process. 
In this research work, we have designed and realized a corrective feedback which suggest similar questions repetitively' and a 'corrective feedback which suggest the results', leading to apply the two types to real lectures to verify the effects. It is expected that the two types can be used effectively in correcting students' errors and providing them with correct information in the web-based formation evaluation process.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "We have designed and realized a formation evaluation system which provide two types of corrective feedback to understand the difference in the study achievement degree and to find the best appropriate method in accordance with the types of the web-based corrective feedback at this point of time when the importance of feedback is being clearly brought out, as well as the formation evaluation, in the teaching-Iearning process. In this research work, we have designed and realized a corrective feedback which suggest similar questions repetitively' and a 'corrective feedback which suggest the results', leading to apply the two types to real lectures to verify the effects. 
It is expected that the two types can be used effectively in correcting students' errors and providing them with correct information in the web-based formation evaluation process.", "fno": "15091459", "keywords": [], "authors": [ { "affiliation": "Korea National University of Education", "fullName": "JangHyeon Baek", "givenName": "JangHyeon", "surname": "Baek", "__typename": "ArticleAuthorType" }, { "affiliation": "Korea National University of Education", "fullName": "Sehee Jang", "givenName": "Sehee", "surname": "Jang", "__typename": "ArticleAuthorType" }, { "affiliation": "Korea National University of Education", "fullName": "Yungsik Kim", "givenName": "Yungsik", "surname": "Kim", "__typename": "ArticleAuthorType" } ], "idPrefix": "icce", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "2002-12-01T00:00:00", "pubType": "proceedings", "pages": "1459", "year": "2002", "issn": null, "isbn": "0-7695-1509-6", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "15091457", "articleId": "12OmNxT56Em", "__typename": "AdjacentArticleType" }, "next": { "fno": "15091461", "articleId": "12OmNBVrjoM", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/iceccs/2001/1159/0/11590112", "title": "Motivating the Corrective Maintenance Maturity Model (CM3)", "doi": null, "abstractUrl": "/proceedings-article/iceccs/2001/11590112/12OmNBQC86n", "parentPublication": { "id": "proceedings/iceccs/2001/1159/0", "title": "Engineering of Complex Computer Systems, IEEE International Conference on", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vr/2017/6647/0/07892292", "title": "Corrective feedback for depth perception in CAVE-like systems", "doi": null, "abstractUrl": "/proceedings-article/vr/2017/07892292/12OmNrNh0Ml", "parentPublication": { 
"id": "proceedings/vr/2017/6647/0", "title": "2017 IEEE Virtual Reality (VR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icsm/2010/8630/0/05609538", "title": "Cost drivers of software corrective maintenance: An empirical study in two companies", "doi": null, "abstractUrl": "/proceedings-article/icsm/2010/05609538/12OmNzFdt9D", "parentPublication": { "id": "proceedings/icsm/2010/8630/0", "title": "2010 IEEE International Conference on Software Maintenance", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/msr/2022/9303/0/930300a687", "title": "Extracting Corrective Actions from Code Repositories", "doi": null, "abstractUrl": "/proceedings-article/msr/2022/930300a687/1Eo5TFTFGW4", "parentPublication": { "id": "proceedings/msr/2022/9303/0", "title": "2022 IEEE/ACM 19th International Conference on Mining Software Repositories (MSR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icalt/2022/9519/0/951900a248", "title": "Towards A Vocalization Feedback Pipeline for Language Learners", "doi": null, "abstractUrl": "/proceedings-article/icalt/2022/951900a248/1FUUlpjgaRO", "parentPublication": { "id": "proceedings/icalt/2022/9519/0", "title": "2022 International Conference on Advanced Learning Technologies (ICALT)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/5555/01/09978915", "title": "Visual Cue Based Corrective Feedback for Motor Skill Training in Mixed Reality: A Survey", "doi": null, "abstractUrl": "/journal/tg/5555/01/09978915/1IXUnNBj0Yw", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/chase/2022/9476/0/947600a138", "title": "Poster: Corrective Real-Time Feedback for Smartwatch Devices using 
Quaternion Manipulation", "doi": null, "abstractUrl": "/proceedings-article/chase/2022/947600a138/1Jjyl47NJ6M", "parentPublication": { "id": "proceedings/chase/2022/9476/0", "title": "2022 IEEE/ACM Conference on Connected Health: Applications, Systems and Engineering Technologies (CHASE)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/big-data/2022/8045/0/10020560", "title": "Improving Character Recognition by the Crowd Workers via Corrective Feedback", "doi": null, "abstractUrl": "/proceedings-article/big-data/2022/10020560/1KfRYRYAPIc", "parentPublication": { "id": "proceedings/big-data/2022/8045/0", "title": "2022 IEEE International Conference on Big Data (Big Data)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icalt/2019/3485/0/348500a258", "title": "Categorizing Software Feedback in Current Language Software", "doi": null, "abstractUrl": "/proceedings-article/icalt/2019/348500a258/1cYi5Arc2AM", "parentPublication": { "id": "proceedings/icalt/2019/3485/2161-377X", "title": "2019 IEEE 19th International Conference on Advanced Learning Technologies (ICALT)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iv/2020/9134/0/913400a397", "title": "Evaluating Query Strategies for Different Feedback Types in Interactive View Recommendation", "doi": null, "abstractUrl": "/proceedings-article/iv/2020/913400a397/1rSR7WElAre", "parentPublication": { "id": "proceedings/iv/2020/9134/0", "title": "2020 24th International Conference Information Visualisation (IV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "12OmNySXF2u", "title": "Advanced Learning Technologies, IEEE International Conference on", "acronym": "icalt", "groupId": "1000009", "volume": "0", "displayVolume": "0", "year": "2012", "__typename": "ProceedingType" }, "article": { "id": "12OmNzIUfYD", "doi": "10.1109/ICALT.2012.215", "title": "Feedback in the Motor Skill Domain", "normalizedTitle": "Feedback in the Motor Skill Domain", "abstract": "Feedback is an important ingredient to enhance athletes' skill acquisition. Currently, feedback design in the motor skill domain via Computer-based Sport Training (CBST) can be categorized as: (1) feedback content, such as speed, accuracy, movement, time, and reaction time, (2) providing athletes with access to their feedback via an appropriate user interface, and (3) feedback modality, such as visual, audio, tactile, and haptic. The current feedback is led by technology and does not explicitly address the pedagogical issues on the achievement of intended training outcomes. To overcome the limitations of current feedback design in CBST, this paper suggests the design of feedback that is based on a pedagogical approach.", "abstracts": [ { "abstractType": "Regular", "content": "Feedback is an important ingredient to enhance athletes' skill acquisition. Currently, feedback design in the motor skill domain via Computer-based Sport Training (CBST) can be categorized as: (1) feedback content, such as speed, accuracy, movement, time, and reaction time, (2) providing athletes with access to their feedback via an appropriate user interface, and (3) feedback modality, such as visual, audio, tactile, and haptic. The current feedback is led by technology and does not explicitly address the pedagogical issues on the achievement of intended training outcomes. 
To overcome the limitations of current feedback design in CBST, this paper suggests the design of feedback that is based on a pedagogical approach.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "Feedback is an important ingredient to enhance athletes' skill acquisition. Currently, feedback design in the motor skill domain via Computer-based Sport Training (CBST) can be categorized as: (1) feedback content, such as speed, accuracy, movement, time, and reaction time, (2) providing athletes with access to their feedback via an appropriate user interface, and (3) feedback modality, such as visual, audio, tactile, and haptic. The current feedback is led by technology and does not explicitly address the pedagogical issues on the achievement of intended training outcomes. To overcome the limitations of current feedback design in CBST, this paper suggests the design of feedback that is based on a pedagogical approach.", "fno": "4702a332", "keywords": [ "Training", "IP Networks", "Visualization", "Context", "Educational Institutions", "Monitoring", "Graphical User Interfaces", "Computer Based Sport Training", "Feedback", "Motor Skill Domain" ], "authors": [ { "affiliation": null, "fullName": "Yulita Hanum P. Iskandar", "givenName": "Yulita Hanum P.", "surname": "Iskandar", "__typename": "ArticleAuthorType" }, { "affiliation": null, "fullName": "Lester Gilbert", "givenName": "Lester", "surname": "Gilbert", "__typename": "ArticleAuthorType" }, { "affiliation": null, "fullName": "Gary B. 
Wills", "givenName": "Gary B.", "surname": "Wills", "__typename": "ArticleAuthorType" } ], "idPrefix": "icalt", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "2012-07-01T00:00:00", "pubType": "proceedings", "pages": "332-336", "year": "2012", "issn": null, "isbn": "978-1-4673-1642-2", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "4702a329", "articleId": "12OmNAL3B8F", "__typename": "AdjacentArticleType" }, "next": { "fno": "4702a340", "articleId": "12OmNrGb2fE", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/kam/2009/3888/2/3888b115", "title": "Skill Premium and Wage Differences: The Case of China", "doi": null, "abstractUrl": "/proceedings-article/kam/2009/3888b115/12OmNBOllkv", "parentPublication": { "id": "proceedings/kam/2009/3888/2", "title": "Knowledge Acquisition and Modeling, International Symposium on", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/whc/2007/2738/0/04145145", "title": "Haptic Feedback Enhances Force Skill Learning", "doi": null, "abstractUrl": "/proceedings-article/whc/2007/04145145/12OmNrNh0Ci", "parentPublication": { "id": "proceedings/whc/2007/2738/0", "title": "2007 2nd Joint EuroHaptics Conference and Symposium on Haptic Interfaces for Virtual Environments and Teleoperator Systems", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/haptics/2006/0226/0/02260069", "title": "Haptic Attributes and Human Motor Skills", "doi": null, "abstractUrl": "/proceedings-article/haptics/2006/02260069/12OmNvSKNDj", "parentPublication": { "id": "proceedings/haptics/2006/0226/0", "title": "2006 14th Symposium on Haptic Interfaces for Virtual Environment and Teleoperator Systems", "__typename": "ParentPublication" }, "__typename": 
"RecommendedArticleType" }, { "id": "proceedings/icalt/2009/3711/0/3711a037", "title": "A Framework for Pedagogical Feedback in the Motor Skill Domain", "doi": null, "abstractUrl": "/proceedings-article/icalt/2009/3711a037/12OmNwM6A1a", "parentPublication": { "id": "proceedings/icalt/2009/3711/0", "title": "Advanced Learning Technologies, IEEE International Conference on", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icalt/2007/2916/0/29160723", "title": "The Analysis of Simpson?s Psychomotor Domain Educational Objectives and its Application on the Skill Evaluation for the Department of Computer Engineering at Vocational School", "doi": null, "abstractUrl": "/proceedings-article/icalt/2007/29160723/12OmNwkzups", "parentPublication": { "id": "proceedings/icalt/2007/2916/0", "title": "2007 International Conference on Advanced Learning Technologies", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icm/2011/4522/3/4522c394", "title": "Operational Skill Training Needs Analysis for Manufacturing Industry", "doi": null, "abstractUrl": "/proceedings-article/icm/2011/4522c394/12OmNzw8j16", "parentPublication": { "id": "proceedings/icm/2011/4522/3", "title": "Information Technology, Computer Engineering and Management Sciences, International Conference of", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "mags/pc/2006/04/b4070", "title": "Rapid Feedback Systems for Elite Sports Training", "doi": null, "abstractUrl": "/magazine/pc/2006/04/b4070/13rRUILc8cL", "parentPublication": { "id": "mags/pc", "title": "IEEE Pervasive Computing", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/th/2017/03/07862265", "title": "Effects of Concurrent and Delayed Visual Feedback on Motor Memory Consolidation", "doi": null, "abstractUrl": "/journal/th/2017/03/07862265/13rRUwI5U2V", "parentPublication": { 
"id": "trans/th", "title": "IEEE Transactions on Haptics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "mags/mu/2015/01/mmu2015010058", "title": "Interactive Sonification in Rowing: Acoustic Feedback for On-Water Training", "doi": null, "abstractUrl": "/magazine/mu/2015/01/mmu2015010058/13rRUxjyX13", "parentPublication": { "id": "mags/mu", "title": "IEEE MultiMedia", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/5555/01/09978915", "title": "Visual Cue Based Corrective Feedback for Motor Skill Training in Mixed Reality: A Survey", "doi": null, "abstractUrl": "/journal/tg/5555/01/09978915/1IXUnNBj0Yw", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "19F1LC52tjO", "title": "2018 IEEE International Symposium on Mixed and Augmented Reality Adjunct (ISMAR-Adjunct)", "acronym": "ismar-adjunct", "groupId": "1810084", "volume": "0", "displayVolume": "0", "year": "2018", "__typename": "ProceedingType" }, "article": { "id": "19F1OQnjwSk", "doi": "10.1109/ISMAR-Adjunct.2018.00070", "title": "The Deployment of a Mixed Reality Experience for a Small-Scale Exhibition in the Wild", "normalizedTitle": "The Deployment of a Mixed Reality Experience for a Small-Scale Exhibition in the Wild", "abstract": "Museums and exhibitions often present physical artefacts which may contain rich histories or deep meaning associated with them. These additional contents are often installed physically as informational panels shown on the wall. However, these may sometimes be challenging to deploy due to space constraints. In order to address this challenge, we introduce the use of mixed reality. Mixed reality offers an immersive and interactive experience through the use of head mounted displays and in-air gestures. Visitors can discover additional content virtually, without changing the physical space. For a small-scale exhibition at a cafe, we developed a Microsoft HoloLens application to create an interactive experience on top of a collection of historic physical items. Through public experiences at the café, we received positive feedback of our system. In this paper, we discuss the design and implications of our system, survey results, as well as challenges that were encountered in deploying our mixed reality experience in a public setting.", "abstracts": [ { "abstractType": "Regular", "content": "Museums and exhibitions often present physical artefacts which may contain rich histories or deep meaning associated with them. These additional contents are often installed physically as informational panels shown on the wall. However, these may sometimes be challenging to deploy due to space constraints. 
In order to address this challenge, we introduce the use of mixed reality. Mixed reality offers an immersive and interactive experience through the use of head mounted displays and in-air gestures. Visitors can discover additional content virtually, without changing the physical space. For a small-scale exhibition at a cafe, we developed a Microsoft HoloLens application to create an interactive experience on top of a collection of historic physical items. Through public experiences at the café, we received positive feedback of our system. In this paper, we discuss the design and implications of our system, survey results, as well as challenges that were encountered in deploying our mixed reality experience in a public setting.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "Museums and exhibitions often present physical artefacts which may contain rich histories or deep meaning associated with them. These additional contents are often installed physically as informational panels shown on the wall. However, these may sometimes be challenging to deploy due to space constraints. In order to address this challenge, we introduce the use of mixed reality. Mixed reality offers an immersive and interactive experience through the use of head mounted displays and in-air gestures. Visitors can discover additional content virtually, without changing the physical space. For a small-scale exhibition at a cafe, we developed a Microsoft HoloLens application to create an interactive experience on top of a collection of historic physical items. Through public experiences at the café, we received positive feedback of our system. 
In this paper, we discuss the design and implications of our system, survey results, as well as challenges that were encountered in deploying our mixed reality experience in a public setting.", "fno": "08699334", "keywords": [ "Augmented Reality", "Exhibitions", "Helmet Mounted Displays", "History", "Museums", "Immersive Experience", "Interactive Experience", "Physical Space", "Small Scale Exhibition", "Historic Physical Items", "Mixed Reality Experience", "Exhibitions", "Physical Artefacts", "Informational Panels", "Space Constraints", "Museums", "Head Mounted Displays", "In Air Gestures", "Microsoft Holo Lens", "Sports", "Organizations", "Visualization", "Augmented Reality", "History", "Videos", "Mixed Reality", "Exhibition", "Public Deployment", "H 5 1 Information Interfaces And Presentation Multimedia Information Systems X 2014 Artificial Augmented Virtual And Mixed Realities" ], "authors": [ { "affiliation": "Rakuten, Inc., Rakuten Institute of Technology, Japan", "fullName": "Kelvin Cheng", "givenName": "Kelvin", "surname": "Cheng", "__typename": "ArticleAuthorType" }, { "affiliation": "Marketing & UX CoE Dept., Rakuten, Inc., Japan", "fullName": "Ichiro Furusawa", "givenName": "Ichiro", "surname": "Furusawa", "__typename": "ArticleAuthorType" } ], "idPrefix": "ismar-adjunct", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "2018-10-01T00:00:00", "pubType": "proceedings", "pages": "214-215", "year": "2018", "issn": null, "isbn": "978-1-5386-7592-2", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "08699245", "articleId": "19F1QUyCKEE", "__typename": "AdjacentArticleType" }, "next": { "fno": "08699229", "articleId": "19F1LS1YWuA", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/ismar/2002/1781/0/17810059", "title": "Interactive Theatre Experience in 
Embodied + Wearable Mixed Reality Space", "doi": null, "abstractUrl": "/proceedings-article/ismar/2002/17810059/12OmNs5rl02", "parentPublication": { "id": "proceedings/ismar/2002/1781/0", "title": "Proceedings. International Symposium on Mixed and Augmented Reality", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ismar-amh/2012/4663/0/06483999", "title": "When AR meets food: A structural overview of the research space on multi-facets of food", "doi": null, "abstractUrl": "/proceedings-article/ismar-amh/2012/06483999/12OmNvStcxG", "parentPublication": { "id": "proceedings/ismar-amh/2012/4663/0", "title": "2012 IEEE International Symposium on Mixed and Augmented Reality - Arts, Media, and Humanities (ISMAR-AMH)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvvrhc/1998/8283/0/82830078", "title": "Vision and Graphics in Producing Mixed Reality Worlds", "doi": null, "abstractUrl": "/proceedings-article/cvvrhc/1998/82830078/12OmNylbov1", "parentPublication": { "id": "proceedings/cvvrhc/1998/8283/0", "title": "Computer Vision for Virtual Reality Based Human Communications, Workshop on", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ismar-adjunct/2018/7592/0/08699173", "title": "Opportunities for Virtual and Mixed Reality Knowledge Demonstration", "doi": null, "abstractUrl": "/proceedings-article/ismar-adjunct/2018/08699173/19F1Ptshvb2", "parentPublication": { "id": "proceedings/ismar-adjunct/2018/7592/0", "title": "2018 IEEE International Symposium on Mixed and Augmented Reality Adjunct (ISMAR-Adjunct)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ismar-adjunct/2018/7592/0/08699316", "title": "Inverse Augmented Reality: A Virtual Agent's Perspective", "doi": null, "abstractUrl": "/proceedings-article/ismar-adjunct/2018/08699316/19F1UA1hw40", 
"parentPublication": { "id": "proceedings/ismar-adjunct/2018/7592/0", "title": "2018 IEEE International Symposium on Mixed and Augmented Reality Adjunct (ISMAR-Adjunct)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/5555/01/09978915", "title": "Visual Cue Based Corrective Feedback for Motor Skill Training in Mixed Reality: A Survey", "doi": null, "abstractUrl": "/journal/tg/5555/01/09978915/1IXUnNBj0Yw", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ismar-adjunct/2022/5365/0/536500a786", "title": "Reimagining the Stadium Spectator Experience using Augmented Reality and Visual Positioning System", "doi": null, "abstractUrl": "/proceedings-article/ismar-adjunct/2022/536500a786/1J7W91pf7ry", "parentPublication": { "id": "proceedings/ismar-adjunct/2022/5365/0", "title": "2022 IEEE International Symposium on Mixed and Augmented Reality Adjunct (ISMAR-Adjunct)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ismar-adjunct/2022/5365/0/536500a891", "title": "MR-FoodCoach: Enabling a convenience store on mixed reality space for healthier purchases", "doi": null, "abstractUrl": "/proceedings-article/ismar-adjunct/2022/536500a891/1J7WnK9PRxS", "parentPublication": { "id": "proceedings/ismar-adjunct/2022/5365/0", "title": "2022 IEEE International Symposium on Mixed and Augmented Reality Adjunct (ISMAR-Adjunct)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vr/2019/1377/0/08797806", "title": "Star Tag: A superhuman sport to promote physical activity", "doi": null, "abstractUrl": "/proceedings-article/vr/2019/08797806/1cJ0SbGoFTa", "parentPublication": { "id": "proceedings/vr/2019/1377/0", "title": "2019 IEEE Conference on Virtual Reality and 3D User Interfaces 
(VR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ismar-adjunct/2020/7675/0/767500a237", "title": "Pleistocene Crete: A narrative, interactive mixed reality exhibition that brings prehistoric wildlife back to life", "doi": null, "abstractUrl": "/proceedings-article/ismar-adjunct/2020/767500a237/1pBMhErKmli", "parentPublication": { "id": "proceedings/ismar-adjunct/2020/7675/0", "title": "2020 IEEE International Symposium on Mixed and Augmented Reality Adjunct (ISMAR-Adjunct)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "1yfxDjRGMmc", "title": "2021 IEEE International Symposium on Mixed and Augmented Reality Adjunct (ISMAR-Adjunct)", "acronym": "ismar-adjunct", "groupId": "1810084", "volume": "0", "displayVolume": "0", "year": "2021", "__typename": "ProceedingType" }, "article": { "id": "1yeQDGki96U", "doi": "10.1109/ISMAR-Adjunct54149.2021.00088", "title": "Watch-Your-Skiing: Visualizations for VR Skiing using Real-time Body Tracking", "normalizedTitle": "Watch-Your-Skiing: Visualizations for VR Skiing using Real-time Body Tracking", "abstract": "Correcting one’s body posture is necessary when acquiring specific skills, especially for some sports such as skiing or gymnastics. However, it is difficult to observe our posture objectively, which is the reason why a trainer is required. In this paper, we introduce a VR ski training system using full body motion capture to provide real-time feedback for the user. Two types of different visual cues are developed and qualitatively compared in a user study. This system opens the opportunity to learn alpine skiing by oneself and also has a potential to be applied to other sports or skill acquisition.", "abstracts": [ { "abstractType": "Regular", "content": "Correcting one’s body posture is necessary when acquiring specific skills, especially for some sports such as skiing or gymnastics. However, it is difficult to observe our posture objectively, which is the reason why a trainer is required. In this paper, we introduce a VR ski training system using full body motion capture to provide real-time feedback for the user. Two types of different visual cues are developed and qualitatively compared in a user study. 
This system opens the opportunity to learn alpine skiing by oneself and also has a potential to be applied to other sports or skill acquisition.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "Correcting one’s body posture is necessary when acquiring specific skills, especially for some sports such as skiing or gymnastics. However, it is difficult to observe our posture objectively, which is the reason why a trainer is required. In this paper, we introduce a VR ski training system using full body motion capture to provide real-time feedback for the user. Two types of different visual cues are developed and qualitatively compared in a user study. This system opens the opportunity to learn alpine skiing by oneself and also has a potential to be applied to other sports or skill acquisition.", "fno": "129800a387", "keywords": [ "Biomechanics", "Image Motion Analysis", "Sport", "Virtual Reality", "Real Time Feedback", "Different Visual Cues", "Alpine Skiing", "Sports", "Skill Acquisition", "Watch Your Skiing", "VR Skiing", "Real Time Body Tracking", "Body Posture", "Specific Skills", "Posture Objectively", "VR Ski Training System", "Body Motion Capture", "Training", "Visualization", "Design Methodology", "Real Time Systems", "Augmented Reality", "Sports", "Human Centered Computing", "Human Computer Interaction HCI", "HCI Design And Evaluation Methods", "Computing Methodologies", "Computer Graphics", "Graphics Systems And Interfaces", "Virtual Reality" ], "authors": [ { "affiliation": "Tokyo Institute of Technology", "fullName": "Xuan Zhang", "givenName": "Xuan", "surname": "Zhang", "__typename": "ArticleAuthorType" }, { "affiliation": "Tokyo Institute of Technology", "fullName": "Erwin Wu", "givenName": "Erwin", "surname": "Wu", "__typename": "ArticleAuthorType" }, { "affiliation": "Tokyo Institute of Technology", "fullName": "Hideki Koike", "givenName": "Hideki", "surname": "Koike", "__typename": "ArticleAuthorType" } ], "idPrefix": "ismar-adjunct", 
"isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "2021-10-01T00:00:00", "pubType": "proceedings", "pages": "387-388", "year": "2021", "issn": null, "isbn": "978-1-6654-1298-8", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "129800a384", "articleId": "1yeQWO0csfe", "__typename": "AdjacentArticleType" }, "next": { "fno": "129800a389", "articleId": "1yeQD2GH6Wk", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/lcn/2010/8387/0/p244kurusingal", "title": "Modeling signal strength of body-worn devices", "doi": null, "abstractUrl": "/proceedings-article/lcn/2010/p244kurusingal/12OmNvA1hhZ", "parentPublication": { "id": "proceedings/lcn/2010/8387/0", "title": "IEEE Local Computer Network Conference", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iiki/2016/5952/0/5952a122", "title": "SmartSKI: Application of Sensors Integrated into Sport Equipment", "doi": null, "abstractUrl": "/proceedings-article/iiki/2016/5952a122/12OmNxwWor2", "parentPublication": { "id": "proceedings/iiki/2016/5952/0", "title": "2016 International Conference on Identification, Information and Knowledge in the Internet of Things (IIKI)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icme/2011/348/0/06011840", "title": "Extraction and representation of human body for pitching style recognition in broadcast baseball video", "doi": null, "abstractUrl": "/proceedings-article/icme/2011/06011840/12OmNzd7bDp", "parentPublication": { "id": "proceedings/icme/2011/348/0", "title": "2011 IEEE International Conference on Multimedia and Expo", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vrw/2022/8402/0/840200a570", "title": "Augmenting VR Ski Training 
using Time Distortion", "doi": null, "abstractUrl": "/proceedings-article/vrw/2022/840200a570/1CJd7kuE86c", "parentPublication": { "id": "proceedings/vrw/2022/8402/0", "title": "2022 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ismar-adjunct/2022/5365/0/536500a786", "title": "Reimagining the Stadium Spectator Experience using Augmented Reality and Visual Positioning System", "doi": null, "abstractUrl": "/proceedings-article/ismar-adjunct/2022/536500a786/1J7W91pf7ry", "parentPublication": { "id": "proceedings/ismar-adjunct/2022/5365/0", "title": "2022 IEEE International Symposium on Mixed and Augmented Reality Adjunct (ISMAR-Adjunct)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ismar-adjunct/2022/5365/0/536500a605", "title": "A Context-aware Interface for Immersive Sports Spectating", "doi": null, "abstractUrl": "/proceedings-article/ismar-adjunct/2022/536500a605/1J7WeVJrh5K", "parentPublication": { "id": "proceedings/ismar-adjunct/2022/5365/0", "title": "2022 IEEE International Symposium on Mixed and Augmented Reality Adjunct (ISMAR-Adjunct)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vr/2019/1377/0/08797717", "title": "VR Ski Coach: Indoor Ski Training System Visualizing Difference from Leading Skier", "doi": null, "abstractUrl": "/proceedings-article/vr/2019/08797717/1cJ1cNPz0eA", "parentPublication": { "id": "proceedings/vr/2019/1377/0", "title": "2019 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/3dv/2019/3131/0/313100a308", "title": "Motion Capture from Pan-Tilt Cameras with Unknown Orientation", "doi": null, "abstractUrl": "/proceedings-article/3dv/2019/313100a308/1ezRBTghOZq", 
"parentPublication": { "id": "proceedings/3dv/2019/3131/0", "title": "2019 International Conference on 3D Vision (3DV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ismar-adjunct/2020/7675/0/767500a195", "title": "EmnDash: M-sequence Dashed Markers on Vector-based Laser Projection for Robust High-speed Spatial Tracking", "doi": null, "abstractUrl": "/proceedings-article/ismar-adjunct/2020/767500a195/1pBMhHIpXdm", "parentPublication": { "id": "proceedings/ismar-adjunct/2020/7675/0", "title": "2020 IEEE International Symposium on Mixed and Augmented Reality Adjunct (ISMAR-Adjunct)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2022/12/09446582", "title": "Performance Improvement and Skill Transfer in Table Tennis Through Training in Virtual Reality", "doi": null, "abstractUrl": "/journal/tg/2022/12/09446582/1u8lz4qWghi", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "1H1gVMlkl32", "title": "2022 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)", "acronym": "cvpr", "groupId": "1000147", "volume": "0", "displayVolume": "0", "year": "2022", "__typename": "ProceedingType" }, "article": { "id": "1H0L0Gta396", "doi": "10.1109/CVPR52688.2022.00761", "title": "Modeling Image Composition for Complex Scene Generation", "normalizedTitle": "Modeling Image Composition for Complex Scene Generation", "abstract": "We present a method that achieves state-of-the-art results on challenging (few-shot) layout-to-image generation tasks by accurately modeling textures, structures and relationships contained in a complex scene. After compressing RGB images into patch tokens, we propose the Transformer with Focal Attention (TwFA) for exploring dependencies of object-to-object, object-to-patch and patch-to-patch. Compared to existing CNN-based and Transformer-based generation models that entangled modeling on pixel-level&patch-level and object-level&patch-level respectively, the proposed focal attention predicts the current patch token by only focusing on its highly-related tokens that specified by the spatial layout, thereby achieving disambiguation during training. Furthermore, the proposed TwFA largely increases the data efficiency during training, therefore we propose the first few-shot complex scene generation strategy based on the well-trained TwFA. Comprehensive experiments show the superiority of our method, which significantly increases both quantitative metrics and qualitative visual realism with respect to state-of-the-art CNN-based and transformer-based methods. Code is available at https://github.com/JohnDreamer/TwFA.", "abstracts": [ { "abstractType": "Regular", "content": "We present a method that achieves state-of-the-art results on challenging (few-shot) layout-to-image generation tasks by accurately modeling textures, structures and relationships contained in a complex scene. 
After compressing RGB images into patch tokens, we propose the Transformer with Focal Attention (TwFA) for exploring dependencies of object-to-object, object-to-patch and patch-to-patch. Compared to existing CNN-based and Transformer-based generation models that entangled modeling on pixel-level&patch-level and object-level&patch-level respectively, the proposed focal attention predicts the current patch token by only focusing on its highly-related tokens that specified by the spatial layout, thereby achieving disambiguation during training. Furthermore, the proposed TwFA largely increases the data efficiency during training, therefore we propose the first few-shot complex scene generation strategy based on the well-trained TwFA. Comprehensive experiments show the superiority of our method, which significantly increases both quantitative metrics and qualitative visual realism with respect to state-of-the-art CNN-based and transformer-based methods. Code is available at https://github.com/JohnDreamer/TwFA.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "We present a method that achieves state-of-the-art results on challenging (few-shot) layout-to-image generation tasks by accurately modeling textures, structures and relationships contained in a complex scene. After compressing RGB images into patch tokens, we propose the Transformer with Focal Attention (TwFA) for exploring dependencies of object-to-object, object-to-patch and patch-to-patch. Compared to existing CNN-based and Transformer-based generation models that entangled modeling on pixel-level&patch-level and object-level&patch-level respectively, the proposed focal attention predicts the current patch token by only focusing on its highly-related tokens that specified by the spatial layout, thereby achieving disambiguation during training. 
Furthermore, the proposed TwFA largely increases the data efficiency during training, therefore we propose the first few-shot complex scene generation strategy based on the well-trained TwFA. Comprehensive experiments show the superiority of our method, which significantly increases both quantitative metrics and qualitative visual realism with respect to state-of-the-art CNN-based and transformer-based methods. Code is available at https://github.com/JohnDreamer/TwFA.", "fno": "694600h754", "keywords": [ "Convolutional Neural Nets", "Image Colour Analysis", "Image Representation", "Image Sampling", "Image Segmentation", "Image Texture", "Learning Artificial Intelligence", "Image Composition", "Image Generation Tasks", "RGB Images", "Patch Tokens", "CNN Based", "Few Shot Complex Scene Generation Strategy", "Transformer Based Generation Models", "Transformer With Focal Attention", "Tw FA", "Training", "Measurement", "Visualization", "Image Coding", "Layout", "Genomics", "Predictive Models" ], "authors": [ { "affiliation": "Shanghai JiaoTong University", "fullName": "Zuopeng Yang", "givenName": "Zuopeng", "surname": "Yang", "__typename": "ArticleAuthorType" }, { "affiliation": "JD Explore Academy, JD.com", "fullName": "Daqing Liu", "givenName": "Daqing", "surname": "Liu", "__typename": "ArticleAuthorType" }, { "affiliation": "The University of Sydney", "fullName": "Chaoyue Wang", "givenName": "Chaoyue", "surname": "Wang", "__typename": "ArticleAuthorType" }, { "affiliation": "Shanghai JiaoTong University", "fullName": "Jie Yang", "givenName": "Jie", "surname": "Yang", "__typename": "ArticleAuthorType" }, { "affiliation": "JD Explore Academy, JD.com", "fullName": "Dacheng Tao", "givenName": "Dacheng", "surname": "Tao", "__typename": "ArticleAuthorType" } ], "idPrefix": "cvpr", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "2022-06-01T00:00:00", "pubType": "proceedings", "pages": "7754-7763", "year": "2022", 
"issn": null, "isbn": "978-1-6654-6946-3", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [ { "id": "1H0L0BHXdS0", "name": "pcvpr202269460-09880418s1-mm_694600h754.zip", "size": "8.45 MB", "location": "https://www.computer.org/csdl/api/v1/extra/pcvpr202269460-09880418s1-mm_694600h754.zip", "__typename": "WebExtraType" } ], "adjacentArticles": { "previous": { "fno": "694600h744", "articleId": "1H1mP9xtjZC", "__typename": "AdjacentArticleType" }, "next": { "fno": "694600h764", "articleId": "1H1mMvyIwaQ", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/iccv/2021/2812/0/281200c866", "title": "TS-CAM: Token Semantic Coupled Attention Map for Weakly Supervised Object Localization", "doi": null, "abstractUrl": "/proceedings-article/iccv/2021/281200c866/1BmGioA2Ua4", "parentPublication": { "id": "proceedings/iccv/2021/2812/0", "title": "2021 IEEE/CVF International Conference on Computer Vision (ICCV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icme/2022/8563/0/09859841", "title": "High-Quality Image Generation from Scene Graphs with Transformer", "doi": null, "abstractUrl": "/proceedings-article/icme/2022/09859841/1G9DSypmuNa", "parentPublication": { "id": "proceedings/icme/2022/8563/0", "title": "2022 IEEE International Conference on Multimedia and Expo (ICME)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvpr/2022/6946/0/6.946E305", "title": "Multi-class Token Transformer for Weakly Supervised Semantic Segmentation", "doi": null, "abstractUrl": "/proceedings-article/cvpr/2022/6.946E305/1H1iA9i6LD2", "parentPublication": { "id": "proceedings/cvpr/2022/6946/0", "title": "2022 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": 
"proceedings/cvpr/2022/6946/0/694600s8092", "title": "Text-to-Image Synthesis based on Object-Guided Joint-Decoding Transformer", "doi": null, "abstractUrl": "/proceedings-article/cvpr/2022/694600s8092/1H1l7q04Gac", "parentPublication": { "id": "proceedings/cvpr/2022/6946/0", "title": "2022 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icpr/2022/9062/0/09956620", "title": "Transforming Image Generation from Scene Graphs", "doi": null, "abstractUrl": "/proceedings-article/icpr/2022/09956620/1IHoZ7KWn96", "parentPublication": { "id": "proceedings/icpr/2022/9062/0", "title": "2022 26th International Conference on Pattern Recognition (ICPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icpr/2022/9062/0/09956599", "title": "Transformer-based Scene Graph Generation Network With Relational Attention Module", "doi": null, "abstractUrl": "/proceedings-article/icpr/2022/09956599/1IHpdpALly8", "parentPublication": { "id": "proceedings/icpr/2022/9062/0", "title": "2022 26th International Conference on Pattern Recognition (ICPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/5555/01/09973820", "title": "SAC-GAN: Structure-Aware Image Composition", "doi": null, "abstractUrl": "/journal/tg/5555/01/09973820/1IUAPHcYiD6", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tp/5555/01/10105507", "title": "RelTR: Relation Transformer for Scene Graph Generation", "doi": null, "abstractUrl": "/journal/tp/5555/01/10105507/1MtgpPN7eBq", "parentPublication": { "id": "trans/tp", "title": "IEEE Transactions on Pattern Analysis & Machine Intelligence", "__typename": "ParentPublication" }, "__typename": 
"RecommendedArticleType" }, { "id": "proceedings/cvprw/2021/4899/0/489900c150", "title": "BGT-Net: Bidirectional GRU Transformer Network for Scene Graph Generation", "doi": null, "abstractUrl": "/proceedings-article/cvprw/2021/489900c150/1yJYlKsMo3C", "parentPublication": { "id": "proceedings/cvprw/2021/4899/0", "title": "2021 IEEE/CVF Conference on Computer Vision and Pattern Recognition Workshops (CVPRW)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvpr/2021/4509/0/450900d731", "title": "LayoutTransformer: Scene Layout Generation with Conceptual and Spatial Diversity", "doi": null, "abstractUrl": "/proceedings-article/cvpr/2021/450900d731/1yeKJqXSUh2", "parentPublication": { "id": "proceedings/cvpr/2021/4509/0", "title": "2021 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "1H1gVMlkl32", "title": "2022 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)", "acronym": "cvpr", "groupId": "1000147", "volume": "0", "displayVolume": "0", "year": "2022", "__typename": "ProceedingType" }, "article": { "id": "1H1lc56wESc", "doi": "10.1109/CVPR52688.2022.01102", "title": "StyleSwin: Transformer-based GAN for High-resolution Image Generation", "normalizedTitle": "StyleSwin: Transformer-based GAN for High-resolution Image Generation", "abstract": "Despite the tantalizing success in a broad of vision tasks, transformers have not yet demonstrated on-par ability as ConvNets in high-resolution image generative modeling. In this paper, we seek to explore using pure transformers to build a generative adversarial network for high-resolution image synthesis. To this end, we believe that local attention is crucial to strike the balance between computational efficiency and modeling capacity. Hence, the proposed generator adopts Swin transformer in a style-based architecture. To achieve a larger receptive field, we propose double attention which simultaneously leverages the context of the local and the shifted windows, leading to improved generation quality. Moreover, we show that offering the knowledge of the absolute position that has been lost in window-based transformers greatly benefits the generation quality. The proposed StyleSwin is scalable to high resolutions, with both the coarse geometry and fine structures benefit from the strong expressivity of transformers. However, blocking artifacts occur during high-resolution synthesis because performing the local attention in a block-wise manner may break the spatial coherency. To solve this, we empirically investigate various solutions, among which we find that employing a wavelet discriminator to examine the spectral discrepancy effectively suppresses the artifacts. 
Extensive experiments show the superiority over prior transformer-based GANs, especially on high resolutions, e.g., <tex>$1024 \\times$</tex> 1024. The StyleSwin, without complex training strategies, excels over StyleGAN on CelebA-HQ 1024, and achieves on-par performance on FFHQ-1024, proving the promise of using transformers for high-resolution image generation. The code and pretrained models are available at https://github.com/microsoft/StyleSwin.", "abstracts": [ { "abstractType": "Regular", "content": "Despite the tantalizing success in a broad of vision tasks, transformers have not yet demonstrated on-par ability as ConvNets in high-resolution image generative modeling. In this paper, we seek to explore using pure transformers to build a generative adversarial network for high-resolution image synthesis. To this end, we believe that local attention is crucial to strike the balance between computational efficiency and modeling capacity. Hence, the proposed generator adopts Swin transformer in a style-based architecture. To achieve a larger receptive field, we propose double attention which simultaneously leverages the context of the local and the shifted windows, leading to improved generation quality. Moreover, we show that offering the knowledge of the absolute position that has been lost in window-based transformers greatly benefits the generation quality. The proposed StyleSwin is scalable to high resolutions, with both the coarse geometry and fine structures benefit from the strong expressivity of transformers. However, blocking artifacts occur during high-resolution synthesis because performing the local attention in a block-wise manner may break the spatial coherency. To solve this, we empirically investigate various solutions, among which we find that employing a wavelet discriminator to examine the spectral discrepancy effectively suppresses the artifacts. 
Extensive experiments show the superiority over prior transformer-based GANs, especially on high resolutions, e.g., <tex>$1024 \\times$</tex> 1024. The StyleSwin, without complex training strategies, excels over StyleGAN on CelebA-HQ 1024, and achieves on-par performance on FFHQ-1024, proving the promise of using transformers for high-resolution image generation. The code and pretrained models are available at https://github.com/microsoft/StyleSwin.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "Despite the tantalizing success in a broad of vision tasks, transformers have not yet demonstrated on-par ability as ConvNets in high-resolution image generative modeling. In this paper, we seek to explore using pure transformers to build a generative adversarial network for high-resolution image synthesis. To this end, we believe that local attention is crucial to strike the balance between computational efficiency and modeling capacity. Hence, the proposed generator adopts Swin transformer in a style-based architecture. To achieve a larger receptive field, we propose double attention which simultaneously leverages the context of the local and the shifted windows, leading to improved generation quality. Moreover, we show that offering the knowledge of the absolute position that has been lost in window-based transformers greatly benefits the generation quality. The proposed StyleSwin is scalable to high resolutions, with both the coarse geometry and fine structures benefit from the strong expressivity of transformers. However, blocking artifacts occur during high-resolution synthesis because performing the local attention in a block-wise manner may break the spatial coherency. To solve this, we empirically investigate various solutions, among which we find that employing a wavelet discriminator to examine the spectral discrepancy effectively suppresses the artifacts. 
Extensive experiments show the superiority over prior transformer-based GANs, especially on high resolutions, e.g., - 1024. The StyleSwin, without complex training strategies, excels over StyleGAN on CelebA-HQ 1024, and achieves on-par performance on FFHQ-1024, proving the promise of using transformers for high-resolution image generation. The code and pretrained models are available at https://github.com/microsoft/StyleSwin.", "fno": "694600l1294", "keywords": [ "Computer Vision", "Data Compression", "Feature Extraction", "Image Classification", "Image Coding", "Image Representation", "Image Resolution", "Learning Artificial Intelligence", "Neural Nets", "Wavelet Transforms", "Style Swin", "Transformer Based GAN", "High Resolution Image Generation", "High Resolution Image Generative Modeling", "Pure Transformers", "Generative Adversarial Network", "High Resolution Image Synthesis", "Local Attention", "Swin Transformer", "Style Based Architecture", "Improved Generation Quality", "Window Based Transformers", "High Resolution Synthesis", "Wavelet Transforms", "Training", "Image Synthesis", "Computational Modeling", "Spatial Coherence", "Transformers", "Generative Adversarial Networks" ], "authors": [ { "affiliation": "University of Science and Technology of China", "fullName": "Bowen Zhang", "givenName": "Bowen", "surname": "Zhang", "__typename": "ArticleAuthorType" }, { "affiliation": "University of Science and Technology of China", "fullName": "Shuyang Gu", "givenName": "Shuyang", "surname": "Gu", "__typename": "ArticleAuthorType" }, { "affiliation": "Microsoft Research Asia", "fullName": "Bo Zhang", "givenName": "Bo", "surname": "Zhang", "__typename": "ArticleAuthorType" }, { "affiliation": "Microsoft Research Asia", "fullName": "Jianmin Bao", "givenName": "Jianmin", "surname": "Bao", "__typename": "ArticleAuthorType" }, { "affiliation": "Microsoft Research Asia", "fullName": "Dong Chen", "givenName": "Dong", "surname": "Chen", "__typename": "ArticleAuthorType" }, 
{ "affiliation": "Microsoft Research Asia", "fullName": "Fang Wen", "givenName": "Fang", "surname": "Wen", "__typename": "ArticleAuthorType" }, { "affiliation": "University of Science and Technology of China", "fullName": "Yong Wang", "givenName": "Yong", "surname": "Wang", "__typename": "ArticleAuthorType" }, { "affiliation": "Microsoft Research Asia", "fullName": "Baining Guo", "givenName": "Baining", "surname": "Guo", "__typename": "ArticleAuthorType" } ], "idPrefix": "cvpr", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "2022-06-01T00:00:00", "pubType": "proceedings", "pages": "11294-11304", "year": "2022", "issn": null, "isbn": "978-1-6654-6946-3", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [ { "id": "1H1lc0ZUtck", "name": "pcvpr202269460-09880033s1-mm_694600l1294.zip", "size": "10.3 MB", "location": "https://www.computer.org/csdl/api/v1/extra/pcvpr202269460-09880033s1-mm_694600l1294.zip", "__typename": "WebExtraType" } ], "adjacentArticles": { "previous": { "fno": "694600l1284", "articleId": "1H0KIr5sLxS", "__typename": "AdjacentArticleType" }, "next": { "fno": "694600l1305", "articleId": "1H0NhSzLSik", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/cvpr/2018/6420/0/642000j455", "title": "ST-GAN: Spatial Transformer Generative Adversarial Networks for Image Compositing", "doi": null, "abstractUrl": "/proceedings-article/cvpr/2018/642000j455/17D45XacGkf", "parentPublication": { "id": "proceedings/cvpr/2018/6420/0", "title": "2018 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvprw/2022/8739/0/873900a456", "title": "Transformer for Single Image Super-Resolution", "doi": null, "abstractUrl": "/proceedings-article/cvprw/2022/873900a456/1G57fSkJZYI", "parentPublication": { 
"id": "proceedings/cvprw/2022/8739/0", "title": "2022 IEEE/CVF Conference on Computer Vision and Pattern Recognition Workshops (CVPRW)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icme/2022/8563/0/09859791", "title": "GR-GAN: Gradual Refinement Text-To-Image Generation", "doi": null, "abstractUrl": "/proceedings-article/icme/2022/09859791/1G9ESovhXWw", "parentPublication": { "id": "proceedings/icme/2022/8563/0", "title": "2022 IEEE International Conference on Multimedia and Expo (ICME)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvpr/2022/6946/0/694600f718", "title": "Restormer: Efficient Transformer for High-Resolution Image Restoration", "doi": null, "abstractUrl": "/proceedings-article/cvpr/2022/694600f718/1H0Nsrf0ZVK", "parentPublication": { "id": "proceedings/cvpr/2022/6946/0", "title": "2022 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvpr/2022/6946/0/694600f677", "title": "Learning Trajectory-Aware Transformer for Video Super-Resolution", "doi": null, "abstractUrl": "/proceedings-article/cvpr/2022/694600f677/1H1iBltnico", "parentPublication": { "id": "proceedings/cvpr/2022/6946/0", "title": "2022 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/pacificvis/2020/5697/0/09086293", "title": "SSR-VFD: Spatial Super-Resolution for Vector Field Data Analysis and Visualization", "doi": null, "abstractUrl": "/proceedings-article/pacificvis/2020/09086293/1kuHkNHF4xG", "parentPublication": { "id": "proceedings/pacificvis/2020/5697/0", "title": "2020 IEEE Pacific Visualization Symposium (PacificVis)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": 
"trans/tg/2022/06/09229162", "title": "SSR-TVD: Spatial Super-Resolution for Time-Varying Data Analysis and Visualization", "doi": null, "abstractUrl": "/journal/tg/2022/06/09229162/1o3nNOnFv7G", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icpr/2021/8808/0/09412383", "title": "High Resolution Face Age Editing", "doi": null, "abstractUrl": "/proceedings-article/icpr/2021/09412383/1tmirf0Y0oM", "parentPublication": { "id": "proceedings/icpr/2021/8808/0", "title": "2020 25th International Conference on Pattern Recognition (ICPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tk/2023/04/09626552", "title": "DCT-GAN: Dilated Convolutional Transformer-Based GAN for Time Series Anomaly Detection", "doi": null, "abstractUrl": "/journal/tk/2023/04/09626552/1yNcV2zDN5e", "parentPublication": { "id": "trans/tk", "title": "IEEE Transactions on Knowledge & Data Engineering", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvpr/2021/4509/0/450900m2868", "title": "Taming Transformers for High-Resolution Image Synthesis", "doi": null, "abstractUrl": "/proceedings-article/cvpr/2021/450900m2868/1yeLFyqrASk", "parentPublication": { "id": "proceedings/cvpr/2021/4509/0", "title": "2021 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "1H1gVMlkl32", "title": "2022 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)", "acronym": "cvpr", "groupId": "1000147", "volume": "0", "displayVolume": "0", "year": "2022", "__typename": "ProceedingType" }, "article": { "id": "1H1m9v63vsk", "doi": "10.1109/CVPR52688.2022.01770", "title": "A Style-aware Discriminator for Controllable Image Translation", "normalizedTitle": "A Style-aware Discriminator for Controllable Image Translation", "abstract": "Current image-to-image translations do not control the output domain beyond the classes used during training, nor do they interpolate between different domains well, leading to implausible results. This limitation largely arises because labels do not consider the semantic distance. To mitigate such problems, we propose a style-aware discriminator that acts as a critic as well as a style encoder to provide conditions. The style-aware discriminator learns a controllable style space using prototype-based self-supervised learning and simultaneously guides the generator. Experiments on multiple datasets verify that the proposed model outperforms current state-of-the-art image-to-image translation methods. In contrast with current methods, the proposed approach supports various applications, including style interpolation, content transplantation, and local image translation. The code is available at github.com/kunheek/style-aware-discriminator.", "abstracts": [ { "abstractType": "Regular", "content": "Current image-to-image translations do not control the output domain beyond the classes used during training, nor do they interpolate between different domains well, leading to implausible results. This limitation largely arises because labels do not consider the semantic distance. To mitigate such problems, we propose a style-aware discriminator that acts as a critic as well as a style encoder to provide conditions. 
The style-aware discriminator learns a controllable style space using prototype-based self-supervised learning and simultaneously guides the generator. Experiments on multiple datasets verify that the proposed model outperforms current state-of-the-art image-to-image translation methods. In contrast with current methods, the proposed approach supports various applications, including style interpolation, content transplantation, and local image translation. The code is available at github.com/kunheek/style-aware-discriminator.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "Current image-to-image translations do not control the output domain beyond the classes used during training, nor do they interpolate between different domains well, leading to implausible results. This limitation largely arises because labels do not consider the semantic distance. To mitigate such problems, we propose a style-aware discriminator that acts as a critic as well as a style encoder to provide conditions. The style-aware discriminator learns a controllable style space using prototype-based self-supervised learning and simultaneously guides the generator. Experiments on multiple datasets verify that the proposed model outperforms current state-of-the-art image-to-image translation methods. In contrast with current methods, the proposed approach supports various applications, including style interpolation, content transplantation, and local image translation. 
The code is available at github.com/kunheek/style-aware-discriminator.", "fno": "694600s8218", "keywords": [ "Interpolation", "Learning Artificial Intelligence", "Image To Image Translation Methods", "Style Interpolation", "Local Image Translation", "Style Aware Discriminator", "Controllable Image Translation", "Output Domain", "Style Encoder", "Controllable Style Space", "Prototype Based Self Supervised Learning", "Content Transplantation", "Training", "Interpolation", "Computer Vision", "Codes", "Computational Modeling", "Semantics", "Self Supervised Learning" ], "authors": [ { "affiliation": "Pohang University of Science and Technology (POSTECH)", "fullName": "Kunhee Kim", "givenName": "Kunhee", "surname": "Kim", "__typename": "ArticleAuthorType" }, { "affiliation": "Pohang University of Science and Technology (POSTECH)", "fullName": "Sanghun Park", "givenName": "Sanghun", "surname": "Park", "__typename": "ArticleAuthorType" }, { "affiliation": "Pohang University of Science and Technology (POSTECH)", "fullName": "Eunyeong Jeon", "givenName": "Eunyeong", "surname": "Jeon", "__typename": "ArticleAuthorType" }, { "affiliation": "Pohang University of Science and Technology (POSTECH)", "fullName": "Taehun Kim", "givenName": "Taehun", "surname": "Kim", "__typename": "ArticleAuthorType" }, { "affiliation": "Pohang University of Science and Technology (POSTECH)", "fullName": "Daijin Kim", "givenName": "Daijin", "surname": "Kim", "__typename": "ArticleAuthorType" } ], "idPrefix": "cvpr", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "2022-06-01T00:00:00", "pubType": "proceedings", "pages": "18218-18227", "year": "2022", "issn": null, "isbn": "978-1-6654-6946-3", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [ { "id": "1H1m9pvbGvu", "name": "pcvpr202269460-09880454s1-mm_694600s8218.zip", "size": "8.05 MB", "location": 
"https://www.computer.org/csdl/api/v1/extra/pcvpr202269460-09880454s1-mm_694600s8218.zip", "__typename": "WebExtraType" } ], "adjacentArticles": { "previous": { "fno": "694600s8208", "articleId": "1H1jPMUCwZG", "__typename": "AdjacentArticleType" }, "next": { "fno": "694600s8228", "articleId": "1H1jR6dQv5u", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/big-data/2021/3902/0/09671820", "title": "Non-Parallel Text Style Transfer using Self-Attentional Discriminator as Supervisor", "doi": null, "abstractUrl": "/proceedings-article/big-data/2021/09671820/1A8hr4FqzAY", "parentPublication": { "id": "proceedings/big-data/2021/3902/0", "title": "2021 IEEE International Conference on Big Data (Big Data)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iccv/2021/2812/0/281200o4589", "title": "Domain-Aware Universal Style Transfer", "doi": null, "abstractUrl": "/proceedings-article/iccv/2021/281200o4589/1BmEW5hrQNW", "parentPublication": { "id": "proceedings/iccv/2021/2812/0", "title": "2021 IEEE/CVF International Conference on Computer Vision (ICCV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvpr/2022/6946/0/694600h171", "title": "Reusing the Task-specific Classifier as a Discriminator: Discriminator-free Adversarial Domain Adaptation", "doi": null, "abstractUrl": "/proceedings-article/cvpr/2022/694600h171/1H0O8xHTvDW", "parentPublication": { "id": "proceedings/cvpr/2022/6946/0", "title": "2022 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvpr/2022/6946/0/694600s8300", "title": "InstaFormer: Instance-Aware Image-to-Image Translation with Transformer", "doi": null, "abstractUrl": "/proceedings-article/cvpr/2022/694600s8300/1H1mxvm4SeA", "parentPublication": { 
"id": "proceedings/cvpr/2022/6946/0", "title": "2022 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/wacv/2023/9346/0/934600a259", "title": "Panoptic-aware Image-to-Image Translation", "doi": null, "abstractUrl": "/proceedings-article/wacv/2023/934600a259/1L6LxMYKoiQ", "parentPublication": { "id": "proceedings/wacv/2023/9346/0", "title": "2023 IEEE/CVF Winter Conference on Applications of Computer Vision (WACV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/wacv/2023/9346/0/9.346E214", "title": "Learning Style Subspaces for Controllable Unpaired Domain Translation", "doi": null, "abstractUrl": "/proceedings-article/wacv/2023/9.346E214/1La4JaMnIgE", "parentPublication": { "id": "proceedings/wacv/2023/9346/0", "title": "2023 IEEE/CVF Winter Conference on Applications of Computer Vision (WACV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvpr/2019/3293/0/329300b467", "title": "Attention-Aware Multi-Stroke Style Transfer", "doi": null, "abstractUrl": "/proceedings-article/cvpr/2019/329300b467/1gyrAK80fHq", "parentPublication": { "id": "proceedings/cvpr/2019/3293/0", "title": "2019 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvpr/2021/4509/0/450900i635", "title": "Image-to-image Translation via Hierarchical Style Disentanglement", "doi": null, "abstractUrl": "/proceedings-article/cvpr/2021/450900i635/1yeJ8euVFTO", "parentPublication": { "id": "proceedings/cvpr/2021/4509/0", "title": "2021 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvpr/2021/4509/0/450900k0780", "title": "Smoothing the 
Disentangled Latent Style Space for Unsupervised Image-to-Image Translation", "doi": null, "abstractUrl": "/proceedings-article/cvpr/2021/450900k0780/1yeJEL6YSJy", "parentPublication": { "id": "proceedings/cvpr/2021/4509/0", "title": "2021 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvpr/2021/4509/0/450900l1891", "title": "Progressive Semantic-Aware Style Transformation for Blind Face Restoration", "doi": null, "abstractUrl": "/proceedings-article/cvpr/2021/450900l1891/1yeKvf7Usgw", "parentPublication": { "id": "proceedings/cvpr/2021/4509/0", "title": "2021 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "1m3n9N02qgE", "title": "2020 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)", "acronym": "cvpr", "groupId": "1000147", "volume": "0", "displayVolume": "0", "year": "2020", "__typename": "ProceedingType" }, "article": { "id": "1m3oox77sD6", "doi": "10.1109/CVPR42600.2020.00473", "title": "DOA-GAN: Dual-Order Attentive Generative Adversarial Network for Image Copy-Move Forgery Detection and Localization", "normalizedTitle": "DOA-GAN: Dual-Order Attentive Generative Adversarial Network for Image Copy-Move Forgery Detection and Localization", "abstract": "Images can be manipulated for nefarious purposes to hide content or to duplicate certain objects through copy-move operations. Discovering a well-crafted copy-move forgery in images can be very challenging for both humans and machines; for example, an object on a uniform background can be replaced by an image patch of the same background. In this paper, we propose a Generative Adversarial Network with a dual-order attention model to detect and localize copy-move forgeries. In the generator, the first-order attention is designed to capture copy-move location information, and the second-order attention exploits more discriminative features for the patch co-occurrence. Both attention maps are extracted from the affinity matrix and are used to fuse location-aware and co-occurrence features for the final detection and localization branches of the network. The discriminator network is designed to further ensure more accurate localization results. To the best of our knowledge, we are the first to propose such a network architecture with the 1st-order attention mechanism from the affinity matrix. 
We have performed extensive experimental validation and our state-of-the-art results strongly demonstrate the efficacy of the proposed approach.", "abstracts": [ { "abstractType": "Regular", "content": "Images can be manipulated for nefarious purposes to hide content or to duplicate certain objects through copy-move operations. Discovering a well-crafted copy-move forgery in images can be very challenging for both humans and machines; for example, an object on a uniform background can be replaced by an image patch of the same background. In this paper, we propose a Generative Adversarial Network with a dual-order attention model to detect and localize copy-move forgeries. In the generator, the first-order attention is designed to capture copy-move location information, and the second-order attention exploits more discriminative features for the patch co-occurrence. Both attention maps are extracted from the affinity matrix and are used to fuse location-aware and co-occurrence features for the final detection and localization branches of the network. The discriminator network is designed to further ensure more accurate localization results. To the best of our knowledge, we are the first to propose such a network architecture with the 1st-order attention mechanism from the affinity matrix. We have performed extensive experimental validation and our state-of-the-art results strongly demonstrate the efficacy of the proposed approach.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "Images can be manipulated for nefarious purposes to hide content or to duplicate certain objects through copy-move operations. Discovering a well-crafted copy-move forgery in images can be very challenging for both humans and machines; for example, an object on a uniform background can be replaced by an image patch of the same background. In this paper, we propose a Generative Adversarial Network with a dual-order attention model to detect and localize copy-move forgeries. 
In the generator, the first-order attention is designed to capture copy-move location information, and the second-order attention exploits more discriminative features for the patch co-occurrence. Both attention maps are extracted from the affinity matrix and are used to fuse location-aware and co-occurrence features for the final detection and localization branches of the network. The discriminator network is designed to further ensure more accurate localization results. To the best of our knowledge, we are the first to propose such a network architecture with the 1st-order attention mechanism from the affinity matrix. We have performed extensive experimental validation and our state-of-the-art results strongly demonstrate the efficacy of the proposed approach.", "fno": "716800e675", "keywords": [ "Feature Extraction", "DOA GAN", "Image Copy Move Forgery Detection", "Copy Move Operations", "Uniform Background", "Image Patch", "Dual Order Attention Model", "First Order Attention", "Copy Move Location Information", "Second Order Attention", "Discriminative Features", "Patch Co Occurrence", "Attention Maps", "Affinity Matrix", "Co Occurrence Features", "Localization Branches", "Discriminator Network", "Network Architecture", "1st Order Attention Mechanism", "Generative Adversarial Network", "Location Awareness", "Feature Extraction", "Forgery", "Generators", "Generative Adversarial Networks", "Task Analysis", "Kernel", "Tools" ], "authors": [ { "affiliation": "Rensselaer Polytechnic Institute, Troy, NY", "fullName": "Ashraful Islam", "givenName": "Ashraful", "surname": "Islam", "__typename": "ArticleAuthorType" }, { "affiliation": "Kitware Inc., NY", "fullName": "Chengjiang Long", "givenName": "Chengjiang", "surname": "Long", "__typename": "ArticleAuthorType" }, { "affiliation": "Kitware Inc., NY", "fullName": "Arslan Basharat", "givenName": "Arslan", "surname": "Basharat", "__typename": "ArticleAuthorType" }, { "affiliation": "Kitware Inc., NY", "fullName": "Anthony 
Hoogs", "givenName": "Anthony", "surname": "Hoogs", "__typename": "ArticleAuthorType" } ], "idPrefix": "cvpr", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "2020-06-01T00:00:00", "pubType": "proceedings", "pages": "4675-4684", "year": "2020", "issn": null, "isbn": "978-1-7281-7168-5", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "716800e665", "articleId": "1m3ofMfQove", "__typename": "AdjacentArticleType" }, "next": { "fno": "716800e685", "articleId": "1m3ngsvU9zO", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/asiajcis/2015/1989/0/1989a033", "title": "An Efficient Detection Algorithm for Copy-Move Forgery", "doi": null, "abstractUrl": "/proceedings-article/asiajcis/2015/1989a033/12OmNBSBk0t", "parentPublication": { "id": "proceedings/asiajcis/2015/1989/0", "title": "2015 10th Asia Joint Conference on Information Security (AsiaJCIS)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ism/2017/2937/0/2937a553", "title": "LBP-SVD Based Copy Move Forgery Detection Algorithm", "doi": null, "abstractUrl": "/proceedings-article/ism/2017/2937a553/12OmNxwncqI", "parentPublication": { "id": "proceedings/ism/2017/2937/0", "title": "2017 IEEE International Symposium on Multimedia (ISM)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cgiv/2011/4484/0/4484a103", "title": "Copy-Move Forgery Detection Using Dyadic Wavelet Transform", "doi": null, "abstractUrl": "/proceedings-article/cgiv/2011/4484a103/12OmNy3RRHz", "parentPublication": { "id": "proceedings/cgiv/2011/4484/0", "title": "2011 Eighth International Conference Computer Graphics, Imaging and Visualization", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": 
"proceedings/ism/2016/4571/0/4571a341", "title": "Improving SURF Based Copy-Move Forgery Detection Using Super Resolution", "doi": null, "abstractUrl": "/proceedings-article/ism/2016/4571a341/12OmNyQGSgj", "parentPublication": { "id": "proceedings/ism/2016/4571/0", "title": "2016 IEEE International Symposium on Multimedia (ISM)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ic2e/2014/3766/0/3766a510", "title": "Copy-Rotation-Move Forgery Detection Using the MROGH Descriptor", "doi": null, "abstractUrl": "/proceedings-article/ic2e/2014/3766a510/12OmNyXMQgk", "parentPublication": { "id": "proceedings/ic2e/2014/3766/0", "title": "2014 IEEE International Conference on Cloud Engineering (IC2E)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icndc/2011/0407/0/06047099", "title": "An Improved Lexicographical Sort Algorithm of Copy-move Forgery Detection", "doi": null, "abstractUrl": "/proceedings-article/icndc/2011/06047099/12OmNzAFSXs", "parentPublication": { "id": "proceedings/icndc/2011/0407/0", "title": "2011 Second International Conference on Networking and Distributed Computing", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icsip/2014/5100/0/5100a213", "title": "Shape Based Copy Move Forgery Detection Using Level Set Approach", "doi": null, "abstractUrl": "/proceedings-article/icsip/2014/5100a213/12OmNzmtWwC", "parentPublication": { "id": "proceedings/icsip/2014/5100/0", "title": "2014 Fifth International Conference on Signal and Image Processing (ICSIP)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ic3/2018/6834/0/08530489", "title": "Comparative Analysis of Different Keypoint Based Copy-Move Forgery Detection Methods", "doi": null, "abstractUrl": "/proceedings-article/ic3/2018/08530489/17D45WIXbPy", "parentPublication": { "id": 
"proceedings/ic3/2018/6834/0", "title": "2018 Eleventh International Conference on Contemporary Computing (IC3)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/trustcom/2022/9425/0/942500a739", "title": "Pyramid Copy-move Forgery Detection Using Adversarial Optimized Self Deep Matching Network", "doi": null, "abstractUrl": "/proceedings-article/trustcom/2022/942500a739/1LFM2ADekQo", "parentPublication": { "id": "proceedings/trustcom/2022/9425/0", "title": "2022 IEEE International Conference on Trust, Security and Privacy in Computing and Communications (TrustCom)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icvrv/2017/2636/0/263600a203", "title": "A Novel Fusion Algorithm for Copy-Move Forgery Detection", "doi": null, "abstractUrl": "/proceedings-article/icvrv/2017/263600a203/1ap5zaV6LIY", "parentPublication": { "id": "proceedings/icvrv/2017/2636/0", "title": "2017 International Conference on Virtual Reality and Visualization (ICVRV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "1tmhi3ly74c", "title": "2020 25th International Conference on Pattern Recognition (ICPR)", "acronym": "icpr", "groupId": "1000545", "volume": "0", "displayVolume": "0", "year": "2021", "__typename": "ProceedingType" }, "article": { "id": "1tmi26uWlws", "doi": "10.1109/ICPR48806.2021.9412539", "title": "A Self-supervised GAN for Unsupervised Few-shot Object Recognition", "normalizedTitle": "A Self-supervised GAN for Unsupervised Few-shot Object Recognition", "abstract": "This paper addresses unsupervised few-shot object recognition, where all training images are unlabeled, and test images are divided into queries and a few labeled support images per object class of interest. The training and test images do not share object classes. We extend the vanilla GAN with two loss functions, both aimed at self-supervised learning. The first is a reconstruction loss that enforces the discriminator to reconstruct the probabilistically sampled latent code which has been used for generating the &#x201C;fake&#x201D; image. The second is a triplet loss that enforces the discriminator to output image encodings that are closer for more similar images. Evaluation, comparisons, and detailed ablation studies are done in the context of few-shot classification. Our approach significantly outperforms the state of the art on the Mini-Imagenet and Tiered-Imagenet datasets.", "abstracts": [ { "abstractType": "Regular", "content": "This paper addresses unsupervised few-shot object recognition, where all training images are unlabeled, and test images are divided into queries and a few labeled support images per object class of interest. The training and test images do not share object classes. We extend the vanilla GAN with two loss functions, both aimed at self-supervised learning. The first is a reconstruction loss that enforces the discriminator to reconstruct the probabilistically sampled latent code which has been used for generating the &#x201C;fake&#x201D; image. 
The second is a triplet loss that enforces the discriminator to output image encodings that are closer for more similar images. Evaluation, comparisons, and detailed ablation studies are done in the context of few-shot classification. Our approach significantly outperforms the state of the art on the Mini-Imagenet and Tiered-Imagenet datasets.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "This paper addresses unsupervised few-shot object recognition, where all training images are unlabeled, and test images are divided into queries and a few labeled support images per object class of interest. The training and test images do not share object classes. We extend the vanilla GAN with two loss functions, both aimed at self-supervised learning. The first is a reconstruction loss that enforces the discriminator to reconstruct the probabilistically sampled latent code which has been used for generating the “fake” image. The second is a triplet loss that enforces the discriminator to output image encodings that are closer for more similar images. Evaluation, comparisons, and detailed ablation studies are done in the context of few-shot classification. 
Our approach significantly outperforms the state of the art on the Mini-Imagenet and Tiered-Imagenet datasets.", "fno": "09412539", "keywords": [ "Image Classification", "Image Coding", "Image Reconstruction", "Neural Nets", "Object Recognition", "Supervised Learning", "Unsupervised Learning", "Self Supervised GAN", "Unsupervised Few Shot Object Recognition", "Training Images", "Labeled Support Images", "Vanilla GAN", "Self Supervised Learning", "Reconstruction Loss", "Fake Image", "Image Encoding", "Few Shot Classification", "Probabilistically Sampled Latent Code", "Mini Imagenet Datasets", "Tiered Imagenet Datasets", "Training", "Image Coding", "Performance Gain", "Probabilistic Logic", "Pattern Recognition", "Object Recognition", "Image Reconstruction" ], "authors": [ { "affiliation": "Oregon State University,Corvallis,OR,USA,97330", "fullName": "Khoi Nguyen", "givenName": "Khoi", "surname": "Nguyen", "__typename": "ArticleAuthorType" }, { "affiliation": "Oregon State University,Corvallis,OR,USA,97330", "fullName": "Sinisa Todorovic", "givenName": "Sinisa", "surname": "Todorovic", "__typename": "ArticleAuthorType" } ], "idPrefix": "icpr", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "2021-01-01T00:00:00", "pubType": "proceedings", "pages": "3225-3231", "year": "2021", "issn": "1051-4651", "isbn": "978-1-7281-8808-9", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "09413118", "articleId": "1tmipC40Sic", "__typename": "AdjacentArticleType" }, "next": { "fno": "09412671", "articleId": "1tmixUXND44", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/wacv/2022/0915/0/091500c432", "title": "GraN-GAN: Piecewise Gradient Normalization for Generative Adversarial Networks", "doi": null, "abstractUrl": "/proceedings-article/wacv/2022/091500c432/1B140SxPWTe", 
"parentPublication": { "id": "proceedings/wacv/2022/0915/0", "title": "2022 IEEE/CVF Winter Conference on Applications of Computer Vision (WACV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iccv/2021/2812/0/281200o4041", "title": "Omni-GAN: On the Secrets of cGANs and Beyond", "doi": null, "abstractUrl": "/proceedings-article/iccv/2021/281200o4041/1BmLcpOiAXS", "parentPublication": { "id": "proceedings/iccv/2021/2812/0", "title": "2021 IEEE/CVF International Conference on Computer Vision (ICCV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvprw/2022/8739/0/873900c816", "title": "Semi-Supervised Few-Shot Learning from A Dependency-Discriminant Perspective", "doi": null, "abstractUrl": "/proceedings-article/cvprw/2022/873900c816/1G55YAR4wKI", "parentPublication": { "id": "proceedings/cvprw/2022/8739/0", "title": "2022 IEEE/CVF Conference on Computer Vision and Pattern Recognition Workshops (CVPRW)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvpr/2022/6946/0/694600s8280", "title": "Self-Supervised Dense Consistency Regularization for Image-to-Image Translation", "doi": null, "abstractUrl": "/proceedings-article/cvpr/2022/694600s8280/1H0KKIa6pBS", "parentPublication": { "id": "proceedings/cvpr/2022/6946/0", "title": "2022 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvpr/2019/3293/0/329300l1226", "title": "Rob-GAN: Generator, Discriminator, and Adversarial Attacker", "doi": null, "abstractUrl": "/proceedings-article/cvpr/2019/329300l1226/1gyscbJL7J6", "parentPublication": { "id": "proceedings/cvpr/2019/3293/0", "title": "2019 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { 
"id": "proceedings/icdar/2019/3014/0/301400a178", "title": "TH-GAN: Generative Adversarial Network Based Transfer Learning for Historical Chinese Character Recognition", "doi": null, "abstractUrl": "/proceedings-article/icdar/2019/301400a178/1h81u6jDzSE", "parentPublication": { "id": "proceedings/icdar/2019/3014/0", "title": "2019 International Conference on Document Analysis and Recognition (ICDAR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iccv/2019/4803/0/480300a441", "title": "Few-Shot Image Recognition With Knowledge Transfer", "doi": null, "abstractUrl": "/proceedings-article/iccv/2019/480300a441/1hVlm1vgA0w", "parentPublication": { "id": "proceedings/iccv/2019/4803/0", "title": "2019 IEEE/CVF International Conference on Computer Vision (ICCV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icme/2020/1331/0/09102917", "title": "Matchinggan: Matching-Based Few-Shot Image Generation", "doi": null, "abstractUrl": "/proceedings-article/icme/2020/09102917/1kwr3cBl864", "parentPublication": { "id": "proceedings/icme/2020/1331/0", "title": "2020 IEEE International Conference on Multimedia and Expo (ICME)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/wacv/2021/0477/0/047700d188", "title": "LT-GAN: Self-Supervised GAN with Latent Transformation Detection", "doi": null, "abstractUrl": "/proceedings-article/wacv/2021/047700d188/1uqGqafzEOI", "parentPublication": { "id": "proceedings/wacv/2021/0477/0", "title": "2021 IEEE Winter Conference on Applications of Computer Vision (WACV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvprw/2021/4899/0/489900a203", "title": "DW-GAN: A Discrete Wavelet Transform GAN for NonHomogeneous Dehazing", "doi": null, "abstractUrl": "/proceedings-article/cvprw/2021/489900a203/1yJYrxYT1hS", "parentPublication": { "id": 
"proceedings/cvprw/2021/4899/0", "title": "2021 IEEE/CVF Conference on Computer Vision and Pattern Recognition Workshops (CVPRW)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "1tmhi3ly74c", "title": "2020 25th International Conference on Pattern Recognition (ICPR)", "acronym": "icpr", "groupId": "1000545", "volume": "0", "displayVolume": "0", "year": "2021", "__typename": "ProceedingType" }, "article": { "id": "1tmiXDlffNK", "doi": "10.1109/ICPR48806.2021.9412305", "title": "Position-aware and Symmetry Enhanced GAN for Radial Distortion Correction", "normalizedTitle": "Position-aware and Symmetry Enhanced GAN for Radial Distortion Correction", "abstract": "This paper presents a novel method based on the generative adversarial network for radial distortion correction. Instead of generating a corrected image, our generator predicts a pixel flow map to measure the pixel offset between the distorted and corrected image. The quality of the generated pixel flow map and the warped image are judged by the discriminator. As texture far away from the image center has strong distortion, we develop an Adaptive Inverted Foveal layer which can transform the deformation to the intensity of the image to exploit this property. Rotation symmetry enhanced convolution kernels are applied to extract geometric features of different orientations explicitly. These learned features are recalibrated using the Squeeze-and-Excitation block to assign different weights for different directions. Moreover, we construct a first real-world radial distorted image dataset RD600 annotated with ground truth to evaluate our proposed method. We conduct extensive experiments to validate the effectiveness of each part of our framework. The further experiment shows our approach outperforms previous methods in both synthetic and real-world datasets quantitatively and qualitatively.", "abstracts": [ { "abstractType": "Regular", "content": "This paper presents a novel method based on the generative adversarial network for radial distortion correction. 
Instead of generating a corrected image, our generator predicts a pixel flow map to measure the pixel offset between the distorted and corrected image. The quality of the generated pixel flow map and the warped image are judged by the discriminator. As texture far away from the image center has strong distortion, we develop an Adaptive Inverted Foveal layer which can transform the deformation to the intensity of the image to exploit this property. Rotation symmetry enhanced convolution kernels are applied to extract geometric features of different orientations explicitly. These learned features are recalibrated using the Squeeze-and-Excitation block to assign different weights for different directions. Moreover, we construct a first real-world radial distorted image dataset RD600 annotated with ground truth to evaluate our proposed method. We conduct extensive experiments to validate the effectiveness of each part of our framework. The further experiment shows our approach outperforms previous methods in both synthetic and real-world datasets quantitatively and qualitatively.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "This paper presents a novel method based on the generative adversarial network for radial distortion correction. Instead of generating a corrected image, our generator predicts a pixel flow map to measure the pixel offset between the distorted and corrected image. The quality of the generated pixel flow map and the warped image are judged by the discriminator. As texture far away from the image center has strong distortion, we develop an Adaptive Inverted Foveal layer which can transform the deformation to the intensity of the image to exploit this property. Rotation symmetry enhanced convolution kernels are applied to extract geometric features of different orientations explicitly. These learned features are recalibrated using the Squeeze-and-Excitation block to assign different weights for different directions. 
Moreover, we construct a first real-world radial distorted image dataset RD600 annotated with ground truth to evaluate our proposed method. We conduct extensive experiments to validate the effectiveness of each part of our framework. The further experiment shows our approach outperforms previous methods in both synthetic and real-world datasets quantitatively and qualitatively.", "fno": "09412305", "keywords": [ "Convolutional Neural Nets", "Distortion", "Feature Extraction", "Image Reconstruction", "Image Resolution", "Learning Artificial Intelligence", "Squeeze And Excitation Block", "Geometric Features Extraction", "Pixel Flow Map", "Position Aware GAN", "Rotation Symmetry Enhanced Convolution Kernels", "Adaptive Inverted Foveal Layer", "Image Distortion", "Generative Adversarial Network", "Radial Distortion Correction", "Symmetry Enhanced GAN", "Convolution", "Transforms", "Distortion", "Generative Adversarial Networks", "Feature Extraction", "Generators", "Pattern Recognition" ], "authors": [ { "affiliation": "Key Laboratory of Machine Perception (MOE), School of Electronics Engineering and Computer Science, Peking University", "fullName": "Yongjie Shi", "givenName": "Yongjie", "surname": "Shi", "__typename": "ArticleAuthorType" }, { "affiliation": "Key Laboratory of Machine Perception (MOE), School of Electronics Engineering and Computer Science, Peking University", "fullName": "Xin Tong", "givenName": "Xin", "surname": "Tong", "__typename": "ArticleAuthorType" }, { "affiliation": "Key Laboratory of Machine Perception (MOE), School of Electronics Engineering and Computer Science, Peking University", "fullName": "Jingsi Wen", "givenName": "Jingsi", "surname": "Wen", "__typename": "ArticleAuthorType" }, { "affiliation": "Key Laboratory of Machine Perception (MOE), School of Electronics Engineering and Computer Science, Peking University", "fullName": "He Zhao", "givenName": "He", "surname": "Zhao", "__typename": "ArticleAuthorType" }, { "affiliation": "Key 
Laboratory of Machine Perception (MOE), School of Electronics Engineering and Computer Science, Peking University", "fullName": "Xianghua Ying", "givenName": "Xianghua", "surname": "Ying", "__typename": "ArticleAuthorType" }, { "affiliation": "Key Laboratory of Machine Perception (MOE), School of Electronics Engineering and Computer Science, Peking University", "fullName": "Hongbin Zha", "givenName": "Hongbin", "surname": "Zha", "__typename": "ArticleAuthorType" } ], "idPrefix": "icpr", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "2021-01-01T00:00:00", "pubType": "proceedings", "pages": "1701-1708", "year": "2021", "issn": "1051-4651", "isbn": "978-1-7281-8808-9", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "09412101", "articleId": "1tmj67qcsda", "__typename": "AdjacentArticleType" }, "next": { "fno": "09412943", "articleId": "1tmjcAszg4w", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/cvpr/2014/5118/0/5118a025", "title": "Critical Configurations for Radial Distortion Self-Calibration", "doi": null, "abstractUrl": "/proceedings-article/cvpr/2014/5118a025/12OmNBpVQ9z", "parentPublication": { "id": "proceedings/cvpr/2014/5118/0", "title": "2014 IEEE Conference on Computer Vision and Pattern Recognition (CVPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icpr/2008/2174/0/04761021", "title": "Visual metrology with uncalibrated radial distorted images", "doi": null, "abstractUrl": "/proceedings-article/icpr/2008/04761021/12OmNwJgAKP", "parentPublication": { "id": "proceedings/icpr/2008/2174/0", "title": "ICPR 2008 19th International Conference on Pattern Recognition", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/fg/2015/6026/1/07163149", "title": 
"Correcting radial and perspective distortion by using face shape information", "doi": null, "abstractUrl": "/proceedings-article/fg/2015/07163149/12OmNyKJilp", "parentPublication": { "id": "proceedings/fg/2015/6026/5", "title": "2015 11th IEEE International Conference and Workshops on Automatic Face and Gesture Recognition (FG)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iccv/2015/8391/0/8391c345", "title": "On the Equivalence of Moving Entrance Pupil and Radial Distortion for Camera Calibration", "doi": null, "abstractUrl": "/proceedings-article/iccv/2015/8391c345/12OmNyshmIc", "parentPublication": { "id": "proceedings/iccv/2015/8391/0", "title": "2015 IEEE International Conference on Computer Vision (ICCV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/dasc-picom-datacom-cyberscitech/2017/1956/0/08328476", "title": "Robust Radial Distortion Correction from a Single Image", "doi": null, "abstractUrl": "/proceedings-article/dasc-picom-datacom-cyberscitech/2017/08328476/17D45VTRoD1", "parentPublication": { "id": "proceedings/dasc-picom-datacom-cyberscitech/2017/1956/0", "title": "2017 IEEE 15th Intl Conf on Dependable, Autonomic and Secure Computing, 15th Intl Conf on Pervasive Intelligence and Computing, 3rd Intl Conf on Big Data Intelligence and Computing and Cyber Science and Technology Congress(DASC/PiCom/DataCom/CyberSciTech)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvpr/2018/6420/0/642000e824", "title": "Rolling Shutter and Radial Distortion are Features for High Frame Rate Multi-camera Tracking", "doi": null, "abstractUrl": "/proceedings-article/cvpr/2018/642000e824/17D45WODaoT", "parentPublication": { "id": "proceedings/cvpr/2018/6420/0", "title": "2018 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)", "__typename": "ParentPublication" }, "__typename": 
"RecommendedArticleType" }, { "id": "proceedings/cvpr/2019/3293/0/329300j673", "title": "Radial Distortion Triangulation", "doi": null, "abstractUrl": "/proceedings-article/cvpr/2019/329300j673/1gyrQv2dZvO", "parentPublication": { "id": "proceedings/cvpr/2019/3293/0", "title": "2019 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iccv/2019/4803/0/480300b062", "title": "Revisiting Radial Distortion Absolute Pose", "doi": null, "abstractUrl": "/proceedings-article/iccv/2019/480300b062/1hVlRpT15wA", "parentPublication": { "id": "proceedings/iccv/2019/4803/0", "title": "2019 IEEE/CVF International Conference on Computer Vision (ICCV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvpr/2020/7168/0/716800h718", "title": "RDCFace: Radial Distortion Correction for Face Recognition", "doi": null, "abstractUrl": "/proceedings-article/cvpr/2020/716800h718/1m3n9WusSUo", "parentPublication": { "id": "proceedings/cvpr/2020/7168/0", "title": "2020 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icceai/2021/3960/0/396000a108", "title": "Generative Difference Image for Blind Image Quality Assessment", "doi": null, "abstractUrl": "/proceedings-article/icceai/2021/396000a108/1xqyS053BIc", "parentPublication": { "id": "proceedings/icceai/2021/3960/0", "title": "2021 International Conference on Computer Engineering and Artificial Intelligence (ICCEAI)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "1wzs0vrjyWQ", "title": "2021 IEEE/CVF Conference on Computer Vision and Pattern Recognition Workshops (CVPRW)", "acronym": "cvprw", "groupId": "1001809", "volume": "0", "displayVolume": "0", "year": "2021", "__typename": "ProceedingType" }, "article": { "id": "1yJYrxYT1hS", "doi": "10.1109/CVPRW53098.2021.00029", "title": "DW-GAN: A Discrete Wavelet Transform GAN for NonHomogeneous Dehazing", "normalizedTitle": "DW-GAN: A Discrete Wavelet Transform GAN for NonHomogeneous Dehazing", "abstract": "Hazy images are often subject to color distortion, blurring, and other visible quality degradation. Some existing CNN-based methods have great performance on removing homogeneous haze, but they are not robust in non-homogeneous case. The reasons are mainly in two folds. Firstly, due to the complicated haze distribution, texture details are easy to be lost during the dehazing process. Secondly, since the training pairs are hard to be collected, training on limited data can easily lead to over-fitting problem. To tackle these two issues, we introduce a novel dehazing network using 2D discrete wavelet transform, namely DW-GAN. Specifically, we propose a two-branch network to deal with the aforementioned problems. By utilizing wavelet transform in DWT branch, our proposed method can retain more high-frequency knowledge in feature maps. In order to prevent over-fitting, ImageNet pre-trained Res2Net is adopted in the knowledge adaptation branch. Owing to the robust feature representations of ImageNet pre-training, the generalization ability of our network is improved dramatically. Finally, a patch-based discriminator is used to reduce artifacts of the restored images. Extensive experimental results demonstrate that the proposed method outperforms the state-of-the-arts quantitatively and qualitatively. 
The source code is available at https://github.com/liuh127/DW-GAN-Dehazing.", "abstracts": [ { "abstractType": "Regular", "content": "Hazy images are often subject to color distortion, blurring, and other visible quality degradation. Some existing CNN-based methods have great performance on removing homogeneous haze, but they are not robust in non-homogeneous case. The reasons are mainly in two folds. Firstly, due to the complicated haze distribution, texture details are easy to be lost during the dehazing process. Secondly, since the training pairs are hard to be collected, training on limited data can easily lead to over-fitting problem. To tackle these two issues, we introduce a novel dehazing network using 2D discrete wavelet transform, namely DW-GAN. Specifically, we propose a two-branch network to deal with the aforementioned problems. By utilizing wavelet transform in DWT branch, our proposed method can retain more high-frequency knowledge in feature maps. In order to prevent over-fitting, ImageNet pre-trained Res2Net is adopted in the knowledge adaptation branch. Owing to the robust feature representations of ImageNet pre-training, the generalization ability of our network is improved dramatically. Finally, a patch-based discriminator is used to reduce artifacts of the restored images. Extensive experimental results demonstrate that the proposed method outperforms the state-of-the-arts quantitatively and qualitatively. The source code is available at https://github.com/liuh127/DW-GAN-Dehazing.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "Hazy images are often subject to color distortion, blurring, and other visible quality degradation. Some existing CNN-based methods have great performance on removing homogeneous haze, but they are not robust in non-homogeneous case. The reasons are mainly in two folds. Firstly, due to the complicated haze distribution, texture details are easy to be lost during the dehazing process. 
Secondly, since the training pairs are hard to be collected, training on limited data can easily lead to over-fitting problem. To tackle these two issues, we introduce a novel dehazing network using 2D discrete wavelet transform, namely DW-GAN. Specifically, we propose a two-branch network to deal with the aforementioned problems. By utilizing wavelet transform in DWT branch, our proposed method can retain more high-frequency knowledge in feature maps. In order to prevent over-fitting, ImageNet pre-trained Res2Net is adopted in the knowledge adaptation branch. Owing to the robust feature representations of ImageNet pre-training, the generalization ability of our network is improved dramatically. Finally, a patch-based discriminator is used to reduce artifacts of the restored images. Extensive experimental results demonstrate that the proposed method outperforms the state-of-the-arts quantitatively and qualitatively. The source code is available at https://github.com/liuh127/DW-GAN-Dehazing.", "fno": "489900a203", "keywords": [ "Cellular Neural Nets", "Discrete Wavelet Transforms", "Feature Extraction", "Filtering Theory", "Image Classification", "Image Coding", "Image Colour Analysis", "Image Denoising", "Image Enhancement", "Image Representation", "Image Restoration", "Image Texture", "Learning Artificial Intelligence", "Wavelet Transforms", "Hazy Images", "Color Distortion", "Visible Quality Degradation", "Existing CNN Based Methods", "Homogeneous Haze", "Nonhomogeneous Case", "Complicated Haze Distribution", "Texture Details", "Dehazing Process", "Training Pairs", "Over Fitting Problem", "Novel Dehazing Network", "Two Branch Network", "DWT Branch", "High Frequency Knowledge", "Feature Maps", "Image Net Pre Trained Res 2 Net", "Knowledge Adaptation Branch", "Robust Feature Representations", "Image Net Pre Training", "Patch Based Discriminator", "Restored Images", "Discrete Wavelet Transform GAN", "Non Homogeneous Dehazing", "Training", "Wavelet Domain", "Image 
Color Analysis", "Frequency Domain Analysis", "Generative Adversarial Networks", "Discrete Wavelet Transforms", "Pattern Recognition" ], "authors": [ { "affiliation": "McMaster University,Department of Electrical and Computer Engineering,Hamilton,Canada", "fullName": "Minghan Fu", "givenName": "Minghan", "surname": "Fu", "__typename": "ArticleAuthorType" }, { "affiliation": "McMaster University,Department of Electrical and Computer Engineering,Hamilton,Canada", "fullName": "Huan Liu", "givenName": "Huan", "surname": "Liu", "__typename": "ArticleAuthorType" }, { "affiliation": "McMaster University,Department of Electrical and Computer Engineering,Hamilton,Canada", "fullName": "Yankun Yu", "givenName": "Yankun", "surname": "Yu", "__typename": "ArticleAuthorType" }, { "affiliation": "McMaster University,Department of Electrical and Computer Engineering,Hamilton,Canada", "fullName": "Jun Chen", "givenName": "Jun", "surname": "Chen", "__typename": "ArticleAuthorType" }, { "affiliation": "Xidian University,State Key Laboratory of Integrated Service Networks,Xi’an,China", "fullName": "Keyan Wang", "givenName": "Keyan", "surname": "Wang", "__typename": "ArticleAuthorType" } ], "idPrefix": "cvprw", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "2021-06-01T00:00:00", "pubType": "proceedings", "pages": "203-212", "year": "2021", "issn": null, "isbn": "978-1-6654-4899-4", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "489900a193", "articleId": "1yVA4pFFIWs", "__typename": "AdjacentArticleType" }, "next": { "fno": "489900a213", "articleId": "1yJYk3NUaAM", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/cvprw/2018/6100/0/610000b025", "title": "High-Resolution Image Dehazing with Respect to Training Losses and Receptive Field Sizes", "doi": null, "abstractUrl": 
"/proceedings-article/cvprw/2018/610000b025/17D45Vw15sE", "parentPublication": { "id": "proceedings/cvprw/2018/6100/0", "title": "2018 IEEE/CVF Conference on Computer Vision and Pattern Recognition Workshops (CVPRW)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvprw/2018/6100/0/610000b250", "title": "Deep Learning Based Single Image Dehazing", "doi": null, "abstractUrl": "/proceedings-article/cvprw/2018/610000b250/17D45Vw15tI", "parentPublication": { "id": "proceedings/cvprw/2018/6100/0", "title": "2018 IEEE/CVF Conference on Computer Vision and Pattern Recognition Workshops (CVPRW)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/wacv/2020/6553/0/09093528", "title": "Scale-aware Conditional Generative Adversarial Network for Image Dehazing", "doi": null, "abstractUrl": "/proceedings-article/wacv/2020/09093528/1jPbB3kD2PS", "parentPublication": { "id": "proceedings/wacv/2020/6553/0", "title": "2020 IEEE Winter Conference on Applications of Computer Vision (WACV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvprw/2020/9360/0/09150828", "title": "NTIRE 2020 Challenge on NonHomogeneous Dehazing", "doi": null, "abstractUrl": "/proceedings-article/cvprw/2020/09150828/1lPH0VWdEAg", "parentPublication": { "id": "proceedings/cvprw/2020/9360/0", "title": "2020 IEEE/CVF Conference on Computer Vision and Pattern Recognition Workshops (CVPRW)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvprw/2020/9360/0/09150997", "title": "Trident Dehazing Network", "doi": null, "abstractUrl": "/proceedings-article/cvprw/2020/09150997/1lPHm6Y3MXe", "parentPublication": { "id": "proceedings/cvprw/2020/9360/0", "title": "2020 IEEE/CVF Conference on Computer Vision and Pattern Recognition Workshops (CVPRW)", "__typename": "ParentPublication" }, "__typename": 
"RecommendedArticleType" }, { "id": "proceedings/cvprw/2020/9360/0/09150624", "title": "Knowledge Transfer Dehazing Network for NonHomogeneous Dehazing", "doi": null, "abstractUrl": "/proceedings-article/cvprw/2020/09150624/1lPHoeGvl9m", "parentPublication": { "id": "proceedings/cvprw/2020/9360/0", "title": "2020 IEEE/CVF Conference on Computer Vision and Pattern Recognition Workshops (CVPRW)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ictai/2020/9228/0/922800a545", "title": "PHC-GAN: Physical Constraint Generative Adversarial Network for Single Image Dehazing", "doi": null, "abstractUrl": "/proceedings-article/ictai/2020/922800a545/1pP3unJfNjG", "parentPublication": { "id": "proceedings/ictai/2020/9228/0", "title": "2020 IEEE 32nd International Conference on Tools with Artificial Intelligence (ICTAI)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icpr/2021/8808/0/09413155", "title": "SIDGAN: Single Image Dehazing without Paired Supervision", "doi": null, "abstractUrl": "/proceedings-article/icpr/2021/09413155/1tmhFPGEUqk", "parentPublication": { "id": "proceedings/icpr/2021/8808/0", "title": "2020 25th International Conference on Pattern Recognition (ICPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/wacv/2021/0477/0/047700a413", "title": "Domain-Aware Unsupervised Hyperspectral Reconstruction for Aerial Image Dehazing", "doi": null, "abstractUrl": "/proceedings-article/wacv/2021/047700a413/1uqGgsROcY8", "parentPublication": { "id": "proceedings/wacv/2021/0477/0", "title": "2021 IEEE Winter Conference on Applications of Computer Vision (WACV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvprw/2021/4899/0/489900a627", "title": "NTIRE 2021 NonHomogeneous Dehazing Challenge Report", "doi": null, "abstractUrl": 
"/proceedings-article/cvprw/2021/489900a627/1yZ4oQ4PSco", "parentPublication": { "id": "proceedings/cvprw/2021/4899/0", "title": "2021 IEEE/CVF Conference on Computer Vision and Pattern Recognition Workshops (CVPRW)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "1BmEezmpGrm", "title": "2021 IEEE/CVF International Conference on Computer Vision (ICCV)", "acronym": "iccv", "groupId": "1000149", "volume": "0", "displayVolume": "0", "year": "2021", "__typename": "ProceedingType" }, "article": { "id": "1BmHhfdx7dm", "doi": "10.1109/ICCV48922.2021.00315", "title": "Voxel Transformer for 3D Object Detection", "normalizedTitle": "Voxel Transformer for 3D Object Detection", "abstract": "We present Voxel Transformer (VoTr), a novel and effective voxel-based Transformer backbone for 3D object detection from point clouds. Conventional 3D convolutional backbones in voxel-based 3D detectors cannot efficiently capture large context information, which is crucial for object recognition and localization, owing to the limited receptive fields. In this paper, we resolve the problem by introducing a Transformer-based architecture that enables long-range relationships between voxels by self-attention. Given the fact that non-empty voxels are naturally sparse but numerous, directly applying standard Transformer on voxels is non-trivial. To this end, we propose the sparse voxel module and the submanifold voxel module, which can operate on the empty and non-empty voxel positions effectively. To further enlarge the attention range while maintaining comparable computational overhead to the convolutional counterparts, we propose two attention mechanisms for multi-head attention in those two modules: Local Attention and Dilated Attention, and we further propose Fast Voxel Query to accelerate the querying process in multi-head attention. VoTr contains a series of sparse and submanifold voxel modules, and can be applied in most voxel-based detectors. 
Our proposed VoTr shows consistent improvement over the convolutional baselines while maintaining computational efficiency on the KITTI dataset and the Waymo Open dataset.", "abstracts": [ { "abstractType": "Regular", "content": "We present Voxel Transformer (VoTr), a novel and effective voxel-based Transformer backbone for 3D object detection from point clouds. Conventional 3D convolutional backbones in voxel-based 3D detectors cannot efficiently capture large context information, which is crucial for object recognition and localization, owing to the limited receptive fields. In this paper, we resolve the problem by introducing a Transformer-based architecture that enables long-range relationships between voxels by self-attention. Given the fact that non-empty voxels are naturally sparse but numerous, directly applying standard Transformer on voxels is non-trivial. To this end, we propose the sparse voxel module and the submanifold voxel module, which can operate on the empty and non-empty voxel positions effectively. To further enlarge the attention range while maintaining comparable computational overhead to the convolutional counterparts, we propose two attention mechanisms for multi-head attention in those two modules: Local Attention and Dilated Attention, and we further propose Fast Voxel Query to accelerate the querying process in multi-head attention. VoTr contains a series of sparse and submanifold voxel modules, and can be applied in most voxel-based detectors. Our proposed VoTr shows consistent improvement over the convolutional baselines while maintaining computational efficiency on the KITTI dataset and the Waymo Open dataset.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "We present Voxel Transformer (VoTr), a novel and effective voxel-based Transformer backbone for 3D object detection from point clouds. 
Conventional 3D convolutional backbones in voxel-based 3D detectors cannot efficiently capture large context information, which is crucial for object recognition and localization, owing to the limited receptive fields. In this paper, we resolve the problem by introducing a Transformer-based architecture that enables long-range relationships between voxels by self-attention. Given the fact that non-empty voxels are naturally sparse but numerous, directly applying standard Transformer on voxels is non-trivial. To this end, we propose the sparse voxel module and the submanifold voxel module, which can operate on the empty and non-empty voxel positions effectively. To further enlarge the attention range while maintaining comparable computational overhead to the convolutional counterparts, we propose two attention mechanisms for multi-head attention in those two modules: Local Attention and Dilated Attention, and we further propose Fast Voxel Query to accelerate the querying process in multi-head attention. VoTr contains a series of sparse and submanifold voxel modules, and can be applied in most voxel-based detectors. 
Our proposed VoTr shows consistent improvement over the convolutional baselines while maintaining computational efficiency on the KITTI dataset and the Waymo Open dataset.", "fno": "281200d144", "keywords": [ "Point Cloud Compression", "Location Awareness", "Computer Vision", "Three Dimensional Displays", "Detectors", "Object Detection", "Computer Architecture", "Detection And Localization In 2 D And 3 D", "Stereo", "3 D From Multiview And Other Sensors", "Vision For Robotics And Autonomous Vehicles" ], "authors": [ { "affiliation": "The Chinese University of Hong Kong", "fullName": "Jiageng Mao", "givenName": "Jiageng", "surname": "Mao", "__typename": "ArticleAuthorType" }, { "affiliation": "National University of Singapore", "fullName": "Yujing Xue", "givenName": "Yujing", "surname": "Xue", "__typename": "ArticleAuthorType" }, { "affiliation": "Huawei Noah’s Ark Lab", "fullName": "Minzhe Niu", "givenName": "Minzhe", "surname": "Niu", "__typename": "ArticleAuthorType" }, { "affiliation": "HKUST", "fullName": "Haoyue Bai", "givenName": "Haoyue", "surname": "Bai", "__typename": "ArticleAuthorType" }, { "affiliation": "National University of Singapore", "fullName": "Jiashi Feng", "givenName": "Jiashi", "surname": "Feng", "__typename": "ArticleAuthorType" }, { "affiliation": "Sun Yat-Sen University", "fullName": "Xiaodan Liang", "givenName": "Xiaodan", "surname": "Liang", "__typename": "ArticleAuthorType" }, { "affiliation": "Huawei Noah’s Ark Lab", "fullName": "Hang Xu", "givenName": "Hang", "surname": "Xu", "__typename": "ArticleAuthorType" }, { "affiliation": "Huawei Noah’s Ark Lab", "fullName": "Chunjing Xu", "givenName": "Chunjing", "surname": "Xu", "__typename": "ArticleAuthorType" } ], "idPrefix": "iccv", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "2021-10-01T00:00:00", "pubType": "proceedings", "pages": "3144-3153", "year": "2021", "issn": null, "isbn": "978-1-6654-2812-5", "notes": null, "notesType": 
null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "281200d133", "articleId": "1BmFAZXbK0g", "__typename": "AdjacentArticleType" }, "next": { "fno": "281200d154", "articleId": "1BmGkInihuo", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/cvpr/2022/6946/0/694600q6928", "title": "Fast Point Transformer", "doi": null, "abstractUrl": "/proceedings-article/cvpr/2022/694600q6928/1H0MTxeBHwY", "parentPublication": { "id": "proceedings/cvpr/2022/6946/0", "title": "2022 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvpr/2022/6946/0/694600i407", "title": "Voxel Set Transformer: A Set-to-Set Approach to 3D Object Detection from Point Clouds", "doi": null, "abstractUrl": "/proceedings-article/cvpr/2022/694600i407/1H0N8QxLrgs", "parentPublication": { "id": "proceedings/cvpr/2022/6946/0", "title": "2022 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvpr/2022/6946/0/694600i521", "title": "PTTR: Relational 3D Point Cloud Object Tracking with Transformer", "doi": null, "abstractUrl": "/proceedings-article/cvpr/2022/694600i521/1H1mwenTVBK", "parentPublication": { "id": "proceedings/cvpr/2022/6946/0", "title": "2022 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iccd/2022/6186/0/618600a509", "title": "VEA: An FPGA-Based Voxel Encoding Accelerator for 3D Object Detection with LiDAR", "doi": null, "abstractUrl": "/proceedings-article/iccd/2022/618600a509/1JeFPLaWKFG", "parentPublication": { "id": "proceedings/iccd/2022/6186/0", "title": "2022 IEEE 40th International Conference on Computer 
Design (ICCD)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/ai/5555/01/10018876", "title": "Accelerating Point-Voxel Representation of 3D Object Detection for Automatic Driving", "doi": null, "abstractUrl": "/journal/ai/5555/01/10018876/1K0DHbaOuYg", "parentPublication": { "id": "trans/ai", "title": "IEEE Transactions on Artificial Intelligence", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/wacv/2023/9346/0/934600a663", "title": "Dense Voxel Fusion for 3D Object Detection", "doi": null, "abstractUrl": "/proceedings-article/wacv/2023/934600a663/1KxUCsBZVny", "parentPublication": { "id": "proceedings/wacv/2023/9346/0", "title": "2023 IEEE/CVF Winter Conference on Applications of Computer Vision (WACV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/wacv/2023/9346/0/9.346E224", "title": "TransPillars: Coarse-to-Fine Aggregation for Multi-Frame 3D Object Detection", "doi": null, "abstractUrl": "/proceedings-article/wacv/2023/9.346E224/1KxV9wOosqA", "parentPublication": { "id": "proceedings/wacv/2023/9346/0", "title": "2023 IEEE/CVF Winter Conference on Applications of Computer Vision (WACV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/wacvw/2023/2056/0/205600a350", "title": "Masked Autoencoder for Self-Supervised Pre-training on Lidar Point Clouds", "doi": null, "abstractUrl": "/proceedings-article/wacvw/2023/205600a350/1Kzz7RCHLeU", "parentPublication": { "id": "proceedings/wacvw/2023/2056/0", "title": "2023 IEEE/CVF Winter Conference on Applications of Computer Vision Workshops (WACVW)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iccgiv/2022/9250/0/925000a159", "title": "Surface Transformer for 3D Object Detection", "doi": null, "abstractUrl": 
"/proceedings-article/iccgiv/2022/925000a159/1LxfmUUVFUk", "parentPublication": { "id": "proceedings/iccgiv/2022/9250/0", "title": "2022 2nd International Conference on Computer Graphics, Image and Virtualization (ICCGIV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iccst/2021/4254/0/425400a343", "title": "3D-VRVT: 3D Voxel Reconstruction from A Single Image with Vision Transformer", "doi": null, "abstractUrl": "/proceedings-article/iccst/2021/425400a343/1ziP8z7O3oA", "parentPublication": { "id": "proceedings/iccst/2021/4254/0", "title": "2021 International Conference on Culture-oriented Science & Technology (ICCST)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "1KYslFwrlyE", "title": "2022 International Conference on 3D Vision (3DV)", "acronym": "3dv", "groupId": "10044366", "volume": "0", "displayVolume": "0", "year": "2022", "__typename": "ProceedingType" }, "article": { "id": "1KYso11QHx6", "doi": "10.1109/3DV57658.2022.00055", "title": "GO-Surf: Neural Feature Grid Optimization for Fast, High-Fidelity RGB-D Surface Reconstruction", "normalizedTitle": "GO-Surf: Neural Feature Grid Optimization for Fast, High-Fidelity RGB-D Surface Reconstruction", "abstract": "We present GO-Surf, a direct feature grid optimization method for accurate and fast surface reconstruction from RGB-D sequences. We model the underlying scene with a learned hierarchical feature voxel grid that encapsulates multi-level geometric and appearance local information. Feature vectors are directly optimized such that after being tri-linearly interpolated, decoded by two shallow MLPs into signed distance and radiance values, and rendered via volume rendering, the discrepancy between synthesized and observed RGB/depth values is minimized. Our supervision signals - RGB, depth and approximate SDF - can be obtained directly from input images without any need for fusion or post-processing. We formulate a novel SDF gradient regularization term that encourages surface smoothness and hole filling while maintaining high frequency details. GO-Surf can optimize sequences of 1-2K frames in 15&#x2013;45 minutes, a speedup of <tex>Z_$\\times$_Z</tex> 60 over NeuralRGB-D [1], the most related approach based on an MLP representation, while maintaining on par performance on standard benchmarks. Project page: https://jingwenwang95.github.io/go_surf.", "abstracts": [ { "abstractType": "Regular", "content": "We present GO-Surf, a direct feature grid optimization method for accurate and fast surface reconstruction from RGB-D sequences. 
We model the underlying scene with a learned hierarchical feature voxel grid that encapsulates multi-level geometric and appearance local information. Feature vectors are directly optimized such that after being tri-linearly interpolated, decoded by two shallow MLPs into signed distance and radiance values, and rendered via volume rendering, the discrepancy between synthesized and observed RGB/depth values is minimized. Our supervision signals - RGB, depth and approximate SDF - can be obtained directly from input images without any need for fusion or post-processing. We formulate a novel SDF gradient regularization term that encourages surface smoothness and hole filling while maintaining high frequency details. GO-Surf can optimize sequences of 1-2K frames in 15&#x2013;45 minutes, a speedup of <tex>$\\times$</tex> 60 over NeuralRGB-D [1], the most related approach based on an MLP representation, while maintaining on par performance on standard benchmarks. Project page: https://jingwenwang95.github.io/go_surf.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "We present GO-Surf, a direct feature grid optimization method for accurate and fast surface reconstruction from RGB-D sequences. We model the underlying scene with a learned hierarchical feature voxel grid that encapsulates multi-level geometric and appearance local information. Feature vectors are directly optimized such that after being tri-linearly interpolated, decoded by two shallow MLPs into signed distance and radiance values, and rendered via volume rendering, the discrepancy between synthesized and observed RGB/depth values is minimized. Our supervision signals - RGB, depth and approximate SDF - can be obtained directly from input images without any need for fusion or post-processing. We formulate a novel SDF gradient regularization term that encourages surface smoothness and hole filling while maintaining high frequency details. 
GO-Surf can optimize sequences of 1-2K frames in 15–45 minutes, a speedup of × 60 over NeuralRGB-D [1], the most related approach based on an MLP representation, while maintaining on par performance on standard benchmarks. Project page: https://jingwenwang95.github.io/go_surf.", "fno": "567000a433", "keywords": [ "Feature Extraction", "Image Colour Analysis", "Image Reconstruction", "Interpolation", "Learning Artificial Intelligence", "Optimisation", "Rendering Computer Graphics", "Appearance Local Information", "Direct Feature Grid Optimization Method", "Feature Vectors", "GO Surf", "High Frequency Details", "High Fidelity RGB D Surface Reconstruction", "Hole Filling", "Learned Hierarchical Feature Voxel Grid", "Neural Feature Grid Optimization", "Radiance Values", "RGB", "SDF Gradient Regularization Term", "Shallow ML Ps", "Signed Distance", "Supervision Signals", "Surface Smoothness", "Time 15 0 Min To 45 0 Min", "Volume Rendering", "Training", "Surface Reconstruction", "Three Dimensional Displays", "Optimization Methods", "Benchmark Testing", "Rendering Computer Graphics", "Filling" ], "authors": [ { "affiliation": "University College,Department of Computer Science,London,England", "fullName": "Jingwen Wang", "givenName": "Jingwen", "surname": "Wang", "__typename": "ArticleAuthorType" }, { "affiliation": "University College,Department of Computer Science,London,England", "fullName": "Tymoteusz Bleja", "givenName": "Tymoteusz", "surname": "Bleja", "__typename": "ArticleAuthorType" }, { "affiliation": "University College,Department of Computer Science,London,England", "fullName": "Lourdes Agapito", "givenName": "Lourdes", "surname": "Agapito", "__typename": "ArticleAuthorType" } ], "idPrefix": "3dv", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "2022-09-01T00:00:00", "pubType": "proceedings", "pages": "433-442", "year": "2022", "issn": null, "isbn": "978-1-6654-5670-8", "notes": null, "notesType": null, 
"__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "567000a424", "articleId": "1KYst5Zv6Tu", "__typename": "AdjacentArticleType" }, "next": { "fno": "567000a443", "articleId": "1KYsusW6Agw", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/iccvw/2011/0063/0/06130322", "title": "Surface reconstruction for RGB-D data using real-time depth propagation", "doi": null, "abstractUrl": "/proceedings-article/iccvw/2011/06130322/12OmNBUAvUr", "parentPublication": { "id": "proceedings/iccvw/2011/0063/0", "title": "2011 IEEE International Conference on Computer Vision Workshops (ICCV Workshops)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/3dv/2017/2610/0/261001a667", "title": "Matterport3D: Learning from RGB-D Data in Indoor Environments", "doi": null, "abstractUrl": "/proceedings-article/3dv/2017/261001a667/12OmNywxlOg", "parentPublication": { "id": "proceedings/3dv/2017/2610/0", "title": "2017 International Conference on 3D Vision (3DV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iccv/2021/2812/0/281200g505", "title": "Learning Signed Distance Field for Multi-view Surface Reconstruction", "doi": null, "abstractUrl": "/proceedings-article/iccv/2021/281200g505/1BmFLjuiAKs", "parentPublication": { "id": "proceedings/iccv/2021/2812/0", "title": "2021 IEEE/CVF International Conference on Computer Vision (ICCV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvpr/2022/6946/0/694600g270", "title": "Gradient-SDF: A Semi-Implicit Surface Representation for 3D Reconstruction", "doi": null, "abstractUrl": "/proceedings-article/cvpr/2022/694600g270/1H0MXW1GTN6", "parentPublication": { "id": "proceedings/cvpr/2022/6946/0", "title": "2022 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)", 
"__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvpr/2022/6946/0/694600g280", "title": "Neural RGB-D Surface Reconstruction", "doi": null, "abstractUrl": "/proceedings-article/cvpr/2022/694600g280/1H0O2AVymha", "parentPublication": { "id": "proceedings/cvpr/2022/6946/0", "title": "2022 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvpr/2022/6946/0/694600p5883", "title": "High-Fidelity Human Avatars from a Single RGB Camera", "doi": null, "abstractUrl": "/proceedings-article/cvpr/2022/694600p5883/1H1hK72b9Je", "parentPublication": { "id": "proceedings/cvpr/2022/6946/0", "title": "2022 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvpr/2022/6946/0/694600f667", "title": "SphereSR: <tex>Z_$360^{\\circ}$_Z</tex> Image Super-Resolution with Arbitrary Projection via Continuous Spherical Image Representation", "doi": null, "abstractUrl": "/proceedings-article/cvpr/2022/694600f667/1H1mQNFEXEQ", "parentPublication": { "id": "proceedings/cvpr/2022/6946/0", "title": "2022 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/5555/01/09969571", "title": "Vox-Surf: Voxel-Based Implicit Surface Representation", "doi": null, "abstractUrl": "/journal/tg/5555/01/09969571/1IMidH7hZhC", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/wacv/2023/9346/0/934600e319", "title": "Recovering Fine Details for Neural Implicit Surface Reconstruction", "doi": null, "abstractUrl": 
"/proceedings-article/wacv/2023/934600e319/1KxUSVbk6He", "parentPublication": { "id": "proceedings/wacv/2023/9346/0", "title": "2023 IEEE/CVF Winter Conference on Applications of Computer Vision (WACV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/wacv/2023/9346/0/934600d105", "title": "High-Quality RGB-D Reconstruction via Multi-View Uncalibrated Photometric Stereo and Gradient-SDF", "doi": null, "abstractUrl": "/proceedings-article/wacv/2023/934600d105/1KxVaVLkeLS", "parentPublication": { "id": "proceedings/wacv/2023/9346/0", "title": "2023 IEEE/CVF Winter Conference on Applications of Computer Vision (WACV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "1m3n9N02qgE", "title": "2020 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)", "acronym": "cvpr", "groupId": "1000147", "volume": "0", "displayVolume": "0", "year": "2020", "__typename": "ProceedingType" }, "article": { "id": "1m3oiGZMFwc", "doi": "10.1109/CVPR42600.2020.00137", "title": "Deep Implicit Volume Compression", "normalizedTitle": "Deep Implicit Volume Compression", "abstract": "We describe a novel approach for compressing truncated signed distance fields (TSDF) stored in 3D voxel grids, and their corresponding textures. To compress the TSDF, our method relies on a block-based neural network architecture trained end-to-end, achieving state-of-the-art rate-distortion trade-off. To prevent topological errors, we losslessly compress the signs of the TSDF, which also upper bounds the reconstruction error by the voxel size. To compress the corresponding texture, we designed a fast block-based UV parameterization, generating coherent texture maps that can be effectively compressed using existing video compression algorithms. We demonstrate the performance of our algorithms on two 4D performance capture datasets, reducing bitrate by 66% for the same distortion, or alternatively reducing the distortion by 50% for the same bitrate, compared to the state-of-the-art.", "abstracts": [ { "abstractType": "Regular", "content": "We describe a novel approach for compressing truncated signed distance fields (TSDF) stored in 3D voxel grids, and their corresponding textures. To compress the TSDF, our method relies on a block-based neural network architecture trained end-to-end, achieving state-of-the-art rate-distortion trade-off. To prevent topological errors, we losslessly compress the signs of the TSDF, which also upper bounds the reconstruction error by the voxel size. 
To compress the corresponding texture, we designed a fast block-based UV parameterization, generating coherent texture maps that can be effectively compressed using existing video compression algorithms. We demonstrate the performance of our algorithms on two 4D performance capture datasets, reducing bitrate by 66% for the same distortion, or alternatively reducing the distortion by 50% for the same bitrate, compared to the state-of-the-art.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "We describe a novel approach for compressing truncated signed distance fields (TSDF) stored in 3D voxel grids, and their corresponding textures. To compress the TSDF, our method relies on a block-based neural network architecture trained end-to-end, achieving state-of-the-art rate-distortion trade-off. To prevent topological errors, we losslessly compress the signs of the TSDF, which also upper bounds the reconstruction error by the voxel size. To compress the corresponding texture, we designed a fast block-based UV parameterization, generating coherent texture maps that can be effectively compressed using existing video compression algorithms. 
We demonstrate the performance of our algorithms on two 4D performance capture datasets, reducing bitrate by 66% for the same distortion, or alternatively reducing the distortion by 50% for the same bitrate, compared to the state-of-the-art.", "fno": "716800b290", "keywords": [ "Data Compression", "Image Reconstruction", "Image Texture", "Neural Nets", "Video Coding", "Deep Implicit Volume Compression", "Distance Fields", "TSDF", "3 D Voxel Grids", "Rate Distortion Trade Off", "Topological Errors", "Reconstruction Error", "Voxel Size", "Fast Block Based UV Parameterization", "Coherent Texture Maps", "Video Compression Algorithms", "Block Based Neural Network Architecture", "Truncated Signed Distance Fields", "4 D Performance Capture Datasets", "Image Coding", "Three Dimensional Displays", "Geometry", "Bit Rate", "Image Reconstruction", "Surface Reconstruction", "Entropy" ], "authors": [ { "affiliation": "Google", "fullName": "Danhang Tang", "givenName": "Danhang", "surname": "Tang", "__typename": "ArticleAuthorType" }, { "affiliation": "Google", "fullName": "Saurabh Singh", "givenName": "Saurabh", "surname": "Singh", "__typename": "ArticleAuthorType" }, { "affiliation": "Google", "fullName": "Philip A. 
Chou", "givenName": "Philip A.", "surname": "Chou", "__typename": "ArticleAuthorType" }, { "affiliation": "Google", "fullName": "Christian Häne", "givenName": "Christian", "surname": "Häne", "__typename": "ArticleAuthorType" }, { "affiliation": "Google", "fullName": "Mingsong Dou", "givenName": "Mingsong", "surname": "Dou", "__typename": "ArticleAuthorType" }, { "affiliation": "Google", "fullName": "Sean Fanello", "givenName": "Sean", "surname": "Fanello", "__typename": "ArticleAuthorType" }, { "affiliation": "Google", "fullName": "Jonathan Taylor", "givenName": "Jonathan", "surname": "Taylor", "__typename": "ArticleAuthorType" }, { "affiliation": "Google", "fullName": "Philip Davidson", "givenName": "Philip", "surname": "Davidson", "__typename": "ArticleAuthorType" }, { "affiliation": "Google", "fullName": "Onur G. Guleryuz", "givenName": "Onur G.", "surname": "Guleryuz", "__typename": "ArticleAuthorType" }, { "affiliation": "Google", "fullName": "Yinda Zhang", "givenName": "Yinda", "surname": "Zhang", "__typename": "ArticleAuthorType" }, { "affiliation": "Google", "fullName": "Shahram Izadi", "givenName": "Shahram", "surname": "Izadi", "__typename": "ArticleAuthorType" }, { "affiliation": "Google", "fullName": "Andrea Tagliasacchi", "givenName": "Andrea", "surname": "Tagliasacchi", "__typename": "ArticleAuthorType" }, { "affiliation": "Google", "fullName": "Sofien Bouaziz", "givenName": "Sofien", "surname": "Bouaziz", "__typename": "ArticleAuthorType" }, { "affiliation": "Google", "fullName": "Cem Keskin", "givenName": "Cem", "surname": "Keskin", "__typename": "ArticleAuthorType" } ], "idPrefix": "cvpr", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "2020-06-01T00:00:00", "pubType": "proceedings", "pages": "1290-1300", "year": "2020", "issn": null, "isbn": "978-1-7281-7168-5", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": 
"716800b278", "articleId": "1m3neRj6c1O", "__typename": "AdjacentArticleType" }, "next": { "fno": "716800b301", "articleId": "1m3oeAqIA4U", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/icpr/1992/2920/0/00201918", "title": "Image data compression without distortion by minimizing entropy", "doi": null, "abstractUrl": "/proceedings-article/icpr/1992/00201918/12OmNAfPITz", "parentPublication": { "id": "proceedings/icpr/1992/2920/0", "title": "11th IAPR International Conference on Pattern Recognition. Vol. III. Conference C: Image, Speech and Signal Analysis,", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icmew/2017/0560/0/08026221", "title": "Multi-Intensity Illuminated Infrared video compression using MV-HEVC and 3D-HEVC", "doi": null, "abstractUrl": "/proceedings-article/icmew/2017/08026221/12OmNAoUT8b", "parentPublication": { "id": "proceedings/icmew/2017/0560/0", "title": "2017 IEEE International Conference on Multimedia & Expo Workshops (ICMEW)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/wacv/2015/6683/0/6683b046", "title": "Motion Segmentation of Truncated Signed Distance Function Based Volumetric Surfaces", "doi": null, "abstractUrl": "/proceedings-article/wacv/2015/6683b046/12OmNzC5SPW", "parentPublication": { "id": "proceedings/wacv/2015/6683/0", "title": "2015 IEEE Winter Conference on Applications of Computer Vision (WACV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvpr/2018/6420/0/642000e394", "title": "Conditional Probability Models for Deep Image Compression", "doi": null, "abstractUrl": "/proceedings-article/cvpr/2018/642000e394/17D45Xh13pl", "parentPublication": { "id": "proceedings/cvpr/2018/6420/0", "title": "2018 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)", "__typename": 
"ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tp/2021/08/09007740", "title": "Variational Level Set Evolution for Non-Rigid 3D Reconstruction From a Single Depth Camera", "doi": null, "abstractUrl": "/journal/tp/2021/08/09007740/1hGqrsQbjPO", "parentPublication": { "id": "trans/tp", "title": "IEEE Transactions on Pattern Analysis & Machine Intelligence", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iccv/2019/4803/0/480300d136", "title": "DSIC: Deep Stereo Image Compression", "doi": null, "abstractUrl": "/proceedings-article/iccv/2019/480300d136/1hQqjqoKrVS", "parentPublication": { "id": "proceedings/iccv/2019/4803/0", "title": "2019 IEEE/CVF International Conference on Computer Vision (ICCV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tp/2021/10/09050860", "title": "Learning Content-Weighted Deep Image Compression", "doi": null, "abstractUrl": "/journal/tp/2021/10/09050860/1iCrOv81YHe", "parentPublication": { "id": "trans/tp", "title": "IEEE Transactions on Pattern Analysis & Machine Intelligence", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icmew/2021/4989/0/09455990", "title": "Multiscale deep context modeling for lossless point cloud geometry compression", "doi": null, "abstractUrl": "/proceedings-article/icmew/2021/09455990/1uCgpHwLDoI", "parentPublication": { "id": "proceedings/icmew/2021/4989/0", "title": "2021 IEEE International Conference on Multimedia & Expo Workshops (ICMEW)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvpr/2021/4509/0/450900g038", "title": "VoxelContext-Net: An Octree based Framework for Point Cloud Compression", "doi": null, "abstractUrl": "/proceedings-article/cvpr/2021/450900g038/1yeJtfgn8E8", "parentPublication": { "id": "proceedings/cvpr/2021/4509/0", "title": "2021 IEEE/CVF 
Conference on Computer Vision and Pattern Recognition (CVPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvpr/2021/4509/0/450900i928", "title": "DI-Fusion: Online Implicit 3D Reconstruction with Deep Priors", "doi": null, "abstractUrl": "/proceedings-article/cvpr/2021/450900i928/1yeLpskgFXi", "parentPublication": { "id": "proceedings/cvpr/2021/4509/0", "title": "2021 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "12OmNvlxJwR", "title": "2017 IEEE 30th International Symposium on Computer-Based Medical Systems (CBMS)", "acronym": "cbms", "groupId": "1000153", "volume": "0", "displayVolume": "0", "year": "2017", "__typename": "ProceedingType" }, "article": { "id": "12OmNrAMEM6", "doi": "10.1109/CBMS.2017.110", "title": "A Differential Geometry Approach for Change Detection in Medical Images", "normalizedTitle": "A Differential Geometry Approach for Change Detection in Medical Images", "abstract": "Change detection is of paramount importance in medical imaging, serving as a non-invasive quantifiable powerful tool in diagnosis and in assessment of the outcome of treatment of tumors. We present a new quantitative method for detecting changes in volumetric medical data and in clustering of anatomical structures, based on assessment of volumetric distortions that are required in order to deform a test three-dimensional medical dataset segment onto its previously-acquired reference, or a given prototype in the case clustering. Unlike the voxel-based classical techniques of shape comparison, our algorithm operates on tetrahedral meshes and can, therefore be applied on both closed, simply-connected, surfaces and in volumetric domains with more sophisticated boundaries.", "abstracts": [ { "abstractType": "Regular", "content": "Change detection is of paramount importance in medical imaging, serving as a non-invasive quantifiable powerful tool in diagnosis and in assessment of the outcome of treatment of tumors. We present a new quantitative method for detecting changes in volumetric medical data and in clustering of anatomical structures, based on assessment of volumetric distortions that are required in order to deform a test three-dimensional medical dataset segment onto its previously-acquired reference, or a given prototype in the case clustering. 
Unlike the voxel-based classical techniques of shape comparison, our algorithm operates on tetrahedral meshes and can, therefore be applied on both closed, simply-connected, surfaces and in volumetric domains with more sophisticated boundaries.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "Change detection is of paramount importance in medical imaging, serving as a non-invasive quantifiable powerful tool in diagnosis and in assessment of the outcome of treatment of tumors. We present a new quantitative method for detecting changes in volumetric medical data and in clustering of anatomical structures, based on assessment of volumetric distortions that are required in order to deform a test three-dimensional medical dataset segment onto its previously-acquired reference, or a given prototype in the case clustering. Unlike the voxel-based classical techniques of shape comparison, our algorithm operates on tetrahedral meshes and can, therefore be applied on both closed, simply-connected, surfaces and in volumetric domains with more sophisticated boundaries.", "fno": "1710a085", "keywords": [ "Computational Geometry", "Differential Geometry", "Image Segmentation", "Medical Image Processing", "Mesh Generation", "Tumours", "Differential Geometry Approach", "Change Detection", "Medical Images", "Quantitative Method", "Volumetric Medical Data", "Anatomical Structures", "Volumetric Distortions", "Case Clustering", "Noninvasive Quantifiable Tool", "Tumor Treatment", "Three Dimensional Medical Dataset Segment", "Tetrahedral Meshes", "Closed Simply Connected Surfaces", "Distortion", "Strain", "Distortion Measurement", "Medical Diagnostic Imaging", "Atmospheric Measurements", "Particle Measurements", "Brain Imaging", "Change Detection", "Tetrahedral Meshes", "Volumetric Deformations" ], "authors": [ { "affiliation": null, "fullName": "Alexander Naitsat", "givenName": "Alexander", "surname": "Naitsat", "__typename": "ArticleAuthorType" }, { "affiliation": 
null, "fullName": "Emil Saucan", "givenName": "Emil", "surname": "Saucan", "__typename": "ArticleAuthorType" }, { "affiliation": null, "fullName": "Yehoshua Zeevi", "givenName": "Yehoshua", "surname": "Zeevi", "__typename": "ArticleAuthorType" } ], "idPrefix": "cbms", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "2017-06-01T00:00:00", "pubType": "proceedings", "pages": "85-88", "year": "2017", "issn": "2372-9198", "isbn": "978-1-5386-1710-6", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "1710a079", "articleId": "12OmNyuPL5T", "__typename": "AdjacentArticleType" }, "next": { "fno": "1710a089", "articleId": "12OmNBhHt7h", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/isise/2008/3494/2/3494b414", "title": "A Tetrahedral Mesh Generation Algorithm from Medical Images", "doi": null, "abstractUrl": "/proceedings-article/isise/2008/3494b414/12OmNBUS7cC", "parentPublication": { "id": "proceedings/isise/2008/3494/2", "title": "2008 International Symposium on Information Science and Engineering (ISISE)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/conielecomp/2007/2799/0/27990032", "title": "Data Hiding Scheme for Medical Images", "doi": null, "abstractUrl": "/proceedings-article/conielecomp/2007/27990032/12OmNBp52Dh", "parentPublication": { "id": "proceedings/conielecomp/2007/2799/0", "title": "Electronics, Communications, and Computers, International Conference on", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/asonam/2012/4799/0/4799a810", "title": "Robot-Assisted Medical Visualization with Floating Images", "doi": null, "abstractUrl": "/proceedings-article/asonam/2012/4799a810/12OmNvDI41P", "parentPublication": { "id": "proceedings/asonam/2012/4799/0", 
"title": "2012 IEEE/ACM International Conference on Advances in Social Networks Analysis and Mining", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/hisb/2012/4921/0/4921a147", "title": "Annio: A Web-Based Tool for Annotating Medical Images with Ontologies", "doi": null, "abstractUrl": "/proceedings-article/hisb/2012/4921a147/12OmNvjyxEN", "parentPublication": { "id": "proceedings/hisb/2012/4921/0", "title": "Healthcare Informatics, Imaging and Systems Biology, IEEE International Conference on", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/visual/1991/2245/0/00175802", "title": "Visualizing 4-D medical ultrasound data", "doi": null, "abstractUrl": "/proceedings-article/visual/1991/00175802/12OmNy314iQ", "parentPublication": { "id": "proceedings/visual/1991/2245/0", "title": "1991 Proceeding Visualization", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icip/1995/7310/2/73102017", "title": "On interframe coding models for volumetric medical data", "doi": null, "abstractUrl": "/proceedings-article/icip/1995/73102017/12OmNzlD9GU", "parentPublication": { "id": "proceedings/icip/1995/7310/2", "title": "Image Processing, International Conference on", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/acsat/2015/0423/0/07478751", "title": "Watermarking Algorithm for Medical Images Authentication", "doi": null, "abstractUrl": "/proceedings-article/acsat/2015/07478751/12OmNznCl1e", "parentPublication": { "id": "proceedings/acsat/2015/0423/0", "title": "2015 4th International Conference on Advanced Computer Science Applications and Technologies (ACSAT)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2017/01/07539334", "title": "GazeDx: Interactive Visual Analytics Framework for Comparative Gaze Analysis with Volumetric 
Medical Images", "doi": null, "abstractUrl": "/journal/tg/2017/01/07539334/13rRUxjQyvp", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iisa/2018/8161/0/08633672", "title": "A New Dissimilarity Measure for Clustering with Application to Dermoscopic Images", "doi": null, "abstractUrl": "/proceedings-article/iisa/2018/08633672/17D45XDIXXA", "parentPublication": { "id": "proceedings/iisa/2018/8161/0", "title": "2018 9th International Conference on Information, Intelligence, Systems and Applications (IISA)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvpr/2020/7168/0/716800e725", "title": "A Spatiotemporal Volumetric Interpolation Network for 4D Dynamic Medical Image", "doi": null, "abstractUrl": "/proceedings-article/cvpr/2020/716800e725/1m3o6jEDHEY", "parentPublication": { "id": "proceedings/cvpr/2020/7168/0", "title": "2020 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "12OmNwCJOWA", "title": "2011 International Conference on Digital Image Computing: Techniques and Applications", "acronym": "dicta", "groupId": "1001512", "volume": "0", "displayVolume": "0", "year": "2011", "__typename": "ProceedingType" }, "article": { "id": "12OmNvUaNpk", "doi": "10.1109/DICTA.2011.96", "title": "Face Recognition across Pose on Video Using Eigen Light-Fields", "normalizedTitle": "Face Recognition across Pose on Video Using Eigen Light-Fields", "abstract": "We propose an approach to employ eigen light-fields for face recognition across pose on video. Faces of a subject are collected from video frames and combined based on the pose to obtain a set of probe light-fields. These probe data are then projected to the principal subspace of the eigen light-fields within which the classification takes place. We modify the original light-field projection and found that it is more robust in the proposed system. Evaluation on VidTIMIT dataset has demonstrated that the eigen light-fields method is able to take advantage of multiple observations contained in the video.", "abstracts": [ { "abstractType": "Regular", "content": "We propose an approach to employ eigen light-fields for face recognition across pose on video. Faces of a subject are collected from video frames and combined based on the pose to obtain a set of probe light-fields. These probe data are then projected to the principal subspace of the eigen light-fields within which the classification takes place. We modify the original light-field projection and found that it is more robust in the proposed system. Evaluation on VidTIMIT dataset has demonstrated that the eigen light-fields method is able to take advantage of multiple observations contained in the video.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "We propose an approach to employ eigen light-fields for face recognition across pose on video. 
Faces of a subject are collected from video frames and combined based on the pose to obtain a set of probe light-fields. These probe data are then projected to the principal subspace of the eigen light-fields within which the classification takes place. We modify the original light-field projection and found that it is more robust in the proposed system. Evaluation on VidTIMIT dataset has demonstrated that the eigen light-fields method is able to take advantage of multiple observations contained in the video.", "fno": "4588a536", "keywords": [ "Face Recognition", "Video", "Pose", "Light Fields" ], "authors": [ { "affiliation": null, "fullName": "Moh Edi Wibowo", "givenName": "Moh Edi", "surname": "Wibowo", "__typename": "ArticleAuthorType" }, { "affiliation": null, "fullName": "Dian Tjondronegoro", "givenName": "Dian", "surname": "Tjondronegoro", "__typename": "ArticleAuthorType" } ], "idPrefix": "dicta", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "2011-12-01T00:00:00", "pubType": "proceedings", "pages": "536-541", "year": "2011", "issn": null, "isbn": "978-0-7695-4588-2", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "4588a530", "articleId": "12OmNAR1aTQ", "__typename": "AdjacentArticleType" }, "next": { "fno": "4588a542", "articleId": "12OmNyQ7FYz", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/fg/2002/1602/0/16020003", "title": "Eigen Light-Fields and Face Recognition Across Pose", "doi": null, "abstractUrl": "/proceedings-article/fg/2002/16020003/12OmNvTBBcv", "parentPublication": { "id": "proceedings/fg/2002/1602/0", "title": "Proceedings of Fifth IEEE International Conference on Automatic Face Gesture Recognition", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "mags/co/2006/08/r8046", "title": "Light 
Fields and Computational Imaging", "doi": null, "abstractUrl": "/magazine/co/2006/08/r8046/13rRUxD9h0I", "parentPublication": { "id": "mags/co", "title": "Computer", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tp/2004/04/i0449", "title": "Appearance-Based Face Recognition and Light-Fields", "doi": null, "abstractUrl": "/journal/tp/2004/04/i0449/13rRUyYjKbp", "parentPublication": { "id": "trans/tp", "title": "IEEE Transactions on Pattern Analysis & Machine Intelligence", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icnc/2019/9223/0/08685526", "title": "A New Prediction Structure for Efficient MV-HEVC based Light Field Video Compression", "doi": null, "abstractUrl": "/proceedings-article/icnc/2019/08685526/19RRHS0Ov6w", "parentPublication": { "id": "proceedings/icnc/2019/9223/0", "title": "2019 International Conference on Computing, Networking and Communications (ICNC)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvpr/2022/6946/0/694600t9787", "title": "Learning Neural Light Fields with Ray-Space Embedding", "doi": null, "abstractUrl": "/proceedings-article/cvpr/2022/694600t9787/1H0OiVLs2TS", "parentPublication": { "id": "proceedings/cvpr/2022/6946/0", "title": "2022 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvpr/2022/6946/0/694600s8398", "title": "Neural Point Light Fields", "doi": null, "abstractUrl": "/proceedings-article/cvpr/2022/694600s8398/1H1kUbIJXgY", "parentPublication": { "id": "proceedings/cvpr/2022/6946/0", "title": "2022 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/5555/01/09968104", "title": "Neural Subspaces for Light Fields", "doi": null, 
"abstractUrl": "/journal/tg/5555/01/09968104/1IKDek8SF0c", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/3dv/2019/3131/0/313100a482", "title": "Light Field Compression using Eigen Textures", "doi": null, "abstractUrl": "/proceedings-article/3dv/2019/313100a482/1ezRAUnTCpy", "parentPublication": { "id": "proceedings/3dv/2019/3131/0", "title": "2019 International Conference on 3D Vision (3DV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2021/09/09043741", "title": "4D Light Field Segmentation From Light Field Super-Pixel Hypergraph Representation", "doi": null, "abstractUrl": "/journal/tg/2021/09/09043741/1ilQLDcivHa", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/3dv/2020/8128/0/812800a672", "title": "A Linear Approach to Absolute Pose Estimation for Light Fields", "doi": null, "abstractUrl": "/proceedings-article/3dv/2020/812800a672/1qyxisAtUOI", "parentPublication": { "id": "proceedings/3dv/2020/8128/0", "title": "2020 International Conference on 3D Vision (3DV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "1BmEezmpGrm", "title": "2021 IEEE/CVF International Conference on Computer Vision (ICCV)", "acronym": "iccv", "groupId": "1000149", "volume": "0", "displayVolume": "0", "year": "2021", "__typename": "ProceedingType" }, "article": { "id": "1BmELbO5QZi", "doi": "10.1109/ICCV48922.2021.01396", "title": "SIGNET: Efficient Neural Representation for Light Fields", "normalizedTitle": "SIGNET: Efficient Neural Representation for Light Fields", "abstract": "We present a novel neural representation for light field content that enables compact storage and easy local reconstruction with high fidelity. We use a fully-connected neural network to learn the mapping function between each light field pixel&#x2019;s coordinates and its corresponding color values. Since neural networks that simply take in raw coordinates are unable to accurately learn data containing fine details, we present an input transformation strategy based on the Gegenbauer polynomials, which previously showed theoretical advantages over the Fourier basis. We conduct experiments that show our Gegenbauer-based design combined with sinusoidal activation functions leads to a better light field reconstruction quality than a variety of network designs, including those with Fourier-inspired techniques introduced by prior works. Moreover, our SInusoidal Gegenbauer NETwork, or SIGNET, can represent light field scenes more compactly than the state-of-the-art compression methods while maintaining a comparable reconstruction quality. SIGNET also innately allows random access to encoded light field pixels due to its functional design. We further demonstrate that SIGNET&#x2019;s super-resolution capability without any additional training.", "abstracts": [ { "abstractType": "Regular", "content": "We present a novel neural representation for light field content that enables compact storage and easy local reconstruction with high fidelity. 
We use a fully-connected neural network to learn the mapping function between each light field pixel&#x2019;s coordinates and its corresponding color values. Since neural networks that simply take in raw coordinates are unable to accurately learn data containing fine details, we present an input transformation strategy based on the Gegenbauer polynomials, which previously showed theoretical advantages over the Fourier basis. We conduct experiments that show our Gegenbauer-based design combined with sinusoidal activation functions leads to a better light field reconstruction quality than a variety of network designs, including those with Fourier-inspired techniques introduced by prior works. Moreover, our SInusoidal Gegenbauer NETwork, or SIGNET, can represent light field scenes more compactly than the state-of-the-art compression methods while maintaining a comparable reconstruction quality. SIGNET also innately allows random access to encoded light field pixels due to its functional design. We further demonstrate that SIGNET&#x2019;s super-resolution capability without any additional training.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "We present a novel neural representation for light field content that enables compact storage and easy local reconstruction with high fidelity. We use a fully-connected neural network to learn the mapping function between each light field pixel’s coordinates and its corresponding color values. Since neural networks that simply take in raw coordinates are unable to accurately learn data containing fine details, we present an input transformation strategy based on the Gegenbauer polynomials, which previously showed theoretical advantages over the Fourier basis. 
We conduct experiments that show our Gegenbauer-based design combined with sinusoidal activation functions leads to a better light field reconstruction quality than a variety of network designs, including those with Fourier-inspired techniques introduced by prior works. Moreover, our SInusoidal Gegenbauer NETwork, or SIGNET, can represent light field scenes more compactly than the state-of-the-art compression methods while maintaining a comparable reconstruction quality. SIGNET also innately allows random access to encoded light field pixels due to its functional design. We further demonstrate that SIGNET’s super-resolution capability without any additional training.", "fno": "281200o4204", "keywords": [ "Training", "Visualization", "Computer Vision", "Image Color Analysis", "Neural Networks", "Superresolution", "Light Fields", "Image And Video Synthesis", "Computational Photography", "Machine Learning Architectures And Formulations", "Neural Generative Models", "Representation Learning" ], "authors": [ { "affiliation": "University of Maryland,College Park", "fullName": "Brandon Yushan Feng", "givenName": "Brandon Yushan", "surname": "Feng", "__typename": "ArticleAuthorType" }, { "affiliation": "University of Maryland,College Park", "fullName": "Amitabh Varshney", "givenName": "Amitabh", "surname": "Varshney", "__typename": "ArticleAuthorType" } ], "idPrefix": "iccv", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "2021-10-01T00:00:00", "pubType": "proceedings", "pages": "14204-14213", "year": "2021", "issn": null, "isbn": "978-1-6654-2812-5", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "281200o4194", "articleId": "1BmHAqsQxO0", "__typename": "AdjacentArticleType" }, "next": { "fno": "281200o4214", "articleId": "1BmFw9x3lKM", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { 
"id": "proceedings/dicta/2011/4588/0/4588a536", "title": "Face Recognition across Pose on Video Using Eigen Light-Fields", "doi": null, "abstractUrl": "/proceedings-article/dicta/2011/4588a536/12OmNvUaNpk", "parentPublication": { "id": "proceedings/dicta/2011/4588/0", "title": "2011 International Conference on Digital Image Computing: Techniques and Applications", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tp/2004/04/i0449", "title": "Appearance-Based Face Recognition and Light-Fields", "doi": null, "abstractUrl": "/journal/tp/2004/04/i0449/13rRUyYjKbp", "parentPublication": { "id": "trans/tp", "title": "IEEE Transactions on Pattern Analysis & Machine Intelligence", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/wacv/2022/0915/0/091500d152", "title": "Fast and Efficient Restoration of Extremely Dark Light Fields", "doi": null, "abstractUrl": "/proceedings-article/wacv/2022/091500d152/1B12JT0S3y8", "parentPublication": { "id": "proceedings/wacv/2022/0915/0", "title": "2022 IEEE/CVF Winter Conference on Applications of Computer Vision (WACV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvpr/2022/6946/0/694600t9787", "title": "Learning Neural Light Fields with Ray-Space Embedding", "doi": null, "abstractUrl": "/proceedings-article/cvpr/2022/694600t9787/1H0OiVLs2TS", "parentPublication": { "id": "proceedings/cvpr/2022/6946/0", "title": "2022 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvpr/2022/6946/0/694600m2943", "title": "Towards Multimodal Depth Estimation from Light Fields", "doi": null, "abstractUrl": "/proceedings-article/cvpr/2022/694600m2943/1H1k4uRP4sM", "parentPublication": { "id": "proceedings/cvpr/2022/6946/0", "title": "2022 IEEE/CVF Conference on Computer Vision and 
Pattern Recognition (CVPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvpr/2022/6946/0/694600s8398", "title": "Neural Point Light Fields", "doi": null, "abstractUrl": "/proceedings-article/cvpr/2022/694600s8398/1H1kUbIJXgY", "parentPublication": { "id": "proceedings/cvpr/2022/6946/0", "title": "2022 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/5555/01/09968104", "title": "Neural Subspaces for Light Fields", "doi": null, "abstractUrl": "/journal/tg/5555/01/09968104/1IKDek8SF0c", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2021/09/09043741", "title": "4D Light Field Segmentation From Light Field Super-Pixel Hypergraph Representation", "doi": null, "abstractUrl": "/journal/tg/2021/09/09043741/1ilQLDcivHa", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/3dv/2020/8128/0/812800a672", "title": "A Linear Approach to Absolute Pose Estimation for Light Fields", "doi": null, "abstractUrl": "/proceedings-article/3dv/2020/812800a672/1qyxisAtUOI", "parentPublication": { "id": "proceedings/3dv/2020/8128/0", "title": "2020 International Conference on 3D Vision (3DV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvpr/2021/4509/0/450900k0005", "title": "Light Field Super-Resolution with Zero-Shot Learning", "doi": null, "abstractUrl": "/proceedings-article/cvpr/2021/450900k0005/1yeISN5Dx4c", "parentPublication": { "id": "proceedings/cvpr/2021/4509/0", "title": "2021 IEEE/CVF Conference on Computer Vision and Pattern Recognition 
(CVPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "1G4EUUmGcrS", "title": "2022 IEEE International Conference on Multimedia and Expo Workshops (ICMEW)", "acronym": "icmew", "groupId": "1801805", "volume": "0", "displayVolume": "0", "year": "2022", "__typename": "ProceedingType" }, "article": { "id": "1G4F0ndbVoQ", "doi": "10.1109/ICMEW56448.2022.9859373", "title": "LFC-SASR: Light Field Coding Using Spatial and Angular Super-Resolution", "normalizedTitle": "LFC-SASR: Light Field Coding Using Spatial and Angular Super-Resolution", "abstract": "Light field imaging enables post-capture actions such as refocusing and changing view perspective by capturing both spatial and angular information. However, capturing richer information of the 3D scene results in a huge amount of data. To improve the compression efficiency of the existing light field compression methods, we investigate the impact of light field super-resolution approaches (both spatial and angular super-resolution) on the compression efficiency. To this end, firstly, we downscale light field images over (i) spatial resolution, (ii) angular resolution, and (iii) spatial-angular resolution and encode them using Versatile Video Coding (VVC). We then apply a set of light field super-resolution deep neural networks to reconstruct light field images in their full spatial-angular resolution and compare their compression efficiency. Experimental results show that encoding the low angular resolution light field image and applying angular super-resolution yield bitrate savings of 51.16% and 53.41% to maintain the same PSNR and SSIM, respectively, compared to encoding the light field image in high-resolution.", "abstracts": [ { "abstractType": "Regular", "content": "Light field imaging enables post-capture actions such as refocusing and changing view perspective by capturing both spatial and angular information. However, capturing richer information of the 3D scene results in a huge amount of data. 
To improve the compression efficiency of the existing light field compression methods, we investigate the impact of light field super-resolution approaches (both spatial and angular super-resolution) on the compression efficiency. To this end, firstly, we downscale light field images over (i) spatial resolution, (ii) angular resolution, and (iii) spatial-angular resolution and encode them using Versatile Video Coding (VVC). We then apply a set of light field super-resolution deep neural networks to reconstruct light field images in their full spatial-angular resolution and compare their compression efficiency. Experimental results show that encoding the low angular resolution light field image and applying angular super-resolution yield bitrate savings of 51.16% and 53.41% to maintain the same PSNR and SSIM, respectively, compared to encoding the light field image in high-resolution.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "Light field imaging enables post-capture actions such as refocusing and changing view perspective by capturing both spatial and angular information. However, capturing richer information of the 3D scene results in a huge amount of data. To improve the compression efficiency of the existing light field compression methods, we investigate the impact of light field super-resolution approaches (both spatial and angular super-resolution) on the compression efficiency. To this end, firstly, we downscale light field images over (i) spatial resolution, (ii) angular resolution, and (iii) spatial-angular resolution and encode them using Versatile Video Coding (VVC). We then apply a set of light field super-resolution deep neural networks to reconstruct light field images in their full spatial-angular resolution and compare their compression efficiency. 
Experimental results show that encoding the low angular resolution light field image and applying angular super-resolution yield bitrate savings of 51.16% and 53.41% to maintain the same PSNR and SSIM, respectively, compared to encoding the light field image in high-resolution.", "fno": "09859373", "keywords": [ "Image Reconstruction", "Image Resolution", "Image Restoration", "Neural Nets", "Video Coding", "Light Field Coding", "Spatial Super Resolution", "Post Capture Actions", "Spatial Information", "Angular Information", "Light Field Compression Methods", "Light Field Super Resolution Approaches", "Spatial Angular Resolution", "Angular Super Resolution Yield Bitrate Savings", "Angular Resolution Light Field Image", "Super Resolution Deep Neural Networks", "Versatile Video Coding", "Encoding", "PSNR", "SSIM", "Video Coding", "Image Coding", "Three Dimensional Displays", "Superresolution", "Neural Networks", "Imaging", "Light Fields", "Light Field", "Compression", "Super Resolution", "VVC" ], "authors": [ { "affiliation": "Alpen-Adria-Universität Klagenfurt,Christian Doppler Laboratory ATHENA,Klagenfurt,Austria", "fullName": "Ekrem Çetinkaya", "givenName": "Ekrem", "surname": "Çetinkaya", "__typename": "ArticleAuthorType" }, { "affiliation": "Alpen-Adria-Universität Klagenfurt,Christian Doppler Laboratory ATHENA,Klagenfurt,Austria", "fullName": "Hadi Amirpour", "givenName": "Hadi", "surname": "Amirpour", "__typename": "ArticleAuthorType" }, { "affiliation": "Alpen-Adria-Universität Klagenfurt,Christian Doppler Laboratory ATHENA,Klagenfurt,Austria", "fullName": "Christian Timmerer", "givenName": "Christian", "surname": "Timmerer", "__typename": "ArticleAuthorType" } ], "idPrefix": "icmew", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "2022-07-01T00:00:00", "pubType": "proceedings", "pages": "1-6", "year": "2022", "issn": null, "isbn": "978-1-6654-7218-0", "notes": null, "notesType": null, "__typename": 
"ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "09859482", "articleId": "1G4EWDH2yKk", "__typename": "AdjacentArticleType" }, "next": { "fno": "09859382", "articleId": "1G4F1tWFNbq", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/iccvw/2015/9711/0/5720a057", "title": "Learning a Deep Convolutional Network for Light-Field Image Super-Resolution", "doi": null, "abstractUrl": "/proceedings-article/iccvw/2015/5720a057/12OmNyL0TyY", "parentPublication": { "id": "proceedings/iccvw/2015/9711/0", "title": "2015 IEEE International Conference on Computer Vision Workshop (ICCVW)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tp/2020/05/08620368", "title": "Light Field Super-Resolution Using a Low-Rank Prior and Deep Convolutional Neural Networks", "doi": null, "abstractUrl": "/journal/tp/2020/05/08620368/17D45Wt3Exc", "parentPublication": { "id": "trans/tp", "title": "IEEE Transactions on Pattern Analysis & Machine Intelligence", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/5555/01/09798876", "title": "Deep Light Field Spatial Super-Resolution Using Heterogeneous Imaging", "doi": null, "abstractUrl": "/journal/tg/5555/01/09798876/1Eho8QXQucg", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/sitis/2022/6495/0/649500a600", "title": "The effect of angular resolution and 3D rendering on the perceived quality of the industrial use cases of light field visualization", "doi": null, "abstractUrl": "/proceedings-article/sitis/2022/649500a600/1MeoEsRvvI4", "parentPublication": { "id": "proceedings/sitis/2022/6495/0", "title": "2022 16th International Conference on Signal-Image Technology & Internet-Based Systems (SITIS)", 
"__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2021/06/08924770", "title": "Revisiting Spatio-Angular Trade-off in Light Field Cameras and Extended Applications in Super-Resolution", "doi": null, "abstractUrl": "/journal/tg/2021/06/08924770/1fvZlX1pNU4", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvpr/2019/3293/0/329300l1038", "title": "Residual Networks for Light Field Image Super-Resolution", "doi": null, "abstractUrl": "/proceedings-article/cvpr/2019/329300l1038/1gyrMPr3gcw", "parentPublication": { "id": "proceedings/cvpr/2019/3293/0", "title": "2019 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvprw/2019/2506/0/250600b804", "title": "Light Field Super-Resolution: A Benchmark", "doi": null, "abstractUrl": "/proceedings-article/cvprw/2019/250600b804/1iTvo7kjJFm", "parentPublication": { "id": "proceedings/cvprw/2019/2506/0", "title": "2019 IEEE/CVF Conference on Computer Vision and Pattern Recognition Workshops (CVPRW)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2021/09/09043741", "title": "4D Light Field Segmentation From Light Field Super-Pixel Hypergraph Representation", "doi": null, "abstractUrl": "/journal/tg/2021/09/09043741/1ilQLDcivHa", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "mags/mu/2021/04/09392312", "title": "LFI-Augmenter: Intelligent Light Field Image Editing With Interleaved Spatial-Angular Convolution", "doi": null, "abstractUrl": "/magazine/mu/2021/04/09392312/1sq7wcFIASI", "parentPublication": { "id": 
"mags/mu", "title": "IEEE MultiMedia", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvpr/2021/4509/0/450900k0005", "title": "Light Field Super-Resolution with Zero-Shot Learning", "doi": null, "abstractUrl": "/proceedings-article/cvpr/2021/450900k0005/1yeISN5Dx4c", "parentPublication": { "id": "proceedings/cvpr/2021/4509/0", "title": "2021 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "1H1gVMlkl32", "title": "2022 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)", "acronym": "cvpr", "groupId": "1000147", "volume": "0", "displayVolume": "0", "year": "2022", "__typename": "ProceedingType" }, "article": { "id": "1H0OiVLs2TS", "doi": "10.1109/CVPR52688.2022.01920", "title": "Learning Neural Light Fields with Ray-Space Embedding", "normalizedTitle": "Learning Neural Light Fields with Ray-Space Embedding", "abstract": "Neural radiance fields (NeRFs) produce state-of-the-art view synthesis results, but are slow to render, requiring hundreds of network evaluations per pixel to approximate a volume rendering integral. Baking NeRFs into explicit data structures enables efficient rendering, but results in large memory footprints and, in some cases, quality reduction. Additionally, volumetric representations for view synthesis often struggle to represent challenging view dependent effects such as distorted reflections and refractions. We present a novel neural light field representation that, in contrast to prior work, is fast, memory efficient, and excels at modeling complicated view dependence. Our method supports rendering with a single network evaluation per pixel for small baseline light fields and with only a few evaluations per pixel for light fields with larger baselines. At the core of our approach is a ray-space embedding network that maps 4D ray-space into an intermediate, interpolable latent space. Our method achieves state-of-the-art quality on dense forward-facing datasets such as the Stanford Light Field dataset. 
In addition, for forward-facing scenes with sparser inputs we achieve results that are competitive with NeRF-based approaches while providing a better speed/quality/memory trade-off with far fewer network evaluations.", "abstracts": [ { "abstractType": "Regular", "content": "Neural radiance fields (NeRFs) produce state-of-the-art view synthesis results, but are slow to render, requiring hundreds of network evaluations per pixel to approximate a volume rendering integral. Baking NeRFs into explicit data structures enables efficient rendering, but results in large memory footprints and, in some cases, quality reduction. Additionally, volumetric representations for view synthesis often struggle to represent challenging view dependent effects such as distorted reflections and refractions. We present a novel neural light field representation that, in contrast to prior work, is fast, memory efficient, and excels at modeling complicated view dependence. Our method supports rendering with a single network evaluation per pixel for small baseline light fields and with only a few evaluations per pixel for light fields with larger baselines. At the core of our approach is a ray-space embedding network that maps 4D ray-space into an intermediate, interpolable latent space. Our method achieves state-of-the-art quality on dense forward-facing datasets such as the Stanford Light Field dataset. In addition, for forward-facing scenes with sparser inputs we achieve results that are competitive with NeRF-based approaches while providing a better speed/quality/memory trade-off with far fewer network evaluations.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "Neural radiance fields (NeRFs) produce state-of-the-art view synthesis results, but are slow to render, requiring hundreds of network evaluations per pixel to approximate a volume rendering integral. 
Baking NeRFs into explicit data structures enables efficient rendering, but results in large memory footprints and, in some cases, quality reduction. Additionally, volumetric representations for view synthesis often struggle to represent challenging view dependent effects such as distorted reflections and refractions. We present a novel neural light field representation that, in contrast to prior work, is fast, memory efficient, and excels at modeling complicated view dependence. Our method supports rendering with a single network evaluation per pixel for small baseline light fields and with only a few evaluations per pixel for light fields with larger baselines. At the core of our approach is a ray-space embedding network that maps 4D ray-space into an intermediate, interpolable latent space. Our method achieves state-of-the-art quality on dense forward-facing datasets such as the Stanford Light Field dataset. In addition, for forward-facing scenes with sparser inputs we achieve results that are competitive with NeRF-based approaches while providing a better speed/quality/memory trade-off with far fewer network evaluations.", "fno": "694600t9787", "keywords": [ "Data Structures", "Learning Artificial Intelligence", "Neural Nets", "Rendering Computer Graphics", "Neural Light Fields", "Neural Radiance Fields", "State Of The Art View Synthesis Results", "Approximate A Volume Rendering Integral", "Baking Ne R Fs", "Explicit Data Structures", "Efficient Rendering", "Memory Footprints", "Quality Reduction", "Volumetric Representations", "Challenging View Dependent Effects", "Distorted Reflections", "Refractions", "Novel Neural Light Field Representation", "Memory Efficient", "Complicated View Dependence", "Single Network Evaluation", "Baseline Light Fields", "Larger Baselines", "Ray Space Embedding Network", "Intermediate Space", "Interpolable Latent Space", "State Of The Art Quality", "Stanford Light Field Dataset", "Ne RF Based Approaches", "Fewer Network 
Evaluations", "Photography", "Interpolation", "Three Dimensional Displays", "Memory Management", "Rendering Computer Graphics", "Light Fields", "Reflection" ], "authors": [ { "affiliation": "Carnegie Mellon University", "fullName": "Benjamin Attal", "givenName": "Benjamin", "surname": "Attal", "__typename": "ArticleAuthorType" }, { "affiliation": "Meta", "fullName": "Jia-Bin Huang", "givenName": "Jia-Bin", "surname": "Huang", "__typename": "ArticleAuthorType" }, { "affiliation": "Reality Labs Research", "fullName": "Michael Zollhöfer", "givenName": "Michael", "surname": "Zollhöfer", "__typename": "ArticleAuthorType" }, { "affiliation": "Meta", "fullName": "Johannes Kopf", "givenName": "Johannes", "surname": "Kopf", "__typename": "ArticleAuthorType" }, { "affiliation": "Meta", "fullName": "Changil Kim", "givenName": "Changil", "surname": "Kim", "__typename": "ArticleAuthorType" } ], "idPrefix": "cvpr", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "2022-06-01T00:00:00", "pubType": "proceedings", "pages": "19787-19797", "year": "2022", "issn": null, "isbn": "978-1-6654-6946-3", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [ { "id": "1H0OiSjZH5m", "name": "pcvpr202269460-09878939s1-mm_694600t9787.zip", "size": "4.09 MB", "location": "https://www.computer.org/csdl/api/v1/extra/pcvpr202269460-09878939s1-mm_694600t9787.zip", "__typename": "WebExtraType" } ], "adjacentArticles": { "previous": { "fno": "694600t9777", "articleId": "1H0OlhX9DfW", "__typename": "AdjacentArticleType" }, "next": { "fno": "694600t9798", "articleId": "1H0NzLKdqMw", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/dicta/2011/4588/0/4588a536", "title": "Face Recognition across Pose on Video Using Eigen Light-Fields", "doi": null, "abstractUrl": "/proceedings-article/dicta/2011/4588a536/12OmNvUaNpk", "parentPublication": { "id": 
"proceedings/dicta/2011/4588/0", "title": "2011 International Conference on Digital Image Computing: Techniques and Applications", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "mags/co/2006/08/r8046", "title": "Light Fields and Computational Imaging", "doi": null, "abstractUrl": "/magazine/co/2006/08/r8046/13rRUxD9h0I", "parentPublication": { "id": "mags/co", "title": "Computer", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tp/2004/04/i0449", "title": "Appearance-Based Face Recognition and Light-Fields", "doi": null, "abstractUrl": "/journal/tp/2004/04/i0449/13rRUyYjKbp", "parentPublication": { "id": "trans/tp", "title": "IEEE Transactions on Pattern Analysis & Machine Intelligence", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/wacv/2022/0915/0/091500d152", "title": "Fast and Efficient Restoration of Extremely Dark Light Fields", "doi": null, "abstractUrl": "/proceedings-article/wacv/2022/091500d152/1B12JT0S3y8", "parentPublication": { "id": "proceedings/wacv/2022/0915/0", "title": "2022 IEEE/CVF Winter Conference on Applications of Computer Vision (WACV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvpr/2022/6946/0/694600m2943", "title": "Towards Multimodal Depth Estimation from Light Fields", "doi": null, "abstractUrl": "/proceedings-article/cvpr/2022/694600m2943/1H1k4uRP4sM", "parentPublication": { "id": "proceedings/cvpr/2022/6946/0", "title": "2022 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvpr/2022/6946/0/694600s8398", "title": "Neural Point Light Fields", "doi": null, "abstractUrl": "/proceedings-article/cvpr/2022/694600s8398/1H1kUbIJXgY", "parentPublication": { "id": "proceedings/cvpr/2022/6946/0", "title": "2022 IEEE/CVF 
Conference on Computer Vision and Pattern Recognition (CVPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/5555/01/09968104", "title": "Neural Subspaces for Light Fields", "doi": null, "abstractUrl": "/journal/tg/5555/01/09968104/1IKDek8SF0c", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/3dv/2019/3131/0/313100a482", "title": "Light Field Compression using Eigen Textures", "doi": null, "abstractUrl": "/proceedings-article/3dv/2019/313100a482/1ezRAUnTCpy", "parentPublication": { "id": "proceedings/3dv/2019/3131/0", "title": "2019 International Conference on 3D Vision (3DV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icmew/2020/1485/0/09106041", "title": "A Benchmark of Light Field View Interpolation Methods", "doi": null, "abstractUrl": "/proceedings-article/icmew/2020/09106041/1kwqyUmJPIk", "parentPublication": { "id": "proceedings/icmew/2020/1485/0", "title": "2020 IEEE International Conference on Multimedia & Expo Workshops (ICMEW)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/3dv/2020/8128/0/812800a672", "title": "A Linear Approach to Absolute Pose Estimation for Light Fields", "doi": null, "abstractUrl": "/proceedings-article/3dv/2020/812800a672/1qyxisAtUOI", "parentPublication": { "id": "proceedings/3dv/2020/8128/0", "title": "2020 International Conference on 3D Vision (3DV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "1H1gVMlkl32", "title": "2022 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)", "acronym": "cvpr", "groupId": "1000147", "volume": "0", "displayVolume": "0", "year": "2022", "__typename": "ProceedingType" }, "article": { "id": "1H1kUbIJXgY", "doi": "10.1109/CVPR52688.2022.01787", "title": "Neural Point Light Fields", "normalizedTitle": "Neural Point Light Fields", "abstract": "We introduce Neural Point Light Fields that represent scenes implicitly with a light field living on a sparse point cloud. Combining differentiable volume rendering with learned implicit density representations has made it possible to synthesize photo-realistic images for novel views of small scenes. As neural volumetric rendering methods require dense sampling of the underlying functional scene representation, at hundreds of samples along a ray cast through the volume, they are fundamentally limited to small scenes with the same objects projected to hundreds of training views. Promoting sparse point clouds to neural implicit light fields allows us to represent large scenes effectively with only a single radiance evaluation per ray. These point light fields are as a function of the ray direction, and local point feature neighborhood, allowing us to interpolate the light field conditioned training images without dense object coverage and parallax. We assess the proposed method for novel view synthesis on large driving scenarios, where we synthesize realistic unseen views that existing implicit approaches fail to represent. We validate that Neural Point Light Fields make it possible to predict videos along unseen trajectories previously only feasible to generate by explicitly modeling the scene.", "abstracts": [ { "abstractType": "Regular", "content": "We introduce Neural Point Light Fields that represent scenes implicitly with a light field living on a sparse point cloud. 
Combining differentiable volume rendering with learned implicit density representations has made it possible to synthesize photo-realistic images for novel views of small scenes. As neural volumetric rendering methods require dense sampling of the underlying functional scene representation, at hundreds of samples along a ray cast through the volume, they are fundamentally limited to small scenes with the same objects projected to hundreds of training views. Promoting sparse point clouds to neural implicit light fields allows us to represent large scenes effectively with only a single radiance evaluation per ray. These point light fields are as a function of the ray direction, and local point feature neighborhood, allowing us to interpolate the light field conditioned training images without dense object coverage and parallax. We assess the proposed method for novel view synthesis on large driving scenarios, where we synthesize realistic unseen views that existing implicit approaches fail to represent. We validate that Neural Point Light Fields make it possible to predict videos along unseen trajectories previously only feasible to generate by explicitly modeling the scene.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "We introduce Neural Point Light Fields that represent scenes implicitly with a light field living on a sparse point cloud. Combining differentiable volume rendering with learned implicit density representations has made it possible to synthesize photo-realistic images for novel views of small scenes. As neural volumetric rendering methods require dense sampling of the underlying functional scene representation, at hundreds of samples along a ray cast through the volume, they are fundamentally limited to small scenes with the same objects projected to hundreds of training views. 
Promoting sparse point clouds to neural implicit light fields allows us to represent large scenes effectively with only a single radiance evaluation per ray. These point light fields are as a function of the ray direction, and local point feature neighborhood, allowing us to interpolate the light field conditioned training images without dense object coverage and parallax. We assess the proposed method for novel view synthesis on large driving scenarios, where we synthesize realistic unseen views that existing implicit approaches fail to represent. We validate that Neural Point Light Fields make it possible to predict videos along unseen trajectories previously only feasible to generate by explicitly modeling the scene.", "fno": "694600s8398", "keywords": [ "Image Motion Analysis", "Learning Artificial Intelligence", "Realistic Images", "Rendering Computer Graphics", "Neural Point Light Fields", "Light Field", "Sparse Point Cloud", "Learned Implicit Density Representations", "Neural Volumetric Rendering Methods", "Underlying Functional Scene Representation", "Neural Implicit Light Fields", "Local Point Feature Neighborhood", "Point Cloud Compression", "Training", "Photography", "Three Dimensional Displays", "Predictive Models", "Rendering Computer Graphics", "Light Fields" ], "authors": [ { "affiliation": "Algolux", "fullName": "Julian Ost", "givenName": "Julian", "surname": "Ost", "__typename": "ArticleAuthorType" }, { "affiliation": "McGill", "fullName": "Issam Laradji", "givenName": "Issam", "surname": "Laradji", "__typename": "ArticleAuthorType" }, { "affiliation": "Princeton University", "fullName": "Alejandro Newell", "givenName": "Alejandro", "surname": "Newell", "__typename": "ArticleAuthorType" }, { "affiliation": "Princeton University", "fullName": "Yuval Bahat", "givenName": "Yuval", "surname": "Bahat", "__typename": "ArticleAuthorType" }, { "affiliation": "Algolux", "fullName": "Felix Heide", "givenName": "Felix", "surname": "Heide", "__typename": 
"ArticleAuthorType" } ], "idPrefix": "cvpr", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "2022-06-01T00:00:00", "pubType": "proceedings", "pages": "18398-18408", "year": "2022", "issn": null, "isbn": "978-1-6654-6946-3", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [ { "id": "1H1kU61CXsI", "name": "pcvpr202269460-09879861s1-mm_694600s8398.zip", "size": "19.3 MB", "location": "https://www.computer.org/csdl/api/v1/extra/pcvpr202269460-09879861s1-mm_694600s8398.zip", "__typename": "WebExtraType" } ], "adjacentArticles": { "previous": { "fno": "694600s8388", "articleId": "1H1nhdo3vFe", "__typename": "AdjacentArticleType" }, "next": { "fno": "694600s8409", "articleId": "1H1iiuGSP28", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/fg/2002/1602/0/16020003", "title": "Eigen Light-Fields and Face Recognition Across Pose", "doi": null, "abstractUrl": "/proceedings-article/fg/2002/16020003/12OmNvTBBcv", "parentPublication": { "id": "proceedings/fg/2002/1602/0", "title": "Proceedings of Fifth IEEE International Conference on Automatic Face Gesture Recognition", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/dicta/2011/4588/0/4588a536", "title": "Face Recognition across Pose on Video Using Eigen Light-Fields", "doi": null, "abstractUrl": "/proceedings-article/dicta/2011/4588a536/12OmNvUaNpk", "parentPublication": { "id": "proceedings/dicta/2011/4588/0", "title": "2011 International Conference on Digital Image Computing: Techniques and Applications", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iccv/2017/1032/0/1032c262", "title": "Learning to Synthesize a 4D RGBD Light Field from a Single Image", "doi": null, "abstractUrl": "/proceedings-article/iccv/2017/1032c262/12OmNzmclkx", "parentPublication": { "id": 
"proceedings/iccv/2017/1032/0", "title": "2017 IEEE International Conference on Computer Vision (ICCV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2016/07/07244244", "title": "Enhancing Light Fields through Ray-Space Stitching", "doi": null, "abstractUrl": "/journal/tg/2016/07/07244244/13rRUx0xPn1", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "mags/co/2006/08/r8046", "title": "Light Fields and Computational Imaging", "doi": null, "abstractUrl": "/magazine/co/2006/08/r8046/13rRUxD9h0I", "parentPublication": { "id": "mags/co", "title": "Computer", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tp/2004/04/i0449", "title": "Appearance-Based Face Recognition and Light-Fields", "doi": null, "abstractUrl": "/journal/tp/2004/04/i0449/13rRUyYjKbp", "parentPublication": { "id": "trans/tp", "title": "IEEE Transactions on Pattern Analysis & Machine Intelligence", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iccv/2021/2812/0/281200o4204", "title": "SIGNET: Efficient Neural Representation for Light Fields", "doi": null, "abstractUrl": "/proceedings-article/iccv/2021/281200o4204/1BmELbO5QZi", "parentPublication": { "id": "proceedings/iccv/2021/2812/0", "title": "2021 IEEE/CVF International Conference on Computer Vision (ICCV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvpr/2022/6946/0/694600t9787", "title": "Learning Neural Light Fields with Ray-Space Embedding", "doi": null, "abstractUrl": "/proceedings-article/cvpr/2022/694600t9787/1H0OiVLs2TS", "parentPublication": { "id": "proceedings/cvpr/2022/6946/0", "title": "2022 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)", "__typename": 
"ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvpr/2022/6946/0/694600f428", "title": "Point-NeRF: Point-based Neural Radiance Fields", "doi": null, "abstractUrl": "/proceedings-article/cvpr/2022/694600f428/1H1mrGLgvra", "parentPublication": { "id": "proceedings/cvpr/2022/6946/0", "title": "2022 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/5555/01/09968104", "title": "Neural Subspaces for Light Fields", "doi": null, "abstractUrl": "/journal/tg/5555/01/09968104/1IKDek8SF0c", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "1qyxi3OgORy", "title": "2020 International Conference on 3D Vision (3DV)", "acronym": "3dv", "groupId": "1800494", "volume": "0", "displayVolume": "0", "year": "2020", "__typename": "ProceedingType" }, "article": { "id": "1qyxisAtUOI", "doi": "10.1109/3DV50981.2020.00077", "title": "A Linear Approach to Absolute Pose Estimation for Light Fields", "normalizedTitle": "A Linear Approach to Absolute Pose Estimation for Light Fields", "abstract": "This paper presents the first absolute pose estimation approach tailored to Light Field cameras. It builds on the observation that the ratio between the disparity arising in different sub-aperture images and their corresponding baseline is constant. Hence, we augment the 2D pixel coordinates with the corresponding normalised disparity to obtain the Light Field feature. This new representation reduces the effect of noise by aggregating multiple projections and allows for linear estimation of the absolute pose of a Light Field camera using the well-known Direct Linear Transformation algorithm. We evaluate the resulting absolute pose estimates with extensive simulations and experiments involving real Light Field datasets, demonstrating the competitive performance of our linear approach. Furthermore, we integrate our approach in a state-of-the-art Light Field Structure from Motion pipeline and demonstrate accurate multi-view 3D reconstruction.", "abstracts": [ { "abstractType": "Regular", "content": "This paper presents the first absolute pose estimation approach tailored to Light Field cameras. It builds on the observation that the ratio between the disparity arising in different sub-aperture images and their corresponding baseline is constant. Hence, we augment the 2D pixel coordinates with the corresponding normalised disparity to obtain the Light Field feature. 
This new representation reduces the effect of noise by aggregating multiple projections and allows for linear estimation of the absolute pose of a Light Field camera using the well-known Direct Linear Transformation algorithm. We evaluate the resulting absolute pose estimates with extensive simulations and experiments involving real Light Field datasets, demonstrating the competitive performance of our linear approach. Furthermore, we integrate our approach in a state-of-the-art Light Field Structure from Motion pipeline and demonstrate accurate multi-view 3D reconstruction.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "This paper presents the first absolute pose estimation approach tailored to Light Field cameras. It builds on the observation that the ratio between the disparity arising in different sub-aperture images and their corresponding baseline is constant. Hence, we augment the 2D pixel coordinates with the corresponding normalised disparity to obtain the Light Field feature. This new representation reduces the effect of noise by aggregating multiple projections and allows for linear estimation of the absolute pose of a Light Field camera using the well-known Direct Linear Transformation algorithm. We evaluate the resulting absolute pose estimates with extensive simulations and experiments involving real Light Field datasets, demonstrating the competitive performance of our linear approach. 
Furthermore, we integrate our approach in a state-of-the-art Light Field Structure from Motion pipeline and demonstrate accurate multi-view 3D reconstruction.", "fno": "812800a672", "keywords": [ "Cameras", "Computational Geometry", "Image Reconstruction", "Pose Estimation", "Direct Linear Transformation Algorithm", "Light Field Datasets", "Absolute Pose Estimation Approach", "Light Field Camera", "Subaperture Images", "2 D Pixel Coordinates", "Normalised Disparity", "Light Field Feature", "Linear Estimation", "State Of The Art Light Field Structure", "Absolute Pose Estimates", "Multiview 3 D Reconstruction", "Cameras", "Three Dimensional Displays", "Light Fields", "Pose Estimation", "Image Reconstruction", "Pipelines", "Lenses" ], "authors": [ { "affiliation": "University College,London,UK", "fullName": "Sotiris Nousias", "givenName": "Sotiris", "surname": "Nousias", "__typename": "ArticleAuthorType" }, { "affiliation": "Foundation for Research and Technology,Hellas,GR", "fullName": "Manolis Lourakis", "givenName": "Manolis", "surname": "Lourakis", "__typename": "ArticleAuthorType" }, { "affiliation": "University College,London,UK", "fullName": "Pearse Keane", "givenName": "Pearse", "surname": "Keane", "__typename": "ArticleAuthorType" }, { "affiliation": "King’s College,London,UK", "fullName": "Sebastien Ourselin", "givenName": "Sebastien", "surname": "Ourselin", "__typename": "ArticleAuthorType" }, { "affiliation": "King’s College,London,UK", "fullName": "Christos Bergeles", "givenName": "Christos", "surname": "Bergeles", "__typename": "ArticleAuthorType" } ], "idPrefix": "3dv", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "2020-11-01T00:00:00", "pubType": "proceedings", "pages": "672-681", "year": "2020", "issn": null, "isbn": "978-1-7281-8128-8", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "812800a663", "articleId": 
"1qyxkDucGpG", "__typename": "AdjacentArticleType" }, "next": { "fno": "812800a682", "articleId": "1qyxmqwLJcs", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/fg/2002/1602/0/16020003", "title": "Eigen Light-Fields and Face Recognition Across Pose", "doi": null, "abstractUrl": "/proceedings-article/fg/2002/16020003/12OmNvTBBcv", "parentPublication": { "id": "proceedings/fg/2002/1602/0", "title": "Proceedings of Fifth IEEE International Conference on Automatic Face Gesture Recognition", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/dicta/2011/4588/0/4588a536", "title": "Face Recognition across Pose on Video Using Eigen Light-Fields", "doi": null, "abstractUrl": "/proceedings-article/dicta/2011/4588a536/12OmNvUaNpk", "parentPublication": { "id": "proceedings/dicta/2011/4588/0", "title": "2011 International Conference on Digital Image Computing: Techniques and Applications", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/amfg/2003/2010/0/20100092", "title": "Absolute Head Pose Estimation From Overhead Wide-Angle Cameras", "doi": null, "abstractUrl": "/proceedings-article/amfg/2003/20100092/12OmNyen1y9", "parentPublication": { "id": "proceedings/amfg/2003/2010/0", "title": "2003 IEEE International Workshop on Analysis and Modeling of Faces and Gestures", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tp/2004/04/i0449", "title": "Appearance-Based Face Recognition and Light-Fields", "doi": null, "abstractUrl": "/journal/tp/2004/04/i0449/13rRUyYjKbp", "parentPublication": { "id": "trans/tp", "title": "IEEE Transactions on Pattern Analysis & Machine Intelligence", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iccv/2021/2812/0/281200o4204", "title": "SIGNET: Efficient Neural Representation for 
Light Fields", "doi": null, "abstractUrl": "/proceedings-article/iccv/2021/281200o4204/1BmELbO5QZi", "parentPublication": { "id": "proceedings/iccv/2021/2812/0", "title": "2021 IEEE/CVF International Conference on Computer Vision (ICCV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvpr/2022/6946/0/694600n3076", "title": "Single-Stage is Enough: Multi-Person Absolute 3D Pose Estimation", "doi": null, "abstractUrl": "/proceedings-article/cvpr/2022/694600n3076/1H1kJ6yI2nC", "parentPublication": { "id": "proceedings/cvpr/2022/6946/0", "title": "2022 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvpr/2022/6946/0/694600s8398", "title": "Neural Point Light Fields", "doi": null, "abstractUrl": "/proceedings-article/cvpr/2022/694600s8398/1H1kUbIJXgY", "parentPublication": { "id": "proceedings/cvpr/2022/6946/0", "title": "2022 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/5555/01/09968104", "title": "Neural Subspaces for Light Fields", "doi": null, "abstractUrl": "/journal/tg/5555/01/09968104/1IKDek8SF0c", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/3dv/2019/3131/0/313100a249", "title": "Learning to Think Outside the Box: Wide-Baseline Light Field Depth Estimation with EPI-Shift", "doi": null, "abstractUrl": "/proceedings-article/3dv/2019/313100a249/1ezRE2YcSLC", "parentPublication": { "id": "proceedings/3dv/2019/3131/0", "title": "2019 International Conference on 3D Vision (3DV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/aivr/2021/3225/0/322500a047", 
"title": "Speed Up Light Field Synthesis from Stereo Images", "doi": null, "abstractUrl": "/proceedings-article/aivr/2021/322500a047/1zxLBXQNOtq", "parentPublication": { "id": "proceedings/aivr/2021/3225/0", "title": "2021 IEEE International Conference on Artificial Intelligence and Virtual Reality (AIVR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "1G9DtzCwrjW", "title": "2022 IEEE International Conference on Multimedia and Expo (ICME)", "acronym": "icme", "groupId": "1000477", "volume": "0", "displayVolume": "0", "year": "2022", "__typename": "ProceedingType" }, "article": { "id": "1G9EIhRN8C4", "doi": "10.1109/ICME52920.2022.9859639", "title": "Multi-Head Attention Fusion Network for Visual Question Answering", "normalizedTitle": "Multi-Head Attention Fusion Network for Visual Question Answering", "abstract": "Visual Question Answering (VQA) is a challenging task to answer questions with respect to the image. Most approaches concentrate on utilizing attention networks to focus on crucial objects of the image and key words of the question. However, the attention distribution of these prior attempts tends to lo-cate similar regions, which leads to lack of the ability to derive important entities. To address the issue, we propose a multi-head attention fusion network (MHAFN), which can achieve hierarchical multimodal fusion with various branches to capture the fine-grained and intricate relationship in the perspective of multiple levels: word, region and the interaction of them. Furthermore, it can also capture distinct attention distribution for attending to multiple different visual and textual components that are vital to infer the answer. Extensive experiments on the benchmark of VQA-v2 dataset demonstrate that MHAFN significantly outperforms previous methods.", "abstracts": [ { "abstractType": "Regular", "content": "Visual Question Answering (VQA) is a challenging task to answer questions with respect to the image. Most approaches concentrate on utilizing attention networks to focus on crucial objects of the image and key words of the question. However, the attention distribution of these prior attempts tends to lo-cate similar regions, which leads to lack of the ability to derive important entities. 
To address the issue, we propose a multi-head attention fusion network (MHAFN), which can achieve hierarchical multimodal fusion with various branches to capture the fine-grained and intricate relationship in the perspective of multiple levels: word, region and the interaction of them. Furthermore, it can also capture distinct attention distribution for attending to multiple different visual and textual components that are vital to infer the answer. Extensive experiments on the benchmark of VQA-v2 dataset demonstrate that MHAFN significantly outperforms previous methods.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "Visual Question Answering (VQA) is a challenging task to answer questions with respect to the image. Most approaches concentrate on utilizing attention networks to focus on crucial objects of the image and key words of the question. However, the attention distribution of these prior attempts tends to lo-cate similar regions, which leads to lack of the ability to derive important entities. To address the issue, we propose a multi-head attention fusion network (MHAFN), which can achieve hierarchical multimodal fusion with various branches to capture the fine-grained and intricate relationship in the perspective of multiple levels: word, region and the interaction of them. Furthermore, it can also capture distinct attention distribution for attending to multiple different visual and textual components that are vital to infer the answer. 
Extensive experiments on the benchmark of VQA-v2 dataset demonstrate that MHAFN significantly outperforms previous methods.", "fno": "09859639", "keywords": [ "Data Visualisation", "Document Handling", "Human Computer Interaction", "Medical Image Processing", "Text Analysis", "Video Signal Processing", "Multihead Attention Fusion Network", "Visual Question", "Answer Questions", "Attention Networks", "Hierarchical Multimodal Fusion", "Distinct Attention Distribution", "Multiple Different Visual Components", "Textual Components", "Visualization", "Fuses", "Benchmark Testing", "Question Answering Information Retrieval", "Task Analysis", "VQA", "Attention Distribution", "Hierarchi Cal Fusion" ], "authors": [ { "affiliation": "School of computer science, Beijing University of Posts and Telecommunications,Beijing,China", "fullName": "Haiyang Zhang", "givenName": "Haiyang", "surname": "Zhang", "__typename": "ArticleAuthorType" }, { "affiliation": "School of computer science, Beijing University of Posts and Telecommunications,Beijing,China", "fullName": "Ruoyu Li", "givenName": "Ruoyu", "surname": "Li", "__typename": "ArticleAuthorType" }, { "affiliation": "School of computer science, Beijing University of Posts and Telecommunications,Beijing,China", "fullName": "Liang Liu", "givenName": "Liang", "surname": "Liu", "__typename": "ArticleAuthorType" } ], "idPrefix": "icme", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "2022-07-01T00:00:00", "pubType": "proceedings", "pages": "1-6", "year": "2022", "issn": null, "isbn": "978-1-6654-8563-0", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "09859743", "articleId": "1G9EwGVP30Y", "__typename": "AdjacentArticleType" }, "next": { "fno": "09860007", "articleId": "1G9DQEHdTuo", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": 
"proceedings/icme/2022/8563/0/09859865", "title": "Modality-Specific Multimodal Global Enhanced Network for Text-Based Visual Question Answering", "doi": null, "abstractUrl": "/proceedings-article/icme/2022/09859865/1G9DvDkSzPG", "parentPublication": { "id": "proceedings/icme/2022/8563/0", "title": "2022 IEEE International Conference on Multimedia and Expo (ICME)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icme/2022/8563/0/09859591", "title": "Question-Driven Graph Fusion Network for Visual Question Answering", "doi": null, "abstractUrl": "/proceedings-article/icme/2022/09859591/1G9Ep1BWxIQ", "parentPublication": { "id": "proceedings/icme/2022/8563/0", "title": "2022 IEEE International Conference on Multimedia and Expo (ICME)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/5555/01/09965773", "title": "3D Question Answering", "doi": null, "abstractUrl": "/journal/tg/5555/01/09965773/1IHMR48xnyM", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icpr/2022/9062/0/09956247", "title": "CAT: Re-Conv Attention in Transformer for Visual Question Answering", "doi": null, "abstractUrl": "/proceedings-article/icpr/2022/09956247/1IHoudteQJa", "parentPublication": { "id": "proceedings/icpr/2022/9062/0", "title": "2022 26th International Conference on Pattern Recognition (ICPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/wacv/2023/9346/0/934600b084", "title": "Barlow constrained optimization for Visual Question Answering", "doi": null, "abstractUrl": "/proceedings-article/wacv/2023/934600b084/1L8qm6uqYWQ", "parentPublication": { "id": "proceedings/wacv/2023/9346/0", "title": "2023 IEEE/CVF Winter Conference on Applications of Computer Vision (WACV)", 
"__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/trustcom/2022/9425/0/942500b410", "title": "Multimodal Graph Reasoning and Fusion for Video Question Answering", "doi": null, "abstractUrl": "/proceedings-article/trustcom/2022/942500b410/1LFLW33hrX2", "parentPublication": { "id": "proceedings/trustcom/2022/9425/0", "title": "2022 IEEE International Conference on Trust, Security and Privacy in Computing and Communications (TrustCom)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/sitis/2022/6495/0/649500a224", "title": "Multimodal Knowledge Reasoning for Enhanced Visual Question Answering", "doi": null, "abstractUrl": "/proceedings-article/sitis/2022/649500a224/1MeoND4bVV6", "parentPublication": { "id": "proceedings/sitis/2022/6495/0", "title": "2022 16th International Conference on Signal-Image Technology & Internet-Based Systems (SITIS)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvpr/2019/3293/0/329300j554", "title": "Explicit Bias Discovery in Visual Question Answering Models", "doi": null, "abstractUrl": "/proceedings-article/cvpr/2019/329300j554/1gyrXiAfK6c", "parentPublication": { "id": "proceedings/cvpr/2019/3293/0", "title": "2019 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvpr/2019/3293/0/329300i329", "title": "Progressive Attention Memory Network for Movie Story Question Answering", "doi": null, "abstractUrl": "/proceedings-article/cvpr/2019/329300i329/1gyrqw6W2GY", "parentPublication": { "id": "proceedings/cvpr/2019/3293/0", "title": "2019 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvpr/2019/3293/0/329300b989", "title": "MUREL: Multimodal 
Relational Reasoning for Visual Question Answering", "doi": null, "abstractUrl": "/proceedings-article/cvpr/2019/329300b989/1gyruLBNwU8", "parentPublication": { "id": "proceedings/cvpr/2019/3293/0", "title": "2019 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "1GeCtnTct8I", "title": "2022 19th Conference on Robots and Vision (CRV)", "acronym": "crv", "groupId": "1001794", "volume": "0", "displayVolume": "0", "year": "2022", "__typename": "ProceedingType" }, "article": { "id": "1GeCye8PHzO", "doi": "10.1109/CRV55824.2022.00038", "title": "3DVQA: Visual Question Answering for 3D Environments", "normalizedTitle": "3DVQA: Visual Question Answering for 3D Environments", "abstract": "Visual Question Answering (VQA) is a widely studied problem in computer vision and natural language processing. However, current approaches to VQA have been investigated primarily in the 2D image domain. We study VQA in the 3D domain, with our input being point clouds of real-world 3D scenes, instead of 2D images. We believe that this 3D data modality provide richer spatial relation information that is of interest in the VQA task. In this paper, we introduce the 3DVQA-ScanNet dataset, the first VQA dataset in 3D, and we investigate the performance of a spectrum of baseline approaches on the 3D VQA task.", "abstracts": [ { "abstractType": "Regular", "content": "Visual Question Answering (VQA) is a widely studied problem in computer vision and natural language processing. However, current approaches to VQA have been investigated primarily in the 2D image domain. We study VQA in the 3D domain, with our input being point clouds of real-world 3D scenes, instead of 2D images. We believe that this 3D data modality provide richer spatial relation information that is of interest in the VQA task. In this paper, we introduce the 3DVQA-ScanNet dataset, the first VQA dataset in 3D, and we investigate the performance of a spectrum of baseline approaches on the 3D VQA task.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "Visual Question Answering (VQA) is a widely studied problem in computer vision and natural language processing. 
However, current approaches to VQA have been investigated primarily in the 2D image domain. We study VQA in the 3D domain, with our input being point clouds of real-world 3D scenes, instead of 2D images. We believe that this 3D data modality provide richer spatial relation information that is of interest in the VQA task. In this paper, we introduce the 3DVQA-ScanNet dataset, the first VQA dataset in 3D, and we investigate the performance of a spectrum of baseline approaches on the 3D VQA task.", "fno": "977400a233", "keywords": [ "Computer Vision", "Data Visualisation", "Interactive Systems", "Medical Image Processing", "Natural Language Processing", "Video Signal Processing", "3 D Data Modality", "Richer Spatial Relation Information", "VQA Task", "3 DVQA Scan Net Dataset", "VQA Dataset", "Visual Question Answering", "Widely Studied Problem", "Computer Vision", "Natural Language Processing", "2 D Image Domain", "Point Cloud Compression", "Surface Reconstruction", "Computer Vision", "Three Dimensional Displays", "Lighting", "Question Answering Information Retrieval", "Noise Measurement", "3 D Visual Question Answering", "Visual Question Answering", "3 DVQA", "3 D" ], "authors": [ { "affiliation": "School of Computing Science, Simon Fraser University,Burnaby,BC,Canada", "fullName": "Yasaman Etesam", "givenName": "Yasaman", "surname": "Etesam", "__typename": "ArticleAuthorType" }, { "affiliation": "School of Computing Science, Simon Fraser University,Burnaby,BC,Canada", "fullName": "Leon Kochiev", "givenName": "Leon", "surname": "Kochiev", "__typename": "ArticleAuthorType" }, { "affiliation": "School of Computing Science, Simon Fraser University,Burnaby,BC,Canada", "fullName": "Angel X. 
Chang", "givenName": "Angel X.", "surname": "Chang", "__typename": "ArticleAuthorType" } ], "idPrefix": "crv", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "2022-05-01T00:00:00", "pubType": "proceedings", "pages": "233-240", "year": "2022", "issn": null, "isbn": "978-1-6654-9774-9", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "977400a224", "articleId": "1GeCtGxCh3y", "__typename": "AdjacentArticleType" }, "next": { "fno": "977400a241", "articleId": "1GeCysidODe", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/icme/2022/8563/0/09859639", "title": "Multi-Head Attention Fusion Network for Visual Question Answering", "doi": null, "abstractUrl": "/proceedings-article/icme/2022/09859639/1G9EIhRN8C4", "parentPublication": { "id": "proceedings/icme/2022/8563/0", "title": "2022 IEEE International Conference on Multimedia and Expo (ICME)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icme/2022/8563/0/09859591", "title": "Question-Driven Graph Fusion Network for Visual Question Answering", "doi": null, "abstractUrl": "/proceedings-article/icme/2022/09859591/1G9Ep1BWxIQ", "parentPublication": { "id": "proceedings/icme/2022/8563/0", "title": "2022 IEEE International Conference on Multimedia and Expo (ICME)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iiai-aai/2022/9755/0/975500a089", "title": "Visual Question Answering Focusing on Object Positional Relation with Capsule Network", "doi": null, "abstractUrl": "/proceedings-article/iiai-aai/2022/975500a089/1GU77RvtyV2", "parentPublication": { "id": "proceedings/iiai-aai/2022/9755/0", "title": "2022 12th International Congress on Advanced Applied Informatics (IIAI-AAI)", "__typename": "ParentPublication" }, 
"__typename": "RecommendedArticleType" }, { "id": "proceedings/cvpr/2022/6946/0/694600t9107", "title": "ScanQA: 3D Question Answering for Spatial Scene Understanding", "doi": null, "abstractUrl": "/proceedings-article/cvpr/2022/694600t9107/1H1k0fH76uc", "parentPublication": { "id": "proceedings/cvpr/2022/6946/0", "title": "2022 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvpr/2022/6946/0/694600f068", "title": "SwapMix: Diagnosing and Regularizing the Over-Reliance on Visual Context in Visual Question Answering", "doi": null, "abstractUrl": "/proceedings-article/cvpr/2022/694600f068/1H1kUvOR0ic", "parentPublication": { "id": "proceedings/cvpr/2022/6946/0", "title": "2022 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/5555/01/09965773", "title": "3D Question Answering", "doi": null, "abstractUrl": "/journal/tg/5555/01/09965773/1IHMR48xnyM", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/wacv/2023/9346/0/934600b084", "title": "Barlow constrained optimization for Visual Question Answering", "doi": null, "abstractUrl": "/proceedings-article/wacv/2023/934600b084/1L8qm6uqYWQ", "parentPublication": { "id": "proceedings/wacv/2023/9346/0", "title": "2023 IEEE/CVF Winter Conference on Applications of Computer Vision (WACV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/sitis/2022/6495/0/649500a224", "title": "Multimodal Knowledge Reasoning for Enhanced Visual Question Answering", "doi": null, "abstractUrl": "/proceedings-article/sitis/2022/649500a224/1MeoND4bVV6", "parentPublication": { "id": 
"proceedings/sitis/2022/6495/0", "title": "2022 16th International Conference on Signal-Image Technology & Internet-Based Systems (SITIS)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tk/5555/01/10102595", "title": "Event-Oriented Visual Question Answering: The E-VQA Dataset and Benchmark", "doi": null, "abstractUrl": "/journal/tk/5555/01/10102595/1MkXSR95oIM", "parentPublication": { "id": "trans/tk", "title": "IEEE Transactions on Knowledge & Data Engineering", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/3dv/2019/3131/0/313100a756", "title": "Incorporating 3D Information Into Visual Question Answering", "doi": null, "abstractUrl": "/proceedings-article/3dv/2019/313100a756/1ezRE66VHZS", "parentPublication": { "id": "proceedings/3dv/2019/3131/0", "title": "2019 International Conference on 3D Vision (3DV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "1H1gVMlkl32", "title": "2022 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)", "acronym": "cvpr", "groupId": "1000147", "volume": "0", "displayVolume": "0", "year": "2022", "__typename": "ProceedingType" }, "article": { "id": "1H1k0fH76uc", "doi": "10.1109/CVPR52688.2022.01854", "title": "ScanQA: 3D Question Answering for Spatial Scene Understanding", "normalizedTitle": "ScanQA: 3D Question Answering for Spatial Scene Understanding", "abstract": "We propose a new 3D spatial understanding task for 3D question answering (3D-QA). In the 3D-QA task, models receive visual information from the entire 3D scene of a rich RGB-D indoor scan and answer given textual questions about the 3D scene. Unlike the 2D-question answering of visual question answering, the conventional 2D-QA models suffer from problems with spatial understanding of object alignment and directions and fail in object localization from the textual questions in 3D-QA. We propose a baseline model for 3D-QA, called the ScanQA<sup>1</sup><sup>1</sup>https://github.com/ATR-DBI/ScanQA, which learns a fused descriptor from 3D object proposals and encoded sentence embeddings. This learned descriptor correlates language expressions with the underlying geometric features of the 3D scan and facilitates the regression of 3D bounding boxes to determine the described objects in textual questions. We collected human-edited question-answer pairs with free-form answers grounded in 3D objects in each 3D scene. Our new ScanQA dataset contains over 41k question-answer pairs from 800 indoor scenes obtained from the ScanNet dataset. To the best of our knowledge, ScanQA is the first large-scale effort to perform object-grounded question answering in 3D environments.", "abstracts": [ { "abstractType": "Regular", "content": "We propose a new 3D spatial understanding task for 3D question answering (3D-QA). 
In the 3D-QA task, models receive visual information from the entire 3D scene of a rich RGB-D indoor scan and answer given textual questions about the 3D scene. Unlike the 2D-question answering of visual question answering, the conventional 2D-QA models suffer from problems with spatial understanding of object alignment and directions and fail in object localization from the textual questions in 3D-QA. We propose a baseline model for 3D-QA, called the ScanQA<sup>1</sup><sup>1</sup>https://github.com/ATR-DBI/ScanQA, which learns a fused descriptor from 3D object proposals and encoded sentence embeddings. This learned descriptor correlates language expressions with the underlying geometric features of the 3D scan and facilitates the regression of 3D bounding boxes to determine the described objects in textual questions. We collected human-edited question-answer pairs with free-form answers grounded in 3D objects in each 3D scene. Our new ScanQA dataset contains over 41k question-answer pairs from 800 indoor scenes obtained from the ScanNet dataset. To the best of our knowledge, ScanQA is the first large-scale effort to perform object-grounded question answering in 3D environments.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "We propose a new 3D spatial understanding task for 3D question answering (3D-QA). In the 3D-QA task, models receive visual information from the entire 3D scene of a rich RGB-D indoor scan and answer given textual questions about the 3D scene. Unlike the 2D-question answering of visual question answering, the conventional 2D-QA models suffer from problems with spatial understanding of object alignment and directions and fail in object localization from the textual questions in 3D-QA. We propose a baseline model for 3D-QA, called the ScanQA11https://github.com/ATR-DBI/ScanQA, which learns a fused descriptor from 3D object proposals and encoded sentence embeddings. 
This learned descriptor correlates language expressions with the underlying geometric features of the 3D scan and facilitates the regression of 3D bounding boxes to determine the described objects in textual questions. We collected human-edited question-answer pairs with free-form answers grounded in 3D objects in each 3D scene. Our new ScanQA dataset contains over 41k question-answer pairs from 800 indoor scenes obtained from the ScanNet dataset. To the best of our knowledge, ScanQA is the first large-scale effort to perform object-grounded question answering in 3D environments.", "fno": "694600t9107", "keywords": [ "Data Visualisation", "Feature Extraction", "Image Representation", "Information Retrieval", "Learning Artificial Intelligence", "Natural Language Processing", "Natural Languages", "Object Detection", "Stereo Image Processing", "Text Analysis", "Spatial Understanding", "Textual Questions", "Scan QA", "3 D Object Proposals", "3 D Bounding Boxes", "Human Edited Question Answer Pairs", "Free Form Answers", "800 Indoor Scenes", "Object Grounded Question Answering", "3 D Question Answering", "Spatial Scene Understanding", "3 D QA Task", "Entire 3 D Scene", "2 D Question Answering", "Visual Question Answering", "2 D QA Models", "Location Awareness", "Measurement", "Solid Modeling", "Visualization", "Computer Vision", "Three Dimensional Displays", "Question Answering Information Retrieval" ], "authors": [ { "affiliation": "Kyoto University", "fullName": "Daichi Azuma", "givenName": "Daichi", "surname": "Azuma", "__typename": "ArticleAuthorType" }, { "affiliation": "ATR, RIKEN AIP", "fullName": "Taiki Miyanishi", "givenName": "Taiki", "surname": "Miyanishi", "__typename": "ArticleAuthorType" }, { "affiliation": "RIKEN AIP, JST PRESTO", "fullName": "Shuhei Kurita", "givenName": "Shuhei", "surname": "Kurita", "__typename": "ArticleAuthorType" }, { "affiliation": "ATR, RIKEN AIP", "fullName": "Motoaki Kawanabe", "givenName": "Motoaki", "surname": "Kawanabe", 
"__typename": "ArticleAuthorType" } ], "idPrefix": "cvpr", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "2022-06-01T00:00:00", "pubType": "proceedings", "pages": "19107-19117", "year": "2022", "issn": null, "isbn": "978-1-6654-6946-3", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [ { "id": "1H1k0cBegyA", "name": "pcvpr202269460-09878756s1-mm_694600t9107.zip", "size": "2.36 MB", "location": "https://www.computer.org/csdl/api/v1/extra/pcvpr202269460-09878756s1-mm_694600t9107.zip", "__typename": "WebExtraType" } ], "adjacentArticles": { "previous": { "fno": "694600t9097", "articleId": "1H1mR2C9Gdq", "__typename": "AdjacentArticleType" }, "next": { "fno": "694600t9118", "articleId": "1H0Lm6gI98c", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/isda/2008/3382/2/3382b035", "title": "Pattern Optimization and the Application in Question Answering", "doi": null, "abstractUrl": "/proceedings-article/isda/2008/3382b035/12OmNqJ8tmX", "parentPublication": { "id": "proceedings/isda/2008/3382/2", "title": "2008 Eighth International Conference on Intelligent Systems Design and Applications", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icsc/2012/4859/0/4859a079", "title": "Evaluating Temporal Information Understanding with Temporal Question Answering", "doi": null, "abstractUrl": "/proceedings-article/icsc/2012/4859a079/12OmNqzu6So", "parentPublication": { "id": "proceedings/icsc/2012/4859/0", "title": "2012 IEEE Sixth International Conference on Semantic Computing", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icdmw/2011/4409/0/4409a089", "title": "AQA: Aspect-based Opinion Question Answering", "doi": null, "abstractUrl": "/proceedings-article/icdmw/2011/4409a089/12OmNrJAe9F", "parentPublication": { "id": 
"proceedings/icdmw/2011/4409/0", "title": "2011 IEEE 11th International Conference on Data Mining Workshops", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/conielecomp/2009/3587/0/3587a139", "title": "OntoQuestion: An Ontologies-Based Framework for Factoid Question Answering on Abstracts", "doi": null, "abstractUrl": "/proceedings-article/conielecomp/2009/3587a139/12OmNwOnn0Z", "parentPublication": { "id": "proceedings/conielecomp/2009/3587/0", "title": "Electronics, Communications, and Computers, International Conference on", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cis/2010/4297/0/4297a634", "title": "Graph-Based Answer Passage Ranking for Question Answering", "doi": null, "abstractUrl": "/proceedings-article/cis/2010/4297a634/12OmNx57HHo", "parentPublication": { "id": "proceedings/cis/2010/4297/0", "title": "2010 International Conference on Computational Intelligence and Security", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icicta/2012/4637/0/4637a573", "title": "Question Answering System Based on Web", "doi": null, "abstractUrl": "/proceedings-article/icicta/2012/4637a573/12OmNxGALb3", "parentPublication": { "id": "proceedings/icicta/2012/4637/0", "title": "Intelligent Computation Technology and Automation, International Conference on", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ncm/2008/3322/2/3322b365", "title": "Design of Question Answering System with Automated Question Generation", "doi": null, "abstractUrl": "/proceedings-article/ncm/2008/3322b365/12OmNyGbIij", "parentPublication": { "id": "proceedings/ncm/2008/3322/2", "title": "Networked Computing and Advanced Information Management, International Conference on", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": 
"proceedings/kse/2009/3846/0/3846a044", "title": "Extensible Framework for Distinct Question Answering Agents", "doi": null, "abstractUrl": "/proceedings-article/kse/2009/3846a044/12OmNylKARH", "parentPublication": { "id": "proceedings/kse/2009/3846/0", "title": "Knowledge and Systems Engineering, International Conference on", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/itng/2009/3596/0/3596b522", "title": "A Trend Analysis of the Question Answering Domain", "doi": null, "abstractUrl": "/proceedings-article/itng/2009/3596b522/12OmNzcPAF2", "parentPublication": { "id": "proceedings/itng/2009/3596/0", "title": "Information Technology: New Generations, Third International Conference on", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvpr/2019/3293/0/329300i329", "title": "Progressive Attention Memory Network for Movie Story Question Answering", "doi": null, "abstractUrl": "/proceedings-article/cvpr/2019/329300i329/1gyrqw6W2GY", "parentPublication": { "id": "proceedings/cvpr/2019/3293/0", "title": "2019 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "1JC1F8KcINO", "title": "2022 IEEE International Conference on Bioinformatics and Biomedicine (BIBM)", "acronym": "bibm", "groupId": "9994793", "volume": "0", "displayVolume": "0", "year": "2022", "__typename": "ProceedingType" }, "article": { "id": "1JC2qFvLZ96", "doi": "10.1109/BIBM55620.2022.9995473", "title": "MHKD-MVQA: Multimodal Hierarchical Knowledge Distillation for Medical Visual Question Answering", "normalizedTitle": "MHKD-MVQA: Multimodal Hierarchical Knowledge Distillation for Medical Visual Question Answering", "abstract": "Medical Visual Question Answering (VQA) has emerged as a promising solution to enhance clinic-decision making and patient interactions. Given a medical image and a corresponding question, medical VQA aims to predict an informative answer by reasoning the visual and textual information. However, datasets with limited samples circumscribe the generalization of medical VQA, reducing its accuracy when applied to unseen medical samples. Existing works tried to solve this problem with meta-learning or self-supervised learning but still failed to achieve satisfactory performance on medical VQA with insufficient samples. To address this problem, we propose multimodal hierarchical knowledge distillation for medical VQA (MHKD-MVQA). In the primary novelty of MHKD-MVQA, we distill knowledge from not only the output but also the intermediate layers, which leverages the knowledge from limited samples to a greater extent. Meanwhile, medical images and questions are embedded in a shared latent space, enabling our model to tackle multimodal samples. We evaluate our model on two medical VQA datasets, VQA-MED 2019 and VQA-RAD, where MHKD-MVQA achieves state-of-the-art performance and outperforms baselines by 3.6&#x0025; and 1.6&#x0025;, respectively. 
The extensive experiments also highlight the generalization of knowledge distillation by analyzing the class activation maps on medical images concerning specific questions.", "abstracts": [ { "abstractType": "Regular", "content": "Medical Visual Question Answering (VQA) has emerged as a promising solution to enhance clinic-decision making and patient interactions. Given a medical image and a corresponding question, medical VQA aims to predict an informative answer by reasoning the visual and textual information. However, datasets with limited samples circumscribe the generalization of medical VQA, reducing its accuracy when applied to unseen medical samples. Existing works tried to solve this problem with meta-learning or self-supervised learning but still failed to achieve satisfactory performance on medical VQA with insufficient samples. To address this problem, we propose multimodal hierarchical knowledge distillation for medical VQA (MHKD-MVQA). In the primary novelty of MHKD-MVQA, we distill knowledge from not only the output but also the intermediate layers, which leverages the knowledge from limited samples to a greater extent. Meanwhile, medical images and questions are embedded in a shared latent space, enabling our model to tackle multimodal samples. We evaluate our model on two medical VQA datasets, VQA-MED 2019 and VQA-RAD, where MHKD-MVQA achieves state-of-the-art performance and outperforms baselines by 3.6&#x0025; and 1.6&#x0025;, respectively. The extensive experiments also highlight the generalization of knowledge distillation by analyzing the class activation maps on medical images concerning specific questions.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "Medical Visual Question Answering (VQA) has emerged as a promising solution to enhance clinic-decision making and patient interactions. 
Given a medical image and a corresponding question, medical VQA aims to predict an informative answer by reasoning the visual and textual information. However, datasets with limited samples circumscribe the generalization of medical VQA, reducing its accuracy when applied to unseen medical samples. Existing works tried to solve this problem with meta-learning or self-supervised learning but still failed to achieve satisfactory performance on medical VQA with insufficient samples. To address this problem, we propose multimodal hierarchical knowledge distillation for medical VQA (MHKD-MVQA). In the primary novelty of MHKD-MVQA, we distill knowledge from not only the output but also the intermediate layers, which leverages the knowledge from limited samples to a greater extent. Meanwhile, medical images and questions are embedded in a shared latent space, enabling our model to tackle multimodal samples. We evaluate our model on two medical VQA datasets, VQA-MED 2019 and VQA-RAD, where MHKD-MVQA achieves state-of-the-art performance and outperforms baselines by 3.6% and 1.6%, respectively. 
The extensive experiments also highlight the generalization of knowledge distillation by analyzing the class activation maps on medical images concerning specific questions.", "fno": "09995473", "keywords": [ "Data Visualisation", "Decision Making", "Learning Artificial Intelligence", "Medical Image Processing", "Medical Information Systems", "Question Answering Information Retrieval", "Video Signal Processing", "Clinic Decision Making", "Corresponding Question", "Informative Answer", "Medical Image", "Medical Visual Question Answering", "Medical VQA Aims", "Medical VQA Datasets", "MHKD MVQA Achieves State Of The Art Performance", "Multimodal Hierarchical Knowledge Distillation", "Multimodal Samples", "Patient Interactions", "Textual Information", "Unseen Medical Samples", "Visual Information", "VQA MED", "VQA RAD", "Adaptation Models", "Visualization", "Biological System Modeling", "Self Supervised Learning", "Transformers", "Question Answering Information Retrieval", "Cognition", "Medical Visual Question Answering", "Knowledge Distillation", "Multimodal Learning" ], "authors": [ { "affiliation": "Fudan University,Academy for Engineering and Technology,Shanghai,P.R. China", "fullName": "Jianfeng Wang", "givenName": "Jianfeng", "surname": "Wang", "__typename": "ArticleAuthorType" }, { "affiliation": "Imperial College London,London,United Kingdom,SW7 2AZ", "fullName": "Shuokang Huang", "givenName": "Shuokang", "surname": "Huang", "__typename": "ArticleAuthorType" }, { "affiliation": "Tongji University,College of Design and Innovation,Shanghai,P.R. China", "fullName": "Huifang Du", "givenName": "Huifang", "surname": "Du", "__typename": "ArticleAuthorType" }, { "affiliation": "Imperial College London,London,United Kingdom,SW7 2AZ", "fullName": "Yu Qin", "givenName": "Yu", "surname": "Qin", "__typename": "ArticleAuthorType" }, { "affiliation": "Tongji University,College of Design and Innovation,Shanghai,P.R. 
China", "fullName": "Haofen Wang", "givenName": "Haofen", "surname": "Wang", "__typename": "ArticleAuthorType" }, { "affiliation": "Fudan University,Academy for Engineering and Technology,Shanghai,P.R. China", "fullName": "Wenqiang Zhang", "givenName": "Wenqiang", "surname": "Zhang", "__typename": "ArticleAuthorType" } ], "idPrefix": "bibm", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "2022-12-01T00:00:00", "pubType": "proceedings", "pages": "567-574", "year": "2022", "issn": null, "isbn": "978-1-6654-6819-0", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "09994905", "articleId": "1JC3kZVLCXC", "__typename": "AdjacentArticleType" }, "next": { "fno": "09994982", "articleId": "1JC2LKRlV60", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/icme/2022/8563/0/09859865", "title": "Modality-Specific Multimodal Global Enhanced Network for Text-Based Visual Question Answering", "doi": null, "abstractUrl": "/proceedings-article/icme/2022/09859865/1G9DvDkSzPG", "parentPublication": { "id": "proceedings/icme/2022/8563/0", "title": "2022 IEEE International Conference on Multimedia and Expo (ICME)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvpr/2022/6946/0/694600p5354", "title": "Dual-Key Multimodal Backdoors for Visual Question Answering", "doi": null, "abstractUrl": "/proceedings-article/cvpr/2022/694600p5354/1H0N2Lz2HW8", "parentPublication": { "id": "proceedings/cvpr/2022/6946/0", "title": "2022 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvpr/2022/6946/0/694600q6474", "title": "WebQA: Multihop and Multimodal QA", "doi": null, "abstractUrl": 
"/proceedings-article/cvpr/2022/694600q6474/1H1hwOyPRGU", "parentPublication": { "id": "proceedings/cvpr/2022/6946/0", "title": "2022 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/5555/01/09965773", "title": "3D Question Answering", "doi": null, "abstractUrl": "/journal/tg/5555/01/09965773/1IHMR48xnyM", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icpr/2022/9062/0/09956469", "title": "A Transformer-based Medical Visual Question Answering Model", "doi": null, "abstractUrl": "/proceedings-article/icpr/2022/09956469/1IHpQGSROpO", "parentPublication": { "id": "proceedings/icpr/2022/9062/0", "title": "2022 26th International Conference on Pattern Recognition (ICPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/wacv/2023/9346/0/934600b155", "title": "VLC-BERT: Visual Question Answering with Contextualized Commonsense Knowledge", "doi": null, "abstractUrl": "/proceedings-article/wacv/2023/934600b155/1KxVl7qMe3e", "parentPublication": { "id": "proceedings/wacv/2023/9346/0", "title": "2023 IEEE/CVF Winter Conference on Applications of Computer Vision (WACV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/wacv/2023/9346/0/934600b084", "title": "Barlow constrained optimization for Visual Question Answering", "doi": null, "abstractUrl": "/proceedings-article/wacv/2023/934600b084/1L8qm6uqYWQ", "parentPublication": { "id": "proceedings/wacv/2023/9346/0", "title": "2023 IEEE/CVF Winter Conference on Applications of Computer Vision (WACV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/sitis/2022/6495/0/649500a224", "title": "Multimodal 
Knowledge Reasoning for Enhanced Visual Question Answering", "doi": null, "abstractUrl": "/proceedings-article/sitis/2022/649500a224/1MeoND4bVV6", "parentPublication": { "id": "proceedings/sitis/2022/6495/0", "title": "2022 16th International Conference on Signal-Image Technology & Internet-Based Systems (SITIS)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tk/5555/01/10102595", "title": "Event-Oriented Visual Question Answering: The E-VQA Dataset and Benchmark", "doi": null, "abstractUrl": "/journal/tk/5555/01/10102595/1MkXSR95oIM", "parentPublication": { "id": "trans/tk", "title": "IEEE Transactions on Knowledge & Data Engineering", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvpr/2019/3293/0/329300b989", "title": "MUREL: Multimodal Relational Reasoning for Visual Question Answering", "doi": null, "abstractUrl": "/proceedings-article/cvpr/2019/329300b989/1gyruLBNwU8", "parentPublication": { "id": "proceedings/cvpr/2019/3293/0", "title": "2019 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "1KxUhhFgzlK", "title": "2023 IEEE/CVF Winter Conference on Applications of Computer Vision (WACV)", "acronym": "wacv", "groupId": "1000040", "volume": "0", "displayVolume": "0", "year": "2023", "__typename": "ProceedingType" }, "article": { "id": "1L8qm6uqYWQ", "doi": "10.1109/WACV56688.2023.00114", "title": "Barlow constrained optimization for Visual Question Answering", "normalizedTitle": "Barlow constrained optimization for Visual Question Answering", "abstract": "Visual question answering is a vision-and-language multimodal task, that aims at predicting answers given samples from the question and image modalities. Most recent methods focus on learning a good joint embedding space of images and questions, either by improving the interaction between these two modalities, or by making it a more discriminant space. However, how informative this joint space is, has not been well explored. In this paper, we propose a novel regularization for VQA models, Constrained Optimization using Barlow&#x2019;s theory (COB), that improves the information content of the joint space by minimizing the redundancy. It reduces the correlation between the learned feature components and thereby disentangles semantic concepts. Our model also aligns the joint space with the answer embedding space, where we consider the answer and image+question as two different &#x2018;views&#x2019; of what in essence is the same semantic information. We propose a constrained optimization policy to balance the categorical and redundancy minimization forces. When built on the state-of-the-art GGE model, the resulting model improves VQA accuracy by 1.4% and 4% on the VQA-CP v2 and VQA v2 datasets respectively. The model also exhibits better interpretability. 
Code is made available: https://github.com/abskjha/Barlow-constrained-VQA", "abstracts": [ { "abstractType": "Regular", "content": "Visual question answering is a vision-and-language multimodal task, that aims at predicting answers given samples from the question and image modalities. Most recent methods focus on learning a good joint embedding space of images and questions, either by improving the interaction between these two modalities, or by making it a more discriminant space. However, how informative this joint space is, has not been well explored. In this paper, we propose a novel regularization for VQA models, Constrained Optimization using Barlow&#x2019;s theory (COB), that improves the information content of the joint space by minimizing the redundancy. It reduces the correlation between the learned feature components and thereby disentangles semantic concepts. Our model also aligns the joint space with the answer embedding space, where we consider the answer and image+question as two different &#x2018;views&#x2019; of what in essence is the same semantic information. We propose a constrained optimization policy to balance the categorical and redundancy minimization forces. When built on the state-of-the-art GGE model, the resulting model improves VQA accuracy by 1.4% and 4% on the VQA-CP v2 and VQA v2 datasets respectively. The model also exhibits better interpretability. Code is made available: https://github.com/abskjha/Barlow-constrained-VQA", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "Visual question answering is a vision-and-language multimodal task, that aims at predicting answers given samples from the question and image modalities. Most recent methods focus on learning a good joint embedding space of images and questions, either by improving the interaction between these two modalities, or by making it a more discriminant space. However, how informative this joint space is, has not been well explored. 
In this paper, we propose a novel regularization for VQA models, Constrained Optimization using Barlow’s theory (COB), that improves the information content of the joint space by minimizing the redundancy. It reduces the correlation between the learned feature components and thereby disentangles semantic concepts. Our model also aligns the joint space with the answer embedding space, where we consider the answer and image+question as two different ‘views’ of what in essence is the same semantic information. We propose a constrained optimization policy to balance the categorical and redundancy minimization forces. When built on the state-of-the-art GGE model, the resulting model improves VQA accuracy by 1.4% and 4% on the VQA-CP v2 and VQA v2 datasets respectively. The model also exhibits better interpretability. Code is made available: https://github.com/abskjha/Barlow-constrained-VQA", "fno": "934600b084", "keywords": [ "Computer Vision", "Data Visualisation", "Learning Artificial Intelligence", "Question Answering Information Retrieval", "Answer Embedding Space", "COB", "Constrained Optimization Using Barlows Theory", "GGE Model", "Image Modalities", "Image Question", "Joint Embedding Space", "Semantic Information", "Vision And Language Multimodal Task", "Visual Question Answering", "VQA Models", "Training", "Visualization", "Computational Modeling", "Redundancy", "Semantics", "Minimization", "Question Answering Information Retrieval", "Algorithms Vision Language And Or Other Modalities", "Image Recognition And Understanding Object Detection", "Categorization", "Segmentation", "Scene Modeling", "Visual Reasoning" ], "authors": [ { "affiliation": "KU Leuven,ESAT-PSI", "fullName": "Abhishek Jha", "givenName": "Abhishek", "surname": "Jha", "__typename": "ArticleAuthorType" }, { "affiliation": "KU Leuven,ESAT-PSI", "fullName": "Badri Patro", "givenName": "Badri", "surname": "Patro", "__typename": "ArticleAuthorType" }, { "affiliation": "KU Leuven,ESAT-PSI", 
"fullName": "Luc Van Gool", "givenName": "Luc", "surname": "Van Gool", "__typename": "ArticleAuthorType" }, { "affiliation": "KU Leuven,ESAT-PSI", "fullName": "Tinne Tuytelaars", "givenName": "Tinne", "surname": "Tuytelaars", "__typename": "ArticleAuthorType" } ], "idPrefix": "wacv", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "2023-01-01T00:00:00", "pubType": "proceedings", "pages": "1084-1093", "year": "2023", "issn": null, "isbn": "978-1-6654-9346-8", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "934600b073", "articleId": "1KxVn29Eb6g", "__typename": "AdjacentArticleType" }, "next": { "fno": "934600b094", "articleId": "1KxV4p0t9i8", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/icme/2022/8563/0/09859639", "title": "Multi-Head Attention Fusion Network for Visual Question Answering", "doi": null, "abstractUrl": "/proceedings-article/icme/2022/09859639/1G9EIhRN8C4", "parentPublication": { "id": "proceedings/icme/2022/8563/0", "title": "2022 IEEE International Conference on Multimedia and Expo (ICME)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icme/2022/8563/0/09859591", "title": "Question-Driven Graph Fusion Network for Visual Question Answering", "doi": null, "abstractUrl": "/proceedings-article/icme/2022/09859591/1G9Ep1BWxIQ", "parentPublication": { "id": "proceedings/icme/2022/8563/0", "title": "2022 IEEE International Conference on Multimedia and Expo (ICME)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/crv/2022/9774/0/977400a233", "title": "3DVQA: Visual Question Answering for 3D Environments", "doi": null, "abstractUrl": "/proceedings-article/crv/2022/977400a233/1GeCye8PHzO", "parentPublication": { "id": "proceedings/crv/2022/9774/0", 
"title": "2022 19th Conference on Robots and Vision (CRV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/5555/01/09965773", "title": "3D Question Answering", "doi": null, "abstractUrl": "/journal/tg/5555/01/09965773/1IHMR48xnyM", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/bibm/2022/6819/0/09995473", "title": "MHKD-MVQA: Multimodal Hierarchical Knowledge Distillation for Medical Visual Question Answering", "doi": null, "abstractUrl": "/proceedings-article/bibm/2022/09995473/1JC2qFvLZ96", "parentPublication": { "id": "proceedings/bibm/2022/6819/0", "title": "2022 IEEE International Conference on Bioinformatics and Biomedicine (BIBM)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/hpcc-dss-smartcity-dependsys/2022/1993/0/199300b655", "title": "BPCN:A simple and efficient model for visual question answering", "doi": null, "abstractUrl": "/proceedings-article/hpcc-dss-smartcity-dependsys/2022/199300b655/1LSPwUnkIco", "parentPublication": { "id": "proceedings/hpcc-dss-smartcity-dependsys/2022/1993/0", "title": "2022 IEEE 24th Int Conf on High Performance Computing & Communications; 8th Int Conf on Data Science & Systems; 20th Int Conf on Smart City; 8th Int Conf on Dependability in Sensor, Cloud & Big Data Systems & Application (HPCC/DSS/SmartCity/DependSys)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/sitis/2022/6495/0/649500a224", "title": "Multimodal Knowledge Reasoning for Enhanced Visual Question Answering", "doi": null, "abstractUrl": "/proceedings-article/sitis/2022/649500a224/1MeoND4bVV6", "parentPublication": { "id": "proceedings/sitis/2022/6495/0", "title": "2022 16th International Conference on Signal-Image Technology & Internet-Based 
Systems (SITIS)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tk/5555/01/10102595", "title": "Event-Oriented Visual Question Answering: The E-VQA Dataset and Benchmark", "doi": null, "abstractUrl": "/journal/tk/5555/01/10102595/1MkXSR95oIM", "parentPublication": { "id": "trans/tk", "title": "IEEE Transactions on Knowledge & Data Engineering", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvpr/2019/3293/0/329300j554", "title": "Explicit Bias Discovery in Visual Question Answering Models", "doi": null, "abstractUrl": "/proceedings-article/cvpr/2019/329300j554/1gyrXiAfK6c", "parentPublication": { "id": "proceedings/cvpr/2019/3293/0", "title": "2019 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvpr/2019/3293/0/329300b989", "title": "MUREL: Multimodal Relational Reasoning for Visual Question Answering", "doi": null, "abstractUrl": "/proceedings-article/cvpr/2019/329300b989/1gyruLBNwU8", "parentPublication": { "id": "proceedings/cvpr/2019/3293/0", "title": "2019 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "1MeoElmyyEo", "title": "2022 16th International Conference on Signal-Image Technology & Internet-Based Systems (SITIS)", "acronym": "sitis", "groupId": "10089803", "volume": "0", "displayVolume": "0", "year": "2022", "__typename": "ProceedingType" }, "article": { "id": "1MeoND4bVV6", "doi": "10.1109/SITIS57111.2022.00048", "title": "Multimodal Knowledge Reasoning for Enhanced Visual Question Answering", "normalizedTitle": "Multimodal Knowledge Reasoning for Enhanced Visual Question Answering", "abstract": "In the domains of Natural Language Processing (NLP) and Computer Vision (CV) Visual Question Answering (VQA) is a multidisciplinary task, in which an image and a question are given to a VQA system, which is responsible for giving the answer. The VQA system is used for a variety of real-world applications, such as providing situational information based on visual material, making judgments using a vast quantity of surveillance data, interacting with robots, and helping individuals who are blind or visually impaired. Although it is required yet challenging to complete comprehensive VQA, Fact-based VQA (FVQA) approaches in which external knowledge is required to process with image and question. Existing FVQA methods combine all types of data without fine-grained selection, thereby generating unexpected noise while reasoning about the final result. The problem solution should be able to collect complementary-information evidence based on question-attention. We represent an image with different layers of information by a multimodal knowledge graph relating to the features of visual, factual, and semantic. We propose a multimodal knowledge graph-convolutional-network (GCN) to collect relevant-information evidence from different information layers based on the given question. 
In particular, intra-modal knowledge graph attention takes evidence from each modality, while inter-modal knowledge graph attention gets evidence across the different information layers. To get an optimal answer, we stack this process multiple times to perform a reasoning mechanism. Over the FVQA dataset, we achieved state-of-the-art results by improving 10.86% test accuracy, which demonstrates the usefulness and interpretability of our approach.", "abstracts": [ { "abstractType": "Regular", "content": "In the domains of Natural Language Processing (NLP) and Computer Vision (CV) Visual Question Answering (VQA) is a multidisciplinary task, in which an image and a question are given to a VQA system, which is responsible for giving the answer. The VQA system is used for a variety of real-world applications, such as providing situational information based on visual material, making judgments using a vast quantity of surveillance data, interacting with robots, and helping individuals who are blind or visually impaired. Although it is required yet challenging to complete comprehensive VQA, Fact-based VQA (FVQA) approaches in which external knowledge is required to process with image and question. Existing FVQA methods combine all types of data without fine-grained selection, thereby generating unexpected noise while reasoning about the final result. The problem solution should be able to collect complementary-information evidence based on question-attention. We represent an image with different layers of information by a multimodal knowledge graph relating to the features of visual, factual, and semantic. We propose a multimodal knowledge graph-convolutional-network (GCN) to collect relevant-information evidence from different information layers based on the given question. In particular, intra-modal knowledge graph attention takes evidence from each modality, while inter-modal knowledge graph attention gets evidence across the different information layers. 
To get an optimal answer, we stack this process multiple times to perform a reasoning mechanism. Over the FVQA dataset, we achieved state-of-the-art results by improving 10.86% test accuracy, which demonstrates the usefulness and interpretability of our approach.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "In the domains of Natural Language Processing (NLP) and Computer Vision (CV) Visual Question Answering (VQA) is a multidisciplinary task, in which an image and a question are given to a VQA system, which is responsible for giving the answer. The VQA system is used for a variety of real-world applications, such as providing situational information based on visual material, making judgments using a vast quantity of surveillance data, interacting with robots, and helping individuals who are blind or visually impaired. Although it is required yet challenging to complete comprehensive VQA, Fact-based VQA (FVQA) approaches in which external knowledge is required to process with image and question. Existing FVQA methods combine all types of data without fine-grained selection, thereby generating unexpected noise while reasoning about the final result. The problem solution should be able to collect complementary-information evidence based on question-attention. We represent an image with different layers of information by a multimodal knowledge graph relating to the features of visual, factual, and semantic. We propose a multimodal knowledge graph-convolutional-network (GCN) to collect relevant-information evidence from different information layers based on the given question. In particular, intra-modal knowledge graph attention takes evidence from each modality, while inter-modal knowledge graph attention gets evidence across the different information layers. To get an optimal answer, we stack this process multiple times to perform a reasoning mechanism. 
Over the FVQA dataset, we achieved state-of-the-art results by improving 10.86% test accuracy, which demonstrates the usefulness and interpretability of our approach.", "fno": "649500a224", "keywords": [ "Computer Vision", "Convolutional Neural Nets", "Graph Theory", "Inference Mechanisms", "Natural Language Processing", "Question Answering Information Retrieval", "Semantic Networks", "Video Signal Processing", "Visual Databases", "Complementary Information Evidence", "Complete Comprehensive VQA", "Computer Vision Visual Question Answering", "Different Information Layers", "Enhanced Visual Question", "External Knowledge", "Fact Based VQA Approaches", "Fine Grained Selection", "FVQA Dataset", "FVQA Methods", "Helping Individuals", "Inter Modal Knowledge Graph Attention", "Intra Modal Knowledge Graph Attention", "Multidisciplinary Task", "Multimodal Knowledge Graph Convolutional Network", "Multimodal Knowledge Reasoning", "Natural Language Processing", "Optimal Answer", "Process Multiple Times", "Question Attention", "Real World Applications", "Reasoning Mechanism", "Relevant Information Evidence", "Situational Information", "Surveillance Data", "Unexpected Noise While Reasoning", "Vast Quantity", "Visual Material", "VQA System", "Visualization", "Computer Vision", "Surveillance", "Semantics", "Knowledge Graphs", "Cognition", "Question Answering Information Retrieval", "VQA", "Computer Vision", "FVQA", "Knowledge Graphs" ], "authors": [ { "affiliation": "National University of Sciences and Technology (NUST),Islamabad,Pakistan", "fullName": "Afzaal Hussain", "givenName": "Afzaal", "surname": "Hussain", "__typename": "ArticleAuthorType" }, { "affiliation": "National University of Sciences and Technology (NUST),Islamabad,Pakistan", "fullName": "Ifrah Maqsood", "givenName": "Ifrah", "surname": "Maqsood", "__typename": "ArticleAuthorType" }, { "affiliation": "National University of Sciences and Technology (NUST),Islamabad,Pakistan", "fullName": "Muhammad Shahzad", 
"givenName": "Muhammad", "surname": "Shahzad", "__typename": "ArticleAuthorType" }, { "affiliation": "National University of Sciences and Technology (NUST),Islamabad,Pakistan", "fullName": "Muhammad Moazam Fraz", "givenName": "Muhammad Moazam", "surname": "Fraz", "__typename": "ArticleAuthorType" } ], "idPrefix": "sitis", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "2022-10-01T00:00:00", "pubType": "proceedings", "pages": "224-230", "year": "2022", "issn": null, "isbn": "978-1-6654-6495-6", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "649500a218", "articleId": "1MeoJvVmo5G", "__typename": "AdjacentArticleType" }, "next": { "fno": "649500a231", "articleId": "1MeoKxun81q", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/kam/2009/3888/3/3888c383", "title": "A Question Answering System Based on Conceptual Graph Formalism", "doi": null, "abstractUrl": "/proceedings-article/kam/2009/3888c383/12OmNqyUUGx", "parentPublication": { "id": "proceedings/kam/2009/3888/1", "title": "Knowledge Acquisition and Modeling, International Symposium on", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iccet/2009/3521/2/3521b346", "title": "Domain Ontology Based Automatic Question Answering", "doi": null, "abstractUrl": "/proceedings-article/iccet/2009/3521b346/12OmNzXFoEW", "parentPublication": { "id": "proceedings/iccet/2009/3521/1", "title": "Computer Engineering and Technology, International Conference on", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icme/2022/8563/0/09859865", "title": "Modality-Specific Multimodal Global Enhanced Network for Text-Based Visual Question Answering", "doi": null, "abstractUrl": "/proceedings-article/icme/2022/09859865/1G9DvDkSzPG", 
"parentPublication": { "id": "proceedings/icme/2022/8563/0", "title": "2022 IEEE International Conference on Multimedia and Expo (ICME)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icme/2022/8563/0/09859591", "title": "Question-Driven Graph Fusion Network for Visual Question Answering", "doi": null, "abstractUrl": "/proceedings-article/icme/2022/09859591/1G9Ep1BWxIQ", "parentPublication": { "id": "proceedings/icme/2022/8563/0", "title": "2022 IEEE International Conference on Multimedia and Expo (ICME)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvpr/2022/6946/0/694600f089", "title": "Maintaining Reasoning Consistency in Compositional Visual Question Answering", "doi": null, "abstractUrl": "/proceedings-article/cvpr/2022/694600f089/1H0MXvE8cjm", "parentPublication": { "id": "proceedings/cvpr/2022/6946/0", "title": "2022 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/5555/01/09965773", "title": "3D Question Answering", "doi": null, "abstractUrl": "/journal/tg/5555/01/09965773/1IHMR48xnyM", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/bibm/2022/6819/0/09995473", "title": "MHKD-MVQA: Multimodal Hierarchical Knowledge Distillation for Medical Visual Question Answering", "doi": null, "abstractUrl": "/proceedings-article/bibm/2022/09995473/1JC2qFvLZ96", "parentPublication": { "id": "proceedings/bibm/2022/6819/0", "title": "2022 IEEE International Conference on Bioinformatics and Biomedicine (BIBM)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/trustcom/2022/9425/0/942500b410", "title": "Multimodal Graph Reasoning and Fusion 
for Video Question Answering", "doi": null, "abstractUrl": "/proceedings-article/trustcom/2022/942500b410/1LFLW33hrX2", "parentPublication": { "id": "proceedings/trustcom/2022/9425/0", "title": "2022 IEEE International Conference on Trust, Security and Privacy in Computing and Communications (TrustCom)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tk/5555/01/10102595", "title": "Event-Oriented Visual Question Answering: The E-VQA Dataset and Benchmark", "doi": null, "abstractUrl": "/journal/tk/5555/01/10102595/1MkXSR95oIM", "parentPublication": { "id": "trans/tk", "title": "IEEE Transactions on Knowledge & Data Engineering", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvpr/2019/3293/0/329300b989", "title": "MUREL: Multimodal Relational Reasoning for Visual Question Answering", "doi": null, "abstractUrl": "/proceedings-article/cvpr/2019/329300b989/1gyruLBNwU8", "parentPublication": { "id": "proceedings/cvpr/2019/3293/0", "title": "2019 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "1ezRzLyH4bu", "title": "2019 International Conference on 3D Vision (3DV)", "acronym": "3dv", "groupId": "1800494", "volume": "0", "displayVolume": "0", "year": "2019", "__typename": "ProceedingType" }, "article": { "id": "1ezRE66VHZS", "doi": "10.1109/3DV.2019.00088", "title": "Incorporating 3D Information Into Visual Question Answering", "normalizedTitle": "Incorporating 3D Information Into Visual Question Answering", "abstract": "We propose a tactic of advancing Visual Question Answering (VQA) task by incorporating 3D information via multi-view images. Conventional VQA approaches, which reply an answer in words against a linguistic question about a given RGB image, have less ability to recognize geometrical information so that they tend to fail to count things or guess positional relationship. Moreover, they have no ability to determine blinded space, so it is not feasible to invent VQA function to robots which will work in highly-occluded real-world environments. To achieve the situation, we introduce a new multi-view VQA dataset along with an approach that incorporating 3D scene information directly captured from multi-view images into VQA without using depth images or employing SLAM. Our proposed approach achieves strong performance with an overall accuracy of 95.4% on the challenging multi-view VQA dataset setup, which contains relatively severe occlusion. This work also demonstrates the promising aspects of bridging the gap between 3D vision and language.", "abstracts": [ { "abstractType": "Regular", "content": "We propose a tactic of advancing Visual Question Answering (VQA) task by incorporating 3D information via multi-view images. Conventional VQA approaches, which reply an answer in words against a linguistic question about a given RGB image, have less ability to recognize geometrical information so that they tend to fail to count things or guess positional relationship. 
Moreover, they have no ability to determine blinded space, so it is not feasible to invent VQA function to robots which will work in highly-occluded real-world environments. To achieve the situation, we introduce a new multi-view VQA dataset along with an approach that incorporating 3D scene information directly captured from multi-view images into VQA without using depth images or employing SLAM. Our proposed approach achieves strong performance with an overall accuracy of 95.4% on the challenging multi-view VQA dataset setup, which contains relatively severe occlusion. This work also demonstrates the promising aspects of bridging the gap between 3D vision and language.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "We propose a tactic of advancing Visual Question Answering (VQA) task by incorporating 3D information via multi-view images. Conventional VQA approaches, which reply an answer in words against a linguistic question about a given RGB image, have less ability to recognize geometrical information so that they tend to fail to count things or guess positional relationship. Moreover, they have no ability to determine blinded space, so it is not feasible to invent VQA function to robots which will work in highly-occluded real-world environments. To achieve the situation, we introduce a new multi-view VQA dataset along with an approach that incorporating 3D scene information directly captured from multi-view images into VQA without using depth images or employing SLAM. Our proposed approach achieves strong performance with an overall accuracy of 95.4% on the challenging multi-view VQA dataset setup, which contains relatively severe occlusion. 
This work also demonstrates the promising aspects of bridging the gap between 3D vision and language.", "fno": "313100a756", "keywords": [ "Image Colour Analysis", "Natural Language Processing", "Question Answering Information Retrieval", "Robot Vision", "SLAM Robots", "Stereo Image Processing", "Multiview VQA Dataset", "3 D Scene Information", "Multiview Images", "Depth Images", "Language", "Linguistic Question", "RGB Image", "Geometrical Information", "Visual Question Answering", "SLAM", "Robots", "3 D Vision", "Three Dimensional Displays", "Feature Extraction", "Task Analysis", "Visualization", "Natural Language Processing", "Cognition", "Real Time Systems", "Applied Perception", "Human Computer Interaction" ], "authors": [ { "affiliation": "National Institute of Advanced Industrial Science and Technology, University of Tsukuba", "fullName": "Yue Qiu", "givenName": "Yue", "surname": "Qiu", "__typename": "ArticleAuthorType" }, { "affiliation": "National Institute of Advanced Industrial Science and Technology, University of Tsukuba", "fullName": "Yutaka Satoh", "givenName": "Yutaka", "surname": "Satoh", "__typename": "ArticleAuthorType" }, { "affiliation": "National Institute of Advanced Industrial Science and Technology", "fullName": "Ryota Suzuki", "givenName": "Ryota", "surname": "Suzuki", "__typename": "ArticleAuthorType" }, { "affiliation": "National Institute of Advanced Industrial Science and Technology", "fullName": "Hirokatsu Kataoka", "givenName": "Hirokatsu", "surname": "Kataoka", "__typename": "ArticleAuthorType" } ], "idPrefix": "3dv", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "2019-09-01T00:00:00", "pubType": "proceedings", "pages": "756-765", "year": "2019", "issn": null, "isbn": "978-1-7281-3131-3", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "313100a747", "articleId": "1ezRDWD4rCg", "__typename": 
"AdjacentArticleType" }, "next": { "fno": "313100a767", "articleId": "1ezRAs38cog", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/iccv/2015/8391/0/8391c425", "title": "VQA: Visual Question Answering", "doi": null, "abstractUrl": "/proceedings-article/iccv/2015/8391c425/12OmNrYlmBL", "parentPublication": { "id": "proceedings/iccv/2015/8391/0", "title": "2015 IEEE International Conference on Computer Vision (ICCV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/wacv/2018/4886/0/488601b888", "title": "Structured Triplet Learning with POS-Tag Guided Attention for Visual Question Answering", "doi": null, "abstractUrl": "/proceedings-article/wacv/2018/488601b888/12OmNs4S8Fc", "parentPublication": { "id": "proceedings/wacv/2018/4886/0", "title": "2018 IEEE Winter Conference on Applications of Computer Vision (WACV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/wacv/2018/4886/0/488601b852", "title": "Semantically Guided Visual Question Answering", "doi": null, "abstractUrl": "/proceedings-article/wacv/2018/488601b852/12OmNweTvL6", "parentPublication": { "id": "proceedings/wacv/2018/4886/0", "title": "2018 IEEE Winter Conference on Applications of Computer Vision (WACV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iccv/2017/1032/0/1032b983", "title": "An Analysis of Visual Question Answering Algorithms", "doi": null, "abstractUrl": "/proceedings-article/iccv/2017/1032b983/12OmNzZWbD1", "parentPublication": { "id": "proceedings/iccv/2017/1032/0", "title": "2017 IEEE International Conference on Computer Vision (ICCV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tp/2018/10/08046084", "title": "FVQA: Fact-Based Visual Question Answering", "doi": null, "abstractUrl": 
"/journal/tp/2018/10/08046084/13rRUwInvC9", "parentPublication": { "id": "trans/tp", "title": "IEEE Transactions on Pattern Analysis & Machine Intelligence", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvpr/2018/6420/0/642000i611", "title": "iVQA: Inverse Visual Question Answering", "doi": null, "abstractUrl": "/proceedings-article/cvpr/2018/642000i611/17D45WWzW3F", "parentPublication": { "id": "proceedings/cvpr/2018/6420/0", "title": "2018 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/crv/2022/9774/0/977400a233", "title": "3DVQA: Visual Question Answering for 3D Environments", "doi": null, "abstractUrl": "/proceedings-article/crv/2022/977400a233/1GeCye8PHzO", "parentPublication": { "id": "proceedings/crv/2022/9774/0", "title": "2022 19th Conference on Robots and Vision (CRV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvpr/2022/6946/0/694600f046", "title": "Sim VQA: Exploring Simulated Environments for Visual Question Answering", "doi": null, "abstractUrl": "/proceedings-article/cvpr/2022/694600f046/1H0LdfgdNjG", "parentPublication": { "id": "proceedings/cvpr/2022/6946/0", "title": "2022 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/5555/01/09965773", "title": "3D Question Answering", "doi": null, "abstractUrl": "/journal/tg/5555/01/09965773/1IHMR48xnyM", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/wacv/2020/6553/0/09093452", "title": "Visual Question Answering on 360&#x00B0; Images", "doi": null, "abstractUrl": 
"/proceedings-article/wacv/2020/09093452/1jPbCyCHgkw", "parentPublication": { "id": "proceedings/wacv/2020/6553/0", "title": "2020 IEEE Winter Conference on Applications of Computer Vision (WACV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "12OmNyS6RMG", "title": "2016 IEEE/ACM Joint Conference on Digital Libraries (JCDL)", "acronym": "jcdl", "groupId": "1804605", "volume": "0", "displayVolume": "0", "year": "2016", "__typename": "ProceedingType" }, "article": { "id": "12OmNscOUib", "doi": "", "title": "Curve separation for line graphs in scholarly documents", "normalizedTitle": "Curve separation for line graphs in scholarly documents", "abstract": "Line graphs are abundant in scholarly papers. They are usually generated from a data table and that data can not be accessed. One important step in an automated data extraction pipeline is the curve separation problem: segmenting the pixels into separate curves. Previous work in this domain has focused on raster graphics extracted from scholarly PDFs, whereas most scholarly plots are embedded as vector graphics. We report a system to extract these plots as SVG images and show how that can improve both the accuracy (90%) and the scalability (5–8 seconds) of the curve separation problem.", "abstracts": [ { "abstractType": "Regular", "content": "Line graphs are abundant in scholarly papers. They are usually generated from a data table and that data can not be accessed. One important step in an automated data extraction pipeline is the curve separation problem: segmenting the pixels into separate curves. Previous work in this domain has focused on raster graphics extracted from scholarly PDFs, whereas most scholarly plots are embedded as vector graphics. We report a system to extract these plots as SVG images and show how that can improve both the accuracy (90%) and the scalability (5–8 seconds) of the curve separation problem.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "Line graphs are abundant in scholarly papers. They are usually generated from a data table and that data can not be accessed. 
One important step in an automated data extraction pipeline is the curve separation problem: segmenting the pixels into separate curves. Previous work in this domain has focused on raster graphics extracted from scholarly PDFs, whereas most scholarly plots are embedded as vector graphics. We report a system to extract these plots as SVG images and show how that can improve both the accuracy (90%) and the scalability (5–8 seconds) of the curve separation problem.", "fno": "07559622", "keywords": [ "Image Color Analysis", "Transforms", "Data Mining", "Shape", "XML", "Vector Graphics", "Data Extraction", "Line Graph" ], "authors": [ { "affiliation": "Information Sciences and Technology, Pennsylvania State University", "fullName": "Sagnik Ray Choudhury", "givenName": "Sagnik Ray", "surname": "Choudhury", "__typename": "ArticleAuthorType" }, { "affiliation": "EECS, Pennsylvania State University", "fullName": "Shuting Wang", "givenName": "Shuting", "surname": "Wang", "__typename": "ArticleAuthorType" }, { "affiliation": "Information Sciences and Technology, Pennsylvania State University", "fullName": "C. Lee. Giles", "givenName": "C. 
Lee.", "surname": "Giles", "__typename": "ArticleAuthorType" } ], "idPrefix": "jcdl", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "2016-06-01T00:00:00", "pubType": "proceedings", "pages": "277-278", "year": "2016", "issn": null, "isbn": "978-1-4503-4229-2", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "07559621", "articleId": "12OmNwbcJ4Y", "__typename": "AdjacentArticleType" }, "next": { "fno": "07559623", "articleId": "12OmNzXWZHE", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/icdar/2015/1805/0/07333871", "title": "Automated analysis of line plots in documents", "doi": null, "abstractUrl": "/proceedings-article/icdar/2015/07333871/12OmNBOCWfV", "parentPublication": { "id": "proceedings/icdar/2015/1805/0", "title": "2015 13th International Conference on Document Analysis and Recognition (ICDAR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icsc/2018/4408/0/440801a041", "title": "SAANSET: Semi-Automated Acquisition of Scholarly Metadata Using OpenResearch.org Platform", "doi": null, "abstractUrl": "/proceedings-article/icsc/2018/440801a041/12OmNC1Y5o8", "parentPublication": { "id": "proceedings/icsc/2018/4408/0", "title": "2018 IEEE 12th International Conference on Semantic Computing (ICSC)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cimca/2006/2731/0/04052839", "title": "Curve Shape Modification and Similarity Evaluation", "doi": null, "abstractUrl": "/proceedings-article/cimca/2006/04052839/12OmNCzsKFD", "parentPublication": { "id": "proceedings/cimca/2006/2731/0", "title": "2006 International Conference on Computational Inteligence for Modelling Control and Automation and International Conference on Intelligent Agents Web Technologies 
and International Commerce (CIMCA'06)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cgiv/2017/0852/0/0852a001", "title": "Pythagorean Hodograph Quintic Trigonometric Bezier Transtion Curve", "doi": null, "abstractUrl": "/proceedings-article/cgiv/2017/0852a001/12OmNwbukeO", "parentPublication": { "id": "proceedings/cgiv/2017/0852/0", "title": "2017 14th International Conference on Computer Graphics, Imaging and Visualization (CGiV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/das/2016/1792/0/1792a108", "title": "Understanding Line Plots Using Bayesian Network", "doi": null, "abstractUrl": "/proceedings-article/das/2016/1792a108/12OmNwqx46A", "parentPublication": { "id": "proceedings/das/2016/1792/0", "title": "2016 12th IAPR Workshop on Document Analysis Systems (DAS)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/big-data/2014/5666/0/07004455", "title": "Integrating Data Mining and Data Management Technologies for Scholarly Inquiry", "doi": null, "abstractUrl": "/proceedings-article/big-data/2014/07004455/12OmNyKJicL", "parentPublication": { "id": "proceedings/big-data/2014/5666/0", "title": "2014 IEEE International Conference on Big Data (Big Data)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/acv/1992/2840/0/00240308", "title": "Curve recognition using B-spline representation", "doi": null, "abstractUrl": "/proceedings-article/acv/1992/00240308/12OmNzTppBg", "parentPublication": { "id": "proceedings/acv/1992/2840/0", "title": "Proceedings IEEE Workshop on Applications of Computer Vision", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/jcdl/2022/9345/0/09852933", "title": "X-SCITLDR: Cross-Lingual Extreme Summarization of Scholarly Documents", "doi": null, "abstractUrl": 
"/proceedings-article/jcdl/2022/09852933/1FT2nsbidig", "parentPublication": { "id": "proceedings/jcdl/2022/9345/0", "title": "2022 ACM/IEEE Joint Conference on Digital Libraries (JCDL)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/jcdl/2022/9345/0/09852828", "title": "Opening scholarly documents through text analytics", "doi": null, "abstractUrl": "/proceedings-article/jcdl/2022/09852828/1FT2qnG4iLS", "parentPublication": { "id": "proceedings/jcdl/2022/9345/0", "title": "2022 ACM/IEEE Joint Conference on Digital Libraries (JCDL)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icdar/2019/3014/0/301400b438", "title": "A Neural Approach for Text Extraction from Scholarly Figures", "doi": null, "abstractUrl": "/proceedings-article/icdar/2019/301400b438/1h81qny2qyI", "parentPublication": { "id": "proceedings/icdar/2019/3014/0", "title": "2019 International Conference on Document Analysis and Recognition (ICDAR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "1lFJ9Evt0pG", "title": "2019 IEEE 18th International Conference on Cognitive Informatics & Cognitive Computing (ICCI*CC)", "acronym": "icci*cc", "groupId": "1000097", "volume": "0", "displayVolume": "0", "year": "2019", "__typename": "ProceedingType" }, "article": { "id": "1lFJdiVGPio", "doi": "10.1109/ICCICC46617.2019.9146098", "title": "Visualizing the Temporal Similarity Between Clusters of Dynamic Graphs", "normalizedTitle": "Visualizing the Temporal Similarity Between Clusters of Dynamic Graphs", "abstract": "The evolution of graph structures in large time-varying graphs is often difficult to visualize and interpret due to excessive clutter from overlapping nodes and edges. With limited display area, visual clutter often increases and makes it difficult to recognize developing patterns in embedded sub-graphs. In such situations viewers are often hampered in observing and exploring significant changes of the graph components. This poses a cognitive barrier in the visual analytics of large dynamic structures. Another important problem in visualizing dynamic graphs is capturing the difference between graph states. Their state changes often become intractable. In this paper we propose to construct cognitive templates for grouping closely related entities using community detection techniques. The induced subgraphs are collapsed into meta-nodes in order to simplify the representation of large graphs and induce similarities between communities. In order to compute the new structures, we introduce the GCN, or Graph Convolution Network, that learns the representations of sub-graphs induced by communities. The pair-wise similarities can then be calculated by graph-based cluster search algorithms. Furthermore, the proximity state might change temporally. We need to extract the matched communities between consecutive snapshots. Using multi-dimensional scaling and color mappings, we reveal the evolution of graphs at the community level. 
We evaluate the effectiveness of our method by applying it to the Wikipedia edit history data set.", "abstracts": [ { "abstractType": "Regular", "content": "The evolution of graph structures in large time-varying graphs is often difficult to visualize and interpret due to excessive clutter from overlapping nodes and edges. With limited display area, visual clutter often increases and makes it difficult to recognize developing patterns in embedded sub-graphs. In such situations viewers are often hampered in observing and exploring significant changes of the graph components. This poses a cognitive barrier in the visual analytics of large dynamic structures. Another important problem in visualizing dynamic graphs is capturing the difference between graph states. Their state changes often become intractable. In this paper we propose to construct cognitive templates for grouping closely related entities using community detection techniques. The induced subgraphs are collapsed into meta-nodes in order to simplify the representation of large graphs and induce similarities between communities. In order to compute the new structures, we introduce the GCN, or Graph Convolution Network, that learns the representations of sub-graphs induced by communities. The pair-wise similarities can then be calculated by graph-based cluster search algorithms. Furthermore, the proximity state might change temporally. We need to extract the matched communities between consecutive snapshots. Using multi-dimensional scaling and color mappings, we reveal the evolution of graphs at the community level. We evaluate the effectiveness of our method by applying it to the Wikipedia edit history data set.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "The evolution of graph structures in large time-varying graphs is often difficult to visualize and interpret due to excessive clutter from overlapping nodes and edges. 
With limited display area, visual clutter often increases and makes it difficult to recognize developing patterns in embedded sub-graphs. In such situations viewers are often hampered in observing and exploring significant changes of the graph components. This poses a cognitive barrier in the visual analytics of large dynamic structures. Another important problem in visualizing dynamic graphs is capturing the difference between graph states. Their state changes often become intractable. In this paper we propose to construct cognitive templates for grouping closely related entities using community detection techniques. The induced subgraphs are collapsed into meta-nodes in order to simplify the representation of large graphs and induce similarities between communities. In order to compute the new structures, we introduce the GCN, or Graph Convolution Network, that learns the representations of sub-graphs induced by communities. The pair-wise similarities can then be calculated by graph-based cluster search algorithms. Furthermore, the proximity state might change temporally. We need to extract the matched communities between consecutive snapshots. Using multi-dimensional scaling and color mappings, we reveal the evolution of graphs at the community level. 
We evaluate the effectiveness of our method by applying it to the Wikipedia edit history data set.", "fno": "09146098", "keywords": [ "Convolutional Neural Nets", "Data Visualisation", "Graph Theory", "Pattern Clustering", "GCN", "Time Varying Graphs", "Graph Structures", "Temporal Similarity", "Graph Based Cluster Search Algorithms", "Pair Wise Similarities", "Graph Convolution Network", "Community Detection Techniques", "Grouping Closely Related Entities", "State Changes", "Graph States", "Dynamic Graphs", "Dynamic Structures", "Visual Analytics", "Cognitive Barrier", "Graph Components", "Embedded Sub Graphs", "Visual Clutter", "Excessive Clutter", "Visualization", "Image Edge Detection", "Clutter", "Stability Analysis", "Partitioning Algorithms", "Topology", "Measurement", "Cognitive Social Networks", "Temporal Graph Visualization", "Graph Similarity", "Evolutionary Networks" ], "authors": [ { "affiliation": "The Hong Kong Polytechnic University,Department of Computing,Hong Kong", "fullName": "Yunzhe Wang", "givenName": "Yunzhe", "surname": "Wang", "__typename": "ArticleAuthorType" }, { "affiliation": "The Hong Kong Polytechnic University,Department of Computing,Hong Kong", "fullName": "George Baciu", "givenName": "George", "surname": "Baciu", "__typename": "ArticleAuthorType" }, { "affiliation": "School of Computer Science East China Normal University,China", "fullName": "Chenhui Li", "givenName": "Chenhui", "surname": "Li", "__typename": "ArticleAuthorType" } ], "idPrefix": "icci*cc", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "2019-07-01T00:00:00", "pubType": "proceedings", "pages": "205-210", "year": "2019", "issn": null, "isbn": "978-1-7281-1419-4", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "09146093", "articleId": "1lFJdbsJ49a", "__typename": "AdjacentArticleType" }, "next": { "fno": "09146101", "articleId": 
"1lFJe6yEBhK", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/icdm/2014/4302/0/4302a737", "title": "TRIBAC: Discovering Interpretable Clusters and Latent Structures in Graphs", "doi": null, "abstractUrl": "/proceedings-article/icdm/2014/4302a737/12OmNBghtsa", "parentPublication": { "id": "proceedings/icdm/2014/4302/0", "title": "2014 IEEE International Conference on Data Mining (ICDM)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/pacificvis/2015/6879/0/07156354", "title": "Attribute-driven edge bundling for general graphs with applications in trail analysis", "doi": null, "abstractUrl": "/proceedings-article/pacificvis/2015/07156354/12OmNCaLEnG", "parentPublication": { "id": "proceedings/pacificvis/2015/6879/0", "title": "2015 IEEE Pacific Visualization Symposium (PacificVis)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/bigcomp/2015/7303/0/07072830", "title": "Discovering large subsets with high quality partitions in real world graphs", "doi": null, "abstractUrl": "/proceedings-article/bigcomp/2015/07072830/12OmNqyUUsG", "parentPublication": { "id": "proceedings/bigcomp/2015/7303/0", "title": "2015 International Conference on Big Data and Smart Computing (BigComp)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/pacificvis/2013/4797/0/06596126", "title": "Smooth bundling of large streaming and sequence graphs", "doi": null, "abstractUrl": "/proceedings-article/pacificvis/2013/06596126/12OmNscfI0r", "parentPublication": { "id": "proceedings/pacificvis/2013/4797/0", "title": "2013 IEEE Pacific Visualization Symposium (PacificVis)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/pacificvis/2011/935/0/05742389", "title": "Multilevel agglomerative edge bundling for visualizing 
large graphs", "doi": null, "abstractUrl": "/proceedings-article/pacificvis/2011/05742389/12OmNxj233Y", "parentPublication": { "id": "proceedings/pacificvis/2011/935/0", "title": "2011 IEEE Pacific Visualization Symposium (PacificVis)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/pacificvis/2014/2874/0/2874a105", "title": "Improved Optimal and Approximate Power Graph Compression for Clearer Visualisation of Dense Graphs", "doi": null, "abstractUrl": "/proceedings-article/pacificvis/2014/2874a105/12OmNywOWNM", "parentPublication": { "id": "proceedings/pacificvis/2014/2874/0", "title": "2014 IEEE Pacific Visualization Symposium (PacificVis)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tk/2019/11/08481531", "title": "Effective and Efficient Community Search Over Large Directed Graphs", "doi": null, "abstractUrl": "/journal/tk/2019/11/08481531/146z4GOCJh5", "parentPublication": { "id": "trans/tk", "title": "IEEE Transactions on Knowledge & Data Engineering", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icde/2019/7474/0/747400b714", "title": "Efficient Partitioning and Query Processing of Spatio-Temporal Graphs with Trillion Edges", "doi": null, "abstractUrl": "/proceedings-article/icde/2019/747400b714/1aDSUGtkz28", "parentPublication": { "id": "proceedings/icde/2019/7474/0", "title": "2019 IEEE 35th International Conference on Data Engineering (ICDE)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tk/2022/08/09240056", "title": "K-Core Based Temporal Graph Convolutional Network for Dynamic Graphs", "doi": null, "abstractUrl": "/journal/tk/2022/08/09240056/1oeZANsdEje", "parentPublication": { "id": "trans/tk", "title": "IEEE Transactions on Knowledge & Data Engineering", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": 
"trans/tk/2023/02/09479741", "title": "When Convolutional Network Meets Temporal Heterogeneous Graphs: An Effective Community Detection Method", "doi": null, "abstractUrl": "/journal/tk/2023/02/09479741/1v65M1Ax9Kw", "parentPublication": { "id": "trans/tk", "title": "IEEE Transactions on Knowledge & Data Engineering", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "12OmNxRnvRV", "title": "2006 IEEE/ACM 6th Joint Conference on Digital Libraries", "acronym": "jcdl", "groupId": "1804605", "volume": "0", "displayVolume": "0", "year": "2006", "__typename": "ProceedingType" }, "article": { "id": "12OmNAle6iq", "doi": "10.1145/1141753.1141800", "title": "Keyphrase extraction-based query expansion in digital libraries", "normalizedTitle": "Keyphrase extraction-based query expansion in digital libraries", "abstract": "In pseudo-relevance feedback, the two key factors affecting the retrieval performance most are the source from which expansion terms are generated and the method of ranking those expansion terms. In this paper, we present a novel unsupervised query expansion technique that utilizes keyphrases and POS phrase categorization. The keyphrases are extracted from the retrieved documents and weighted with an algorithm based on information gain and co-occurrence of phrases. The selected keyphrases are translated into disjunctive normal form (DNF) based on the POS phrase categorization technique for better query refomulation. Furthermore, we study whether ontologies such as WordNet and MeSH improve the retrieval performance in conjunction with the keyphrases. We test our techniques on TREC 5, 6, and 7 as well as a MEDLINE collection. The experimental results show that the use of keyphrases with POS phrase categorization produces the best average precision", "abstracts": [ { "abstractType": "Regular", "content": "In pseudo-relevance feedback, the two key factors affecting the retrieval performance most are the source from which expansion terms are generated and the method of ranking those expansion terms. In this paper, we present a novel unsupervised query expansion technique that utilizes keyphrases and POS phrase categorization. The keyphrases are extracted from the retrieved documents and weighted with an algorithm based on information gain and co-occurrence of phrases. 
The selected keyphrases are translated into disjunctive normal form (DNF) based on the POS phrase categorization technique for better query refomulation. Furthermore, we study whether ontologies such as WordNet and MeSH improve the retrieval performance in conjunction with the keyphrases. We test our techniques on TREC 5, 6, and 7 as well as a MEDLINE collection. The experimental results show that the use of keyphrases with POS phrase categorization produces the best average precision", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "In pseudo-relevance feedback, the two key factors affecting the retrieval performance most are the source from which expansion terms are generated and the method of ranking those expansion terms. In this paper, we present a novel unsupervised query expansion technique that utilizes keyphrases and POS phrase categorization. The keyphrases are extracted from the retrieved documents and weighted with an algorithm based on information gain and co-occurrence of phrases. The selected keyphrases are translated into disjunctive normal form (DNF) based on the POS phrase categorization technique for better query refomulation. Furthermore, we study whether ontologies such as WordNet and MeSH improve the retrieval performance in conjunction with the keyphrases. We test our techniques on TREC 5, 6, and 7 as well as a MEDLINE collection. 
The experimental results show that the use of keyphrases with POS phrase categorization produces the best average precision", "fno": "04119126", "keywords": [ "Document Handling", "Ontologies Artificial Intelligence", "Query Formulation", "Query Processing", "Relevance Feedback", "Unsupervised Learning", "Keyphrase Extraction", "Unsupervised Query Expansion Technique", "Digital Libraries", "Pseudorelevance Feedback", "Retrieval Performance", "POS Phrase Categorization", "Documents Retrieval", "Disjunctive Normal Form", "Query Refomulation", "Ontologies", "MEDLINE Collection", "Software Libraries", "Feedback", "Information Retrieval", "Data Mining", "Information Science", "Ontologies", "Educational Institutions", "Proteins", "Testing", "Speech", "POS", "Word Net", "Information Gain", "Keyphrase Extraction", "Query Expansion" ], "authors": [ { "affiliation": "Drexel University, Philadelphia, PA", "fullName": "Il Yeol Song", "givenName": "Il Yeol", "surname": "Song", "__typename": "ArticleAuthorType" }, { "affiliation": "Drexel University, Philadelphia, PA", "fullName": "Robert B. 
Allen", "givenName": "Robert B.", "surname": "Allen", "__typename": "ArticleAuthorType" }, { "affiliation": "Temple University, Philadelphia, PA", "fullName": "Zoran Obradovic", "givenName": "Zoran", "surname": "Obradovic", "__typename": "ArticleAuthorType" }, { "affiliation": "Temple University, Philadelphia, PA", "fullName": "Min Song", "givenName": "Min", "surname": "Song", "__typename": "ArticleAuthorType" } ], "idPrefix": "jcdl", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "2006-06-01T00:00:00", "pubType": "proceedings", "pages": "202-209", "year": "2006", "issn": null, "isbn": "1-59593-354-9", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "04119125", "articleId": "12OmNARRYuU", "__typename": "AdjacentArticleType" }, "next": { "fno": "04119127", "articleId": "12OmNxzuMK7", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/icdm/2006/2701/0/270100275", "title": "Keyphrase Extraction Using Semantic Networks Structure Analysis", "doi": null, "abstractUrl": "/proceedings-article/icdm/2006/270100275/12OmNB0nWfJ", "parentPublication": { "id": "proceedings/icdm/2006/2701/0", "title": "Sixth International Conference on Data Mining (ICDM'06)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/wi-iat/2008/3496/1/3496a214", "title": "An Automatic Online News Topic Keyphrase Extraction System", "doi": null, "abstractUrl": "/proceedings-article/wi-iat/2008/3496a214/12OmNCcbEgB", "parentPublication": { "id": "proceedings/wi-iat/2008/3496/1", "title": "Web Intelligence and Intelligent Agent Technology, IEEE/WIC/ACM International Conference on", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/hicss/2004/2056/4/205640104c", "title": "Automating Keyphrase Extraction with 
Multi-Objective Genetic Algorithms", "doi": null, "abstractUrl": "/proceedings-article/hicss/2004/205640104c/12OmNrK9q21", "parentPublication": { "id": "proceedings/hicss/2004/2056/4", "title": "37th Annual Hawaii International Conference on System Sciences, 2004. Proceedings of the", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/trustcom/2011/2135/0/06121007", "title": "The Hot Keyphrase Extraction Based on TF*PDF", "doi": null, "abstractUrl": "/proceedings-article/trustcom/2011/06121007/12OmNvlPkAH", "parentPublication": { "id": "proceedings/trustcom/2011/2135/0", "title": "2011IEEE 10th International Conference on Trust, Security and Privacy in Computing and Communications", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/wi-iat/2015/9618/3/9618c199", "title": "Subject-Keyphrase Extraction Based on Definition-Use Chain", "doi": null, "abstractUrl": "/proceedings-article/wi-iat/2015/9618c199/12OmNvqW6XM", "parentPublication": { "id": "proceedings/wi-iat/2015/9618/3", "title": "2015 IEEE / WIC / ACM International Conference on Web Intelligence and Intelligent Agent Technology (WI-IAT)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icicta/2008/3357/1/3357b061", "title": "Improved Automatic Keyphrase Extraction by Using Semantic Information", "doi": null, "abstractUrl": "/proceedings-article/icicta/2008/3357b061/12OmNxwWozl", "parentPublication": { "id": "proceedings/icicta/2008/3357/1", "title": "Intelligent Computation Technology and Automation, International Conference on", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/wi-iat/2009/3801/1/3801a576", "title": "Automatic Keyphrase Extraction with a Refined Candidate Set", "doi": null, "abstractUrl": "/proceedings-article/wi-iat/2009/3801a576/12OmNyQYtvB", "parentPublication": { "id": 
"proceedings/wi-iat/2009/3801/1", "title": "2009 IEEE/WIC/ACM International Joint Conference on Web Intelligence and Intelligent Agent Technology (WI-IAT)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tk/2021/04/08844794", "title": "Duration Modeling with Semi-Markov Conditional Random Fields for Keyphrase Extraction", "doi": null, "abstractUrl": "/journal/tk/2021/04/08844794/1ds7hxNZurS", "parentPublication": { "id": "trans/tk", "title": "IEEE Transactions on Knowledge & Data Engineering", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/big-data/2019/0858/0/09006409", "title": "From Text Classification to Keyphrase Extraction for Short Text", "doi": null, "abstractUrl": "/proceedings-article/big-data/2019/09006409/1hJsw76ZZa8", "parentPublication": { "id": "proceedings/big-data/2019/0858/0", "title": "2019 IEEE International Conference on Big Data (Big Data)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ickg/2020/8156/0/09194571", "title": "Graph-based Keyphrase Extraction Using Word and Document Em beddings", "doi": null, "abstractUrl": "/proceedings-article/ickg/2020/09194571/1n2nhMFpPb2", "parentPublication": { "id": "proceedings/ickg/2020/8156/0", "title": "2020 IEEE International Conference on Knowledge Graph (ICKG)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "12OmNxETa7P", "title": "Web Intelligence and Intelligent Agent Technology, IEEE/WIC/ACM International Conference on", "acronym": "wi-iat", "groupId": "1001411", "volume": "1", "displayVolume": "1", "year": "2008", "__typename": "ProceedingType" }, "article": { "id": "12OmNCcbEgB", "doi": "10.1109/WIIAT.2008.225", "title": "An Automatic Online News Topic Keyphrase Extraction System", "normalizedTitle": "An Automatic Online News Topic Keyphrase Extraction System", "abstract": "News Topics are related to a set of keywords or keyphrases. Topic keyphrases briefly describe the key content of topics and help users decide whether to do further reading about them. Moreover, keyphrases of a news topic can be considered as a cluster of related terms, which provides term relationship information that can be integrated into information retrieval models. In this paper, an automatic online news topic keyphrase extraction system is proposed. News stories are organized into topics. Keyword candidates are firstly extracted from single news stories and filtered with topic information. Then a phrase identification process combines keywords into phrases using position information. Finally, the phrases are ranked and top ones are selected as topic keyphrases. Experiments performed on practical Web datasets show that the proposed system works effectively, with a performance of precision=70.61% and recall=67.94%.", "abstracts": [ { "abstractType": "Regular", "content": "News Topics are related to a set of keywords or keyphrases. Topic keyphrases briefly describe the key content of topics and help users decide whether to do further reading about them. Moreover, keyphrases of a news topic can be considered as a cluster of related terms, which provides term relationship information that can be integrated into information retrieval models. In this paper, an automatic online news topic keyphrase extraction system is proposed. News stories are organized into topics. 
Keyword candidates are firstly extracted from single news stories and filtered with topic information. Then a phrase identification process combines keywords into phrases using position information. Finally, the phrases are ranked and top ones are selected as topic keyphrases. Experiments performed on practical Web datasets show that the proposed system works effectively, with a performance of precision=70.61% and recall=67.94%.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "News Topics are related to a set of keywords or keyphrases. Topic keyphrases briefly describe the key content of topics and help users decide whether to do further reading about them. Moreover, keyphrases of a news topic can be considered as a cluster of related terms, which provides term relationship information that can be integrated into information retrieval models. In this paper, an automatic online news topic keyphrase extraction system is proposed. News stories are organized into topics. Keyword candidates are firstly extracted from single news stories and filtered with topic information. Then a phrase identification process combines keywords into phrases using position information. Finally, the phrases are ranked and top ones are selected as topic keyphrases. 
Experiments performed on practical Web datasets show that the proposed system works effectively, with a performance of precision=70.61% and recall=67.94%.", "fno": "3496a214", "keywords": [], "authors": [ { "affiliation": null, "fullName": "Canhui Wang", "givenName": "Canhui", "surname": "Wang", "__typename": "ArticleAuthorType" }, { "affiliation": null, "fullName": "Min Zhang", "givenName": "Min", "surname": "Zhang", "__typename": "ArticleAuthorType" }, { "affiliation": null, "fullName": "Liyun Ru", "givenName": "Liyun", "surname": "Ru", "__typename": "ArticleAuthorType" }, { "affiliation": null, "fullName": "Shaoping Ma", "givenName": "Shaoping", "surname": "Ma", "__typename": "ArticleAuthorType" } ], "idPrefix": "wi-iat", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "2008-12-01T00:00:00", "pubType": "proceedings", "pages": "214-219", "year": "2008", "issn": null, "isbn": "978-0-7695-3496-1", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "3496a208", "articleId": "12OmNxzuMHs", "__typename": "AdjacentArticleType" }, "next": { "fno": "3496a220", "articleId": "12OmNCctfmT", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/jcdl/2006/354/0/04119126", "title": "Keyphrase extraction-based query expansion in digital libraries", "doi": null, "abstractUrl": "/proceedings-article/jcdl/2006/04119126/12OmNAle6iq", "parentPublication": { "id": "proceedings/jcdl/2006/354/0", "title": "2006 IEEE/ACM 6th Joint Conference on Digital Libraries", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/grc/2011/0372/0/06122641", "title": "Improving keyphrase extraction by using document topic information", "doi": null, "abstractUrl": "/proceedings-article/grc/2011/06122641/12OmNApcuv4", "parentPublication": { "id": 
"proceedings/grc/2011/0372/0", "title": "2011 IEEE International Conference on Granular Computing", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icm/2011/4522/1/4522a367", "title": "Topic Feature Extraction of Chinese News Title", "doi": null, "abstractUrl": "/proceedings-article/icm/2011/4522a367/12OmNqFrGzR", "parentPublication": { "id": "icm/2011/4522/1", "title": "Information Technology, Computer Engineering and Management Sciences, International Conference of", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/coginf/2010/8042/0/05599721", "title": "Keyphrase extraction based on semantic relatedness", "doi": null, "abstractUrl": "/proceedings-article/coginf/2010/05599721/12OmNrJRPpn", "parentPublication": { "id": "proceedings/coginf/2010/8042/0", "title": "2010 9th IEEE International Conference on Cognitive Informatics (ICCI)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/hicss/2004/2056/4/205640104c", "title": "Automating Keyphrase Extraction with Multi-Objective Genetic Algorithms", "doi": null, "abstractUrl": "/proceedings-article/hicss/2004/205640104c/12OmNrK9q21", "parentPublication": { "id": "proceedings/hicss/2004/2056/4", "title": "37th Annual Hawaii International Conference on System Sciences, 2004. 
Proceedings of the", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/trustcom/2011/2135/0/06121007", "title": "The Hot Keyphrase Extraction Based on TF*PDF", "doi": null, "abstractUrl": "/proceedings-article/trustcom/2011/06121007/12OmNvlPkAH", "parentPublication": { "id": "proceedings/trustcom/2011/2135/0", "title": "2011IEEE 10th International Conference on Trust, Security and Privacy in Computing and Communications", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/wi-iat/2015/9618/3/9618c199", "title": "Subject-Keyphrase Extraction Based on Definition-Use Chain", "doi": null, "abstractUrl": "/proceedings-article/wi-iat/2015/9618c199/12OmNvqW6XM", "parentPublication": { "id": "proceedings/wi-iat/2015/9618/3", "title": "2015 IEEE / WIC / ACM International Conference on Web Intelligence and Intelligent Agent Technology (WI-IAT)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/big-data/2014/5666/0/07004362", "title": "Pairwise Topic Model via relation extraction", "doi": null, "abstractUrl": "/proceedings-article/big-data/2014/07004362/12OmNwDACcX", "parentPublication": { "id": "proceedings/big-data/2014/5666/0", "title": "2014 IEEE International Conference on Big Data (Big Data)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icicta/2008/3357/1/3357b061", "title": "Improved Automatic Keyphrase Extraction by Using Semantic Information", "doi": null, "abstractUrl": "/proceedings-article/icicta/2008/3357b061/12OmNxwWozl", "parentPublication": { "id": "proceedings/icicta/2008/3357/1", "title": "Intelligent Computation Technology and Automation, International Conference on", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icdm/2021/2398/0/239800b529", "title": "Topic-Attentive Encoder-Decoder with Pre-Trained 
Language Model for Keyphrase Generation", "doi": null, "abstractUrl": "/proceedings-article/icdm/2021/239800b529/1Aqxk8Dt7R6", "parentPublication": { "id": "proceedings/icdm/2021/2398/0", "title": "2021 IEEE International Conference on Data Mining (ICDM)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "12OmNwJPMXq", "title": "37th Annual Hawaii International Conference on System Sciences, 2004. Proceedings of the", "acronym": "hicss", "groupId": "1000730", "volume": "4", "displayVolume": "5", "year": "2004", "__typename": "ProceedingType" }, "article": { "id": "12OmNrK9q21", "doi": "10.1109/HICSS.2004.1265278", "title": "Automating Keyphrase Extraction with Multi-Objective Genetic Algorithms", "normalizedTitle": "Automating Keyphrase Extraction with Multi-Objective Genetic Algorithms", "abstract": "Keyphrases have been used extensively in IR systems to facilitate information exchange, organize information and assist information retrieval. Automation of keyphrase generation is essential for the timely creation of keyphrases for large repositories in new domains where previous thesauri do not exist or for metacollections in which keyphrases that are meaningful across disparate collections are needed. In this paper we propose an automated keyphrase extraction algorithm using a non-dominated sorting multi-objective genetic algorithm. The \"clumping\" property of keyphrases is used to judge the appropriateness of a phrase and is quantified by a condensation clustering measure proposed by Bookstein. The objective is to find the smallest phrase set that has the best precision, as measured by average condensation clustering. Keyphrases were retrieved from a collection of design conference papers and the results were presented to domain experts for evaluation. Ninety percent of the generated phrases were deemed appropriate for use in a thesaurus for engineering design.", "abstracts": [ { "abstractType": "Regular", "content": "Keyphrases have been used extensively in IR systems to facilitate information exchange, organize information and assist information retrieval. 
Automation of keyphrase generation is essential for the timely creation of keyphrases for large repositories in new domains where previous thesauri do not exist or for metacollections in which keyphrases that are meaningful across disparate collections are needed. In this paper we propose an automated keyphrase extraction algorithm using a non-dominated sorting multi-objective genetic algorithm. The \"clumping\" property of keyphrases is used to judge the appropriateness of a phrase and is quantified by a condensation clustering measure proposed by Bookstein. The objective is to find the smallest phrase set that has the best precision, as measured by average condensation clustering. Keyphrases were retrieved from a collection of design conference papers and the results were presented to domain experts for evaluation. Ninety percent of the generated phrases were deemed appropriate for use in a thesaurus for engineering design.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "Keyphrases have been used extensively in IR systems to facilitate information exchange, organize information and assist information retrieval. Automation of keyphrase generation is essential for the timely creation of keyphrases for large repositories in new domains where previous thesauri do not exist or for metacollections in which keyphrases that are meaningful across disparate collections are needed. In this paper we propose an automated keyphrase extraction algorithm using a non-dominated sorting multi-objective genetic algorithm. The \"clumping\" property of keyphrases is used to judge the appropriateness of a phrase and is quantified by a condensation clustering measure proposed by Bookstein. The objective is to find the smallest phrase set that has the best precision, as measured by average condensation clustering. Keyphrases were retrieved from a collection of design conference papers and the results were presented to domain experts for evaluation. 
Ninety percent of the generated phrases were deemed appropriate for use in a thesaurus for engineering design.", "fno": "205640104c", "keywords": [], "authors": [ { "affiliation": "University of California at Berkeley", "fullName": "Jia-Long Wu", "givenName": "Jia-Long", "surname": "Wu", "__typename": "ArticleAuthorType" }, { "affiliation": "University of California at Berkeley", "fullName": "Alice M. Agogino", "givenName": "Alice M.", "surname": "Agogino", "__typename": "ArticleAuthorType" } ], "idPrefix": "hicss", "isOpenAccess": true, "showRecommendedArticles": true, "showBuyMe": false, "hasPdf": true, "pubDate": "2004-01-01T00:00:00", "pubType": "proceedings", "pages": "40104c", "year": "2004", "issn": "1530-1605", "isbn": "0-7695-2056-1", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "01265076", "articleId": "12OmNyGbIb7", "__typename": "AdjacentArticleType" }, "next": { "fno": "01265077", "articleId": "12OmNwwuE1v", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/grc/2011/0372/0/06122641", "title": "Improving keyphrase extraction by using document topic information", "doi": null, "abstractUrl": "/proceedings-article/grc/2011/06122641/12OmNApcuv4", "parentPublication": { "id": "proceedings/grc/2011/0372/0", "title": "2011 IEEE International Conference on Granular Computing", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/wi-iat/2008/3496/1/3496a214", "title": "An Automatic Online News Topic Keyphrase Extraction System", "doi": null, "abstractUrl": "/proceedings-article/wi-iat/2008/3496a214/12OmNCcbEgB", "parentPublication": { "id": "proceedings/wi-iat/2008/3496/1", "title": "Web Intelligence and Intelligent Agent Technology, IEEE/WIC/ACM International Conference on", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": 
"proceedings/acomp/2016/6143/0/07809554", "title": "Software Keyphrase Extraction with Domain-Specific Features", "doi": null, "abstractUrl": "/proceedings-article/acomp/2016/07809554/12OmNqyUUxn", "parentPublication": { "id": "proceedings/acomp/2016/6143/0", "title": "2016 International Conference on Advanced Computing and Applications (ACOMP)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/trustcom/2011/2135/0/06121007", "title": "The Hot Keyphrase Extraction Based on TF*PDF", "doi": null, "abstractUrl": "/proceedings-article/trustcom/2011/06121007/12OmNvlPkAH", "parentPublication": { "id": "proceedings/trustcom/2011/2135/0", "title": "2011IEEE 10th International Conference on Trust, Security and Privacy in Computing and Communications", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icicta/2008/3357/1/3357b061", "title": "Improved Automatic Keyphrase Extraction by Using Semantic Information", "doi": null, "abstractUrl": "/proceedings-article/icicta/2008/3357b061/12OmNxwWozl", "parentPublication": { "id": "proceedings/icicta/2008/3357/1", "title": "Intelligent Computation Technology and Automation, International Conference on", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iita/2008/3497/2/3497b042", "title": "Improving Keyphrase Extraction Using Wikipedia Semantics", "doi": null, "abstractUrl": "/proceedings-article/iita/2008/3497b042/12OmNy2ah14", "parentPublication": { "id": "iita/2008/3497/2", "title": "2008 Second International Symposium on Intelligent Information Technology Application", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icdm/2014/4302/0/4302b055", "title": "Document-Specific Keyphrase Extraction Using Sequential Patterns with Wildcards", "doi": null, "abstractUrl": "/proceedings-article/icdm/2014/4302b055/12OmNzd7beB", 
"parentPublication": { "id": "proceedings/icdm/2014/4302/0", "title": "2014 IEEE International Conference on Data Mining (ICDM)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tk/2021/04/08844794", "title": "Duration Modeling with Semi-Markov Conditional Random Fields for Keyphrase Extraction", "doi": null, "abstractUrl": "/journal/tk/2021/04/08844794/1ds7hxNZurS", "parentPublication": { "id": "trans/tk", "title": "IEEE Transactions on Knowledge & Data Engineering", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/big-data/2019/0858/0/09006409", "title": "From Text Classification to Keyphrase Extraction for Short Text", "doi": null, "abstractUrl": "/proceedings-article/big-data/2019/09006409/1hJsw76ZZa8", "parentPublication": { "id": "proceedings/big-data/2019/0858/0", "title": "2019 IEEE International Conference on Big Data (Big Data)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ickg/2020/8156/0/09194571", "title": "Graph-based Keyphrase Extraction Using Word and Document Em beddings", "doi": null, "abstractUrl": "/proceedings-article/ickg/2020/09194571/1n2nhMFpPb2", "parentPublication": { "id": "proceedings/ickg/2020/8156/0", "title": "2020 IEEE International Conference on Knowledge Graph (ICKG)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "12OmNzn38Kh", "title": "Intelligent Computation Technology and Automation, International Conference on", "acronym": "icicta", "groupId": "1002487", "volume": "1", "displayVolume": "1", "year": "2008", "__typename": "ProceedingType" }, "article": { "id": "12OmNxwWozl", "doi": "10.1109/ICICTA.2008.180", "title": "Improved Automatic Keyphrase Extraction by Using Semantic Information", "normalizedTitle": "Improved Automatic Keyphrase Extraction by Using Semantic Information", "abstract": "Keyphrases provide semantic metadata producing an overview of the content of a document, they are used in many text-mining applications. This paper proposes a new method that improves automatic keyphrase extraction by using semantic information of candidate keyphrases. Our method is realized in two stages. In selecting candidates stage, after extraction of all phrases from document, a word sense disambiguation method is used to get senses of phrases, then term conflation is performed by using case folding, stemming, and semantic relatedness between candidates. In filtering stage, four features are used to compute for each candidate: the TFxIDF measure describing the specificity of a phrase, first occurrence of a phrase in the document, length of a phrase, and coherence score which measure the semantic relatedness between the phrase and other candidates. A Naive Bayes scheme builds a prediction model training data with known keyphrases, and then uses the model to calculate the overall probability for each candidate. We evaluate semantically improved method against the well known Kea system by using a more effective semantically enhanced evaluation method. The inter-domain experiment shows that quality of keyphrases extraction can be improved significantly when semantic information is exploited. 
The intra-domain experiment shows our method is competitive with Kea++ algorithm, and not domain-specific.", "abstracts": [ { "abstractType": "Regular", "content": "Keyphrases provide semantic metadata producing an overview of the content of a document, they are used in many text-mining applications. This paper proposes a new method that improves automatic keyphrase extraction by using semantic information of candidate keyphrases. Our method is realized in two stages. In selecting candidates stage, after extraction of all phrases from document, a word sense disambiguation method is used to get senses of phrases, then term conflation is performed by using case folding, stemming, and semantic relatedness between candidates. In filtering stage, four features are used to compute for each candidate: the TFxIDF measure describing the specificity of a phrase, first occurrence of a phrase in the document, length of a phrase, and coherence score which measure the semantic relatedness between the phrase and other candidates. A Naive Bayes scheme builds a prediction model training data with known keyphrases, and then uses the model to calculate the overall probability for each candidate. We evaluate semantically improved method against the well known Kea system by using a more effective semantically enhanced evaluation method. The inter-domain experiment shows that quality of keyphrases extraction can be improved significantly when semantic information is exploited. The intra-domain experiment shows our method is competitive with Kea++ algorithm, and not domain-specific.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "Keyphrases provide semantic metadata producing an overview of the content of a document, they are used in many text-mining applications. This paper proposes a new method that improves automatic keyphrase extraction by using semantic information of candidate keyphrases. Our method is realized in two stages. 
In selecting candidates stage, after extraction of all phrases from document, a word sense disambiguation method is used to get senses of phrases, then term conflation is performed by using case folding, stemming, and semantic relatedness between candidates. In filtering stage, four features are used to compute for each candidate: the TFxIDF measure describing the specificity of a phrase, first occurrence of a phrase in the document, length of a phrase, and coherence score which measure the semantic relatedness between the phrase and other candidates. A Naive Bayes scheme builds a prediction model training data with known keyphrases, and then uses the model to calculate the overall probability for each candidate. We evaluate semantically improved method against the well known Kea system by using a more effective semantically enhanced evaluation method. The inter-domain experiment shows that quality of keyphrases extraction can be improved significantly when semantic information is exploited. 
The intra-domain experiment shows our method is competitive with Kea++ algorithm, and not domain-specific.", "fno": "3357b061", "keywords": [ "Keyphrase Extraction", "Word Sense Disambiguation", "Semantic Information" ], "authors": [ { "affiliation": null, "fullName": "XiaoLing Wang", "givenName": "XiaoLing", "surname": "Wang", "__typename": "ArticleAuthorType" }, { "affiliation": null, "fullName": "DeJun Mu", "givenName": "DeJun", "surname": "Mu", "__typename": "ArticleAuthorType" }, { "affiliation": null, "fullName": "Jun Fang", "givenName": "Jun", "surname": "Fang", "__typename": "ArticleAuthorType" } ], "idPrefix": "icicta", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "2008-10-01T00:00:00", "pubType": "proceedings", "pages": "1061-1065", "year": "2008", "issn": null, "isbn": "978-0-7695-3357-5", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "3357b130", "articleId": "12OmNAPBbhB", "__typename": "AdjacentArticleType" }, "next": { "fno": "3357b135", "articleId": "12OmNqG0SQ5", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/jcdl/2006/354/0/04119126", "title": "Keyphrase extraction-based query expansion in digital libraries", "doi": null, "abstractUrl": "/proceedings-article/jcdl/2006/04119126/12OmNAle6iq", "parentPublication": { "id": "proceedings/jcdl/2006/354/0", "title": "2006 IEEE/ACM 6th Joint Conference on Digital Libraries", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/eait/2011/4329/0/4329a125", "title": "Automatic Keyphrase Extraction from Bengali Documents: A Preliminary Study", "doi": null, "abstractUrl": "/proceedings-article/eait/2011/4329a125/12OmNBd9T35", "parentPublication": { "id": "proceedings/eait/2011/4329/0", "title": "Emerging Applications of Information Technology, 
International Conference on", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/wi-iat/2008/3496/1/3496a214", "title": "An Automatic Online News Topic Keyphrase Extraction System", "doi": null, "abstractUrl": "/proceedings-article/wi-iat/2008/3496a214/12OmNCcbEgB", "parentPublication": { "id": "proceedings/wi-iat/2008/3496/1", "title": "Web Intelligence and Intelligent Agent Technology, IEEE/WIC/ACM International Conference on", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/coginf/2010/8042/0/05599721", "title": "Keyphrase extraction based on semantic relatedness", "doi": null, "abstractUrl": "/proceedings-article/coginf/2010/05599721/12OmNrJRPpn", "parentPublication": { "id": "proceedings/coginf/2010/8042/0", "title": "2010 9th IEEE International Conference on Cognitive Informatics (ICCI)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/fit/2012/4946/0/4927a030", "title": "KeaKAT: An Online Automatic Keyphrase Assignment Tool", "doi": null, "abstractUrl": "/proceedings-article/fit/2012/4927a030/12OmNrkBwkT", "parentPublication": { "id": "proceedings/fit/2012/4946/0", "title": "2012 10th International Conference on Frontiers of Information Technology (FIT 2012)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icsc/2010/4154/0/4154a470", "title": "Calculating Word Sense Probability Distributions for Semantic Web Applications", "doi": null, "abstractUrl": "/proceedings-article/icsc/2010/4154a470/12OmNvT2oPw", "parentPublication": { "id": "proceedings/icsc/2010/4154/0", "title": "2010 IEEE Fourth International Conference on Semantic Computing", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/wisa/2014/5726/0/07058022", "title": "Adding Lexical Chain to Keyphrase Extraction", "doi": null, "abstractUrl": 
"/proceedings-article/wisa/2014/07058022/12OmNwDACfx", "parentPublication": { "id": "proceedings/wisa/2014/5726/0", "title": "2014 11th Web Information System and Application Conference (WISA)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icsc/2008/3279/0/3279a182", "title": "Improving Verb Sense Disambiguation with Automatically Retrieved Semantic Knowledge", "doi": null, "abstractUrl": "/proceedings-article/icsc/2008/3279a182/12OmNwFRpaL", "parentPublication": { "id": "proceedings/icsc/2008/3279/0", "title": "2008 IEEE International Conference on Semantic Computing", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/wi-iatw/2007/3028/0/3028a056", "title": "Web Document Clustering by Using Automatic Keyphrase Extraction", "doi": null, "abstractUrl": "/proceedings-article/wi-iatw/2007/3028a056/12OmNwM6A2G", "parentPublication": { "id": "proceedings/wi-iatw/2007/3028/0", "title": "Web Intelligence and Intelligent Agent Technology, International Conference on", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iita/2008/3497/2/3497b042", "title": "Improving Keyphrase Extraction Using Wikipedia Semantics", "doi": null, "abstractUrl": "/proceedings-article/iita/2008/3497b042/12OmNy2ah14", "parentPublication": { "id": "iita/2008/3497/2", "title": "2008 Second International Symposium on Intelligent Information Technology Application", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "1AqwYO1eX72", "title": "2021 IEEE International Conference on Data Mining (ICDM)", "acronym": "icdm", "groupId": "1000179", "volume": "0", "displayVolume": "0", "year": "2021", "__typename": "ProceedingType" }, "article": { "id": "1Aqxk8Dt7R6", "doi": "10.1109/ICDM51629.2021.00200", "title": "Topic-Attentive Encoder-Decoder with Pre-Trained Language Model for Keyphrase Generation", "normalizedTitle": "Topic-Attentive Encoder-Decoder with Pre-Trained Language Model for Keyphrase Generation", "abstract": "Keyphrase annotation task aims to retrieve the most representative phrases that express the essential gist of documents. In reality, some phrases that best summarize documents are often absent from the original text, which motivates researchers to develop generation methods, being able to create phrases. Existing generation approaches usually adopt the encoder-decoder framework for sequence generation. However, the widely-used recurrent neural network might fail to capture long-range dependencies among items. In addition, intuitively, as keyphrases are likely to correlate with topical words, some methods propose to introduce topic models into keyphrase generation. But they hardly leverage the global information of topics. In view of this, we employ the Transformer architecture with the pre-trained BERT model as the encoder-decoder framework for keyphrase generation. BERT and Transformer are demonstrated to be effective for many text mining tasks. But they have not been extensively studied for keyphrase generation. Furthermore, we propose a topic attention mechanism to utilize the corpus-level topic information globally for keyphrase generation. Specifically, we propose BertTKG, a keyphrase generation method that uses a contextualized neural topic model for corpus-level topic representation learning, and then enhances the document representations learned by pre-trained language model for better keyphrase decoding. 
Extensive experiments conducted on three public datasets manifest the superiority of BertTKG.", "abstracts": [ { "abstractType": "Regular", "content": "Keyphrase annotation task aims to retrieve the most representative phrases that express the essential gist of documents. In reality, some phrases that best summarize documents are often absent from the original text, which motivates researchers to develop generation methods, being able to create phrases. Existing generation approaches usually adopt the encoder-decoder framework for sequence generation. However, the widely-used recurrent neural network might fail to capture long-range dependencies among items. In addition, intuitively, as keyphrases are likely to correlate with topical words, some methods propose to introduce topic models into keyphrase generation. But they hardly leverage the global information of topics. In view of this, we employ the Transformer architecture with the pre-trained BERT model as the encoder-decoder framework for keyphrase generation. BERT and Transformer are demonstrated to be effective for many text mining tasks. But they have not been extensively studied for keyphrase generation. Furthermore, we propose a topic attention mechanism to utilize the corpus-level topic information globally for keyphrase generation. Specifically, we propose BertTKG, a keyphrase generation method that uses a contextualized neural topic model for corpus-level topic representation learning, and then enhances the document representations learned by pre-trained language model for better keyphrase decoding. Extensive experiments conducted on three public datasets manifest the superiority of BertTKG.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "Keyphrase annotation task aims to retrieve the most representative phrases that express the essential gist of documents. 
In reality, some phrases that best summarize documents are often absent from the original text, which motivates researchers to develop generation methods, being able to create phrases. Existing generation approaches usually adopt the encoder-decoder framework for sequence generation. However, the widely-used recurrent neural network might fail to capture long-range dependencies among items. In addition, intuitively, as keyphrases are likely to correlate with topical words, some methods propose to introduce topic models into keyphrase generation. But they hardly leverage the global information of topics. In view of this, we employ the Transformer architecture with the pre-trained BERT model as the encoder-decoder framework for keyphrase generation. BERT and Transformer are demonstrated to be effective for many text mining tasks. But they have not been extensively studied for keyphrase generation. Furthermore, we propose a topic attention mechanism to utilize the corpus-level topic information globally for keyphrase generation. Specifically, we propose BertTKG, a keyphrase generation method that uses a contextualized neural topic model for corpus-level topic representation learning, and then enhances the document representations learned by pre-trained language model for better keyphrase decoding. 
Extensive experiments conducted on three public datasets manifest the superiority of BertTKG.", "fno": "239800b529", "keywords": [ "Data Mining", "Information Retrieval", "Learning Artificial Intelligence", "Natural Language Processing", "Recurrent Neural Nets", "Text Analysis", "Contextualized Neural Topic Model", "Corpus Level Topic Information", "Corpus Level Topic Representation Learning", "Encoder Decoder Framework", "Generation Approaches", "Generation Methods", "Keyphrase Annotation Task", "Keyphrase Decoding", "Keyphrase Generation Method", "Pre Trained BERT Model", "Pre Trained Language Model", "Sequence Generation", "Topic Attention Mechanism", "Topic Attentive Encoder Decoder", "Text Mining", "Representation Learning", "Measurement", "Recurrent Neural Networks", "Bit Error Rate", "Transformers", "Data Models", "Keyphrase Generation", "Pre Trained BERT", "Transformer", "Topic Attention", "Neural Topic Model" ], "authors": [ { "affiliation": "Nanjing University of Science and Technology,Nanjing,China,210094", "fullName": "Cangqi Zhou", "givenName": "Cangqi", "surname": "Zhou", "__typename": "ArticleAuthorType" }, { "affiliation": "Chinese Academy of Sciences,National Science Library,Beijing,China,100190", "fullName": "Jinling Shang", "givenName": "Jinling", "surname": "Shang", "__typename": "ArticleAuthorType" }, { "affiliation": "Nanjing University of Science and Technology,Nanjing,China,210094", "fullName": "Jing Zhang", "givenName": "Jing", "surname": "Zhang", "__typename": "ArticleAuthorType" }, { "affiliation": "Nanjing University of Science and Technology,Nanjing,China,210094", "fullName": "Qianmu Li", "givenName": "Qianmu", "surname": "Li", "__typename": "ArticleAuthorType" }, { "affiliation": "SenseDeal Intelligent Technology Co., Ltd.,Beijing,China,100084", "fullName": "Dianming Hu", "givenName": "Dianming", "surname": "Hu", "__typename": "ArticleAuthorType" } ], "idPrefix": "icdm", "isOpenAccess": false, "showRecommendedArticles": true, 
"showBuyMe": true, "hasPdf": true, "pubDate": "2021-12-01T00:00:00", "pubType": "proceedings", "pages": "1529-1534", "year": "2021", "issn": null, "isbn": "978-1-6654-2398-4", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "239800b523", "articleId": "1Aqxqlxxvl6", "__typename": "AdjacentArticleType" }, "next": { "fno": "239800b535", "articleId": "1Aqx2WjjOAE", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/wi-iat/2008/3496/1/3496a214", "title": "An Automatic Online News Topic Keyphrase Extraction System", "doi": null, "abstractUrl": "/proceedings-article/wi-iat/2008/3496a214/12OmNCcbEgB", "parentPublication": { "id": "proceedings/wi-iat/2008/3496/1", "title": "Web Intelligence and Intelligent Agent Technology, IEEE/WIC/ACM International Conference on", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/hicss/2004/2056/4/205640104c", "title": "Automating Keyphrase Extraction with Multi-Objective Genetic Algorithms", "doi": null, "abstractUrl": "/proceedings-article/hicss/2004/205640104c/12OmNrK9q21", "parentPublication": { "id": "proceedings/hicss/2004/2056/4", "title": "37th Annual Hawaii International Conference on System Sciences, 2004. 
Proceedings of the", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/trustcom/2011/2135/0/06121007", "title": "The Hot Keyphrase Extraction Based on TF*PDF", "doi": null, "abstractUrl": "/proceedings-article/trustcom/2011/06121007/12OmNvlPkAH", "parentPublication": { "id": "proceedings/trustcom/2011/2135/0", "title": "2011IEEE 10th International Conference on Trust, Security and Privacy in Computing and Communications", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/wi-iat/2009/3801/1/3801a576", "title": "Automatic Keyphrase Extraction with a Refined Candidate Set", "doi": null, "abstractUrl": "/proceedings-article/wi-iat/2009/3801a576/12OmNyQYtvB", "parentPublication": { "id": "proceedings/wi-iat/2009/3801/1", "title": "2009 IEEE/WIC/ACM International Joint Conference on Web Intelligence and Intelligent Agent Technology (WI-IAT)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/scset/2022/7876/0/787600a267", "title": "Unsupervised Keyphrase Extraction from Single Document Based on Bert", "doi": null, "abstractUrl": "/proceedings-article/scset/2022/787600a267/1ANLTCGHRL2", "parentPublication": { "id": "proceedings/scset/2022/7876/0", "title": "2022 International Seminar on Computer Science and Engineering Technology (SCSET)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ase/2021/0337/0/033700b332", "title": "What do pre-trained code models know about code?", "doi": null, "abstractUrl": "/proceedings-article/ase/2021/033700b332/1AjSXUJQ7oA", "parentPublication": { "id": "proceedings/ase/2021/0337/0", "title": "2021 36th IEEE/ACM International Conference on Automated Software Engineering (ASE)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ictai/2022/9744/0/974400a297", "title": "KPE-GCN: A 
Keyphrase-Enhanced Graph Convolutional Network for Imbalanced Text Classification", "doi": null, "abstractUrl": "/proceedings-article/ictai/2022/974400a297/1MrG3opNQ1G", "parentPublication": { "id": "proceedings/ictai/2022/9744/0", "title": "2022 IEEE 34th International Conference on Tools with Artificial Intelligence (ICTAI)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/big-data/2019/0858/0/09006409", "title": "From Text Classification to Keyphrase Extraction for Short Text", "doi": null, "abstractUrl": "/proceedings-article/big-data/2019/09006409/1hJsw76ZZa8", "parentPublication": { "id": "proceedings/big-data/2019/0858/0", "title": "2019 IEEE International Conference on Big Data (Big Data)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tq/2022/01/09064919", "title": "DeepMnemonic: Password Mnemonic Generation via Deep Attentive Encoder-Decoder Model", "doi": null, "abstractUrl": "/journal/tq/2022/01/09064919/1iZGCkt7f4Q", "parentPublication": { "id": "trans/tq", "title": "IEEE Transactions on Dependable and Secure Computing", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/caibda/2021/2490/0/249000a137", "title": "News text classification based on Bidirectional Encoder Representation from Transformers", "doi": null, "abstractUrl": "/proceedings-article/caibda/2021/249000a137/1xgBpSxuhri", "parentPublication": { "id": "proceedings/caibda/2021/2490/0", "title": "2021 International Conference on Artificial Intelligence, Big Data and Algorithms (CAIBDA)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "1Gvd72eGkh2", "title": "2022 IEEE 5th International Conference on Multimedia Information Processing and Retrieval (MIPR)", "acronym": "mipr", "groupId": "1825825", "volume": "0", "displayVolume": "0", "year": "2022", "__typename": "ProceedingType" }, "article": { "id": "1GvdeT51bXy", "doi": "10.1109/MIPR54900.2022.00021", "title": "Information-Seeking in Localization and Mission Planning of Multi-Agent Systems", "normalizedTitle": "Information-Seeking in Localization and Mission Planning of Multi-Agent Systems", "abstract": "Real-time and accurate position estimation is critical for various multi-robot applications and serves as a prerequisite for location-based multi-sensor data analysis. However, it is often impeded by energy, sensing, and processing limitations. In this work, we study the problem of information-seeking in localization and navigation in multi-agent systems, which aims to navigate mobile agents while reducing position errors. We formalize information-seeking as reducing spatial uncertainty and introduce an efficient motion controller based on artificial potential fields superimposing attractive, repulsive, and information-seeking forces. We evaluate the effect of information-seeking on localization and mission planning in a simulation study with non-collaborative and collaborative localization approaches.", "abstracts": [ { "abstractType": "Regular", "content": "Real-time and accurate position estimation is critical for various multi-robot applications and serves as a prerequisite for location-based multi-sensor data analysis. However, it is often impeded by energy, sensing, and processing limitations. In this work, we study the problem of information-seeking in localization and navigation in multi-agent systems, which aims to navigate mobile agents while reducing position errors. 
We formalize information-seeking as reducing spatial uncertainty and introduce an efficient motion controller based on artificial potential fields superimposing attractive, repulsive, and information-seeking forces. We evaluate the effect of information-seeking on localization and mission planning in a simulation study with non-collaborative and collaborative localization approaches.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "Real-time and accurate position estimation is critical for various multi-robot applications and serves as a prerequisite for location-based multi-sensor data analysis. However, it is often impeded by energy, sensing, and processing limitations. In this work, we study the problem of information-seeking in localization and navigation in multi-agent systems, which aims to navigate mobile agents while reducing position errors. We formalize information-seeking as reducing spatial uncertainty and introduce an efficient motion controller based on artificial potential fields superimposing attractive, repulsive, and information-seeking forces. 
We evaluate the effect of information-seeking on localization and mission planning in a simulation study with non-collaborative and collaborative localization approaches.", "fno": "954800a077", "keywords": [ "Data Analysis", "Mobile Robots", "Motion Control", "Multi Robot Systems", "Navigation", "Path Planning", "Position Control", "Sensor Fusion", "Information Seeking", "Mission Planning", "Multiagent Systems", "Accurate Position Estimation", "Multirobot Applications", "Location Based Multisensor Data Analysis", "Mobile Agents", "Motion Controller", "Location Awareness", "Uncertainty", "Navigation", "Multimedia Systems", "Mobile Agents", "Information Processing", "Real Time Systems", "Multi Robot System", "Cramér–Rao Bound CRB", "Fisher Information", "Spatial Uncertainty", "Artificial Potential Fields" ], "authors": [ { "affiliation": "Institute of Networked and Embedded Systems, University of Klagenfurt,Austria", "fullName": "Kyriakos Lite", "givenName": "Kyriakos", "surname": "Lite", "__typename": "ArticleAuthorType" }, { "affiliation": "Institute of Networked and Embedded Systems, University of Klagenfurt,Austria", "fullName": "Bernhard Rinner", "givenName": "Bernhard", "surname": "Rinner", "__typename": "ArticleAuthorType" } ], "idPrefix": "mipr", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "2022-08-01T00:00:00", "pubType": "proceedings", "pages": "77-83", "year": "2022", "issn": null, "isbn": "978-1-6654-9548-6", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "954800a071", "articleId": "1Gvd9IJgUve", "__typename": "AdjacentArticleType" }, "next": { "fno": "954800a084", "articleId": "1Gvdc0jZegw", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/cts/2016/2300/0/07870993", "title": "Supporting Collaborative Information Seeking in 
Online Community Engagement", "doi": null, "abstractUrl": "/proceedings-article/cts/2016/07870993/12OmNxFsmnV", "parentPublication": { "id": "proceedings/cts/2016/2300/0", "title": "2016 International Conference on Collaboration Technologies and Systems (CTS)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "mags/co/2009/03/mco2009030047", "title": "Collaborative Information Seeking", "doi": null, "abstractUrl": "/magazine/co/2009/03/mco2009030047/13rRUyekJ0S", "parentPublication": { "id": "mags/co", "title": "Computer", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvpr/2022/6946/0/694600r7316", "title": "Privacy Preserving Partial Localization", "doi": null, "abstractUrl": "/proceedings-article/cvpr/2022/694600r7316/1H0KLEFJZWo", "parentPublication": { "id": "proceedings/cvpr/2022/6946/0", "title": "2022 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/mlise/2022/9246/0/924600a160", "title": "Multi-robot Safe Navigation Under Localization Uncertainty", "doi": null, "abstractUrl": "/proceedings-article/mlise/2022/924600a160/1Ik91bTQ1jO", "parentPublication": { "id": "proceedings/mlise/2022/9246/0", "title": "2022 International Conference on Machine Learning and Intelligent Systems Engineering (MLISE)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/avss/2022/6382/0/09959648", "title": "A Data-Driven Approach for the Localization of Interacting Agents via a Multi-Modal Dynamic Bayesian Network Framework", "doi": null, "abstractUrl": "/proceedings-article/avss/2022/09959648/1Iz5hhW9lfi", "parentPublication": { "id": "proceedings/avss/2022/6382/0", "title": "2022 18th IEEE International Conference on Advanced Video and Signal Based Surveillance (AVSS)", "__typename": "ParentPublication" }, 
"__typename": "RecommendedArticleType" }, { "id": "proceedings/irc/2022/7260/0/726000a238", "title": "Coverage Path Planning and Precise Localization for Autonomous Lawn Mowers", "doi": null, "abstractUrl": "/proceedings-article/irc/2022/726000a238/1KckgueNUas", "parentPublication": { "id": "proceedings/irc/2022/7260/0", "title": "2022 Sixth IEEE International Conference on Robotic Computing (IRC)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/msn/2022/6457/0/645700a679", "title": "Multiuser Collaborative Localization based on Inter-user Distance Estimation using Wi-Fi RSS Fingerprints", "doi": null, "abstractUrl": "/proceedings-article/msn/2022/645700a679/1LUtW2yqkYo", "parentPublication": { "id": "proceedings/msn/2022/6457/0", "title": "2022 18th International Conference on Mobility, Sensing and Networking (MSN)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvpr/2019/3293/0/329300h375", "title": "Mapping, Localization and Path Planning for Image-Based Navigation Using Visual Features and Map", "doi": null, "abstractUrl": "/proceedings-article/cvpr/2019/329300h375/1gyrs6jzQKQ", "parentPublication": { "id": "proceedings/cvpr/2019/3293/0", "title": "2019 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/trustcom/2020/4380/0/438000c001", "title": "A Low-Cost Method for Accurate Localization of Traffic Participants", "doi": null, "abstractUrl": "/proceedings-article/trustcom/2020/438000c001/1r54dcOUVNu", "parentPublication": { "id": "proceedings/trustcom/2020/4380/0", "title": "2020 IEEE 19th International Conference on Trust, Security and Privacy in Computing and Communications (TrustCom)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/smartiot/2021/4511/0/451100a028", "title": "An 
Integrated Navigation and Localization System", "doi": null, "abstractUrl": "/proceedings-article/smartiot/2021/451100a028/1xDQfxiaNZ6", "parentPublication": { "id": "proceedings/smartiot/2021/4511/0", "title": "2021 IEEE International Conference on Smart Internet of Things (SmartIoT)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "12OmNBqMDAX", "title": "Proceedings., 33rd Annual Symposium on Foundations of Computer Science", "acronym": "focs", "groupId": "1000292", "volume": "0", "displayVolume": "0", "year": "1992", "__typename": "ProceedingType" }, "article": { "id": "12OmNxw5BlJ", "doi": "10.1109/SFCS.1992.267783", "title": "Maximizing non-linear concave functions in fixed dimension", "normalizedTitle": "Maximizing non-linear concave functions in fixed dimension", "abstract": "Consider a convex set P in R/sup d/ and a piece wise polynomial concave function F: P to R. Let A be an algorithm that given a point x in IR/sup d/ computes F(x) if x in P, or returns a concave polynomial p such that p(x) <0 but for any y in P, p(y) >or= 0. The author assumes that d is fixed and that all comparisons in A depend on the sign of polynomial functions of the input point. He shows that under these conditions, one can find max/sub P/ F in time which is polynomial in the number of arithmetic operations of A. Using this method he gives the first strongly polynomial algorithms for many nonlinear parametric problems in fixed dimension, such as the parametric max flow problem, the parametric minimum s-t distance, the parametric spanning tree problem and other problems. In addition he shows that in one dimension, the same result holds even if one only knows how to approximate the value of F. Specifically, if one can obtain an alpha -approximation for F(x) then one can alpha -approximate the value of maxF. He thus obtains the first polynomial approximation algorithms for many NP-hard problems such as the parametric Euclidean traveling salesman problem.", "abstracts": [ { "abstractType": "Regular", "content": "Consider a convex set P in R/sup d/ and a piece wise polynomial concave function F: P to R. Let A be an algorithm that given a point x in IR/sup d/ computes F(x) if x in P, or returns a concave polynomial p such that p(x) <0 but for any y in P, p(y) >or= 0. 
The author assumes that d is fixed and that all comparisons in A depend on the sign of polynomial functions of the input point. He shows that under these conditions, one can find max/sub P/ F in time which is polynomial in the number of arithmetic operations of A. Using this method he gives the first strongly polynomial algorithms for many nonlinear parametric problems in fixed dimension, such as the parametric max flow problem, the parametric minimum s-t distance, the parametric spanning tree problem and other problems. In addition he shows that in one dimension, the same result holds even if one only knows how to approximate the value of F. Specifically, if one can obtain an alpha -approximation for F(x) then one can alpha -approximate the value of maxF. He thus obtains the first polynomial approximation algorithms for many NP-hard problems such as the parametric Euclidean traveling salesman problem.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "Consider a convex set P in R/sup d/ and a piece wise polynomial concave function F: P to R. Let A be an algorithm that given a point x in IR/sup d/ computes F(x) if x in P, or returns a concave polynomial p such that p(x) <0 but for any y in P, p(y) >or= 0. The author assumes that d is fixed and that all comparisons in A depend on the sign of polynomial functions of the input point. He shows that under these conditions, one can find max/sub P/ F in time which is polynomial in the number of arithmetic operations of A. Using this method he gives the first strongly polynomial algorithms for many nonlinear parametric problems in fixed dimension, such as the parametric max flow problem, the parametric minimum s-t distance, the parametric spanning tree problem and other problems. In addition he shows that in one dimension, the same result holds even if one only knows how to approximate the value of F. Specifically, if one can obtain an alpha -approximation for F(x) then one can alpha -approximate the value of maxF. 
He thus obtains the first polynomial approximation algorithms for many NP-hard problems such as the parametric Euclidean traveling salesman problem.", "fno": "0267783", "keywords": [ "Parametric Euclidean Traveling Salesman Problem", "Fixed Dimension", "Convex Set", "Piece Wise Polynomial Concave Function", "Concave Polynomial", "Polynomial Functions", "Input Point", "Arithmetic Operations", "Nonlinear Parametric Problems", "Parametric Max Flow Problem", "Parametric Minimum S T Distance", "Parametric Spanning Tree", "NP Hard Problems" ], "authors": [ { "affiliation": "Lab. for Comput. Sci., MIT, Cambridge, MA, USA", "fullName": "S. Toledo", "givenName": "S.", "surname": "Toledo", "__typename": "ArticleAuthorType" } ], "idPrefix": "focs", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "1992-10-01T00:00:00", "pubType": "proceedings", "pages": "676-685", "year": "1992", "issn": null, "isbn": "0-8186-2900-2", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "0267782", "articleId": "12OmNym2c57", "__typename": "AdjacentArticleType" }, "next": { "fno": "0267784", "articleId": "12OmNyoSbdG", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/focs/1992/2900/0/0267823", "title": "Proof verification and hardness of approximation problems", "doi": null, "abstractUrl": "/proceedings-article/focs/1992/0267823/12OmNCykm8W", "parentPublication": { "id": "proceedings/focs/1992/2900/0", "title": "Proceedings., 33rd Annual Symposium on Foundations of Computer Science", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iccit/2009/3896/0/3896a999", "title": "A Relation between Self-Reciprocal Transformation and Normal Basis over Odd Characteristic Field", "doi": null, "abstractUrl": 
"/proceedings-article/iccit/2009/3896a999/12OmNrH1PBg", "parentPublication": { "id": "proceedings/iccit/2009/3896/0", "title": "Convergence Information Technology, International Conference on", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/focs/1992/2900/0/0267801", "title": "Reconstructing algebraic functions from mixed data", "doi": null, "abstractUrl": "/proceedings-article/focs/1992/0267801/12OmNs59JG9", "parentPublication": { "id": "proceedings/focs/1992/2900/0", "title": "Proceedings., 33rd Annual Symposium on Foundations of Computer Science", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vts/1995/7000/0/70000244", "title": "Improving the efficiency of error identification via signature analysis", "doi": null, "abstractUrl": "/proceedings-article/vts/1995/70000244/12OmNvqmUJ9", "parentPublication": { "id": "proceedings/vts/1995/7000/0", "title": "Proceedings 13th IEEE VLSI Test Symposium", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/focs/1996/7594/0/75940115", "title": "Solving systems of polynomial congruences modulo a large prime", "doi": null, "abstractUrl": "/proceedings-article/focs/1996/75940115/12OmNwDSdKW", "parentPublication": { "id": "proceedings/focs/1996/7594/0", "title": "Proceedings of 37th Conference on Foundations of Computer Science", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/focs/2010/4244/0/4244a011", "title": "Bounded Independence Fools Degree-2 Threshold Functions", "doi": null, "abstractUrl": "/proceedings-article/focs/2010/4244a011/12OmNwtn3r5", "parentPublication": { "id": "proceedings/focs/2010/4244/0", "title": "2010 IEEE 51st Annual Symposium on Foundations of Computer Science", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/focs/1989/1982/0/063545", "title": 
"Every polynomial-time 1-degree collapses iff P=PSPACE", "doi": null, "abstractUrl": "/proceedings-article/focs/1989/063545/12OmNyKJioA", "parentPublication": { "id": "proceedings/focs/1989/1982/0", "title": "30th Annual Symposium on Foundations of Computer Science", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/focs/1993/4370/0/0366825", "title": "Counting rational points on curves over finite fields", "doi": null, "abstractUrl": "/proceedings-article/focs/1993/0366825/12OmNzUPpy5", "parentPublication": { "id": "proceedings/focs/1993/4370/0", "title": "Proceedings of 1993 IEEE 34th Annual Foundations of Computer Science", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/sct/1994/5670/0/00315823", "title": "Polynomial-time membership comparable sets", "doi": null, "abstractUrl": "/proceedings-article/sct/1994/00315823/12OmNzZWbDm", "parentPublication": { "id": "proceedings/sct/1994/5670/0", "title": "Proceedings of IEEE 9th Annual Conference on Structure in Complexity Theory", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ccc/2010/4060/0/4060a211", "title": "A Regularity Lemma, and Low-Weight Approximators, for Low-Degree Polynomial Threshold Functions", "doi": null, "abstractUrl": "/proceedings-article/ccc/2010/4060a211/12OmNzsJ7Au", "parentPublication": { "id": "proceedings/ccc/2010/4060/0", "title": "2010 IEEE 25th Annual Conference on Computational Complexity", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "13bd1eJgohO", "title": "2018 IEEE/ACM 40th International Conference on Software Engineering: New Ideas and Emerging Technologies Results (ICSE-NIER)", "acronym": "icse-nier", "groupId": "1820865", "volume": "0", "displayVolume": "0", "year": "2018", "__typename": "ProceedingType" }, "article": { "id": "13bd1h03qO8", "doi": "", "title": "Explainable Software Analytics", "normalizedTitle": "Explainable Software Analytics", "abstract": "Software analytics has been the subject of considerable recent attention but is yet to receive significant industry traction. One of the key reasons is that software practitioners are reluctant to trust predictions produced by the analytics machinery without understanding the rationale for those predictions. While complex models such as deep learning and ensemble methods improve predictive performance, they have limited explainability. In this paper, we argue that making software analytics models explainable to software practitioners is as important as achieving accurate predictions. Explainability should therefore be a key measure for evaluating software analytics models. We envision that explainability will be a key driver for developing software analytics models that are useful in practice. We outline a research roadmap for this space, building on social science, explainable artificial intelligence and software engineering.", "abstracts": [ { "abstractType": "Regular", "content": "Software analytics has been the subject of considerable recent attention but is yet to receive significant industry traction. One of the key reasons is that software practitioners are reluctant to trust predictions produced by the analytics machinery without understanding the rationale for those predictions. While complex models such as deep learning and ensemble methods improve predictive performance, they have limited explainability. 
In this paper, we argue that making software analytics models explainable to software practitioners is as important as achieving accurate predictions. Explainability should therefore be a key measure for evaluating software analytics models. We envision that explainability will be a key driver for developing software analytics models that are useful in practice. We outline a research roadmap for this space, building on social science, explainable artificial intelligence and software engineering.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "Software analytics has been the subject of considerable recent attention but is yet to receive significant industry traction. One of the key reasons is that software practitioners are reluctant to trust predictions produced by the analytics machinery without understanding the rationale for those predictions. While complex models such as deep learning and ensemble methods improve predictive performance, they have limited explainability. In this paper, we argue that making software analytics models explainable to software practitioners is as important as achieving accurate predictions. Explainability should therefore be a key measure for evaluating software analytics models. We envision that explainability will be a key driver for developing software analytics models that are useful in practice. 
We outline a research roadmap for this space, building on social science, explainable artificial intelligence and software engineering.", "fno": "566201a053", "keywords": [ "Artificial Intelligence", "Data Analysis", "Software Engineering", "Software Analytics Models", "Explainability", "Software Engineering", "Explainable Software Analytics", "Software Practitioners", "Research Roadmap", "Social Science", "Artificial Intelligence", "Software", "Analytical Models", "Predictive Models", "Software Engineering", "Machine Learning", "Neural Networks", "Software Engineering", "Software Analytics", "Mining Software Repositories" ], "authors": [ { "affiliation": null, "fullName": "Hoa Khanh Dam", "givenName": "Hoa Khanh", "surname": "Dam", "__typename": "ArticleAuthorType" }, { "affiliation": null, "fullName": "Truyen Tran", "givenName": "Truyen", "surname": "Tran", "__typename": "ArticleAuthorType" }, { "affiliation": null, "fullName": "Aditya Ghose", "givenName": "Aditya", "surname": "Ghose", "__typename": "ArticleAuthorType" } ], "idPrefix": "icse-nier", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "2018-05-01T00:00:00", "pubType": "proceedings", "pages": "53-56", "year": "2018", "issn": null, "isbn": "978-1-4503-5662-6", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "566201a049", "articleId": "13bd1f3HvFg", "__typename": "AdjacentArticleType" }, "next": { "fno": "566201a057", "articleId": "13bd1gQYgEy", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/icdmw/2018/9288/0/928800a707", "title": "Explainable Predictions of Adverse Drug Events from Electronic Health Records Via Oracle Coaching", "doi": null, "abstractUrl": "/proceedings-article/icdmw/2018/928800a707/18jXJ7EdEXu", "parentPublication": { "id": "proceedings/icdmw/2018/9288/0", "title": "2018 IEEE 
International Conference on Data Mining Workshops (ICDMW)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/big-data/2021/3902/0/09671630", "title": "Does Dataset Complexity Matters for Model Explainers?", "doi": null, "abstractUrl": "/proceedings-article/big-data/2021/09671630/1A8jnyNOiWI", "parentPublication": { "id": "proceedings/big-data/2021/3902/0", "title": "2021 IEEE International Conference on Big Data (Big Data)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ase/2021/0337/0/033700a001", "title": "Explainable AI for Software Engineering", "doi": null, "abstractUrl": "/proceedings-article/ase/2021/033700a001/1AjTdAjnaFO", "parentPublication": { "id": "proceedings/ase/2021/0337/0", "title": "2021 36th IEEE/ACM International Conference on Automated Software Engineering (ASE)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/csde/2022/5305/0/10089349", "title": "Cardiovascular Disease Detection Based on Interpretable and Explainable AI", "doi": null, "abstractUrl": "/proceedings-article/csde/2022/10089349/1M7L8zissG4", "parentPublication": { "id": "proceedings/csde/2022/5305/0", "title": "2022 IEEE Asia-Pacific Conference on Computer Science and Data Engineering (CSDE)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2020/01/08807299", "title": "<bold>explAIner</bold>: A Visual Analytics Framework for Interactive and Explainable Machine Learning", "doi": null, "abstractUrl": "/journal/tg/2020/01/08807299/1cG6r4enRnO", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cogmi/2019/6737/0/673700a202", "title": "Building Explainable Predictive Analytics for Location-Dependent Time-Series Data", "doi": 
null, "abstractUrl": "/proceedings-article/cogmi/2019/673700a202/1htC8EWTJPa", "parentPublication": { "id": "proceedings/cogmi/2019/6737/0", "title": "2019 IEEE First International Conference on Cognitive Machine Intelligence (CogMI)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ase/2020/6768/0/676800b336", "title": "JITBot: An Explainable Just-In-Time Defect Prediction Bot", "doi": null, "abstractUrl": "/proceedings-article/ase/2020/676800b336/1pP3MfcFLG0", "parentPublication": { "id": "proceedings/ase/2020/6768/0", "title": "2020 35th IEEE/ACM International Conference on Automated Software Engineering (ASE)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icpr/2021/8808/0/09412560", "title": "Attack-agnostic Adversarial Detection on Medical Data Using Explainable Machine Learning", "doi": null, "abstractUrl": "/proceedings-article/icpr/2021/09412560/1tmjiR19xm0", "parentPublication": { "id": "proceedings/icpr/2021/8808/0", "title": "2020 25th International Conference on Pattern Recognition (ICPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "mags/so/2021/04/09460977", "title": "Actionable Analytics: Stop Telling Me What It Is; Please Tell Me What To Do", "doi": null, "abstractUrl": "/magazine/so/2021/04/09460977/1uCdDRREz7i", "parentPublication": { "id": "mags/so", "title": "IEEE Software", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ichi/2021/0132/0/013200a138", "title": "Explainable Deep Learning for Readmission Prediction with Tree-GloVe Embedding", "doi": null, "abstractUrl": "/proceedings-article/ichi/2021/013200a138/1xIOPvI9QMU", "parentPublication": { "id": "proceedings/ichi/2021/0132/0", "title": "2021 IEEE 9th International Conference on Healthcare Informatics (ICHI)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], 
"articleVideos": [] }
{ "proceeding": { "id": "1HriKl24EXS", "title": "2022 IEEE 42nd International Conference on Distributed Computing Systems (ICDCS)", "acronym": "icdcs", "groupId": "1000213", "volume": "0", "displayVolume": "0", "year": "2022", "__typename": "ProceedingType" }, "article": { "id": "1HriVQprIEE", "doi": "10.1109/ICDCS54860.2022.00125", "title": "Explainable Deep Learning Methodologies for Biomedical Images Classification", "normalizedTitle": "Explainable Deep Learning Methodologies for Biomedical Images Classification", "abstract": "Often when we have a lot of data available we can not give them an interpretability and an explainability such as to be able to extract answers, and even more so diagnosis in the medical field. The aim of this contribution is to introduce a way to provide explainability to data and features that could escape even medical doctors, and that with the use of Machine Learning models can be categorized and \"explained\".", "abstracts": [ { "abstractType": "Regular", "content": "Often when we have a lot of data available we can not give them an interpretability and an explainability such as to be able to extract answers, and even more so diagnosis in the medical field. The aim of this contribution is to introduce a way to provide explainability to data and features that could escape even medical doctors, and that with the use of Machine Learning models can be categorized and \"explained\".", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "Often when we have a lot of data available we can not give them an interpretability and an explainability such as to be able to extract answers, and even more so diagnosis in the medical field. 
The aim of this contribution is to introduce a way to provide explainability to data and features that could escape even medical doctors, and that with the use of Machine Learning models can be categorized and \"explained\".", "fno": "717700b262", "keywords": [ "Image Classification", "Learning Artificial Intelligence", "Medical Image Processing", "Explainable Deep Learning Methodologies", "Biomedical Images Classification", "Explainability", "Medical Field", "Medical Doctors", "Machine Learning Models", "Deep Learning", "Computational Modeling", "Biological System Modeling", "Medical Services", "Feature Extraction", "Data Models", "Data Mining", "Deep Learning Model", "Explainability", "Biomedical Images", "Classification", "Robustness" ], "authors": [ { "affiliation": "Institute of Informatics and Telematics (IIT),National Research Council of Italy (CNR),Pisa,Italy", "fullName": "Marcello Di Giammarco", "givenName": "Marcello", "surname": "Di Giammarco", "__typename": "ArticleAuthorType" }, { "affiliation": "University of Molise & IIT-CNR,Department of Medicine and Health Sciences \"Vincenzo Tiberio\",Campobasso,Italy", "fullName": "Francesco Mercaldo", "givenName": "Francesco", "surname": "Mercaldo", "__typename": "ArticleAuthorType" }, { "affiliation": "Institute of Informatics and Telematics (IIT),National Research Council of Italy (CNR),Pisa,Italy", "fullName": "Fabio Martinelli", "givenName": "Fabio", "surname": "Martinelli", "__typename": "ArticleAuthorType" }, { "affiliation": "University of Molise,Department of Medicine and Health Sciences \"Vincenzo Tiberio\",Campobasso,Italy", "fullName": "Antonella Santone", "givenName": "Antonella", "surname": "Santone", "__typename": "ArticleAuthorType" } ], "idPrefix": "icdcs", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "2022-07-01T00:00:00", "pubType": "proceedings", "pages": "1262-1264", "year": "2022", "issn": null, "isbn": "978-1-6654-7177-0", "notes": 
null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "717700b260", "articleId": "1HriPfO9zUI", "__typename": "AdjacentArticleType" }, "next": { "fno": "717700b265", "articleId": "1HriXSzzQ4w", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/chase/2021/3965/0/396500a139", "title": "Deep Learning and its Benefits in Prediction of Patients Through Medical Images", "doi": null, "abstractUrl": "/proceedings-article/chase/2021/396500a139/1AIMKEFJiJG", "parentPublication": { "id": "proceedings/chase/2021/3965/0", "title": "2021 IEEE/ACM Conference on Connected Health: Applications, Systems and Engineering Technologies (CHASE)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/5555/01/09956753", "title": "EVNet: An Explainable Deep Network for Dimension Reduction", "doi": null, "abstractUrl": "/journal/tg/5555/01/09956753/1Iu2J7nECo8", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/bibe/2022/8487/0/848700a075", "title": "Deep Learning for Heartbeat Phonocardiogram Signals Explainable Classification", "doi": null, "abstractUrl": "/proceedings-article/bibe/2022/848700a075/1J6hBJC1fGw", "parentPublication": { "id": "proceedings/bibe/2022/8487/0", "title": "2022 IEEE 22nd International Conference on Bioinformatics and Bioengineering (BIBE)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/bibe/2022/8487/0/848700a301", "title": "Examining Effects of Schizophrenia on EEG with Explainable Deep Learning Models", "doi": null, "abstractUrl": "/proceedings-article/bibe/2022/848700a301/1J6hHCaEf7O", "parentPublication": { "id": "proceedings/bibe/2022/8487/0", "title": "2022 IEEE 22nd 
International Conference on Bioinformatics and Bioengineering (BIBE)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/bibm/2022/6819/0/09995532", "title": "Explainable Pulmonary Disease Diagnosis with Prompt-Based Knowledge Extraction", "doi": null, "abstractUrl": "/proceedings-article/bibm/2022/09995532/1JC2R76gtlS", "parentPublication": { "id": "proceedings/bibm/2022/6819/0", "title": "2022 IEEE International Conference on Bioinformatics and Biomedicine (BIBM)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icvrv/2017/2636/0/263600a150", "title": "A Novel Intelligent Thyroid Nodule Diagnosis System over Ultrasound Images Based on Deep Learning", "doi": null, "abstractUrl": "/proceedings-article/icvrv/2017/263600a150/1ap5wZwJk52", "parentPublication": { "id": "proceedings/icvrv/2017/2636/0", "title": "2017 International Conference on Virtual Reality and Visualization (ICVRV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icaice/2020/9146/0/914600a134", "title": "Construction of Knowledge Graph of HIV-associated Neurocognitive Disorders Syndrome based on Deep Learning", "doi": null, "abstractUrl": "/proceedings-article/icaice/2020/914600a134/1rCg7mFbydq", "parentPublication": { "id": "proceedings/icaice/2020/9146/0", "title": "2020 International Conference on Artificial Intelligence and Computer Engineering (ICAICE)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/percom-workshops/2021/0424/0/09431114", "title": "Keynote: Explainable-by-design Deep Learning", "doi": null, "abstractUrl": "/proceedings-article/percom-workshops/2021/09431114/1tROKymq68E", "parentPublication": { "id": "proceedings/percom-workshops/2021/0424/0", "title": "2021 IEEE International Conference on Pervasive Computing and Communications Workshops and other Affiliated Events (PerCom 
Workshops)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cbms/2021/4121/0/412100a213", "title": "Recommending What Drug to Prescribe Next for Accurate and Explainable Medical Decisions", "doi": null, "abstractUrl": "/proceedings-article/cbms/2021/412100a213/1vb8UfzfZ0Q", "parentPublication": { "id": "proceedings/cbms/2021/4121/0", "title": "2021 IEEE 34th International Symposium on Computer-Based Medical Systems (CBMS)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ichi/2021/0132/0/013200a138", "title": "Explainable Deep Learning for Readmission Prediction with Tree-GloVe Embedding", "doi": null, "abstractUrl": "/proceedings-article/ichi/2021/013200a138/1xIOPvI9QMU", "parentPublication": { "id": "proceedings/ichi/2021/0132/0", "title": "2021 IEEE 9th International Conference on Healthcare Informatics (ICHI)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "1xIOMBjN5EQ", "title": "2021 IEEE 9th International Conference on Healthcare Informatics (ICHI)", "acronym": "ichi", "groupId": "1803080", "volume": "0", "displayVolume": "0", "year": "2021", "__typename": "ProceedingType" }, "article": { "id": "1xIOPvI9QMU", "doi": "10.1109/ICHI52183.2021.00031", "title": "Explainable Deep Learning for Readmission Prediction with Tree-GloVe Embedding", "normalizedTitle": "Explainable Deep Learning for Readmission Prediction with Tree-GloVe Embedding", "abstract": "Preventable hospital readmissions have been identified as one of the primary targets for improving the efficiency of the current healthcare system. Over the past decade, several data-driven solutions for predicting readmissions have been presented. While maintaining high predictive accuracy is the obvious main goal for such solutions, ensuring explainability of the model and its predictions, is equally important for adoption in the healthcare domain. Unfortunately, most solutions have struggled to strike an optimal balance between accuracy and explainability. Linear models only provide moderately accurate results while complex machine learning models are non-explainable black boxes, which precludes them from being used effectively within the decision support systems in the hospitals. We propose a solution that integrates domain knowledge, in the form of a hierarchical taxonomy defined for disease codes, into the learning framework to advance state-of-the-art in readmission prediction. We first propose a novel tree-structured embedding method to map disease codes into an explainable domain-guided representation. Next, we propose an attention-driven recurrent deep learning architecture. 
Results on two healthcare claims data sets show that the proposed model outperforms state-of-the-art methods proposed for this task, both in terms of accuracy and explainability.", "abstracts": [ { "abstractType": "Regular", "content": "Preventable hospital readmissions have been identified as one of the primary targets for improving the efficiency of the current healthcare system. Over the past decade, several data-driven solutions for predicting readmissions have been presented. While maintaining high predictive accuracy is the obvious main goal for such solutions, ensuring explainability of the model and its predictions, is equally important for adoption in the healthcare domain. Unfortunately, most solutions have struggled to strike an optimal balance between accuracy and explainability. Linear models only provide moderately accurate results while complex machine learning models are non-explainable black boxes, which precludes them from being used effectively within the decision support systems in the hospitals. We propose a solution that integrates domain knowledge, in the form of a hierarchical taxonomy defined for disease codes, into the learning framework to advance state-of-the-art in readmission prediction. We first propose a novel tree-structured embedding method to map disease codes into an explainable domain-guided representation. Next, we propose an attention-driven recurrent deep learning architecture. Results on two healthcare claims data sets show that the proposed model outperforms state-of-the-art methods proposed for this task, both in terms of accuracy and explainability.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "Preventable hospital readmissions have been identified as one of the primary targets for improving the efficiency of the current healthcare system. Over the past decade, several data-driven solutions for predicting readmissions have been presented. 
While maintaining high predictive accuracy is the obvious main goal for such solutions, ensuring explainability of the model and its predictions, is equally important for adoption in the healthcare domain. Unfortunately, most solutions have struggled to strike an optimal balance between accuracy and explainability. Linear models only provide moderately accurate results while complex machine learning models are non-explainable black boxes, which precludes them from being used effectively within the decision support systems in the hospitals. We propose a solution that integrates domain knowledge, in the form of a hierarchical taxonomy defined for disease codes, into the learning framework to advance state-of-the-art in readmission prediction. We first propose a novel tree-structured embedding method to map disease codes into an explainable domain-guided representation. Next, we propose an attention-driven recurrent deep learning architecture. Results on two healthcare claims data sets show that the proposed model outperforms state-of-the-art methods proposed for this task, both in terms of accuracy and explainability.", "fno": "013200a138", "keywords": [ "Data Mining", "Decision Support Systems", "Diseases", "Health Care", "Hospitals", "Learning Artificial Intelligence", "Medical Information Systems", "Explainability", "Linear Models", "Moderately Accurate Results", "Complex Machine Learning Models", "Nonexplainable Black Boxes", "Decision Support Systems", "Hospitals", "Domain Knowledge", "Learning Framework", "Readmission Prediction", "Novel Tree Structured", "Map Disease Codes", "Explainable Domain Guided Representation", "Attention Driven Recurrent Deep Learning Architecture", "Healthcare Claims Data Sets", "Explainable Deep", "Tree Glo Ve Embedding", "Preventable Hospital Readmissions", "Primary Targets", "Current Healthcare System", "Data Driven Solutions", "Predicting Readmissions", "High Predictive Accuracy", "Obvious Main Goal", "Healthcare Domain", "Deep 
Learning", "Codes", "Hospitals", "Computational Modeling", "Taxonomy", "Computer Architecture", "Predictive Models", "Readmission Prediction", "XAI", "Structural Embedding" ], "authors": [ { "affiliation": "University at Buffalo,Computer Science and Engineering,Buffalo,New York,USA", "fullName": "Jialiang Jiang", "givenName": "Jialiang", "surname": "Jiang", "__typename": "ArticleAuthorType" }, { "affiliation": "University at Buffalo,School of Nursing,Buffalo,New York,USA", "fullName": "Sharon Hewner", "givenName": "Sharon", "surname": "Hewner", "__typename": "ArticleAuthorType" }, { "affiliation": "University at Buffalo,Computer Science and Engineering,Buffalo,New York,USA", "fullName": "Varun Chandola", "givenName": "Varun", "surname": "Chandola", "__typename": "ArticleAuthorType" } ], "idPrefix": "ichi", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "2021-08-01T00:00:00", "pubType": "proceedings", "pages": "138-147", "year": "2021", "issn": null, "isbn": "978-1-6654-0132-6", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "013200a130", "articleId": "1xIORWPhP6o", "__typename": "AdjacentArticleType" }, "next": { "fno": "013200a148", "articleId": "1xIOSIuLhq8", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/big-data/2015/9926/0/07364123", "title": "30 Day hospital readmission analysis", "doi": null, "abstractUrl": "/proceedings-article/big-data/2015/07364123/12OmNCvumPZ", "parentPublication": { "id": "proceedings/big-data/2015/9926/0", "title": "2015 IEEE International Conference on Big Data (Big Data)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icse-nier/2018/5662/0/566201a053", "title": "Explainable Software Analytics", "doi": null, "abstractUrl": 
"/proceedings-article/icse-nier/2018/566201a053/13bd1h03qO8", "parentPublication": { "id": "proceedings/icse-nier/2018/5662/0", "title": "2018 IEEE/ACM 40th International Conference on Software Engineering: New Ideas and Emerging Technologies Results (ICSE-NIER)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/bibm/2021/0126/0/09669879", "title": "Predicting Same Hospital Readmission following Fontan Cavopulmonary Anastomosis using Machine Learning", "doi": null, "abstractUrl": "/proceedings-article/bibm/2021/09669879/1A9VLnohaz6", "parentPublication": { "id": "proceedings/bibm/2021/0126/0", "title": "2021 IEEE International Conference on Bioinformatics and Biomedicine (BIBM)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icde/2022/0883/0/088300d268", "title": "Tower Bridge Net (TB-Net): Bidirectional Knowledge Graph Aware Embedding Propagation for Explainable Recommender Systems", "doi": null, "abstractUrl": "/proceedings-article/icde/2022/088300d268/1FwBwMvKHq8", "parentPublication": { "id": "proceedings/icde/2022/0883/0", "title": "2022 IEEE 38th International Conference on Data Engineering (ICDE)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/stc/2022/8864/0/886400a019", "title": "ExClaim: Explainable Neural Claim Verification Using Rationalization", "doi": null, "abstractUrl": "/proceedings-article/stc/2022/886400a019/1Ip7AOr0sne", "parentPublication": { "id": "proceedings/stc/2022/8864/0", "title": "2022 IEEE 29th Annual Software Technology Conference (STC)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/big-data/2022/8045/0/10020315", "title": "Semantic-based Attention model for Hospital Readmission Prediction", "doi": null, "abstractUrl": "/proceedings-article/big-data/2022/10020315/1KfRju6IW2s", "parentPublication": { "id": 
"proceedings/big-data/2022/8045/0", "title": "2022 IEEE International Conference on Big Data (Big Data)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/bibm/2020/6215/0/09313588", "title": "A multi-modal machine learning approach towards predicting patient readmission", "doi": null, "abstractUrl": "/proceedings-article/bibm/2020/09313588/1qmfRwjPnX2", "parentPublication": { "id": "proceedings/bibm/2020/6215/0", "title": "2020 IEEE International Conference on Bioinformatics and Biomedicine (BIBM)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icpr/2021/8808/0/09412560", "title": "Attack-agnostic Adversarial Detection on Medical Data Using Explainable Machine Learning", "doi": null, "abstractUrl": "/proceedings-article/icpr/2021/09412560/1tmjiR19xm0", "parentPublication": { "id": "proceedings/icpr/2021/8808/0", "title": "2020 25th International Conference on Pattern Recognition (ICPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tb/2022/05/09457087", "title": "Predictive Modeling of Hospital Readmission: Challenges and Solutions", "doi": null, "abstractUrl": "/journal/tb/2022/05/09457087/1utUZZibKso", "parentPublication": { "id": "trans/tb", "title": "IEEE/ACM Transactions on Computational Biology and Bioinformatics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ichi/2021/0132/0/013200a214", "title": "Acute Myocardial Infarction Readmission Risk Prediction Model in Admit and Discharge patients", "doi": null, "abstractUrl": "/proceedings-article/ichi/2021/013200a214/1xIOWtAyEOQ", "parentPublication": { "id": "proceedings/ichi/2021/0132/0", "title": "2021 IEEE 9th International Conference on Healthcare Informatics (ICHI)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "1wzs0vrjyWQ", "title": "2021 IEEE/CVF Conference on Computer Vision and Pattern Recognition Workshops (CVPRW)", "acronym": "cvprw", "groupId": "1001809", "volume": "0", "displayVolume": "0", "year": "2021", "__typename": "ProceedingType" }, "article": { "id": "1yXsUbOxd8A", "doi": "10.1109/CVPRW53098.2021.00361", "title": "Explainable Deep Classification Models for Domain Generalization", "normalizedTitle": "Explainable Deep Classification Models for Domain Generalization", "abstract": "Conventionally, AI models are thought to trade off explainability for lower accuracy. We develop a training strategy that not only leads to a more explainable AI system for object classification, but as a consequence, suffers no perceptible accuracy degradation. Explanations are defined as regions of visual evidence upon which a deep classification network makes a decision. This is represented in the form of a saliency map conveying how much each pixel contributed to the network&#x2019;s decision. Our training strategy enforces a periodic saliency-based feedback to encourage the model to focus on the image regions that directly correspond to the ground-truth object. We quantify explainability using an automated metric, and using human judgement. We propose explainability as a means for bridging the visual-semantic gap between different domains where model explanations are used as a means of disentagling domain specific information from otherwise relevant features. We demonstrate that this leads to improved generalization to new domains without hindering performance on the original domain.", "abstracts": [ { "abstractType": "Regular", "content": "Conventionally, AI models are thought to trade off explainability for lower accuracy. We develop a training strategy that not only leads to a more explainable AI system for object classification, but as a consequence, suffers no perceptible accuracy degradation. 
Explanations are defined as regions of visual evidence upon which a deep classification network makes a decision. This is represented in the form of a saliency map conveying how much each pixel contributed to the network&#x2019;s decision. Our training strategy enforces a periodic saliency-based feedback to encourage the model to focus on the image regions that directly correspond to the ground-truth object. We quantify explainability using an automated metric, and using human judgement. We propose explainability as a means for bridging the visual-semantic gap between different domains where model explanations are used as a means of disentagling domain specific information from otherwise relevant features. We demonstrate that this leads to improved generalization to new domains without hindering performance on the original domain.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "Conventionally, AI models are thought to trade off explainability for lower accuracy. We develop a training strategy that not only leads to a more explainable AI system for object classification, but as a consequence, suffers no perceptible accuracy degradation. Explanations are defined as regions of visual evidence upon which a deep classification network makes a decision. This is represented in the form of a saliency map conveying how much each pixel contributed to the network’s decision. Our training strategy enforces a periodic saliency-based feedback to encourage the model to focus on the image regions that directly correspond to the ground-truth object. We quantify explainability using an automated metric, and using human judgement. We propose explainability as a means for bridging the visual-semantic gap between different domains where model explanations are used as a means of disentagling domain specific information from otherwise relevant features. 
We demonstrate that this leads to improved generalization to new domains without hindering performance on the original domain.", "fno": "489900d227", "keywords": [ "Training", "Degradation", "Measurement", "Visualization", "Computer Vision", "Conferences", "Computational Modeling" ], "authors": [ { "affiliation": "Huawei Ireland Research Center", "fullName": "Andrea Zunino", "givenName": "Andrea", "surname": "Zunino", "__typename": "ArticleAuthorType" }, { "affiliation": "Boston University,Department of Computer Science", "fullName": "Sarah Adel Bargal", "givenName": "Sarah Adel", "surname": "Bargal", "__typename": "ArticleAuthorType" }, { "affiliation": "Naver Labs Europe", "fullName": "Riccardo Volpi", "givenName": "Riccardo", "surname": "Volpi", "__typename": "ArticleAuthorType" }, { "affiliation": "Microsoft", "fullName": "Mehrnoosh Sameki", "givenName": "Mehrnoosh", "surname": "Sameki", "__typename": "ArticleAuthorType" }, { "affiliation": "Adobe Research", "fullName": "Jianming Zhang", "givenName": "Jianming", "surname": "Zhang", "__typename": "ArticleAuthorType" }, { "affiliation": "Boston University,Department of Computer Science", "fullName": "Stan Sclaroff", "givenName": "Stan", "surname": "Sclaroff", "__typename": "ArticleAuthorType" }, { "affiliation": "Huawei Ireland Research Center", "fullName": "Vittorio Murino", "givenName": "Vittorio", "surname": "Murino", "__typename": "ArticleAuthorType" }, { "affiliation": "Boston University,Department of Computer Science", "fullName": "Kate Saenko", "givenName": "Kate", "surname": "Saenko", "__typename": "ArticleAuthorType" } ], "idPrefix": "cvprw", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "2021-06-01T00:00:00", "pubType": "proceedings", "pages": "3227-3236", "year": "2021", "issn": null, "isbn": "978-1-6654-4899-4", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": 
"489900d218", "articleId": "1yVzVi5V3os", "__typename": "AdjacentArticleType" }, "next": { "fno": "489900d237", "articleId": "1yXsM8wYd2M", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/icde/2022/0883/0/088300c709", "title": "Effective Explanations for Entity Resolution Models", "doi": null, "abstractUrl": "/proceedings-article/icde/2022/088300c709/1FwFo9P2xB6", "parentPublication": { "id": "proceedings/icde/2022/0883/0", "title": "2022 IEEE 38th International Conference on Data Engineering (ICDE)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/rew/2022/6000/0/600000a162", "title": "Can Requirements Engineering Support Explainable Artificial Intelligence? Towards a User-Centric Approach for Explainability Requirements", "doi": null, "abstractUrl": "/proceedings-article/rew/2022/600000a162/1HCVeXjynAI", "parentPublication": { "id": "proceedings/rew/2022/6000/0", "title": "2022 IEEE 30th International Requirements Engineering Conference Workshops (REW)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/5555/01/09956753", "title": "EVNet: An Explainable Deep Network for Dimension Reduction", "doi": null, "abstractUrl": "/journal/tg/5555/01/09956753/1Iu2J7nECo8", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icdm/2022/5099/0/509900a101", "title": "Class-Specific Explainability for Deep Time Series Classifiers", "doi": null, "abstractUrl": "/proceedings-article/icdm/2022/509900a101/1KpCofcZVh6", "parentPublication": { "id": "proceedings/icdm/2022/5099/0", "title": "2022 IEEE International Conference on Data Mining (ICDM)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": 
"proceedings/wacv/2023/9346/0/934600b461", "title": "Learning How to MIMIC: Using Model Explanations to Guide Deep Learning Training", "doi": null, "abstractUrl": "/proceedings-article/wacv/2023/934600b461/1KxVmdY3lf2", "parentPublication": { "id": "proceedings/wacv/2023/9346/0", "title": "2023 IEEE/CVF Winter Conference on Applications of Computer Vision (WACV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tp/2022/06/09316988", "title": "Differentiated Explanation of Deep Neural Networks With Skewed Distributions", "doi": null, "abstractUrl": "/journal/tp/2022/06/09316988/1qdT4pwR0SQ", "parentPublication": { "id": "trans/tp", "title": "IEEE Transactions on Pattern Analysis & Machine Intelligence", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/wacv/2021/0477/0/047700c441", "title": "DeepOpht: Medical Report Generation for Retinal Images via Deep Models and Visual Explanation", "doi": null, "abstractUrl": "/proceedings-article/wacv/2021/047700c441/1uqGwXjNwKk", "parentPublication": { "id": "proceedings/wacv/2021/0477/0", "title": "2021 IEEE Winter Conference on Applications of Computer Vision (WACV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "mags/ic/2021/05/09514440", "title": "Knowledge-Intensive Language Understanding for Explainable AI", "doi": null, "abstractUrl": "/magazine/ic/2021/05/09514440/1w7adbkAWBy", "parentPublication": { "id": "mags/ic", "title": "IEEE Internet Computing", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvprw/2021/4899/0/489900b802", "title": "Towards Domain-Specific Explainable AI: Model Interpretation of a Skin Image Classifier using a Human Approach", "doi": null, "abstractUrl": "/proceedings-article/cvprw/2021/489900b802/1yJYjHCm2g8", "parentPublication": { "id": "proceedings/cvprw/2021/4899/0", "title": "2021 IEEE/CVF Conference on 
Computer Vision and Pattern Recognition Workshops (CVPRW)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/edocw/2021/4488/0/448800a075", "title": "A call for more explainable AI in law enforcement", "doi": null, "abstractUrl": "/proceedings-article/edocw/2021/448800a075/1yZ5BCAdFza", "parentPublication": { "id": "proceedings/edocw/2021/4488/0", "title": "2021 IEEE 25th International Enterprise Distributed Object Computing Workshop (EDOCW)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "12OmNyFCvPp", "title": "Tenth International Conference on Information Visualisation (IV'06)", "acronym": "iv", "groupId": "1000370", "volume": "0", "displayVolume": "0", "year": "2006", "__typename": "ProceedingType" }, "article": { "id": "12OmNzd7bl4", "doi": "10.1109/IV.2006.1", "title": "\"GeoAnalytics\" - Exploring spatio-temporal and multivariate data", "normalizedTitle": "\"GeoAnalytics\" - Exploring spatio-temporal and multivariate data", "abstract": "The voluminous nature of social scientific, spatialtemporal statistical databases calls for high interactive performance and creative integrated information and geovisualization tools. A solution to this challenge can be found in the emerging Visual Analytics (VA), a science of analytical reasoning facilitated by interactive visual interfaces and innovative visualization and is now actively pursued by research groups worldwide. In this paper, we present a tool called \"GeoAnalytics\", based on the principles behind VA. Our objective is to define new suitable approaches and tools for exploring time variant and multivariate attributes simultaneous including a spatial dimension. We introduce parallel coordinates integrated with time series and trend graph that serves as the visual control panel for the application. Multivariate attribute dynamic queries can express simultaneously queries involving time varying spatial data. VA encourages the need to build a bridge between the advantages of both human perception and computer science technologies. 
The sense of immediacy and speedof- thought interaction is achieved in our dynamically linked components and maximum allocation of screen area for visual displays that helps users stay focused on their work and shortens their time to enlightenment.", "abstracts": [ { "abstractType": "Regular", "content": "The voluminous nature of social scientific, spatialtemporal statistical databases calls for high interactive performance and creative integrated information and geovisualization tools. A solution to this challenge can be found in the emerging Visual Analytics (VA), a science of analytical reasoning facilitated by interactive visual interfaces and innovative visualization and is now actively pursued by research groups worldwide. In this paper, we present a tool called \"GeoAnalytics\", based on the principles behind VA. Our objective is to define new suitable approaches and tools for exploring time variant and multivariate attributes simultaneous including a spatial dimension. We introduce parallel coordinates integrated with time series and trend graph that serves as the visual control panel for the application. Multivariate attribute dynamic queries can express simultaneously queries involving time varying spatial data. VA encourages the need to build a bridge between the advantages of both human perception and computer science technologies. The sense of immediacy and speedof- thought interaction is achieved in our dynamically linked components and maximum allocation of screen area for visual displays that helps users stay focused on their work and shortens their time to enlightenment.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "The voluminous nature of social scientific, spatialtemporal statistical databases calls for high interactive performance and creative integrated information and geovisualization tools. 
A solution to this challenge can be found in the emerging Visual Analytics (VA), a science of analytical reasoning facilitated by interactive visual interfaces and innovative visualization and is now actively pursued by research groups worldwide. In this paper, we present a tool called \"GeoAnalytics\", based on the principles behind VA. Our objective is to define new suitable approaches and tools for exploring time variant and multivariate attributes simultaneous including a spatial dimension. We introduce parallel coordinates integrated with time series and trend graph that serves as the visual control panel for the application. Multivariate attribute dynamic queries can express simultaneously queries involving time varying spatial data. VA encourages the need to build a bridge between the advantages of both human perception and computer science technologies. The sense of immediacy and speedof- thought interaction is achieved in our dynamically linked components and maximum allocation of screen area for visual displays that helps users stay focused on their work and shortens their time to enlightenment.", "fno": "26020025", "keywords": [], "authors": [ { "affiliation": "Linkoping University, Sweden", "fullName": "Mikael Jern", "givenName": "Mikael", "surname": "Jern", "__typename": "ArticleAuthorType" }, { "affiliation": "Linkoping University, Sweden", "fullName": "Johan Franz?", "givenName": "Johan", "surname": "Franz?", "__typename": "ArticleAuthorType" } ], "idPrefix": "iv", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "2006-07-01T00:00:00", "pubType": "proceedings", "pages": "25-31", "year": "2006", "issn": "1550-6037", "isbn": "0-7695-2602-0", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "26020017", "articleId": "12OmNz4SOxE", "__typename": "AdjacentArticleType" }, "next": { "fno": "26020032", "articleId": "12OmNC0guAM", 
"__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/iv/2007/2900/0/29000511", "title": "Integrating InfoVis and GeoVis Components", "doi": null, "abstractUrl": "/proceedings-article/iv/2007/29000511/12OmNCcbEfp", "parentPublication": { "id": "proceedings/iv/2007/2900/0", "title": "2007 11th International Conference Information Visualization (IV '07)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icdmw/2010/4257/0/4257a025", "title": "PTCR-Miner: Progressive Temporal Class Rule Mining for Multivariate Temporal Data Classification", "doi": null, "abstractUrl": "/proceedings-article/icdmw/2010/4257a025/12OmNvTTc7b", "parentPublication": { "id": "proceedings/icdmw/2010/4257/0", "title": "2010 IEEE International Conference on Data Mining Workshops", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cmv/2007/2903/0/29030085", "title": "The GAV Toolkit for Multiple Linked Views", "doi": null, "abstractUrl": "/proceedings-article/cmv/2007/29030085/12OmNwI8cfC", "parentPublication": { "id": "proceedings/cmv/2007/2903/0", "title": "Coordinated and Multiple Views in Exploratory Visualization, International Conference on", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/e-science/2017/2686/0/08109149", "title": "Change Frequency Heatmaps for Temporal Multivariate Phenological Data Analysis", "doi": null, "abstractUrl": "/proceedings-article/e-science/2017/08109149/12OmNxjjEgf", "parentPublication": { "id": "proceedings/e-science/2017/2686/0", "title": "2017 IEEE 13th International Conference on e-Science (e-Science)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2016/01/07192673", "title": "Temporal MDS Plots for Analysis of Multivariate Data", "doi": null, "abstractUrl": 
"/journal/tg/2016/01/07192673/13rRUx0gefm", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2019/01/08440120", "title": "Exploring Time-Varying Multivariate Volume Data Using Matrix of Isosurface Similarity Maps", "doi": null, "abstractUrl": "/journal/tg/2019/01/08440120/17D45Wuc38E", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/5555/01/09956758", "title": "Multivariate Data Explanation by Jumping Emerging Patterns Visualization", "doi": null, "abstractUrl": "/journal/tg/5555/01/09956758/1Iu2JIUXLR6", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/big-data/2022/8045/0/10020729", "title": "Spatio-Temporal Based Architecture Topology Search for Multivariate Time Series Prediction", "doi": null, "abstractUrl": "/proceedings-article/big-data/2022/10020729/1KfQW8NLtLi", "parentPublication": { "id": "proceedings/big-data/2022/8045/0", "title": "2022 IEEE International Conference on Big Data (Big Data)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vast/2018/6861/0/08802513", "title": "Phoenixmap: Spatio-Temporal Distribution Analysis With Deep Learning Classifications", "doi": null, "abstractUrl": "/proceedings-article/vast/2018/08802513/1cJ6XWDewoM", "parentPublication": { "id": "proceedings/vast/2018/6861/0", "title": "2018 IEEE Conference on Visual Analytics Science and Technology (VAST)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/svr/2020/9231/0/923100a443", "title": "3D visualization of 
temporal data: exploring Visual Attention and Machine Learning", "doi": null, "abstractUrl": "/proceedings-article/svr/2020/923100a443/1oZBAABYXHW", "parentPublication": { "id": "proceedings/svr/2020/9231/0", "title": "2020 22nd Symposium on Virtual and Augmented Reality (SVR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "1s645BaTzVu", "title": "2020 IEEE International Conference on Big Data (Big Data)", "acronym": "big-data", "groupId": "1802964", "volume": "0", "displayVolume": "0", "year": "2020", "__typename": "ProceedingType" }, "article": { "id": "1s64SdG49Hi", "doi": "10.1109/BigData50022.2020.9378380", "title": "Combining Global and Sequential Patterns for Multivariate Time Series Forecasting", "normalizedTitle": "Combining Global and Sequential Patterns for Multivariate Time Series Forecasting", "abstract": "Multivariate time series forecasting is very important for many applications. Many studies have been conducted for accurate and interpretable prediction methods. However, existing methods either cannot take both times series and covariates into consideration, lacking of interpretability, or ignore global trends across multivariate time series. In this paper, we aim to solve these issues. To this end, we propose a new model named TEDGE for accurate and interpretable time series prediction. In this model, we extract global trends hidden across multivariate times series to improve prediction accuracy. Meanwhile, we utilize a deep recurrent model with attention mechanism to find long-and short-term sequential patterns hidden in individual time series with interpretability. We conduct experiments on several datasets to evaluate the proposed models performance. Results demonstrate the superior performance of our proposed model.", "abstracts": [ { "abstractType": "Regular", "content": "Multivariate time series forecasting is very important for many applications. Many studies have been conducted for accurate and interpretable prediction methods. However, existing methods either cannot take both times series and covariates into consideration, lacking of interpretability, or ignore global trends across multivariate time series. In this paper, we aim to solve these issues. 
To this end, we propose a new model named TEDGE for accurate and interpretable time series prediction. In this model, we extract global trends hidden across multivariate times series to improve prediction accuracy. Meanwhile, we utilize a deep recurrent model with attention mechanism to find long-and short-term sequential patterns hidden in individual time series with interpretability. We conduct experiments on several datasets to evaluate the proposed models performance. Results demonstrate the superior performance of our proposed model.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "Multivariate time series forecasting is very important for many applications. Many studies have been conducted for accurate and interpretable prediction methods. However, existing methods either cannot take both times series and covariates into consideration, lacking of interpretability, or ignore global trends across multivariate time series. In this paper, we aim to solve these issues. To this end, we propose a new model named TEDGE for accurate and interpretable time series prediction. In this model, we extract global trends hidden across multivariate times series to improve prediction accuracy. Meanwhile, we utilize a deep recurrent model with attention mechanism to find long-and short-term sequential patterns hidden in individual time series with interpretability. We conduct experiments on several datasets to evaluate the proposed models performance. 
Results demonstrate the superior performance of our proposed model.", "fno": "09378380", "keywords": [ "Forecasting Theory", "Time Series", "Global Patterns", "Multivariate Time Series Forecasting", "Accurate Prediction Methods", "Interpretable Prediction Methods", "Accurate Time Series Prediction", "Interpretable Time Series Prediction", "Multivariate Times Series", "Short Term Sequential Patterns", "Sequential Patterns", "TEDGE", "Conferences", "Time Series Analysis", "Predictive Models", "Big Data", "Market Research", "Data Models", "Forecasting", "Time Series Forecasting", "Matrix Factorization", "Deep Learning" ], "authors": [ { "affiliation": "Ministry of Education,Key Laboratory of Data Engineering and Knowledge Engineering,China", "fullName": "Zhaoxi Li", "givenName": "Zhaoxi", "surname": "Li", "__typename": "ArticleAuthorType" }, { "affiliation": "Ministry of Education,Key Laboratory of Data Engineering and Knowledge Engineering,China", "fullName": "Jun He", "givenName": "Jun", "surname": "He", "__typename": "ArticleAuthorType" }, { "affiliation": "Tsinghua g University,School of Economics and Management,Beijing,China", "fullName": "Hongyan Liu", "givenName": "Hongyan", "surname": "Liu", "__typename": "ArticleAuthorType" }, { "affiliation": "Ministry of Education,Key Laboratory of Data Engineering and Knowledge Engineering,China", "fullName": "Xiaoyong Du", "givenName": "Xiaoyong", "surname": "Du", "__typename": "ArticleAuthorType" } ], "idPrefix": "big-data", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "2020-12-01T00:00:00", "pubType": "proceedings", "pages": "180-187", "year": "2020", "issn": null, "isbn": "978-1-7281-6251-5", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "09378063", "articleId": "1s64HPUolDq", "__typename": "AdjacentArticleType" }, "next": { "fno": "09378424", "articleId": "1s6539YQfTO", "__typename": 
"AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/cbd/2017/1072/0/1072a343", "title": "RBTA: A Multivariate Time-Series Method for City Incidents Mining and Forecasting", "doi": null, "abstractUrl": "/proceedings-article/cbd/2017/1072a343/12OmNyRg4qU", "parentPublication": { "id": "proceedings/cbd/2017/1072/0", "title": "2017 Fifth International Conference on Advanced Cloud and Big Data (CBD)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/paap/2018/9403/0/940300a171", "title": "Time Series Forecasting Using Sequence-to-Sequence Deep Learning Framework", "doi": null, "abstractUrl": "/proceedings-article/paap/2018/940300a171/19JE9MimPza", "parentPublication": { "id": "proceedings/paap/2018/9403/0", "title": "2018 9th International Symposium on Parallel Architectures, Algorithms and Programming (PAAP)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/dsc/2021/1815/0/181500a083", "title": "Sequence Attention for Multivariate Time Series Forecasting", "doi": null, "abstractUrl": "/proceedings-article/dsc/2021/181500a083/1CuhWbfuEYU", "parentPublication": { "id": "proceedings/dsc/2021/1815/0", "title": "2021 IEEE Sixth International Conference on Data Science in Cyberspace (DSC)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tk/5555/01/09950330", "title": "Multivariate Time Series Forecasting with Dynamic Graph Neural ODEs", "doi": null, "abstractUrl": "/journal/tk/5555/01/09950330/1IiLdUwEK7m", "parentPublication": { "id": "trans/tk", "title": "IEEE Transactions on Knowledge & Data Engineering", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/big-data/2022/8045/0/10020729", "title": "Spatio-Temporal Based Architecture Topology Search for Multivariate Time Series Prediction", "doi": null, "abstractUrl": 
"/proceedings-article/big-data/2022/10020729/1KfQW8NLtLi", "parentPublication": { "id": "proceedings/big-data/2022/8045/0", "title": "2022 IEEE International Conference on Big Data (Big Data)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/aiipcc/2022/6287/0/628700a299", "title": "Multivariate time series prediction based on graph convolutional neural networks", "doi": null, "abstractUrl": "/proceedings-article/aiipcc/2022/628700a299/1LR9XZb9AXu", "parentPublication": { "id": "proceedings/aiipcc/2022/6287/0", "title": "2022 International Conference on Artificial Intelligence, Information Processing and Cloud Computing (AIIPCC)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/hpcc-dss-smartcity-dependsys/2022/1993/0/199300a785", "title": "Key Factor Selection Transformer for Multivariate Time Series Forecasting", "doi": null, "abstractUrl": "/proceedings-article/hpcc-dss-smartcity-dependsys/2022/199300a785/1LSPpseFmo0", "parentPublication": { "id": "proceedings/hpcc-dss-smartcity-dependsys/2022/1993/0", "title": "2022 IEEE 24th Int Conf on High Performance Computing & Communications; 8th Int Conf on Data Science & Systems; 20th Int Conf on Smart City; 8th Int Conf on Dependability in Sensor, Cloud & Big Data Systems & Application (HPCC/DSS/SmartCity/DependSys)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/big-data/2019/0858/0/09006192", "title": "Deep Learning for Non-stationary Multivariate Time Series Forecasting", "doi": null, "abstractUrl": "/proceedings-article/big-data/2019/09006192/1hJsE3dcmaI", "parentPublication": { "id": "proceedings/big-data/2019/0858/0", "title": "2019 IEEE International Conference on Big Data (Big Data)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icdm/2020/8316/0/831600a841", "title": "Multivariate Time-Series Anomaly 
Detection via Graph Attention Network", "doi": null, "abstractUrl": "/proceedings-article/icdm/2020/831600a841/1r54xrHxN72", "parentPublication": { "id": "proceedings/icdm/2020/8316/0", "title": "2020 IEEE International Conference on Data Mining (ICDM)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/mdm/2021/2845/0/284500a029", "title": "IFP-ADAC: A Two-stage Interpretable Fault Prediction Model for Multivariate Time Series", "doi": null, "abstractUrl": "/proceedings-article/mdm/2021/284500a029/1v2QCOiWOI0", "parentPublication": { "id": "proceedings/mdm/2021/2845/0", "title": "2021 22nd IEEE International Conference on Mobile Data Management (MDM)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "12OmNyoiYVr", "title": "2017 IEEE Conference on Computer Vision and Pattern Recognition (CVPR)", "acronym": "cvpr", "groupId": "1000147", "volume": "0", "displayVolume": "0", "year": "2017", "__typename": "ProceedingType" }, "article": { "id": "12OmNBAqZH0", "doi": "10.1109/CVPR.2017.330", "title": "Scene Graph Generation by Iterative Message Passing", "normalizedTitle": "Scene Graph Generation by Iterative Message Passing", "abstract": "Understanding a visual scene goes beyond recognizing individual objects in isolation. Relationships between objects also constitute rich semantic information about the scene. In this work, we explicitly model the objects and their relationships using scene graphs, a visually-grounded graphical structure of an image. We propose a novel end-to-end model that generates such structured scene representation from an input image. Our key insight is that the graph generation problem can be formulated as message passing between the primal node graph and its dual edge graph. Our joint inference model can take advantage of contextual cues to make better predictions on objects and their relationships. The experiments show that our model significantly outperforms previous methods on the Visual Genome dataset as well as support relation inference in NYU Depth V2 dataset.", "abstracts": [ { "abstractType": "Regular", "content": "Understanding a visual scene goes beyond recognizing individual objects in isolation. Relationships between objects also constitute rich semantic information about the scene. In this work, we explicitly model the objects and their relationships using scene graphs, a visually-grounded graphical structure of an image. We propose a novel end-to-end model that generates such structured scene representation from an input image. Our key insight is that the graph generation problem can be formulated as message passing between the primal node graph and its dual edge graph. 
Our joint inference model can take advantage of contextual cues to make better predictions on objects and their relationships. The experiments show that our model significantly outperforms previous methods on the Visual Genome dataset as well as support relation inference in NYU Depth V2 dataset.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "Understanding a visual scene goes beyond recognizing individual objects in isolation. Relationships between objects also constitute rich semantic information about the scene. In this work, we explicitly model the objects and their relationships using scene graphs, a visually-grounded graphical structure of an image. We propose a novel end-to-end model that generates such structured scene representation from an input image. Our key insight is that the graph generation problem can be formulated as message passing between the primal node graph and its dual edge graph. Our joint inference model can take advantage of contextual cues to make better predictions on objects and their relationships. 
The experiments show that our model significantly outperforms previous methods on the Visual Genome dataset as well as support relation inference in NYU Depth V2 dataset.", "fno": "0457d097", "keywords": [ "Graph Theory", "Image Colour Analysis", "Image Representation", "Image Segmentation", "Inference Mechanisms", "Message Passing", "Object Recognition", "Visual Genome Dataset", "NYU Depth V 2 Dataset", "Scene Graph Generation", "Dual Edge Graph", "Primal Node Graph", "Graph Generation Problem", "Structured Scene Representation", "Graphical Structure", "Scene Graphs", "Visual Scene", "Iterative Message Passing", "Visualization", "Proposals", "Predictive Models", "Semantics", "Image Edge Detection", "Message Passing" ], "authors": [ { "affiliation": null, "fullName": "Danfei Xu", "givenName": "Danfei", "surname": "Xu", "__typename": "ArticleAuthorType" }, { "affiliation": null, "fullName": "Yuke Zhu", "givenName": "Yuke", "surname": "Zhu", "__typename": "ArticleAuthorType" }, { "affiliation": null, "fullName": "Christopher B. 
Choy", "givenName": "Christopher B.", "surname": "Choy", "__typename": "ArticleAuthorType" }, { "affiliation": null, "fullName": "Li Fei-Fei", "givenName": "Li", "surname": "Fei-Fei", "__typename": "ArticleAuthorType" } ], "idPrefix": "cvpr", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "2017-07-01T00:00:00", "pubType": "proceedings", "pages": "3097-3106", "year": "2017", "issn": "1063-6919", "isbn": "978-1-5386-0457-1", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "0457d087", "articleId": "12OmNx1IwfP", "__typename": "AdjacentArticleType" }, "next": { "fno": "0457d107", "articleId": "12OmNvm6VFg", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/iccv/2017/1032/0/1032b270", "title": "Scene Graph Generation from Objects, Phrases and Region Captions", "doi": null, "abstractUrl": "/proceedings-article/iccv/2017/1032b270/12OmNrkjVqu", "parentPublication": { "id": "proceedings/iccv/2017/1032/0", "title": "2017 IEEE International Conference on Computer Vision (ICCV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icme/2022/8563/0/09859970", "title": "Multi-Scale Graph Attention Network for Scene Graph Generation", "doi": null, "abstractUrl": "/proceedings-article/icme/2022/09859970/1G9EpEewD16", "parentPublication": { "id": "proceedings/icme/2022/8563/0", "title": "2022 IEEE International Conference on Multimedia and Expo (ICME)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvpr/2022/6946/0/694600t9454", "title": "HL-Net: Heterophily Learning Network for Scene Graph Generation", "doi": null, "abstractUrl": "/proceedings-article/cvpr/2022/694600t9454/1H1lCef5GSY", "parentPublication": { "id": "proceedings/cvpr/2022/6946/0", "title": "2022 IEEE/CVF 
Conference on Computer Vision and Pattern Recognition (CVPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tp/5555/01/10040751", "title": "Neural Belief Propagation for Scene Graph Generation", "doi": null, "abstractUrl": "/journal/tp/5555/01/10040751/1KB9whiRqA8", "parentPublication": { "id": "trans/tp", "title": "IEEE Transactions on Pattern Analysis & Machine Intelligence", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/wacv/2023/9346/0/934600e380", "title": "Grounding Scene Graphs on Natural Images via Visio-Lingual Message Passing", "doi": null, "abstractUrl": "/proceedings-article/wacv/2023/934600e380/1KxUEJZWihW", "parentPublication": { "id": "proceedings/wacv/2023/9346/0", "title": "2023 IEEE/CVF Winter Conference on Applications of Computer Vision (WACV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tp/5555/01/10105507", "title": "RelTR: Relation Transformer for Scene Graph Generation", "doi": null, "abstractUrl": "/journal/tp/5555/01/10105507/1MtgpPN7eBq", "parentPublication": { "id": "trans/tp", "title": "IEEE Transactions on Pattern Analysis & Machine Intelligence", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iccvw/2019/5023/0/502300b730", "title": "Visual Relationships as Functions:Enabling Few-Shot Scene Graph Prediction", "doi": null, "abstractUrl": "/proceedings-article/iccvw/2019/502300b730/1i5msdOeSl2", "parentPublication": { "id": "proceedings/iccvw/2019/5023/0", "title": "2019 IEEE/CVF International Conference on Computer Vision Workshop (ICCVW)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvpr/2020/7168/0/716800d743", "title": "GPS-Net: Graph Property Sensing Network for Scene Graph Generation", "doi": null, "abstractUrl": "/proceedings-article/cvpr/2020/716800d743/1m3o2oONVsY", 
"parentPublication": { "id": "proceedings/cvpr/2020/7168/0", "title": "2020 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icme/2021/3864/0/09428472", "title": "Relationship-Aware Primal-Dual Graph Attention Network For Scene Graph Generation", "doi": null, "abstractUrl": "/proceedings-article/icme/2021/09428472/1uilSfRdZcs", "parentPublication": { "id": "proceedings/icme/2021/3864/0", "title": "2021 IEEE International Conference on Multimedia and Expo (ICME)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvprw/2021/4899/0/489900b663", "title": "Target-Tailored Source-Transformation for Scene Graph Generation", "doi": null, "abstractUrl": "/proceedings-article/cvprw/2021/489900b663/1yJYwdXPYYg", "parentPublication": { "id": "proceedings/cvprw/2021/4899/0", "title": "2021 IEEE/CVF Conference on Computer Vision and Pattern Recognition Workshops (CVPRW)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "1G9DtzCwrjW", "title": "2022 IEEE International Conference on Multimedia and Expo (ICME)", "acronym": "icme", "groupId": "1000477", "volume": "0", "displayVolume": "0", "year": "2022", "__typename": "ProceedingType" }, "article": { "id": "1G9EpEewD16", "doi": "10.1109/ICME52920.2022.9859970", "title": "Multi-Scale Graph Attention Network for Scene Graph Generation", "normalizedTitle": "Multi-Scale Graph Attention Network for Scene Graph Generation", "abstract": "Scene graph provides a high-level scene understanding of the image, which has a wide range of applications in computer vision. Previous methods elaborately design many message passing strategies and uniformly treat instances in the image to capture contextual information. These methods, however, fail to grasp the salient objects and their relations, which are the basis of understanding the content of images. To capture the interaction among salient instances, we propose a novel Multi-Scale Graph Attention Network (MSGAT) that gradually shrinks the graph scale to retain salient instances, and then expands it to encode the multi-scale context. Our proposed MSGAT contains two sub-modules: Multi-Scale Message Passing (MSMP) and Relationship Filtering Module (RFM), which are designed to enhance features of salient instances and filter redundant relationships, respectively. Extensive experiments demonstrate that MSGAT outperforms previous methods and achieves state-of-the-art performances on Visual Genome.", "abstracts": [ { "abstractType": "Regular", "content": "Scene graph provides a high-level scene understanding of the image, which has a wide range of applications in computer vision. Previous methods elaborately design many message passing strategies and uniformly treat instances in the image to capture contextual information. These methods, however, fail to grasp the salient objects and their relations, which are the basis of understanding the content of images. 
To capture the interaction among salient instances, we propose a novel Multi-Scale Graph Attention Network (MSGAT) that gradually shrinks the graph scale to retain salient instances, and then expands it to encode the multi-scale context. Our proposed MSGAT contains two sub-modules: Multi-Scale Message Passing (MSMP) and Relationship Filtering Module (RFM), which are designed to enhance features of salient instances and filter redundant relationships, respectively. Extensive experiments demonstrate that MSGAT outperforms previous methods and achieves state-of-the-art performances on Visual Genome.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "Scene graph provides a high-level scene understanding of the image, which has a wide range of applications in computer vision. Previous methods elaborately design many message passing strategies and uniformly treat instances in the image to capture contextual information. These methods, however, fail to grasp the salient objects and their relations, which are the basis of understanding the content of images. To capture the interaction among salient instances, we propose a novel Multi-Scale Graph Attention Network (MSGAT) that gradually shrinks the graph scale to retain salient instances, and then expands it to encode the multi-scale context. Our proposed MSGAT contains two sub-modules: Multi-Scale Message Passing (MSMP) and Relationship Filtering Module (RFM), which are designed to enhance features of salient instances and filter redundant relationships, respectively. 
Extensive experiments demonstrate that MSGAT outperforms previous methods and achieves state-of-the-art performances on Visual Genome.", "fno": "09859970", "keywords": [ "Computer Vision", "Feature Extraction", "Graph Theory", "Image Filtering", "Message Passing", "MSGAT", "Salient Instances", "Scene Understanding", "Computer Vision", "Contextual Information", "Salient Objects", "Multiscale Graph Attention Network", "MSMP", "Multiscale Message Passing", "Relationship Filtering Module", "RFM", "Scene Graph Generation", "Visualization", "Message Passing", "Design Methodology", "Semantics", "Genomics", "Feature Extraction", "Information Filters", "Scene Graph Generation", "Multi Scale Framework", "Graph Attention Network", "Relation Filtering" ], "authors": [ { "affiliation": "University of Electronic Science and Technology of China,China", "fullName": "Min Chen", "givenName": "Min", "surname": "Chen", "__typename": "ArticleAuthorType" }, { "affiliation": "University of Electronic Science and Technology of China,China", "fullName": "Xinyu Lyu", "givenName": "Xinyu", "surname": "Lyu", "__typename": "ArticleAuthorType" }, { "affiliation": "University of Electronic Science and Technology of China,China", "fullName": "Yuyu Guo", "givenName": "Yuyu", "surname": "Guo", "__typename": "ArticleAuthorType" }, { "affiliation": "University of Electronic Science and Technology of China,China", "fullName": "Jingwei Liu", "givenName": "Jingwei", "surname": "Liu", "__typename": "ArticleAuthorType" }, { "affiliation": "University of Electronic Science and Technology of China,China", "fullName": "Lianli Gao", "givenName": "Lianli", "surname": "Gao", "__typename": "ArticleAuthorType" }, { "affiliation": "University of Electronic Science and Technology of China,China", "fullName": "Jingkuan Song", "givenName": "Jingkuan", "surname": "Song", "__typename": "ArticleAuthorType" } ], "idPrefix": "icme", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, 
"pubDate": "2022-07-01T00:00:00", "pubType": "proceedings", "pages": "1-6", "year": "2022", "issn": null, "isbn": "978-1-6654-8563-0", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "09859695", "articleId": "1G9EEAsdfNe", "__typename": "AdjacentArticleType" }, "next": { "fno": "09859787", "articleId": "1G9DHYOMoSI", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/cvpr/2017/0457/0/0457d097", "title": "Scene Graph Generation by Iterative Message Passing", "doi": null, "abstractUrl": "/proceedings-article/cvpr/2017/0457d097/12OmNBAqZH0", "parentPublication": { "id": "proceedings/cvpr/2017/0457/0", "title": "2017 IEEE Conference on Computer Vision and Pattern Recognition (CVPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvpr/2017/0457/0/0457h062", "title": "A Study of Lagrangean Decompositions and Dual Ascent Solvers for Graph Matching", "doi": null, "abstractUrl": "/proceedings-article/cvpr/2017/0457h062/12OmNxXCGGh", "parentPublication": { "id": "proceedings/cvpr/2017/0457/0", "title": "2017 IEEE Conference on Computer Vision and Pattern Recognition (CVPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvpr/2022/6946/0/694600k0853", "title": "Automatic Relation-aware Graph Network Proliferation", "doi": null, "abstractUrl": "/proceedings-article/cvpr/2022/694600k0853/1H0NsGhtidG", "parentPublication": { "id": "proceedings/cvpr/2022/6946/0", "title": "2022 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvpr/2022/6946/0/694600t9454", "title": "HL-Net: Heterophily Learning Network for Scene Graph Generation", "doi": null, "abstractUrl": 
"/proceedings-article/cvpr/2022/694600t9454/1H1lCef5GSY", "parentPublication": { "id": "proceedings/cvpr/2022/6946/0", "title": "2022 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tp/5555/01/10040751", "title": "Neural Belief Propagation for Scene Graph Generation", "doi": null, "abstractUrl": "/journal/tp/5555/01/10040751/1KB9whiRqA8", "parentPublication": { "id": "trans/tp", "title": "IEEE Transactions on Pattern Analysis & Machine Intelligence", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ispa-bdcloud-socialcom-sustaincom/2022/6497/0/649700a018", "title": "CopGAT: Co-propagation Self-supervised Graph Attention Network", "doi": null, "abstractUrl": "/proceedings-article/ispa-bdcloud-socialcom-sustaincom/2022/649700a018/1LKwt8WY3cc", "parentPublication": { "id": "proceedings/ispa-bdcloud-socialcom-sustaincom/2022/6497/0", "title": "2022 IEEE Intl Conf on Parallel & Distributed Processing with Applications, Big Data & Cloud Computing, Sustainable Computing & Communications, Social Computing & Networking (ISPA/BDCloud/SocialCom/SustainCom)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvpr/2020/7168/0/716800d743", "title": "GPS-Net: Graph Property Sensing Network for Scene Graph Generation", "doi": null, "abstractUrl": "/proceedings-article/cvpr/2020/716800d743/1m3o2oONVsY", "parentPublication": { "id": "proceedings/cvpr/2020/7168/0", "title": "2020 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icme/2021/3864/0/09428472", "title": "Relationship-Aware Primal-Dual Graph Attention Network For Scene Graph Generation", "doi": null, "abstractUrl": "/proceedings-article/icme/2021/09428472/1uilSfRdZcs", 
"parentPublication": { "id": "proceedings/icme/2021/3864/0", "title": "2021 IEEE International Conference on Multimedia and Expo (ICME)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvprw/2021/4899/0/489900b663", "title": "Target-Tailored Source-Transformation for Scene Graph Generation", "doi": null, "abstractUrl": "/proceedings-article/cvprw/2021/489900b663/1yJYwdXPYYg", "parentPublication": { "id": "proceedings/cvprw/2021/4899/0", "title": "2021 IEEE/CVF Conference on Computer Vision and Pattern Recognition Workshops (CVPRW)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvpr/2021/4509/0/450900l1104", "title": "Bipartite Graph Network with Adaptive Message Passing for Unbiased Scene Graph Generation", "doi": null, "abstractUrl": "/proceedings-article/cvpr/2021/450900l1104/1yeJ9bKPSqQ", "parentPublication": { "id": "proceedings/cvpr/2021/4509/0", "title": "2021 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "1G9DtzCwrjW", "title": "2022 IEEE International Conference on Multimedia and Expo (ICME)", "acronym": "icme", "groupId": "1000477", "volume": "0", "displayVolume": "0", "year": "2022", "__typename": "ProceedingType" }, "article": { "id": "1G9EuqL6nzG", "doi": "10.1109/ICME52920.2022.9859944", "title": "Zero-Shot Scene Graph Generation with Knowledge Graph Completion", "normalizedTitle": "Zero-Shot Scene Graph Generation with Knowledge Graph Completion", "abstract": "Limited by the incomprehensive training samples, existing scene graph generation (SGG) methods perform poorly on predicting zero-shot (i.e., unseen) subject-predicate-object triples. To address this problem, we propose a general SGG framework to improve their zero-shot performance. The main idea of our method is to generate the information of zero-shot triples before the training of the predicate classifier and thus make the original zero-shot triples non-zero-shot. Specifically, the missing information of zero-shot triples is generated by our proposed knowledge graph completion strategy and then integrated with visual features of images. Therefore, the predicate classification of zero-shot triples is no longer just regarded as a single visual classification task but also transformed into a prediction task of missing links in a knowledge graph. The experiments on the dataset Visual Genome demonstrate that our proposed method outperforms the state-of-the-art methods in popular zero-shot metrics (i.e., zR@N, ng-zR@N) for all popular SGG tasks.", "abstracts": [ { "abstractType": "Regular", "content": "Limited by the incomprehensive training samples, existing scene graph generation (SGG) methods perform poorly on predicting zero-shot (i.e., unseen) subject-predicate-object triples. To address this problem, we propose a general SGG framework to improve their zero-shot performance. 
The main idea of our method is to generate the information of zero-shot triples before the training of the predicate classifier and thus make the original zero-shot triples non-zero-shot. Specifically, the missing information of zero-shot triples is generated by our proposed knowledge graph completion strategy and then integrated with visual features of images. Therefore, the predicate classification of zero-shot triples is no longer just regarded as a single visual classification task but also transformed into a prediction task of missing links in a knowledge graph. The experiments on the dataset Visual Genome demonstrate that our proposed method outperforms the state-of-the-art methods in popular zero-shot metrics (i.e., zR@N, ng-zR@N) for all popular SGG tasks.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "Limited by the incomprehensive training samples, existing scene graph generation (SGG) methods perform poorly on predicting zero-shot (i.e., unseen) subject-predicate-object triples. To address this problem, we propose a general SGG framework to improve their zero-shot performance. The main idea of our method is to generate the information of zero-shot triples before the training of the predicate classifier and thus make the original zero-shot triples non-zero-shot. Specifically, the missing information of zero-shot triples is generated by our proposed knowledge graph completion strategy and then integrated with visual features of images. Therefore, the predicate classification of zero-shot triples is no longer just regarded as a single visual classification task but also transformed into a prediction task of missing links in a knowledge graph. 
The experiments on the dataset Visual Genome demonstrate that our proposed method outperforms the state-of-the-art methods in popular zero-shot metrics (i.e., zR@N, ng-zR@N) for all popular SGG tasks.", "fno": "09859944", "keywords": [ "Computer Vision", "Feature Extraction", "Image Classification", "Object Detection", "Semantic Networks", "Zero Shot Triples Nonzero Shot", "Knowledge Graph Completion", "Zero Shot Scene Graph Generation", "SGG", "Visual Features", "Predicate Classification", "Training", "Measurement", "Visualization", "Correlation", "Semantics", "Genomics", "Task Analysis", "Scene Graph Generation", "Predicate Classification", "Zero Shot", "Knowledge Graph Completion" ], "authors": [ { "affiliation": "Shanghai Jiao Tong University,Department of Computer Science and Engineering,China", "fullName": "Xiang Yu", "givenName": "Xiang", "surname": "Yu", "__typename": "ArticleAuthorType" }, { "affiliation": "Shanghai Jiao Tong University,Department of Computer Science and Engineering,China", "fullName": "Ruoxin Chen", "givenName": "Ruoxin", "surname": "Chen", "__typename": "ArticleAuthorType" }, { "affiliation": "Shanghai Jiao Tong University,Department of Computer Science and Engineering,China", "fullName": "Jie Li", "givenName": "Jie", "surname": "Li", "__typename": "ArticleAuthorType" }, { "affiliation": "Shanghai Jiao Tong University,Department of Computer Science and Engineering,China", "fullName": "Jiawei Sun", "givenName": "Jiawei", "surname": "Sun", "__typename": "ArticleAuthorType" }, { "affiliation": "Shanghai Jiao Tong University,Department of Computer Science and Engineering,China", "fullName": "Shijing Yuan", "givenName": "Shijing", "surname": "Yuan", "__typename": "ArticleAuthorType" }, { "affiliation": "Shanghai Jiao Tong University,Department of Computer Science and Engineering,China", "fullName": "Huxiao Ji", "givenName": "Huxiao", "surname": "Ji", "__typename": "ArticleAuthorType" }, { "affiliation": "Shanghai Jiao Tong 
University,Department of Computer Science and Engineering,China", "fullName": "Xinyu Lu", "givenName": "Xinyu", "surname": "Lu", "__typename": "ArticleAuthorType" }, { "affiliation": "Shanghai Jiao Tong University,Department of Computer Science and Engineering,China", "fullName": "Chentao Wu", "givenName": "Chentao", "surname": "Wu", "__typename": "ArticleAuthorType" } ], "idPrefix": "icme", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "2022-07-01T00:00:00", "pubType": "proceedings", "pages": "1-6", "year": "2022", "issn": null, "isbn": "978-1-6654-8563-0", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "09859898", "articleId": "1G9EH0iIl4Q", "__typename": "AdjacentArticleType" }, "next": { "fno": "09859583", "articleId": "1G9DGnXxXdC", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/cvpr/2018/6420/0/642000d598", "title": "Zero-Shot Sketch-Image Hashing", "doi": null, "abstractUrl": "/proceedings-article/cvpr/2018/642000d598/17D45VUZMYR", "parentPublication": { "id": "proceedings/cvpr/2018/6420/0", "title": "2018 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvpr/2018/6420/0/642000h603", "title": "Preserving Semantic Relations for Zero-Shot Learning", "doi": null, "abstractUrl": "/proceedings-article/cvpr/2018/642000h603/17D45XwUAIy", "parentPublication": { "id": "proceedings/cvpr/2018/6420/0", "title": "2018 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/bdeim/2021/8288/0/828800a217", "title": "Bert-based Knowledge Graph Completion Algorithm for Few-Shot", "doi": null, "abstractUrl": 
"/proceedings-article/bdeim/2021/828800a217/1B4mkgrRe36", "parentPublication": { "id": "proceedings/bdeim/2021/8288/0", "title": "2021 2nd International Conference on Big Data Economy and Information Management (BDEIM)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iccv/2021/2812/0/281200i692", "title": "Semantics Disentangling for Generalized Zero-Shot Learning", "doi": null, "abstractUrl": "/proceedings-article/iccv/2021/281200i692/1BmLjfXgUiQ", "parentPublication": { "id": "proceedings/iccv/2021/2812/0", "title": "2021 IEEE/CVF International Conference on Computer Vision (ICCV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tp/2023/04/09858006", "title": "Debiased Scene Graph Generation for Dual Imbalance Learning", "doi": null, "abstractUrl": "/journal/tp/2023/04/09858006/1FSY5Czw3Kw", "parentPublication": { "id": "trans/tp", "title": "IEEE Transactions on Pattern Analysis & Machine Intelligence", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icpr/2022/9062/0/09956712", "title": "Zero-shot Scene Graph Generation with Relational Graph Neural Networks", "doi": null, "abstractUrl": "/proceedings-article/icpr/2022/09956712/1IHpXOouE7u", "parentPublication": { "id": "proceedings/icpr/2022/9062/0", "title": "2022 26th International Conference on Pattern Recognition (ICPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ictai/2022/9744/0/974400a391", "title": "A Zero-shot Learning Method with a Multi-Modal Knowledge Graph", "doi": null, "abstractUrl": "/proceedings-article/ictai/2022/974400a391/1MrFUR4sDSg", "parentPublication": { "id": "proceedings/ictai/2022/9744/0", "title": "2022 IEEE 34th International Conference on Tools with Artificial Intelligence (ICTAI)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": 
"proceedings/iccv/2019/4803/0/480300g081", "title": "Transductive Learning for Zero-Shot Object Detection", "doi": null, "abstractUrl": "/proceedings-article/iccv/2019/480300g081/1hVlw1Lmb96", "parentPublication": { "id": "proceedings/iccv/2019/4803/0", "title": "2019 IEEE/CVF International Conference on Computer Vision (ICCV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvpr/2020/7168/0/716800d713", "title": "Unbiased Scene Graph Generation From Biased Training", "doi": null, "abstractUrl": "/proceedings-article/cvpr/2020/716800d713/1m3o31iArJe", "parentPublication": { "id": "proceedings/cvpr/2020/7168/0", "title": "2020 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ictai/2021/0898/0/089800b050", "title": "Zero or few shot knowledge graph completions by text enhancement with multi-grained attention", "doi": null, "abstractUrl": "/proceedings-article/ictai/2021/089800b050/1zw61h9Ithu", "parentPublication": { "id": "proceedings/ictai/2021/0898/0", "title": "2021 IEEE 33rd International Conference on Tools with Artificial Intelligence (ICTAI)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "1H1gVMlkl32", "title": "2022 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)", "acronym": "cvpr", "groupId": "1000147", "volume": "0", "displayVolume": "0", "year": "2022", "__typename": "ProceedingType" }, "article": { "id": "1H1iflRfdEA", "doi": "10.1109/CVPR52688.2022.01515", "title": "Not All Relations are Equal: Mining Informative Labels for Scene Graph Generation", "normalizedTitle": "Not All Relations are Equal: Mining Informative Labels for Scene Graph Generation", "abstract": "Scene graph generation (SGG) aims to capture a wide variety of interactions between pairs of objects, which is essential for full scene understanding. Existing SGG methods trained on the entire set of relations fail to acquire complex reasoning about visual and textual correlations due to various biases in training data. Learning on trivial relations that indicate generic spatial configuration like &#x2018;on&#x2019; instead of informative relations such as &#x2018;parked on&#x2019; does not enforce this complex reasoning, harming generalization. To address this problem, we propose a novel framework for SGG training that exploits relation labels based on their informativeness. Our model-agnostic training procedure imputes missing informative relations for less informative samples in the training data and trains a SGG model on the imputed labels along with existing annotations. We show that this approach can successfully be used in conjunction with state-of-the-art SGG methods and improves their performance significantly in multiple metrics on the standard Visual Genome benchmark. Furthermore, we obtain considerable improvements for unseen triplets in a more challenging zero-shot setting.", "abstracts": [ { "abstractType": "Regular", "content": "Scene graph generation (SGG) aims to capture a wide variety of interactions between pairs of objects, which is essential for full scene understanding. 
Existing SGG methods trained on the entire set of relations fail to acquire complex reasoning about visual and textual correlations due to various biases in training data. Learning on trivial relations that indicate generic spatial configuration like &#x2018;on&#x2019; instead of informative relations such as &#x2018;parked on&#x2019; does not enforce this complex reasoning, harming generalization. To address this problem, we propose a novel framework for SGG training that exploits relation labels based on their informativeness. Our model-agnostic training procedure imputes missing informative relations for less informative samples in the training data and trains a SGG model on the imputed labels along with existing annotations. We show that this approach can successfully be used in conjunction with state-of-the-art SGG methods and improves their performance significantly in multiple metrics on the standard Visual Genome benchmark. Furthermore, we obtain considerable improvements for unseen triplets in a more challenging zero-shot setting.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "Scene graph generation (SGG) aims to capture a wide variety of interactions between pairs of objects, which is essential for full scene understanding. Existing SGG methods trained on the entire set of relations fail to acquire complex reasoning about visual and textual correlations due to various biases in training data. Learning on trivial relations that indicate generic spatial configuration like ‘on’ instead of informative relations such as ‘parked on’ does not enforce this complex reasoning, harming generalization. To address this problem, we propose a novel framework for SGG training that exploits relation labels based on their informativeness. Our model-agnostic training procedure imputes missing informative relations for less informative samples in the training data and trains a SGG model on the imputed labels along with existing annotations. 
We show that this approach can successfully be used in conjunction with state-of-the-art SGG methods and improves their performance significantly in multiple metrics on the standard Visual Genome benchmark. Furthermore, we obtain considerable improvements for unseen triplets in a more challenging zero-shot setting.", "fno": "694600p5575", "keywords": [ "Data Mining", "Genomics", "Graph Theory", "Learning Artificial Intelligence", "Model Agnostic Training Procedure", "Informative Relations", "Informative Samples", "Training Data", "SGG Model", "Imputed Labels", "Existing Annotations", "State Of The Art SGG Methods", "Mining Informative Labels", "Scene Graph Generation", "Scene Understanding", "Complex Reasoning", "Visual Correlations", "Textual Correlations", "Trivial Relations", "Generic Spatial Configuration", "SGG Training", "Relation Labels", "Training", "Measurement", "Visualization", "Training Data", "Data Models", "Cognition", "Pattern Recognition" ], "authors": [ { "affiliation": "School of Informatics, University of Edinburgh,UK", "fullName": "Arushi Goel", "givenName": "Arushi", "surname": "Goel", "__typename": "ArticleAuthorType" }, { "affiliation": "CFAR, IHPC, A*STAR,Singapore", "fullName": "Basura Fernando", "givenName": "Basura", "surname": "Fernando", "__typename": "ArticleAuthorType" }, { "affiliation": "School of Informatics, University of Edinburgh,UK", "fullName": "Frank Keller", "givenName": "Frank", "surname": "Keller", "__typename": "ArticleAuthorType" }, { "affiliation": "School of Informatics, University of Edinburgh,UK", "fullName": "Hakan Bilen", "givenName": "Hakan", "surname": "Bilen", "__typename": "ArticleAuthorType" } ], "idPrefix": "cvpr", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "2022-06-01T00:00:00", "pubType": "proceedings", "pages": "15575-15585", "year": "2022", "issn": null, "isbn": "978-1-6654-6946-3", "notes": null, "notesType": null, "__typename": "ArticleType" }, 
"webExtras": [ { "id": "1H1ifiT1yeI", "name": "pcvpr202269460-09879293s1-mm_694600p5575.zip", "size": "583 kB", "location": "https://www.computer.org/csdl/api/v1/extra/pcvpr202269460-09879293s1-mm_694600p5575.zip", "__typename": "WebExtraType" } ], "adjacentArticles": { "previous": { "fno": "694600p5565", "articleId": "1H0N7XcPS4E", "__typename": "AdjacentArticleType" }, "next": { "fno": "694600p5586", "articleId": "1H1nkFtin04", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/cvpr/2017/0457/0/0457a771", "title": "The Impact of Typicality for Informative Representative Selection", "doi": null, "abstractUrl": "/proceedings-article/cvpr/2017/0457a771/12OmNArbG1c", "parentPublication": { "id": "proceedings/cvpr/2017/0457/0", "title": "2017 IEEE Conference on Computer Vision and Pattern Recognition (CVPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icpr/2010/4109/0/4109c041", "title": "Using Spatial Relations for Graphical Symbol Description", "doi": null, "abstractUrl": "/proceedings-article/icpr/2010/4109c041/12OmNzzxurA", "parentPublication": { "id": "proceedings/icpr/2010/4109/0", "title": "Pattern Recognition, International Conference on", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iccv/2021/2812/0/281200q6363", "title": "From General to Specific: Informative Scene Graph Generation via Balance Adjustment", "doi": null, "abstractUrl": "/proceedings-article/iccv/2021/281200q6363/1BmFrjRtaPS", "parentPublication": { "id": "proceedings/iccv/2021/2812/0", "title": "2021 IEEE/CVF International Conference on Computer Vision (ICCV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvpr/2022/6946/0/694600t9415", "title": "Structured Sparse R-CNN for Direct Scene Graph Generation", "doi": null, "abstractUrl": 
"/proceedings-article/cvpr/2022/694600t9415/1H0KTEtRsje", "parentPublication": { "id": "proceedings/cvpr/2022/6946/0", "title": "2022 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvpr/2022/6946/0/694600s8847", "title": "The Devil is in the Labels: Noisy Label Correction for Robust Scene Graph Generation", "doi": null, "abstractUrl": "/proceedings-article/cvpr/2022/694600s8847/1H0LxkY0vXW", "parentPublication": { "id": "proceedings/cvpr/2022/6946/0", "title": "2022 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvpr/2020/7168/0/716800j420", "title": "Interactive Multi-Label CNN Learning With Partial Labels", "doi": null, "abstractUrl": "/proceedings-article/cvpr/2020/716800j420/1m3nXEI6nAs", "parentPublication": { "id": "proceedings/cvpr/2020/7168/0", "title": "2020 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvpr/2020/7168/0/716800d713", "title": "Unbiased Scene Graph Generation From Biased Training", "doi": null, "abstractUrl": "/proceedings-article/cvpr/2020/716800d713/1m3o31iArJe", "parentPublication": { "id": "proceedings/cvpr/2020/7168/0", "title": "2020 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvpr/2021/4509/0/450900a933", "title": "Multi-Label Learning from Single Positive Labels", "doi": null, "abstractUrl": "/proceedings-article/cvpr/2021/450900a933/1yeIVTYm12g", "parentPublication": { "id": "proceedings/cvpr/2021/4509/0", "title": "2021 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)", "__typename": "ParentPublication" }, 
"__typename": "RecommendedArticleType" }, { "id": "proceedings/cvpr/2021/4509/0/450900i726", "title": "Fine-grained Angular Contrastive Learning with Coarse Labels", "doi": null, "abstractUrl": "/proceedings-article/cvpr/2021/450900i726/1yeKYMkuxpu", "parentPublication": { "id": "proceedings/cvpr/2021/4509/0", "title": "2021 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvpr/2021/4509/0/450900h237", "title": "All Labels Are Not Created Equal: Enhancing Semi-supervision via Label Grouping and Co-training", "doi": null, "abstractUrl": "/proceedings-article/cvpr/2021/450900h237/1yeLKorSSOc", "parentPublication": { "id": "proceedings/cvpr/2021/4509/0", "title": "2021 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "1H1gVMlkl32", "title": "2022 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)", "acronym": "cvpr", "groupId": "1000147", "volume": "0", "displayVolume": "0", "year": "2022", "__typename": "ProceedingType" }, "article": { "id": "1H1lCef5GSY", "doi": "10.1109/CVPR52688.2022.01887", "title": "HL-Net: Heterophily Learning Network for Scene Graph Generation", "normalizedTitle": "HL-Net: Heterophily Learning Network for Scene Graph Generation", "abstract": "Scene graph generation (SGG) aims to detect objects and predict their pairwise relationships within an image. Current SGG methods typically utilize graph neural networks (GNNs) to acquire context information between objects/relationships. Despite their effectiveness, however, current SGG methods only assume scene graph homophily while ignoring heterophily. Accordingly, in this paper, we propose a novel Heterophily Learning Network (HL-Net) to comprehensively explore the homophily and heterophily between objects/relationships in scene graphs. More specifically, HL-Net comprises the following 1) an adaptive reweighting transformer module, which adaptively integrates the information from different layers to exploit both the heterophily and homophily in objects; 2) a relationship feature propagation module that efficiently explores the connections between relationships by considering heterophily in order to refine the relationship representation; 3) a heterophily-aware message-passing scheme to further distinguish the heterophily and homophily between objects/relationships, thereby facilitating improved message passing in graphs. We conducted extensive experiments on two public datasets: Visual Genome (VG) and Open Images (OI). The experimental results demonstrate the superiority of our proposed HL-Net over existing state-of-the-art approaches. 
In more detail, HL-Net outperforms the second-best competitors by 2.1&#x0025; on the VG dataset for scene graph classification and 1.2&#x0025; on the OI dataset for the final score. Code is available at https://github.com/simI3/HL-Net.", "abstracts": [ { "abstractType": "Regular", "content": "Scene graph generation (SGG) aims to detect objects and predict their pairwise relationships within an image. Current SGG methods typically utilize graph neural networks (GNNs) to acquire context information between objects/relationships. Despite their effectiveness, however, current SGG methods only assume scene graph homophily while ignoring heterophily. Accordingly, in this paper, we propose a novel Heterophily Learning Network (HL-Net) to comprehensively explore the homophily and heterophily between objects/relationships in scene graphs. More specifically, HL-Net comprises the following 1) an adaptive reweighting transformer module, which adaptively integrates the information from different layers to exploit both the heterophily and homophily in objects; 2) a relationship feature propagation module that efficiently explores the connections between relationships by considering heterophily in order to refine the relationship representation; 3) a heterophily-aware message-passing scheme to further distinguish the heterophily and homophily between objects/relationships, thereby facilitating improved message passing in graphs. We conducted extensive experiments on two public datasets: Visual Genome (VG) and Open Images (OI). The experimental results demonstrate the superiority of our proposed HL-Net over existing state-of-the-art approaches. In more detail, HL-Net outperforms the second-best competitors by 2.1&#x0025; on the VG dataset for scene graph classification and 1.2&#x0025; on the OI dataset for the final score. 
Code is available at https://github.com/simI3/HL-Net.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "Scene graph generation (SGG) aims to detect objects and predict their pairwise relationships within an image. Current SGG methods typically utilize graph neural networks (GNNs) to acquire context information between objects/relationships. Despite their effectiveness, however, current SGG methods only assume scene graph homophily while ignoring heterophily. Accordingly, in this paper, we propose a novel Heterophily Learning Network (HL-Net) to comprehensively explore the homophily and heterophily between objects/relationships in scene graphs. More specifically, HL-Net comprises the following 1) an adaptive reweighting transformer module, which adaptively integrates the information from different layers to exploit both the heterophily and homophily in objects; 2) a relationship feature propagation module that efficiently explores the connections between relationships by considering heterophily in order to refine the relationship representation; 3) a heterophily-aware message-passing scheme to further distinguish the heterophily and homophily between objects/relationships, thereby facilitating improved message passing in graphs. We conducted extensive experiments on two public datasets: Visual Genome (VG) and Open Images (OI). The experimental results demonstrate the superiority of our proposed HL-Net over existing state-of-the-art approaches. In more detail, HL-Net outperforms the second-best competitors by 2.1% on the VG dataset for scene graph classification and 1.2% on the OI dataset for the final score. 
Code is available at https://github.com/simI3/HL-Net.", "fno": "694600t9454", "keywords": [ "Graph Theory", "Image Classification", "Learning Artificial Intelligence", "Message Passing", "Neural Nets", "Relationship Representation", "Heterophily Aware Message Passing Scheme", "Scene Graph Generation", "Pairwise Relationships", "SGG", "Graph Neural Networks", "Scene Graph Homophily", "HL Net", "Heterophily Learning Network", "Relationship Feature Propagation Module", "Visual Genome", "Open Images", "Visualization", "Computer Vision", "Image Analysis", "Codes", "Message Passing", "Genomics", "Transformers" ], "authors": [ { "affiliation": "South China University of Technology", "fullName": "Xin Lin", "givenName": "Xin", "surname": "Lin", "__typename": "ArticleAuthorType" }, { "affiliation": "South China University of Technology", "fullName": "Changxing Ding", "givenName": "Changxing", "surname": "Ding", "__typename": "ArticleAuthorType" }, { "affiliation": "JD Explore Academy", "fullName": "Yibing Zhan", "givenName": "Yibing", "surname": "Zhan", "__typename": "ArticleAuthorType" }, { "affiliation": "South China University of Technology", "fullName": "Zijian Li", "givenName": "Zijian", "surname": "Li", "__typename": "ArticleAuthorType" }, { "affiliation": "JD Explore Academy", "fullName": "Dacheng Tao", "givenName": "Dacheng", "surname": "Tao", "__typename": "ArticleAuthorType" } ], "idPrefix": "cvpr", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "2022-06-01T00:00:00", "pubType": "proceedings", "pages": "19454-19463", "year": "2022", "issn": null, "isbn": "978-1-6654-6946-3", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [ { "id": "1H1lCb7dF2U", "name": "pcvpr202269460-09878538s1-mm_694600t9454.zip", "size": "261 kB", "location": "https://www.computer.org/csdl/api/v1/extra/pcvpr202269460-09878538s1-mm_694600t9454.zip", "__typename": "WebExtraType" } ], "adjacentArticles": { 
"previous": { "fno": "694600t9445", "articleId": "1H1i1vLR0CA", "__typename": "AdjacentArticleType" }, "next": { "fno": "694600t9464", "articleId": "1H0LgZ9EgDu", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/icme/2022/8563/0/09859970", "title": "Multi-Scale Graph Attention Network for Scene Graph Generation", "doi": null, "abstractUrl": "/proceedings-article/icme/2022/09859970/1G9EpEewD16", "parentPublication": { "id": "proceedings/icme/2022/8563/0", "title": "2022 IEEE International Conference on Multimedia and Expo (ICME)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icme/2022/8563/0/09859944", "title": "Zero-Shot Scene Graph Generation with Knowledge Graph Completion", "doi": null, "abstractUrl": "/proceedings-article/icme/2022/09859944/1G9EuqL6nzG", "parentPublication": { "id": "proceedings/icme/2022/8563/0", "title": "2022 IEEE International Conference on Multimedia and Expo (ICME)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvpr/2022/6946/0/694600t9435", "title": "RU-Net: Regularized Unrolling Network for Scene Graph Generation", "doi": null, "abstractUrl": "/proceedings-article/cvpr/2022/694600t9435/1H1j3IGn6WQ", "parentPublication": { "id": "proceedings/cvpr/2022/6946/0", "title": "2022 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icpr/2022/9062/0/09956712", "title": "Zero-shot Scene Graph Generation with Relational Graph Neural Networks", "doi": null, "abstractUrl": "/proceedings-article/icpr/2022/09956712/1IHpXOouE7u", "parentPublication": { "id": "proceedings/icpr/2022/9062/0", "title": "2022 26th International Conference on Pattern Recognition (ICPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": 
"trans/tg/5555/01/09947006", "title": "Explore Contextual Information for 3D Scene Graph Generation", "doi": null, "abstractUrl": "/journal/tg/5555/01/09947006/1Idr5neUL5e", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/wacv/2023/9346/0/934600a052", "title": "Composite Relationship Fields with Transformers for Scene Graph Generation", "doi": null, "abstractUrl": "/proceedings-article/wacv/2023/934600a052/1KxUDlkTf8I", "parentPublication": { "id": "proceedings/wacv/2023/9346/0", "title": "2023 IEEE/CVF Winter Conference on Applications of Computer Vision (WACV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvpr/2020/7168/0/716800d743", "title": "GPS-Net: Graph Property Sensing Network for Scene Graph Generation", "doi": null, "abstractUrl": "/proceedings-article/cvpr/2020/716800d743/1m3o2oONVsY", "parentPublication": { "id": "proceedings/cvpr/2020/7168/0", "title": "2020 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvpr/2020/7168/0/716800d713", "title": "Unbiased Scene Graph Generation From Biased Training", "doi": null, "abstractUrl": "/proceedings-article/cvpr/2020/716800d713/1m3o31iArJe", "parentPublication": { "id": "proceedings/cvpr/2020/7168/0", "title": "2020 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icme/2021/3864/0/09428472", "title": "Relationship-Aware Primal-Dual Graph Attention Network For Scene Graph Generation", "doi": null, "abstractUrl": "/proceedings-article/icme/2021/09428472/1uilSfRdZcs", "parentPublication": { "id": "proceedings/icme/2021/3864/0", "title": "2021 IEEE 
International Conference on Multimedia and Expo (ICME)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvprw/2021/4899/0/489900b663", "title": "Target-Tailored Source-Transformation for Scene Graph Generation", "doi": null, "abstractUrl": "/proceedings-article/cvprw/2021/489900b663/1yJYwdXPYYg", "parentPublication": { "id": "proceedings/cvprw/2021/4899/0", "title": "2021 IEEE/CVF Conference on Computer Vision and Pattern Recognition Workshops (CVPRW)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "1IHotVZum6Q", "title": "2022 26th International Conference on Pattern Recognition (ICPR)", "acronym": "icpr", "groupId": "9956007", "volume": "0", "displayVolume": "0", "year": "2022", "__typename": "ProceedingType" }, "article": { "id": "1IHpXOouE7u", "doi": "10.1109/ICPR56361.2022.9956712", "title": "Zero-shot Scene Graph Generation with Relational Graph Neural Networks", "normalizedTitle": "Zero-shot Scene Graph Generation with Relational Graph Neural Networks", "abstract": "Existing scene graph generation (SGG) methods are far from practical, primarily due to their poor performance on predicting zero-shot (i.e., unseen) subject-predicate-object triples. We observe that these SGG methods treat images along with the triples in them independently and thus fail to consider the complex and hidden information that is inherently implicit in the triples of other images. To this effect, our paper proposes a novel encoder-decoder SGG framework to leverage the semantic correlations between the triples of different images into the prediction of a zero-shot triple. Specifically, the encoder aggregates the triples in each image of training set into a large knowledge graph and learns the entity embeddings that capture the features of their neighborhoods with a relational graph neural network. The neighborhood-aware embeddings are then fed into the vision-based decoder to predict the predicates in images. Extensive experiments on the popular benchmark Visual Genome demonstrate that our proposed method outperforms the state-of-the-art methods in popular zero-shot metrics (i.e., zR@N, ngzR@N) for all SGG tasks.", "abstracts": [ { "abstractType": "Regular", "content": "Existing scene graph generation (SGG) methods are far from practical, primarily due to their poor performance on predicting zero-shot (i.e., unseen) subject-predicate-object triples. 
We observe that these SGG methods treat images along with the triples in them independently and thus fail to consider the complex and hidden information that is inherently implicit in the triples of other images. To this effect, our paper proposes a novel encoder-decoder SGG framework to leverage the semantic correlations between the triples of different images into the prediction of a zero-shot triple. Specifically, the encoder aggregates the triples in each image of training set into a large knowledge graph and learns the entity embeddings that capture the features of their neighborhoods with a relational graph neural network. The neighborhood-aware embeddings are then fed into the vision-based decoder to predict the predicates in images. Extensive experiments on the popular benchmark Visual Genome demonstrate that our proposed method outperforms the state-of-the-art methods in popular zero-shot metrics (i.e., zR@N, ngzR@N) for all SGG tasks.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "Existing scene graph generation (SGG) methods are far from practical, primarily due to their poor performance on predicting zero-shot (i.e., unseen) subject-predicate-object triples. We observe that these SGG methods treat images along with the triples in them independently and thus fail to consider the complex and hidden information that is inherently implicit in the triples of other images. To this effect, our paper proposes a novel encoder-decoder SGG framework to leverage the semantic correlations between the triples of different images into the prediction of a zero-shot triple. Specifically, the encoder aggregates the triples in each image of training set into a large knowledge graph and learns the entity embeddings that capture the features of their neighborhoods with a relational graph neural network. The neighborhood-aware embeddings are then fed into the vision-based decoder to predict the predicates in images. 
Extensive experiments on the popular benchmark Visual Genome demonstrate that our proposed method outperforms the state-of-the-art methods in popular zero-shot metrics (i.e., zR@N, ngzR@N) for all SGG tasks.", "fno": "09956712", "keywords": [ "Genomics", "Graph Theory", "Image Processing", "Neural Nets", "Complex Information", "Encoder Decoder SGG Framework", "Hidden Information", "Knowledge Graph", "Neighborhood Aware Embeddings", "Relational Graph Neural Network", "Scene Graph Generation Methods", "Semantic Correlations", "SGG Methods", "Subject Predicate Object Triples", "Vision Based Decoder", "Visual Genome", "Zero Shot Metrics", "Zero Shot Scene Graph Generation", "Zero Shot Triple", "Training", "Measurement", "Visualization", "Correlation", "Semantics", "Genomics", "Feature Extraction" ], "authors": [ { "affiliation": "Shanghai Jiao Tong University,School of Computer Science and Engineering,Shanghai,China", "fullName": "Xiang Yu", "givenName": "Xiang", "surname": "Yu", "__typename": "ArticleAuthorType" }, { "affiliation": "Shanghai Jiao Tong University,School of Computer Science and Engineering,Shanghai,China", "fullName": "Jie Li", "givenName": "Jie", "surname": "Li", "__typename": "ArticleAuthorType" }, { "affiliation": "Shanghai Jiao Tong University,School of Computer Science and Engineering,Shanghai,China", "fullName": "Shijing Yuan", "givenName": "Shijing", "surname": "Yuan", "__typename": "ArticleAuthorType" }, { "affiliation": "Shanghai Jiao Tong University,School of Computer Science and Engineering,Shanghai,China", "fullName": "Chao Wang", "givenName": "Chao", "surname": "Wang", "__typename": "ArticleAuthorType" }, { "affiliation": "Shanghai Jiao Tong University,School of Computer Science and Engineering,Shanghai,China", "fullName": "Chentao Wu", "givenName": "Chentao", "surname": "Wu", "__typename": "ArticleAuthorType" } ], "idPrefix": "icpr", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": 
"2022-08-01T00:00:00", "pubType": "proceedings", "pages": "1894-1900", "year": "2022", "issn": null, "isbn": "978-1-6654-9062-7", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "09956252", "articleId": "1IHpjCDUTcc", "__typename": "AdjacentArticleType" }, "next": { "fno": "09956061", "articleId": "1IHov0UQFvW", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/cvpr/2014/5118/0/5118c441", "title": "COSTA: Co-Occurrence Statistics for Zero-Shot Classification", "doi": null, "abstractUrl": "/proceedings-article/cvpr/2014/5118c441/12OmNqGA50v", "parentPublication": { "id": "proceedings/cvpr/2014/5118/0", "title": "2014 IEEE Conference on Computer Vision and Pattern Recognition (CVPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvpr/2018/6420/0/642000b004", "title": "A Generative Adversarial Approach for Zero-Shot Learning from Noisy Texts", "doi": null, "abstractUrl": "/proceedings-article/cvpr/2018/642000b004/17D45VUZMYQ", "parentPublication": { "id": "proceedings/cvpr/2018/6420/0", "title": "2018 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icdm/2018/9159/0/08594904", "title": "Zero-Shot Learning: An Energy Based Approach", "doi": null, "abstractUrl": "/proceedings-article/icdm/2018/08594904/17D45WGGoMy", "parentPublication": { "id": "proceedings/icdm/2018/9159/0", "title": "2018 IEEE International Conference on Data Mining (ICDM)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iccv/2021/2812/0/281200i692", "title": "Semantics Disentangling for Generalized Zero-Shot Learning", "doi": null, "abstractUrl": "/proceedings-article/iccv/2021/281200i692/1BmLjfXgUiQ", "parentPublication": { 
"id": "proceedings/iccv/2021/2812/0", "title": "2021 IEEE/CVF International Conference on Computer Vision (ICCV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icme/2022/8563/0/09859944", "title": "Zero-Shot Scene Graph Generation with Knowledge Graph Completion", "doi": null, "abstractUrl": "/proceedings-article/icme/2022/09859944/1G9EuqL6nzG", "parentPublication": { "id": "proceedings/icme/2022/8563/0", "title": "2022 IEEE International Conference on Multimedia and Expo (ICME)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvpr/2022/6946/0/694600j306", "title": "VGSE: Visually-Grounded Semantic Embeddings for Zero-Shot Learning", "doi": null, "abstractUrl": "/proceedings-article/cvpr/2022/694600j306/1H1j2mN3nuE", "parentPublication": { "id": "proceedings/cvpr/2022/6946/0", "title": "2022 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icdmw/2022/4609/0/460900a926", "title": "Zero-shot Object Detection Through Vision-Language Embedding Alignment", "doi": null, "abstractUrl": "/proceedings-article/icdmw/2022/460900a926/1KBr0nPnwuA", "parentPublication": { "id": "proceedings/icdmw/2022/4609/0", "title": "2022 IEEE International Conference on Data Mining Workshops (ICDMW)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ictai/2022/9744/0/974400a391", "title": "A Zero-shot Learning Method with a Multi-Modal Knowledge Graph", "doi": null, "abstractUrl": "/proceedings-article/ictai/2022/974400a391/1MrFUR4sDSg", "parentPublication": { "id": "proceedings/ictai/2022/9744/0", "title": "2022 IEEE 34th International Conference on Tools with Artificial Intelligence (ICTAI)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": 
"proceedings/cvpr/2020/7168/0/716800j270", "title": "Hyperbolic Visual Embedding Learning for Zero-Shot Recognition", "doi": null, "abstractUrl": "/proceedings-article/cvpr/2020/716800j270/1m3olTHyUWk", "parentPublication": { "id": "proceedings/cvpr/2020/7168/0", "title": "2020 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ictai/2021/0898/0/089800b050", "title": "Zero or few shot knowledge graph completions by text enhancement with multi-grained attention", "doi": null, "abstractUrl": "/proceedings-article/ictai/2021/089800b050/1zw61h9Ithu", "parentPublication": { "id": "proceedings/ictai/2021/0898/0", "title": "2021 IEEE 33rd International Conference on Tools with Artificial Intelligence (ICTAI)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "1m3n9N02qgE", "title": "2020 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)", "acronym": "cvpr", "groupId": "1000147", "volume": "0", "displayVolume": "0", "year": "2020", "__typename": "ProceedingType" }, "article": { "id": "1m3o2oONVsY", "doi": "10.1109/CVPR42600.2020.00380", "title": "GPS-Net: Graph Property Sensing Network for Scene Graph Generation", "normalizedTitle": "GPS-Net: Graph Property Sensing Network for Scene Graph Generation", "abstract": "Scene graph generation (SGG) aims to detect objects in an image along with their pairwise relationships. There are three key properties of scene graph that have been underexplored in recent works: namely, the edge direction information, the difference in priority between nodes, and the long-tailed distribution of relationships. Accordingly, in this paper, we propose a Graph Property Sensing Network (GPS-Net) that fully explores these three properties for SGG. First, we propose a novel message passing module that augments the node feature with node-specific contextual information and encodes the edge direction information via a tri-linear model. Second, we introduce a node priority sensitive loss to reflect the difference in priority between nodes during training. This is achieved by designing a mapping function that adjusts the focusing parameter in the focal loss. Third, since the frequency of relationships is affected by the long-tailed distribution problem, we mitigate this issue by first softening the distribution and then enabling it to be adjusted for each subject-object pair according to their visual appearance. Systematic experiments demonstrate the effectiveness of the proposed techniques. Moreover, GPS-Net achieves state-of-the-art performance on three popular databases: VG, OI, and VRD by significant gains under various settings and metrics. 
The code and models are available at https://github.com/taksau/GPS-Net.", "abstracts": [ { "abstractType": "Regular", "content": "Scene graph generation (SGG) aims to detect objects in an image along with their pairwise relationships. There are three key properties of scene graph that have been underexplored in recent works: namely, the edge direction information, the difference in priority between nodes, and the long-tailed distribution of relationships. Accordingly, in this paper, we propose a Graph Property Sensing Network (GPS-Net) that fully explores these three properties for SGG. First, we propose a novel message passing module that augments the node feature with node-specific contextual information and encodes the edge direction information via a tri-linear model. Second, we introduce a node priority sensitive loss to reflect the difference in priority between nodes during training. This is achieved by designing a mapping function that adjusts the focusing parameter in the focal loss. Third, since the frequency of relationships is affected by the long-tailed distribution problem, we mitigate this issue by first softening the distribution and then enabling it to be adjusted for each subject-object pair according to their visual appearance. Systematic experiments demonstrate the effectiveness of the proposed techniques. Moreover, GPS-Net achieves state-of-the-art performance on three popular databases: VG, OI, and VRD by significant gains under various settings and metrics. The code and models are available at https://github.com/taksau/GPS-Net.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "Scene graph generation (SGG) aims to detect objects in an image along with their pairwise relationships. There are three key properties of scene graph that have been underexplored in recent works: namely, the edge direction information, the difference in priority between nodes, and the long-tailed distribution of relationships. 
Accordingly, in this paper, we propose a Graph Property Sensing Network (GPS-Net) that fully explores these three properties for SGG. First, we propose a novel message passing module that augments the node feature with node-specific contextual information and encodes the edge direction information via a tri-linear model. Second, we introduce a node priority sensitive loss to reflect the difference in priority between nodes during training. This is achieved by designing a mapping function that adjusts the focusing parameter in the focal loss. Third, since the frequency of relationships is affected by the long-tailed distribution problem, we mitigate this issue by first softening the distribution and then enabling it to be adjusted for each subject-object pair according to their visual appearance. Systematic experiments demonstrate the effectiveness of the proposed techniques. Moreover, GPS-Net achieves state-of-the-art performance on three popular databases: VG, OI, and VRD by significant gains under various settings and metrics. 
The code and models are available at https://github.com/taksau/GPS-Net.", "fno": "716800d743", "keywords": [ "Graph Theory", "Message Passing", "Object Detection", "Scene Graph Generation", "SGG", "Pairwise Relationships", "Edge Direction Information", "Node Feature", "Node Specific Contextual Information", "Node Priority Sensitive Loss", "Long Tailed Distribution Problem", "GPS Net", "Graph Property Sensing Network", "Image Object Detection", "Mapping Function", "Focal Loss", "Subject Object Pair", "Visual Appearance", "VRD", "Message Passing", "Context Modeling", "Mathematical Model", "Ear", "Visualization", "Message Passing", "Dogs", "Legged Locomotion" ], "authors": [ { "affiliation": "School of Electronic and Information Engineering, South China University of Technology", "fullName": "Xin Lin", "givenName": "Xin", "surname": "Lin", "__typename": "ArticleAuthorType" }, { "affiliation": "School of Electronic and Information Engineering, South China University of Technology", "fullName": "Changxing Ding", "givenName": "Changxing", "surname": "Ding", "__typename": "ArticleAuthorType" }, { "affiliation": "School of Electronic and Information Engineering, South China University of Technology", "fullName": "Jinquan Zeng", "givenName": "Jinquan", "surname": "Zeng", "__typename": "ArticleAuthorType" }, { "affiliation": "UBTECH Sydney AI Centre, School of Computer Science, Faculty of Engineering, The University of Sydney, Australia", "fullName": "Dacheng Tao", "givenName": "Dacheng", "surname": "Tao", "__typename": "ArticleAuthorType" } ], "idPrefix": "cvpr", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "2020-06-01T00:00:00", "pubType": "proceedings", "pages": "3743-3752", "year": "2020", "issn": null, "isbn": "978-1-7281-7168-5", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "716800d733", "articleId": "1m3noyLGP5u", "__typename": 
"AdjacentArticleType" }, "next": { "fno": "716800d753", "articleId": "1m3ooUhHlVC", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/aiccsa/2008/1967/0/04493564", "title": "A critical assessment for RINEX data from OS NET for GPS accuracy improvement", "doi": null, "abstractUrl": "/proceedings-article/aiccsa/2008/04493564/12OmNAHEpAK", "parentPublication": { "id": "proceedings/aiccsa/2008/1967/0", "title": "2008 IEEE/ACS International Conference on Computer Systems and Applications", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icic/2011/688/0/05954500", "title": "Research on the Method of Eliminating Gross Error of GPS Output Information", "doi": null, "abstractUrl": "/proceedings-article/icic/2011/05954500/12OmNwDACwj", "parentPublication": { "id": "proceedings/icic/2011/688/0", "title": "2011 Fourth International Conference on Information and Computing (ICIC)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icws/2017/0752/0/0752a692", "title": "Learning Transportation Mode Choice for Context-Aware Services with Directed-Graph-Guided Fused Lasso from GPS Trajectory Data", "doi": null, "abstractUrl": "/proceedings-article/icws/2017/0752a692/12OmNywxlO5", "parentPublication": { "id": "proceedings/icws/2017/0752/0", "title": "2017 IEEE International Conference on Web Services (ICWS)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tp/2023/04/09858006", "title": "Debiased Scene Graph Generation for Dual Imbalance Learning", "doi": null, "abstractUrl": "/journal/tp/2023/04/09858006/1FSY5Czw3Kw", "parentPublication": { "id": "trans/tp", "title": "IEEE Transactions on Pattern Analysis & Machine Intelligence", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvpr/2022/6946/0/694600t9435", 
"title": "RU-Net: Regularized Unrolling Network for Scene Graph Generation", "doi": null, "abstractUrl": "/proceedings-article/cvpr/2022/694600t9435/1H1j3IGn6WQ", "parentPublication": { "id": "proceedings/cvpr/2022/6946/0", "title": "2022 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvpr/2022/6946/0/694600t9454", "title": "HL-Net: Heterophily Learning Network for Scene Graph Generation", "doi": null, "abstractUrl": "/proceedings-article/cvpr/2022/694600t9454/1H1lCef5GSY", "parentPublication": { "id": "proceedings/cvpr/2022/6946/0", "title": "2022 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/5555/01/09947006", "title": "Explore Contextual Information for 3D Scene Graph Generation", "doi": null, "abstractUrl": "/journal/tg/5555/01/09947006/1Idr5neUL5e", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvpr/2020/7168/0/716800d713", "title": "Unbiased Scene Graph Generation From Biased Training", "doi": null, "abstractUrl": "/proceedings-article/cvpr/2020/716800d713/1m3o31iArJe", "parentPublication": { "id": "proceedings/cvpr/2020/7168/0", "title": "2020 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvprw/2021/4899/0/489900c150", "title": "BGT-Net: Bidirectional GRU Transformer Network for Scene Graph Generation", "doi": null, "abstractUrl": "/proceedings-article/cvprw/2021/489900c150/1yJYlKsMo3C", "parentPublication": { "id": "proceedings/cvprw/2021/4899/0", "title": "2021 IEEE/CVF Conference on Computer Vision and Pattern Recognition 
Workshops (CVPRW)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvpr/2021/4509/0/450900l1104", "title": "Bipartite Graph Network with Adaptive Message Passing for Unbiased Scene Graph Generation", "doi": null, "abstractUrl": "/proceedings-article/cvpr/2021/450900l1104/1yeJ9bKPSqQ", "parentPublication": { "id": "proceedings/cvpr/2021/4509/0", "title": "2021 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "1m3n9N02qgE", "title": "2020 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)", "acronym": "cvpr", "groupId": "1000147", "volume": "0", "displayVolume": "0", "year": "2020", "__typename": "ProceedingType" }, "article": { "id": "1m3o31iArJe", "doi": "10.1109/CVPR42600.2020.00377", "title": "Unbiased Scene Graph Generation From Biased Training", "normalizedTitle": "Unbiased Scene Graph Generation From Biased Training", "abstract": "Today's scene graph generation (SGG) task is still far from practical, mainly due to the severe training bias, e.g., collapsing diverse \"human walk on / sit on / lay on beach\" into \"human on beach\". Given such SGG, the down-stream tasks such as VQA can hardly infer better scene structures than merely a bag of objects. However, debiasing in SGG is not trivial because traditional debiasing methods cannot distinguish between the good and bad bias, e.g., good context prior (e.g., \"person read book\" rather than \"eat\") and bad long-tailed bias (e.g., \"near\" dominating \"behind / in front of\"). In this paper, we present a novel SGG framework based on causal inference but not the conventional likelihood. We first build a causal graph for SGG, and perform traditional biased training with the graph. Then, we propose to draw the counterfactual causality from the trained graph to infer the effect from the bad bias, which should be removed. In particular, we use Total Direct Effect (TDE) as the proposed final predicate score for unbiased SGG. Note that our framework is agnostic to any SGG model and thus can be widely applied in the community who seeks unbiased predictions. 
By using the proposed Scene Graph Diagnosis toolkit on the SGG benchmark Visual Genome and several prevailing models, we observed significant improvements over the previous state-of-the-art methods.", "abstracts": [ { "abstractType": "Regular", "content": "Today's scene graph generation (SGG) task is still far from practical, mainly due to the severe training bias, e.g., collapsing diverse \"human walk on / sit on / lay on beach\" into \"human on beach\". Given such SGG, the down-stream tasks such as VQA can hardly infer better scene structures than merely a bag of objects. However, debiasing in SGG is not trivial because traditional debiasing methods cannot distinguish between the good and bad bias, e.g., good context prior (e.g., \"person read book\" rather than \"eat\") and bad long-tailed bias (e.g., \"near\" dominating \"behind / in front of\"). In this paper, we present a novel SGG framework based on causal inference but not the conventional likelihood. We first build a causal graph for SGG, and perform traditional biased training with the graph. Then, we propose to draw the counterfactual causality from the trained graph to infer the effect from the bad bias, which should be removed. In particular, we use Total Direct Effect (TDE) as the proposed final predicate score for unbiased SGG. Note that our framework is agnostic to any SGG model and thus can be widely applied in the community who seeks unbiased predictions. By using the proposed Scene Graph Diagnosis toolkit on the SGG benchmark Visual Genome and several prevailing models, we observed significant improvements over the previous state-of-the-art methods.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "Today's scene graph generation (SGG) task is still far from practical, mainly due to the severe training bias, e.g., collapsing diverse \"human walk on / sit on / lay on beach\" into \"human on beach\". 
Given such SGG, the down-stream tasks such as VQA can hardly infer better scene structures than merely a bag of objects. However, debiasing in SGG is not trivial because traditional debiasing methods cannot distinguish between the good and bad bias, e.g., good context prior (e.g., \"person read book\" rather than \"eat\") and bad long-tailed bias (e.g., \"near\" dominating \"behind / in front of\"). In this paper, we present a novel SGG framework based on causal inference but not the conventional likelihood. We first build a causal graph for SGG, and perform traditional biased training with the graph. Then, we propose to draw the counterfactual causality from the trained graph to infer the effect from the bad bias, which should be removed. In particular, we use Total Direct Effect (TDE) as the proposed final predicate score for unbiased SGG. Note that our framework is agnostic to any SGG model and thus can be widely applied in the community who seeks unbiased predictions. By using the proposed Scene Graph Diagnosis toolkit on the SGG benchmark Visual Genome and several prevailing models, we observed significant improvements over the previous state-of-the-art methods.", "fno": "716800d713", "keywords": [ "Genomics", "Graph Theory", "Object Detection", "Scene Graph Generation Task", "Training Bias", "Diverse Human Walk", "Down Stream Tasks", "Scene Structures", "Debiasing Methods", "Causal Inference", "Causal Graph", "Biased Training", "Counterfactual Causality", "Trained Graph", "Unbiased SGG", "SGG Model", "Scene Graph Diagnosis Toolkit", "SGG Benchmark Visual Genome", "Unbiased Scene Graph Generation", "Visualization", "Training", "Task Analysis", "Predictive Models", "Dogs", "Cognition", "Genomics" ], "authors": [ { "affiliation": "Nanyang Technological University", "fullName": "Kaihua Tang", "givenName": "Kaihua", "surname": "Tang", "__typename": "ArticleAuthorType" }, { "affiliation": "Renmin University of China", "fullName": "Yulei Niu", "givenName": "Yulei", 
"surname": "Niu", "__typename": "ArticleAuthorType" }, { "affiliation": "Nanyang Technological University; Damo Academy, Alibaba Group", "fullName": "Jianqiang Huang", "givenName": "Jianqiang", "surname": "Huang", "__typename": "ArticleAuthorType" }, { "affiliation": "Tsinghua University", "fullName": "Jiaxin Shi", "givenName": "Jiaxin", "surname": "Shi", "__typename": "ArticleAuthorType" }, { "affiliation": "Nanyang Technological University", "fullName": "Hanwang Zhang", "givenName": "Hanwang", "surname": "Zhang", "__typename": "ArticleAuthorType" } ], "idPrefix": "cvpr", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "2020-06-01T00:00:00", "pubType": "proceedings", "pages": "3713-3722", "year": "2020", "issn": null, "isbn": "978-1-7281-7168-5", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "716800d703", "articleId": "1m3orL1gC88", "__typename": "AdjacentArticleType" }, "next": { "fno": "716800d723", "articleId": "1m3o96JH440", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "trans/tp/2023/04/09858006", "title": "Debiased Scene Graph Generation for Dual Imbalance Learning", "doi": null, "abstractUrl": "/journal/tp/2023/04/09858006/1FSY5Czw3Kw", "parentPublication": { "id": "trans/tp", "title": "IEEE Transactions on Pattern Analysis & Machine Intelligence", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icme/2022/8563/0/09859944", "title": "Zero-Shot Scene Graph Generation with Knowledge Graph Completion", "doi": null, "abstractUrl": "/proceedings-article/icme/2022/09859944/1G9EuqL6nzG", "parentPublication": { "id": "proceedings/icme/2022/8563/0", "title": "2022 IEEE International Conference on Multimedia and Expo (ICME)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": 
"proceedings/cvpr/2022/6946/0/694600t9415", "title": "Structured Sparse R-CNN for Direct Scene Graph Generation", "doi": null, "abstractUrl": "/proceedings-article/cvpr/2022/694600t9415/1H0KTEtRsje", "parentPublication": { "id": "proceedings/cvpr/2022/6946/0", "title": "2022 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvpr/2022/6946/0/694600t9405", "title": "Stacked Hybrid-Attention and Group Collaborative Learning for Unbiased Scene Graph Generation", "doi": null, "abstractUrl": "/proceedings-article/cvpr/2022/694600t9405/1H1iRUH8DvO", "parentPublication": { "id": "proceedings/cvpr/2022/6946/0", "title": "2022 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvpr/2022/6946/0/694600t9454", "title": "HL-Net: Heterophily Learning Network for Scene Graph Generation", "doi": null, "abstractUrl": "/proceedings-article/cvpr/2022/694600t9454/1H1lCef5GSY", "parentPublication": { "id": "proceedings/cvpr/2022/6946/0", "title": "2022 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icpr/2022/9062/0/09956712", "title": "Zero-shot Scene Graph Generation with Relational Graph Neural Networks", "doi": null, "abstractUrl": "/proceedings-article/icpr/2022/09956712/1IHpXOouE7u", "parentPublication": { "id": "proceedings/icpr/2022/9062/0", "title": "2022 26th International Conference on Pattern Recognition (ICPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/5555/01/09947006", "title": "Explore Contextual Information for 3D Scene Graph Generation", "doi": null, "abstractUrl": "/journal/tg/5555/01/09947006/1Idr5neUL5e", "parentPublication": { "id": "trans/tg", 
"title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/wacv/2023/9346/0/934600a052", "title": "Composite Relationship Fields with Transformers for Scene Graph Generation", "doi": null, "abstractUrl": "/proceedings-article/wacv/2023/934600a052/1KxUDlkTf8I", "parentPublication": { "id": "proceedings/wacv/2023/9346/0", "title": "2023 IEEE/CVF Winter Conference on Applications of Computer Vision (WACV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/wacv/2023/9346/0/934600c739", "title": "Improving Predicate Representation in Scene Graph Generation by Self-Supervised Learning", "doi": null, "abstractUrl": "/proceedings-article/wacv/2023/934600c739/1KxUossHorK", "parentPublication": { "id": "proceedings/wacv/2023/9346/0", "title": "2023 IEEE/CVF Winter Conference on Applications of Computer Vision (WACV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvpr/2020/7168/0/716800d743", "title": "GPS-Net: Graph Property Sensing Network for Scene Graph Generation", "doi": null, "abstractUrl": "/proceedings-article/cvpr/2020/716800d743/1m3o2oONVsY", "parentPublication": { "id": "proceedings/cvpr/2020/7168/0", "title": "2020 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "1uiluGq0Oo8", "title": "2021 IEEE International Conference on Multimedia and Expo (ICME)", "acronym": "icme", "groupId": "1000477", "volume": "0", "displayVolume": "0", "year": "2021", "__typename": "ProceedingType" }, "article": { "id": "1uilSfRdZcs", "doi": "10.1109/ICME51207.2021.9428472", "title": "Relationship-Aware Primal-Dual Graph Attention Network For Scene Graph Generation", "normalizedTitle": "Relationship-Aware Primal-Dual Graph Attention Network For Scene Graph Generation", "abstract": "The relationships and interactions between objects contain rich semantic information, which plays a crucial role in scene understanding. Existing methods do not attach great importance to the expression of relational features. To tackle this problem, we propose a novel Relationship-aware Primal-Dual Graph Attention Network (RPDGAT) to extract the comprehensive semantic features of objects and explore the sparse graph inference for scene graph generation. RPDGAT mines the inherent attributes and the relationships between objects by fusing multiple features, e.g. appearance, spatial, and category features. After feature extraction, we design a trainable relationship distance measure network to construct the robust and sparse graph structure for efficient graphical message passing. Moreover, it can preserve the contextual cues and neighboring dependency for objects and relationships from the interaction between primal and dual graphs. Extensive experimental results present the improved performance of our method over several state-of-the-art methods on the visual genome datasets.", "abstracts": [ { "abstractType": "Regular", "content": "The relationships and interactions between objects contain rich semantic information, which plays a crucial role in scene understanding. Existing methods do not attach great importance to the expression of relational features. 
To tackle this problem, we propose a novel Relationship-aware Primal-Dual Graph Attention Network (RPDGAT) to extract the comprehensive semantic features of objects and explore the sparse graph inference for scene graph generation. RPDGAT mines the inherent attributes and the relationships between objects by fusing multiple features, e.g. appearance, spatial, and category features. After feature extraction, we design a trainable relationship distance measure network to construct the robust and sparse graph structure for efficient graphical message passing. Moreover, it can preserve the contextual cues and neighboring dependency for objects and relationships from the interaction between primal and dual graphs. Extensive experimental results present the improved performance of our method over several state-of-the-art methods on the visual genome datasets.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "The relationships and interactions between objects contain rich semantic information, which plays a crucial role in scene understanding. Existing methods do not attach great importance to the expression of relational features. To tackle this problem, we propose a novel Relationship-aware Primal-Dual Graph Attention Network (RPDGAT) to extract the comprehensive semantic features of objects and explore the sparse graph inference for scene graph generation. RPDGAT mines the inherent attributes and the relationships between objects by fusing multiple features, e.g. appearance, spatial, and category features. After feature extraction, we design a trainable relationship distance measure network to construct the robust and sparse graph structure for efficient graphical message passing. Moreover, it can preserve the contextual cues and neighboring dependency for objects and relationships from the interaction between primal and dual graphs. 
Extensive experimental results present the improved performance of our method over several state-of-the-art methods on the visual genome datasets.", "fno": "09428472", "keywords": [ "Feature Extraction", "Genomics", "Graph Theory", "Message Passing", "Scene Graph Generation", "Rich Semantic Information", "Scene Understanding", "Relational Features", "Comprehensive Semantic Features", "Sparse Graph Inference", "RPDGAT Mines", "Category Features", "Feature Extraction", "Trainable Relationship Distance Measure Network", "Robust Graph Structure", "Sparse Graph Structure", "Primal Graphs", "Dual Graphs", "Relationship Aware Primal Dual Graph Attention Network", "Graphical Message Passing", "Visualization", "Message Passing", "Conferences", "Semantics", "Image Retrieval", "Genomics", "Feature Extraction", "Scene Graph Generation", "Sparse Graph", "Primal Dual Graph Attention Network", "Graph Inference" ], "authors": [ { "affiliation": "National University of Defense Technology,Science and Technology on Information Systems Engineering Laboratory,Changsha,China", "fullName": "Hao Zhou", "givenName": "Hao", "surname": "Zhou", "__typename": "ArticleAuthorType" }, { "affiliation": "National University of Defense Technology,College of Liberal Arts and Science,Changsha,China", "fullName": "Tingjin Luo", "givenName": "Tingjin", "surname": "Luo", "__typename": "ArticleAuthorType" }, { "affiliation": "National University of Defense Technology,Science and Technology on Information Systems Engineering Laboratory,Changsha,China", "fullName": "Jun Zhang", "givenName": "Jun", "surname": "Zhang", "__typename": "ArticleAuthorType" }, { "affiliation": "National University of Defense Technology,Science and Technology on Information Systems Engineering Laboratory,Changsha,China", "fullName": "Jun Lei", "givenName": "Jun", "surname": "Lei", "__typename": "ArticleAuthorType" }, { "affiliation": "National University of Defense Technology,Science and Technology on Information Systems 
Engineering Laboratory,Changsha,China", "fullName": "Shuohao Li", "givenName": "Shuohao", "surname": "Li", "__typename": "ArticleAuthorType" } ], "idPrefix": "icme", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "2021-07-01T00:00:00", "pubType": "proceedings", "pages": "1-6", "year": "2021", "issn": null, "isbn": "978-1-6654-3864-3", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "09428201", "articleId": "1uilJ5Y7SXS", "__typename": "AdjacentArticleType" }, "next": { "fno": "09428413", "articleId": "1uime4xvX8c", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/cvpr/2017/0457/0/0457d097", "title": "Scene Graph Generation by Iterative Message Passing", "doi": null, "abstractUrl": "/proceedings-article/cvpr/2017/0457d097/12OmNBAqZH0", "parentPublication": { "id": "proceedings/cvpr/2017/0457/0", "title": "2017 IEEE Conference on Computer Vision and Pattern Recognition (CVPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iccv/2021/2812/0/281200p5964", "title": "Exploiting Scene Graphs for Human-Object Interaction Detection", "doi": null, "abstractUrl": "/proceedings-article/iccv/2021/281200p5964/1BmGDVSgm4M", "parentPublication": { "id": "proceedings/iccv/2021/2812/0", "title": "2021 IEEE/CVF International Conference on Computer Vision (ICCV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icme/2022/8563/0/09859970", "title": "Multi-Scale Graph Attention Network for Scene Graph Generation", "doi": null, "abstractUrl": "/proceedings-article/icme/2022/09859970/1G9EpEewD16", "parentPublication": { "id": "proceedings/icme/2022/8563/0", "title": "2022 IEEE International Conference on Multimedia and Expo (ICME)", "__typename": "ParentPublication" }, 
"__typename": "RecommendedArticleType" }, { "id": "proceedings/cvpr/2022/6946/0/694600u0101", "title": "Object-Relation Reasoning Graph for Action Recognition", "doi": null, "abstractUrl": "/proceedings-article/cvpr/2022/694600u0101/1H1iMdG6oZa", "parentPublication": { "id": "proceedings/cvpr/2022/6946/0", "title": "2022 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvpr/2022/6946/0/694600t9454", "title": "HL-Net: Heterophily Learning Network for Scene Graph Generation", "doi": null, "abstractUrl": "/proceedings-article/cvpr/2022/694600t9454/1H1lCef5GSY", "parentPublication": { "id": "proceedings/cvpr/2022/6946/0", "title": "2022 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/wacv/2023/9346/0/934600a052", "title": "Composite Relationship Fields with Transformers for Scene Graph Generation", "doi": null, "abstractUrl": "/proceedings-article/wacv/2023/934600a052/1KxUDlkTf8I", "parentPublication": { "id": "proceedings/wacv/2023/9346/0", "title": "2023 IEEE/CVF Winter Conference on Applications of Computer Vision (WACV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/wacv/2023/9346/0/934600e380", "title": "Grounding Scene Graphs on Natural Images via Visio-Lingual Message Passing", "doi": null, "abstractUrl": "/proceedings-article/wacv/2023/934600e380/1KxUEJZWihW", "parentPublication": { "id": "proceedings/wacv/2023/9346/0", "title": "2023 IEEE/CVF Winter Conference on Applications of Computer Vision (WACV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iccvw/2019/5023/0/502300b730", "title": "Visual Relationships as Functions:Enabling Few-Shot Scene Graph Prediction", "doi": null, "abstractUrl": 
"/proceedings-article/iccvw/2019/502300b730/1i5msdOeSl2", "parentPublication": { "id": "proceedings/iccvw/2019/5023/0", "title": "2019 IEEE/CVF International Conference on Computer Vision Workshop (ICCVW)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvprw/2021/4899/0/489900b663", "title": "Target-Tailored Source-Transformation for Scene Graph Generation", "doi": null, "abstractUrl": "/proceedings-article/cvprw/2021/489900b663/1yJYwdXPYYg", "parentPublication": { "id": "proceedings/cvprw/2021/4899/0", "title": "2021 IEEE/CVF Conference on Computer Vision and Pattern Recognition Workshops (CVPRW)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvpr/2021/4509/0/450900l1541", "title": "Fully Convolutional Scene Graph Generation", "doi": null, "abstractUrl": "/proceedings-article/cvpr/2021/450900l1541/1yeJJIVP3bi", "parentPublication": { "id": "proceedings/cvpr/2021/4509/0", "title": "2021 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "1yeHGyRsuys", "title": "2021 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)", "acronym": "cvpr", "groupId": "1000147", "volume": "0", "displayVolume": "0", "year": "2021", "__typename": "ProceedingType" }, "article": { "id": "1yeMcyroL2o", "doi": "10.1109/CVPR46437.2021.00958", "title": "Exploiting Edge-Oriented Reasoning for 3D Point-based Scene Graph Analysis", "normalizedTitle": "Exploiting Edge-Oriented Reasoning for 3D Point-based Scene Graph Analysis", "abstract": "Scene understanding is a critical problem in computer vision. In this paper, we propose a 3D point-based scene graph generation (SGG<inf>point</inf>) framework to effectively bridge perception and reasoning to achieve scene understanding via three sequential stages, namely scene graph construction, reasoning, and inference. Within the reasoning stage, an EDGE-oriented Graph Convolutional Network (EdgeGCN) is created to exploit multi-dimensional edge features for explicit relationship modeling, together with the exploration of two associated twinning interaction mechanisms between nodes and edges for the independent evolution of scene graph representations. Overall, our integrated SGG<inf>point</inf> framework is established to seek and infer scene structures of interest from both real-world and synthetic 3D point-based scenes. Our experimental results show promising edge-oriented reasoning effects on scene graph generation studies. We also demonstrate our method advantage on several traditional graph representation learning benchmark datasets, including the node-wise classification on citation networks and whole-graph recognition problems for molecular analysis.", "abstracts": [ { "abstractType": "Regular", "content": "Scene understanding is a critical problem in computer vision. 
In this paper, we propose a 3D point-based scene graph generation (SGG<inf>point</inf>) framework to effectively bridge perception and reasoning to achieve scene understanding via three sequential stages, namely scene graph construction, reasoning, and inference. Within the reasoning stage, an EDGE-oriented Graph Convolutional Network (EdgeGCN) is created to exploit multi-dimensional edge features for explicit relationship modeling, together with the exploration of two associated twinning interaction mechanisms between nodes and edges for the independent evolution of scene graph representations. Overall, our integrated SGG<inf>point</inf> framework is established to seek and infer scene structures of interest from both real-world and synthetic 3D point-based scenes. Our experimental results show promising edge-oriented reasoning effects on scene graph generation studies. We also demonstrate our method advantage on several traditional graph representation learning benchmark datasets, including the node-wise classification on citation networks and whole-graph recognition problems for molecular analysis.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "Scene understanding is a critical problem in computer vision. In this paper, we propose a 3D point-based scene graph generation (SGGpoint) framework to effectively bridge perception and reasoning to achieve scene understanding via three sequential stages, namely scene graph construction, reasoning, and inference. Within the reasoning stage, an EDGE-oriented Graph Convolutional Network (EdgeGCN) is created to exploit multi-dimensional edge features for explicit relationship modeling, together with the exploration of two associated twinning interaction mechanisms between nodes and edges for the independent evolution of scene graph representations. 
Overall, our integrated SGGpoint framework is established to seek and infer scene structures of interest from both real-world and synthetic 3D point-based scenes. Our experimental results show promising edge-oriented reasoning effects on scene graph generation studies. We also demonstrate our method advantage on several traditional graph representation learning benchmark datasets, including the node-wise classification on citation networks and whole-graph recognition problems for molecular analysis.", "fno": "450900j700", "keywords": [ "Computer Vision", "Convolutional Neural Nets", "Edge Detection", "Graph Theory", "Image Representation", "Inference Mechanisms", "Learning Artificial Intelligence", "Sequential Stages", "Scene Graph Construction", "Reasoning Stage", "Multidimensional Edge Features", "Associated Twinning Interaction Mechanisms", "Scene Graph Representations", "Integrated SG Gpoint Framework", "Scene Graph Generation Studies", "Whole Graph Recognition Problems", "3 D Point Based Scene Graph Generation Framework", "Edge Oriented Graph Convolutional Network", "Edge Oriented Reasoning Effects", "Bridges", "Computer Vision", "Three Dimensional Displays", "Benchmark Testing", "Cognition", "Pattern Recognition" ], "authors": [ { "affiliation": "University of Sydney", "fullName": "Chaoyi Zhang", "givenName": "Chaoyi", "surname": "Zhang", "__typename": "ArticleAuthorType" }, { "affiliation": "University of Sydney", "fullName": "Jianhui Yu", "givenName": "Jianhui", "surname": "Yu", "__typename": "ArticleAuthorType" }, { "affiliation": "University of New South Wales", "fullName": "Yang Song", "givenName": "Yang", "surname": "Song", "__typename": "ArticleAuthorType" }, { "affiliation": "University of Sydney", "fullName": "Weidong Cai", "givenName": "Weidong", "surname": "Cai", "__typename": "ArticleAuthorType" } ], "idPrefix": "cvpr", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "2021-06-01T00:00:00", 
"pubType": "proceedings", "pages": "9700-9710", "year": "2021", "issn": null, "isbn": "978-1-6654-4509-2", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [ { "id": "1yeMcuKTFks", "name": "pcvpr202145090-09578123s1-mm_450900j700.zip", "size": "2.12 MB", "location": "https://www.computer.org/csdl/api/v1/extra/pcvpr202145090-09578123s1-mm_450900j700.zip", "__typename": "WebExtraType" } ], "adjacentArticles": { "previous": { "fno": "450900j690", "articleId": "1yeKUx8VM2I", "__typename": "AdjacentArticleType" }, "next": { "fno": "450900j711", "articleId": "1yeKEdaoFS8", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/cvpr/2013/4989/0/4989d127", "title": "Beyond Point Clouds: Scene Understanding by Reasoning Geometry and Physics", "doi": null, "abstractUrl": "/proceedings-article/cvpr/2013/4989d127/12OmNylboMR", "parentPublication": { "id": "proceedings/cvpr/2013/4989/0", "title": "2013 IEEE Conference on Computer Vision and Pattern Recognition (CVPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvpr/2022/6946/0/694600u0101", "title": "Object-Relation Reasoning Graph for Action Recognition", "doi": null, "abstractUrl": "/proceedings-article/cvpr/2022/694600u0101/1H1iMdG6oZa", "parentPublication": { "id": "proceedings/cvpr/2022/6946/0", "title": "2022 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvpr/2022/6946/0/694600p5575", "title": "Not All Relations are Equal: Mining Informative Labels for Scene Graph Generation", "doi": null, "abstractUrl": "/proceedings-article/cvpr/2022/694600p5575/1H1iflRfdEA", "parentPublication": { "id": "proceedings/cvpr/2022/6946/0", "title": "2022 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)", "__typename": "ParentPublication" }, 
"__typename": "RecommendedArticleType" }, { "id": "trans/tg/5555/01/09947006", "title": "Explore Contextual Information for 3D Scene Graph Generation", "doi": null, "abstractUrl": "/journal/tg/5555/01/09947006/1Idr5neUL5e", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/wacv/2023/9346/0/934600e012", "title": "More Knowledge, Less Bias: Unbiasing Scene Graph Generation with Explicit Ontological Adjustment", "doi": null, "abstractUrl": "/proceedings-article/wacv/2023/934600e012/1KxUBxEiL0Q", "parentPublication": { "id": "proceedings/wacv/2023/9346/0", "title": "2023 IEEE/CVF Winter Conference on Applications of Computer Vision (WACV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/wacv/2023/9346/0/934600a052", "title": "Composite Relationship Fields with Transformers for Scene Graph Generation", "doi": null, "abstractUrl": "/proceedings-article/wacv/2023/934600a052/1KxUDlkTf8I", "parentPublication": { "id": "proceedings/wacv/2023/9346/0", "title": "2023 IEEE/CVF Winter Conference on Applications of Computer Vision (WACV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iccvw/2019/5023/0/502300e584", "title": "Scene Graph Contextualization in Visual Commonsense Reasoning", "doi": null, "abstractUrl": "/proceedings-article/iccvw/2019/502300e584/1i5mDggvRmg", "parentPublication": { "id": "proceedings/iccvw/2019/5023/0", "title": "2019 IEEE/CVF International Conference on Computer Vision Workshop (ICCVW)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvpr/2020/7168/0/716800d713", "title": "Unbiased Scene Graph Generation From Biased Training", "doi": null, "abstractUrl": "/proceedings-article/cvpr/2020/716800d713/1m3o31iArJe", "parentPublication": { "id": 
"proceedings/cvpr/2020/7168/0", "title": "2020 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvpr/2021/4509/0/450900b356", "title": "Explicit Knowledge Incorporation for Visual Reasoning", "doi": null, "abstractUrl": "/proceedings-article/cvpr/2021/450900b356/1yeIZpl7gDm", "parentPublication": { "id": "proceedings/cvpr/2021/4509/0", "title": "2021 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tp/2023/01/09661322", "title": "A Comprehensive Survey of Scene Graphs: Generation and Application", "doi": null, "abstractUrl": "/journal/tp/2023/01/09661322/1zzl4piVIzu", "parentPublication": { "id": "trans/tp", "title": "IEEE Transactions on Pattern Analysis & Machine Intelligence", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "12OmNy3iFul", "title": "2014 18th International Conference on Information Visualisation (IV)", "acronym": "iv", "groupId": "1000370", "volume": "0", "displayVolume": "0", "year": "2014", "__typename": "ProceedingType" }, "article": { "id": "12OmNAnMuLr", "doi": "10.1109/IV.2014.35", "title": "Interactive Similarity Links in Treemap Visualizations", "normalizedTitle": "Interactive Similarity Links in Treemap Visualizations", "abstract": "Exploring hierarchical organizations such as software systems is a challenging task. This gets even harder when these are large, deeply nested, and attached by a list of additional attributes of either quantitative, ordinal, or categorical nature. Tree maps have been designed to graphically represent such hierarchical structures in a space-filling way where two attributes for each hierarchy element can be visualized in each tree map box at the same time: by area and by color. Having more than two attributes attached to each hierarchy element can also be visualized by this concept when allowing an analyst to frequently browse the pair wisely represented attributes in the color dimension leaving the box sizes fixed due to mental map preservation. In this paper we enrich standard tree map visualizations by such a browsing feature and additional interaction techniques such as expanding or collapsing them to different levels of hierarchical granularity. To further support the comparison of similar boxes for either one, two, or a list of attributes we add the concept of similarity links whose display thresholds can also interactively be chosen.", "abstracts": [ { "abstractType": "Regular", "content": "Exploring hierarchical organizations such as software systems is a challenging task. This gets even harder when these are large, deeply nested, and attached by a list of additional attributes of either quantitative, ordinal, or categorical nature. 
Tree maps have been designed to graphically represent such hierarchical structures in a space-filling way where two attributes for each hierarchy element can be visualized in each tree map box at the same time: by area and by color. Having more than two attributes attached to each hierarchy element can also be visualized by this concept when allowing an analyst to frequently browse the pair wisely represented attributes in the color dimension leaving the box sizes fixed due to mental map preservation. In this paper we enrich standard tree map visualizations by such a browsing feature and additional interaction techniques such as expanding or collapsing them to different levels of hierarchical granularity. To further support the comparison of similar boxes for either one, two, or a list of attributes we add the concept of similarity links whose display thresholds can also interactively be chosen.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "Exploring hierarchical organizations such as software systems is a challenging task. This gets even harder when these are large, deeply nested, and attached by a list of additional attributes of either quantitative, ordinal, or categorical nature. Tree maps have been designed to graphically represent such hierarchical structures in a space-filling way where two attributes for each hierarchy element can be visualized in each tree map box at the same time: by area and by color. Having more than two attributes attached to each hierarchy element can also be visualized by this concept when allowing an analyst to frequently browse the pair wisely represented attributes in the color dimension leaving the box sizes fixed due to mental map preservation. In this paper we enrich standard tree map visualizations by such a browsing feature and additional interaction techniques such as expanding or collapsing them to different levels of hierarchical granularity. 
To further support the comparison of similar boxes for either one, two, or a list of attributes we add the concept of similarity links whose display thresholds can also interactively be chosen.", "fno": "4103a034", "keywords": [ "Visualization", "Image Color Analysis", "Software Metrics", "Layout", "Color", "Organizations", "Rapid Serial Visual Presentation", "Information Hierarchies", "Treemaps", "Node Link Diagrams", "Software Metrics", "Software Visualization" ], "authors": [ { "affiliation": null, "fullName": "M. Burch", "givenName": "M.", "surname": "Burch", "__typename": "ArticleAuthorType" } ], "idPrefix": "iv", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "2014-07-01T00:00:00", "pubType": "proceedings", "pages": "34-39", "year": "2014", "issn": "1550-6037", "isbn": "978-1-4799-4103-2", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "4103a027", "articleId": "12OmNzdoMiu", "__typename": "AdjacentArticleType" }, "next": { "fno": "4103a040", "articleId": "12OmNBigFst", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/iv/2011/0868/0/06004017", "title": "Layered TimeRadarTrees", "doi": null, "abstractUrl": "/proceedings-article/iv/2011/06004017/12OmNArthca", "parentPublication": { "id": "proceedings/iv/2011/0868/0", "title": "2011 15th International Conference on Information Visualisation", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iv/2014/4103/0/4103a001", "title": "Using Visual Cues on DOITree for Visualizing Large Hierarchical Data", "doi": null, "abstractUrl": "/proceedings-article/iv/2014/4103a001/12OmNBJNL1S", "parentPublication": { "id": "proceedings/iv/2014/4103/0", "title": "2014 18th International Conference on Information Visualisation (IV)", "__typename": "ParentPublication" }, 
"__typename": "RecommendedArticleType" }, { "id": "proceedings/pacificvis/2011/935/0/05742384", "title": "Interactive visualization of multivariate trajectory data with density maps", "doi": null, "abstractUrl": "/proceedings-article/pacificvis/2011/05742384/12OmNqAU6rq", "parentPublication": { "id": "proceedings/pacificvis/2011/935/0", "title": "2011 IEEE Pacific Visualization Symposium (PacificVis)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/vlhcc/2012/0852/0/06344496", "title": "Visualizing traceability links between source code and documentation", "doi": null, "abstractUrl": "/proceedings-article/vlhcc/2012/06344496/12OmNwt5siM", "parentPublication": { "id": "proceedings/vlhcc/2012/0852/0", "title": "2012 IEEE Symposium on Visual Languages and Human-Centric Computing (VL/HCC 2012)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iv/2010/7846/0/05571369", "title": "The Network Lens: Interactive Exploration of Multivariate Networks Using Visual Filtering", "doi": null, "abstractUrl": "/proceedings-article/iv/2010/05571369/12OmNxUMHny", "parentPublication": { "id": "proceedings/iv/2010/7846/0", "title": "2010 14th International Conference Information Visualisation", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/hicss/2015/7367/0/7367b714", "title": "TreeQueST: A Treemap-Based Query Sandbox for Microdocument Retrieval", "doi": null, "abstractUrl": "/proceedings-article/hicss/2015/7367b714/12OmNyuy9JM", "parentPublication": { "id": "proceedings/hicss/2015/7367/0", "title": "2015 48th Hawaii International Conference on System Sciences (HICSS)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icicis/2011/1561/0/06063180", "title": "Application on Integration Technology of Visualized Hierarchical Information", "doi": null, "abstractUrl": 
"/proceedings-article/icicis/2011/06063180/12OmNzcxYYK", "parentPublication": { "id": "proceedings/icicis/2011/1561/0", "title": "2011 International Conference on Internet Computing and Information Services (ICICIS 2011)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/5555/01/09765327", "title": "Graphical Enhancements for Effective Exemplar Identification in Contextual Data Visualizations", "doi": null, "abstractUrl": "/journal/tg/5555/01/09765327/1CWoKyrHUze", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icsme/2019/3094/0/309400a367", "title": "Interactive Traceability Links Visualization using Hierarchical Trace Map", "doi": null, "abstractUrl": "/proceedings-article/icsme/2019/309400a367/1fHlFbzJir6", "parentPublication": { "id": "proceedings/icsme/2019/3094/0", "title": "2019 IEEE International Conference on Software Maintenance and Evolution (ICSME)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/pacificvis/2020/5697/0/09086199", "title": "PansyTree: Merging Multiple Hierarchies", "doi": null, "abstractUrl": "/proceedings-article/pacificvis/2020/09086199/1kuHo7qiNeE", "parentPublication": { "id": "proceedings/pacificvis/2020/5697/0", "title": "2020 IEEE Pacific Visualization Symposium (PacificVis)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "12OmNro0Ib6", "title": "2004 Southwest Symposium on Image Analysis and Interpretation", "acronym": "iai", "groupId": "1000345", "volume": "0", "displayVolume": "0", "year": "2004", "__typename": "ProceedingType" }, "article": { "id": "12OmNyaoDF1", "doi": "10.1109/IAI.2004.1300947", "title": "Using inverse image frequency for perception-based color image quantization", "normalizedTitle": "Using inverse image frequency for perception-based color image quantization", "abstract": "The process of selecting a small number of representative colors from an image of higher color resolution is called color image quantization. A well-known problem in quantizing images is to select the best representative colors that not only reduce the quantization error, but also account for the perception of human vision. The technique we propose effectively handles this problem by using the variation of colors in different regions of an image, in addition to the use of the color histogram, for effective perception and quantization. We introduce the property of inverse image frequency (IIF) for computing the representative colors of an image. IIF is based on the observation that colors within a color subset having non-uniform frequency distribution across the different regions of an image have better discriminating properties than those having uniform distribution. Our approach to incorporate the information derived from IIF can be combined with any standard quantization algorithm. The results show that our approach quantizes an image more effectively than using just the well-known median cut algorithm.", "abstracts": [ { "abstractType": "Regular", "content": "The process of selecting a small number of representative colors from an image of higher color resolution is called color image quantization. 
A well-known problem in quantizing images is to select the best representative colors that not only reduce the quantization error, but also account for the perception of human vision. The technique we propose effectively handles this problem by using the variation of colors in different regions of an image, in addition to the use of the color histogram, for effective perception and quantization. We introduce the property of inverse image frequency (IIF) for computing the representative colors of an image. IIF is based on the observation that colors within a color subset having non-uniform frequency distribution across the different regions of an image have better discriminating properties than those having uniform distribution. Our approach to incorporate the information derived from IIF can be combined with any standard quantization algorithm. The results show that our approach quantizes an image more effectively than using just the well-known median cut algorithm.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "The process of selecting a small number of representative colors from an image of higher color resolution is called color image quantization. A well-known problem in quantizing images is to select the best representative colors that not only reduce the quantization error, but also account for the perception of human vision. The technique we propose effectively handles this problem by using the variation of colors in different regions of an image, in addition to the use of the color histogram, for effective perception and quantization. We introduce the property of inverse image frequency (IIF) for computing the representative colors of an image. IIF is based on the observation that colors within a color subset having non-uniform frequency distribution across the different regions of an image have better discriminating properties than those having uniform distribution. 
Our approach to incorporate the information derived from IIF can be combined with any standard quantization algorithm. The results show that our approach quantizes an image more effectively than using just the well-known median cut algorithm.", "fno": "01300947", "keywords": [ "Image Colour Analysis", "Quantisation Signal", "Image Resolution", "Visual Perception", "Statistical Distributions", "Inverse Image Frequency", "Color Image Quantization", "Visual Perception", "Color Resolution", "Human Vision", "Color Histogram", "Representative Colors", "Median Cut Algorithm", "Frequency", "Color", "Quantization", "Humans", "Image Storage", "Application Software", "Image Resolution", "Histograms", "Psychology", "Visual System" ], "authors": [ { "affiliation": "Univ. of Louisiana, Lafayette, LA, USA", "fullName": "B. Shah", "givenName": "B.", "surname": "Shah", "__typename": "ArticleAuthorType" }, { "affiliation": "Univ. of Louisiana, Lafayette, LA, USA", "fullName": "P. Dhatric", "givenName": "P.", "surname": "Dhatric", "__typename": "ArticleAuthorType" }, { "affiliation": "Univ. of Louisiana, Lafayette, LA, USA", "fullName": "V. 
Raghavan", "givenName": "V.", "surname": "Raghavan", "__typename": "ArticleAuthorType" } ], "idPrefix": "iai", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "2004-01-01T00:00:00", "pubType": "proceedings", "pages": "71-75", "year": "2004", "issn": null, "isbn": null, "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "01300946", "articleId": "12OmNxvwoY5", "__typename": "AdjacentArticleType" }, "next": { "fno": "01300948", "articleId": "12OmNzEVRVJ", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/icassp/1994/1775/2/00389552", "title": "Color quantization of images based on human vision perception", "doi": null, "abstractUrl": "/proceedings-article/icassp/1994/00389552/12OmNARRYgy", "parentPublication": { "id": "proceedings/icassp/1994/1775/2", "title": "Acoustics, Speech, and Signal Processing, IEEE International Conference on", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/aiccsa/2017/3581/0/3581a788", "title": "Image Retrieval Using Spatial Dominant Color Descriptor", "doi": null, "abstractUrl": "/proceedings-article/aiccsa/2017/3581a788/12OmNAWpytt", "parentPublication": { "id": "proceedings/aiccsa/2017/3581/0", "title": "2017 IEEE/ACS 14th International Conference on Computer Systems and Applications (AICCSA)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icpr/2004/2128/1/212810664", "title": "Human Perception Based Color Image Quantization", "doi": null, "abstractUrl": "/proceedings-article/icpr/2004/212810664/12OmNCd2rzy", "parentPublication": { "id": "proceedings/icpr/2004/2128/1", "title": "Pattern Recognition, International Conference on", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": 
"proceedings/icpr/1992/2920/0/00201921", "title": "A color video image quantization method with stable and efficient color selection capability", "doi": null, "abstractUrl": "/proceedings-article/icpr/1992/00201921/12OmNvF83ne", "parentPublication": { "id": "proceedings/icpr/1992/2920/0", "title": "11th IAPR International Conference on Pattern Recognition. Vol. III. Conference C: Image, Speech and Signal Analysis,", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icece/2010/4031/0/4031f092", "title": "Color Image Retrieval Based on Vector Quantization", "doi": null, "abstractUrl": "/proceedings-article/icece/2010/4031f092/12OmNwnYG1K", "parentPublication": { "id": "proceedings/icece/2010/4031/0", "title": "Electrical and Control Engineering, International Conference on", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/sitis/2016/5698/0/07907558", "title": "From Color Quantization to Image Segmentation", "doi": null, "abstractUrl": "/proceedings-article/sitis/2016/07907558/12OmNy5R3Hk", "parentPublication": { "id": "proceedings/sitis/2016/5698/0", "title": "2016 12th International Conference on Signal-Image Technology & Internet-Based Systems (SITIS)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/sitis/2016/5698/0/07907437", "title": "A New Method for Color Quantization", "doi": null, "abstractUrl": "/proceedings-article/sitis/2016/07907437/12OmNyQGS9t", "parentPublication": { "id": "proceedings/sitis/2016/5698/0", "title": "2016 12th International Conference on Signal-Image Technology & Internet-Based Systems (SITIS)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ifita/2009/3600/1/3600a135", "title": "Adaptive Color Quantization Based on Self-Growing Network", "doi": null, "abstractUrl": "/proceedings-article/ifita/2009/3600a135/12OmNzR8CwG", 
"parentPublication": { "id": "proceedings/ifita/2009/3600/3", "title": "Information Technology and Applications, International Forum on", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/sigra/1997/8102/0/00625178", "title": "Color image quantization by pairwise clustering", "doi": null, "abstractUrl": "/proceedings-article/sigra/1997/00625178/12OmNzYwbWL", "parentPublication": { "id": "proceedings/sigra/1997/8102/0", "title": "Proceedings X Brazilian Symposium on Computer Graphics and Image Processing", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/mmcs/1997/7819/0/00609755", "title": "Color clustering techniques for color-content-based image retrieval from image databases", "doi": null, "abstractUrl": "/proceedings-article/mmcs/1997/00609755/12OmNzl3WQA", "parentPublication": { "id": "proceedings/mmcs/1997/7819/0", "title": "Proceedings of IEEE International Conference on Multimedia Computing and Systems", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "1cI6akLvAuQ", "title": "2019 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)", "acronym": "vr", "groupId": "1000791", "volume": "0", "displayVolume": "0", "year": "2019", "__typename": "ProceedingType" }, "article": { "id": "1cJ0L8WggAE", "doi": "10.1109/VR.2019.8797714", "title": "PILC Projector: RGB-IR Projector for Pixel-level Infrared Light Communication", "normalizedTitle": "PILC Projector: RGB-IR Projector for Pixel-level Infrared Light Communication", "abstract": "The projection of invisible data on visible images can facilitate seamless interactive projection, since data embedded in regular images is unobtrusive to human viewers. However, the previous techniques sacrificed one of the following key goals: 1) calibration-free setup; 2) full-color projection; or 3) high contrast image. In this paper, we propose a Pixel-level Infrared Light Communication (PILC) projector that achieves all these requirements by adding an infrared light source to the full-color projector. To provide a proof of concept, we built a functional prototype, evaluated its performance, and presented a basic application.", "abstracts": [ { "abstractType": "Regular", "content": "The projection of invisible data on visible images can facilitate seamless interactive projection, since data embedded in regular images is unobtrusive to human viewers. However, the previous techniques sacrificed one of the following key goals: 1) calibration-free setup; 2) full-color projection; or 3) high contrast image. In this paper, we propose a Pixel-level Infrared Light Communication (PILC) projector that achieves all these requirements by adding an infrared light source to the full-color projector. 
To provide a proof of concept, we built a functional prototype, evaluated its performance, and presented a basic application.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "The projection of invisible data on visible images can facilitate seamless interactive projection, since data embedded in regular images is unobtrusive to human viewers. However, the previous techniques sacrificed one of the following key goals: 1) calibration-free setup; 2) full-color projection; or 3) high contrast image. In this paper, we propose a Pixel-level Infrared Light Communication (PILC) projector that achieves all these requirements by adding an infrared light source to the full-color projector. To provide a proof of concept, we built a functional prototype, evaluated its performance, and presented a basic application.", "fno": "08797714", "keywords": [ "Image Colour Analysis", "Infrared Imaging", "Light Sources", "Optical Communication", "Optical Projectors", "Seamless Interactive Projection", "Infrared Light Source", "Full Color Projector", "RGB IR Projector", "Pixel Level Infrared Light Communication Projector", "PILC Projector", "Calibration Free Setup", "Light Emitting Diodes", "Brightness", "Image Color Analysis", "Decoding", "Light Sources", "Color", "Sensors", "Visible Light Communication", "Infrared Data Embedding", "Digital Micromirror Device", "Augmented Reality", "Human Centered Computing", "Human Computer Interaction HCI", "Interaction Paradigms", "Mixed Augmented Reality", "Interaction Devices", "Displays And Imagers" ], "authors": [ { "affiliation": "The University of Tokyo", "fullName": "Ikuo Kamei", "givenName": "Ikuo", "surname": "Kamei", "__typename": "ArticleAuthorType" }, { "affiliation": "The University of Tokyo", "fullName": "Takefumi Hiraki", "givenName": "Takefumi", "surname": "Hiraki", "__typename": "ArticleAuthorType" }, { "affiliation": "The University of Tokyo", "fullName": "Shogo Fukushima", "givenName": "Shogo", "surname": 
"Fukushima", "__typename": "ArticleAuthorType" }, { "affiliation": "The University of Tokyo", "fullName": "Takeshi Naemura", "givenName": "Takeshi", "surname": "Naemura", "__typename": "ArticleAuthorType" } ], "idPrefix": "vr", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "2019-03-01T00:00:00", "pubType": "proceedings", "pages": "1004-1005", "year": "2019", "issn": null, "isbn": "978-1-7281-1377-7", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "08798268", "articleId": "1cJ0Ir5EpRm", "__typename": "AdjacentArticleType" }, "next": { "fno": "08797856", "articleId": "1cJ1evLlHRm", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/physics/1995/7321/0/73210122", "title": "Optoelectronic integration of quantum well infrared photodetector for array fabrication", "doi": null, "abstractUrl": "/proceedings-article/physics/1995/73210122/12OmNAYGlyn", "parentPublication": { "id": "proceedings/physics/1995/7321/0", "title": "Physics and Modeling of Devices Based on Low-Dimensional Structures, Workshop on", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icisce/2015/6850/0/6850a778", "title": "Intelligent Dimming LED for Moonlight Simulation", "doi": null, "abstractUrl": "/proceedings-article/icisce/2015/6850a778/12OmNBghtrs", "parentPublication": { "id": "proceedings/icisce/2015/6850/0", "title": "2015 2nd International Conference on Information Science and Control Engineering (ICISCE)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ismar/2015/7660/0/7660a096", "title": "[POSTER] Marker Identification Using IR LEDs and RGB Color Descriptors", "doi": null, "abstractUrl": "/proceedings-article/ismar/2015/7660a096/12OmNscOUfD", "parentPublication": { "id": 
"proceedings/ismar/2015/7660/0", "title": "2015 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/3dv/2015/8332/0/8332a580", "title": "Depth Estimation Based on an Infrared Projector and an Infrared Color Stereo Camera by Using Cross-Based Dynamic Programming with Cost Volume Filter", "doi": null, "abstractUrl": "/proceedings-article/3dv/2015/8332a580/12OmNyKa5Uq", "parentPublication": { "id": "proceedings/3dv/2015/8332/0", "title": "2015 International Conference on 3D Vision (3DV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/nicoint/2018/6909/0/690901a087", "title": "Lanterns Lighitng Up by Sound: Visualization of the Sound Space by Colors", "doi": null, "abstractUrl": "/proceedings-article/nicoint/2018/690901a087/13bd1gFCjrP", "parentPublication": { "id": "proceedings/nicoint/2018/6909/0", "title": "2018 Nicograph International (NicoInt)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2016/04/07383338", "title": "Inter-reflection Compensation of Immersive Projection Display by Spatio-Temporal Screen Reflectance Modulation", "doi": null, "abstractUrl": "/journal/tg/2016/04/07383338/13rRUwInvfc", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/snsp/2018/7413/0/741300a365", "title": "Performance Improvement of Symbol Decision for Visual-MIMO System in RGB LED Lighting", "doi": null, "abstractUrl": "/proceedings-article/snsp/2018/741300a365/17D45W1Oa3k", "parentPublication": { "id": "proceedings/snsp/2018/7413/0", "title": "2018 International Conference on Sensor Networks and Signal Processing (SNSP)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { 
"id": "trans/tg/2019/04/08327511", "title": "Non-Contact Thermo-Visual Augmentation by IR-RGB Projection", "doi": null, "abstractUrl": "/journal/tg/2019/04/08327511/17YCN2mRpss", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ismar-adjunct/2018/7592/0/08699280", "title": "Precise Surface Color Estimation Using a Non-Diagonal Reflectance Matrix on an Adaptive Projector-Camera System", "doi": null, "abstractUrl": "/proceedings-article/ismar-adjunct/2018/08699280/19F1RBpcB0s", "parentPublication": { "id": "proceedings/ismar-adjunct/2018/7592/0", "title": "2018 IEEE International Symposium on Mixed and Augmented Reality Adjunct (ISMAR-Adjunct)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/wacv/2023/9346/0/934600f447", "title": "Event-based RGB sensing with structured light", "doi": null, "abstractUrl": "/proceedings-article/wacv/2023/934600f447/1KxVerZJFXa", "parentPublication": { "id": "proceedings/wacv/2023/9346/0", "title": "2023 IEEE/CVF Winter Conference on Applications of Computer Vision (WACV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "1y4osUIVqne", "title": "2021 International Conference on Computer Engineering and Application (ICCEA)", "acronym": "iccea", "groupId": "1040032", "volume": "0", "displayVolume": "0", "year": "2021", "__typename": "ProceedingType" }, "article": { "id": "1y4owqkadBC", "doi": "10.1109/ICCEA53728.2021.00052", "title": "Thematic Map Color Matching Design Based On Geese Swarm Optimization Algorithm", "normalizedTitle": "Thematic Map Color Matching Design Based On Geese Swarm Optimization Algorithm", "abstract": "In order to enhance the beauty of thematic map and improve the intelligent level of map color matching, this paper designs an intelligent design optimization algorithm of thematic map color based on the actual needs of thematic map and geese swarm optimization algorithm. The basic idea is: guided by map types and user expectations, according to color psychology and Munsell&#x2019;s color harmony theory, the initial color is selected as the particles in the population, and then the geese swarm optimization algorithm is used to update the population, and the Munsell Spencer&#x2019;s color harmony theory and beauty formula are used as fitness functions for evaluation and optimization, so as to get the map color scheme. Finally, the algorithm is demonstrated by experiments.", "abstracts": [ { "abstractType": "Regular", "content": "In order to enhance the beauty of thematic map and improve the intelligent level of map color matching, this paper designs an intelligent design optimization algorithm of thematic map color based on the actual needs of thematic map and geese swarm optimization algorithm. 
The basic idea is: guided by map types and user expectations, according to color psychology and Munsell&#x2019;s color harmony theory, the initial color is selected as the particles in the population, and then the geese swarm optimization algorithm is used to update the population, and the Munsell Spencer&#x2019;s color harmony theory and beauty formula are used as fitness functions for evaluation and optimization, so as to get the map color scheme. Finally, the algorithm is demonstrated by experiments.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "In order to enhance the beauty of thematic map and improve the intelligent level of map color matching, this paper designs an intelligent design optimization algorithm of thematic map color based on the actual needs of thematic map and geese swarm optimization algorithm. The basic idea is: guided by map types and user expectations, according to color psychology and Munsell’s color harmony theory, the initial color is selected as the particles in the population, and then the geese swarm optimization algorithm is used to update the population, and the Munsell Spencer’s color harmony theory and beauty formula are used as fitness functions for evaluation and optimization, so as to get the map color scheme. 
Finally, the algorithm is demonstrated by experiments.", "fno": "261600a224", "keywords": [ "Cartography", "Image Colour Analysis", "Image Matching", "Particle Swarm Optimisation", "Psychology", "Intelligent Design Optimization", "Map Types", "Color Psychology", "Initial Color", "Thematic Map Color Matching Design", "Munsell Spencer Color Harmony Theory", "Geese Swarm Optimization", "Sociology", "Psychology", "Color", "Particle Swarm Optimization", "Statistics", "Optimization", "Standards", "Color Intelligent Optimization Design", "Geese Swarm Optimization Algorithm", "Color Harmony Theory", "Meidu Formula" ], "authors": [ { "affiliation": "North China Institute of Computing Technology,Beijing,China", "fullName": "Weiyi Wang", "givenName": "Weiyi", "surname": "Wang", "__typename": "ArticleAuthorType" }, { "affiliation": "North China Institute of Computing Technology,Beijing,China", "fullName": "Dawei Zuo", "givenName": "Dawei", "surname": "Zuo", "__typename": "ArticleAuthorType" }, { "affiliation": "North China Institute of Computing Technology,Beijing,China", "fullName": "Meizheng Zhu", "givenName": "Meizheng", "surname": "Zhu", "__typename": "ArticleAuthorType" } ], "idPrefix": "iccea", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "2021-06-01T00:00:00", "pubType": "proceedings", "pages": "224-229", "year": "2021", "issn": null, "isbn": "978-1-6654-2616-9", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "261600a220", "articleId": "1y4ozwyRgeA", "__typename": "AdjacentArticleType" }, "next": { "fno": "261600a230", "articleId": "1y4oxnqq3Is", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/aici/2009/3816/1/3816a134", "title": "Geese-Inspired Hybrid Particle Swarm Optimization Algorithm for Traveling Salesman Problem", "doi": null, "abstractUrl": 
"/proceedings-article/aici/2009/3816a134/12OmNAmVH9b", "parentPublication": { "id": "proceedings/aici/2009/3816/1", "title": "2009 International Conference on Artificial Intelligence and Computational Intelligence", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cbmi/2009/3662/0/3662a181", "title": "Dominant Color Extraction Based on Dynamic Clustering by Multi-dimensional Particle Swarm Optimization", "doi": null, "abstractUrl": "/proceedings-article/cbmi/2009/3662a181/12OmNBpVQ5F", "parentPublication": { "id": "proceedings/cbmi/2009/3662/0", "title": "Content-Based Multimedia Indexing, International Workshop on", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iccnea/2017/3981/0/3981a156", "title": "The Application of Improved PSO Algorithm in the Geometric Constraint Solving", "doi": null, "abstractUrl": "/proceedings-article/iccnea/2017/3981a156/12OmNvo67Cq", "parentPublication": { "id": "proceedings/iccnea/2017/3981/0", "title": "2017 International Conference on Computer Network, Electronic and Automation (ICCNEA)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/aici/2009/3816/3/3816c480", "title": "Color Image Filter Based on Predator-Prey Particle Swarm Optimization", "doi": null, "abstractUrl": "/proceedings-article/aici/2009/3816c480/12OmNx38vPs", "parentPublication": { "id": "proceedings/aici/2009/3816/3", "title": "2009 International Conference on Artificial Intelligence and Computational Intelligence", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/scalcom-embeddedcom/2009/3825/0/3825a087", "title": "A Map-Coverage Algorithm Basing on Particle Swarm Optimization", "doi": null, "abstractUrl": "/proceedings-article/scalcom-embeddedcom/2009/3825a087/12OmNxAlA6g", "parentPublication": { "id": "proceedings/scalcom-embeddedcom/2009/3825/0", "title": 
"Scalable Computing and Communications; International Conference on Embedded Computing, International Conference on", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/bigmm/2015/8688/0/8688a348", "title": "Research on the Application of Face Symbol in Thematic Map Making", "doi": null, "abstractUrl": "/proceedings-article/bigmm/2015/8688a348/12OmNxVlTEX", "parentPublication": { "id": "proceedings/bigmm/2015/8688/0", "title": "2015 IEEE International Conference on Multimedia Big Data (BigMM)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icisa/2014/4443/0/06847347", "title": "A Utilization Study of Domestic Thematic Map for Military Terrain Analysis Cartography", "doi": null, "abstractUrl": "/proceedings-article/icisa/2014/06847347/12OmNz5apDp", "parentPublication": { "id": "proceedings/icisa/2014/4443/0", "title": "2014 International Conference on Information Science and Applications (ICISA)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cisp/2008/3119/2/3119b110", "title": "Robust Color Classification Using Fuzzy Rule-Based Particle Swarm Optimization", "doi": null, "abstractUrl": "/proceedings-article/cisp/2008/3119b110/12OmNzTYCbh", "parentPublication": { "id": "proceedings/cisp/2008/3119/3", "title": "Image and Signal Processing, Congress on", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/si/2014/02/06478841", "title": "Application Mapping Onto Mesh-based Network-on-chip Using Discrete Particle Swarm Optimization", "doi": null, "abstractUrl": "/journal/si/2014/02/06478841/13rRUy08MCR", "parentPublication": { "id": "trans/si", "title": "IEEE Transactions on Very Large Scale Integration (VLSI) Systems", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cw/2020/6497/0/649700a220", "title": "Swarm Intelligence 
for Automatic Color and Contrast Retrieval of Digital Images of Paintings", "doi": null, "abstractUrl": "/proceedings-article/cw/2020/649700a220/1olHykXHobu", "parentPublication": { "id": "proceedings/cw/2020/6497/0", "title": "2020 International Conference on Cyberworlds (CW)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "12OmNxwENv2", "title": "Proceedings. ISADS 2005. 2005 International Symposium on Autonomous Decentralized Systems", "acronym": "isads", "groupId": "1000067", "volume": "0", "displayVolume": "0", "year": "2005", "__typename": "ProceedingType" }, "article": { "id": "12OmNBCHMKs", "doi": "10.1109/ISADS.2005.1452121", "title": "Fast algorithm for line rasterization by using slope 1", "normalizedTitle": "Fast algorithm for line rasterization by using slope 1", "abstract": "In this paper, a fast algorithm for line rasterization is presented by using approximate pixels. Slope 1 is used to decide which pixel is selected instead of using slope 1/2. Compared with Bresenham's middle point algorithm, three multiplications are reduced in each line raster process. Moreover, experiment shows that at least 6.315 percent of time is saved, and there are almost the same results for viewer compared with those of Bresenham's middle point algorithm.", "abstracts": [ { "abstractType": "Regular", "content": "In this paper, a fast algorithm for line rasterization is presented by using approximate pixels. Slope 1 is used to decide which pixel is selected instead of using slope 1/2. Compared with Bresenham's middle point algorithm, three multiplications are reduced in each line raster process. Moreover, experiment shows that at least 6.315 percent of time is saved, and there are almost the same results for viewer compared with those of Bresenham's middle point algorithm.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "In this paper, a fast algorithm for line rasterization is presented by using approximate pixels. Slope 1 is used to decide which pixel is selected instead of using slope 1/2. Compared with Bresenham's middle point algorithm, three multiplications are reduced in each line raster process. 
Moreover, experiment shows that at least 6.315 percent of time is saved, and there are almost the same results for viewer compared with those of Bresenham's middle point algorithm.", "fno": "01452121", "keywords": [ "Algorithm Theory", "Approximation Theory", "Fast Algorithm", "Line Rasterization", "Approximate Pixel", "Bresenham Middle Point Algorithm", "Computer Numerical Control", "Physics Computing", "Computer Applications", "Computer Graphics", "Data Visualization", "Hardware", "Packaging", "Displays", "Layout", "Rendering Computer Graphics" ], "authors": [ { "affiliation": "Dept. of Comput. & Commun. Eng., Southwest Jiaotong Univ., China", "fullName": "Hua Zhang", "givenName": null, "surname": "Hua Zhang", "__typename": "ArticleAuthorType" }, { "affiliation": "Dept. of Comput. & Commun. Eng., Southwest Jiaotong Univ., China", "fullName": "Changqian Zhu", "givenName": null, "surname": "Changqian Zhu", "__typename": "ArticleAuthorType" }, { "affiliation": "Dept. of Comput. & Commun. 
Eng., Southwest Jiaotong Univ., China", "fullName": "Jun Yang", "givenName": null, "surname": "Jun Yang", "__typename": "ArticleAuthorType" } ], "idPrefix": "isads", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "2005-01-01T00:00:00", "pubType": "proceedings", "pages": "508,509,510,511,512", "year": "2005", "issn": "1541-0056", "isbn": null, "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "01452120", "articleId": "12OmNCbU34j", "__typename": "AdjacentArticleType" }, "next": { "fno": "01452122", "articleId": "12OmNzd7bEj", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/iccea/2010/6079/1/05445852", "title": "3-Dimensional Modelling for the Rock Slope Excavation and Reinforcement by the Bolts in Civil Engineering", "doi": null, "abstractUrl": "/proceedings-article/iccea/2010/05445852/12OmNrkBwwD", "parentPublication": { "id": "proceedings/iccea/2010/6079/1", "title": "2010 Second International Conference on Computer Engineering and Applications (ICCEA 2010)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/eait/2014/4272/0/4272a204", "title": "An Efficient Slope and Slant Correction Technique for Off-Line Handwritten Text Word", "doi": null, "abstractUrl": "/proceedings-article/eait/2014/4272a204/12OmNvkpkWm", "parentPublication": { "id": "proceedings/eait/2014/4272/0", "title": "2014 Fourth International Conference of Emerging Applications of Information Technology (EAIT)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icicta/2009/3804/4/3804e550", "title": "Method for Analyzing the Slope Stability Based on Potential Slip Line Theory", "doi": null, "abstractUrl": "/proceedings-article/icicta/2009/3804e550/12OmNwekjBC", "parentPublication": { "id": 
"proceedings/icicta/2009/3804/4", "title": "Intelligent Computation Technology and Automation, International Conference on", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/hpcs/2002/1626/0/16260281", "title": "Distributed Rasterization using OpenGL", "doi": null, "abstractUrl": "/proceedings-article/hpcs/2002/16260281/12OmNyqRnpd", "parentPublication": { "id": "proceedings/hpcs/2002/1626/0", "title": "High Performance Computing Systems and Applications, Annual International Symposium on", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/etcs/2010/3987/1/3987a027", "title": "Numerical Simulation for the Rock Slope Angle Design in Civil Engineering", "doi": null, "abstractUrl": "/proceedings-article/etcs/2010/3987a027/12OmNyugz3W", "parentPublication": { "id": "proceedings/etcs/2010/3987/1", "title": "Education Technology and Computer Science, International Workshop on", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2019/07/08356687", "title": "A Voxel-Based Rendering Pipeline for Large 3D Line Sets", "doi": null, "abstractUrl": "/journal/tg/2019/07/08356687/13rRUwInvJn", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/si/2013/07/06264117", "title": "Efficient Vector Graphics Rasterization Accelerator Using Optimized Scan-Line Buffer", "doi": null, "abstractUrl": "/journal/si/2013/07/06264117/13rRUwInvcO", "parentPublication": { "id": "trans/si", "title": "IEEE Transactions on Very Large Scale Integration (VLSI) Systems", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2013/10/ttg2013101732", "title": "Octree Rasterization: Accelerating High-Quality Out-of-Core GPU Volume Rendering", "doi": null, "abstractUrl": 
"/journal/tg/2013/10/ttg2013101732/13rRUwvBy8T", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icisce/2019/5712/0/09107676", "title": "Analysis on Slope Stability of Some Pole and Tower on &#x00B1;800kV UHV Fufeng Line", "doi": null, "abstractUrl": "/proceedings-article/icisce/2019/09107676/1koLt82Ry5a", "parentPublication": { "id": "proceedings/icisce/2019/5712/0", "title": "2019 6th International Conference on Information Science and Control Engineering (ICISCE)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "12OmNxjjEcl", "title": "2009 WRI International Conference on Communications and Mobile Computing. CMC 2009", "acronym": "cmc", "groupId": "1002644", "volume": "3", "displayVolume": "3", "year": "2009", "__typename": "ProceedingType" }, "article": { "id": "12OmNBhZ4i8", "doi": "10.1109/CMC.2009.285", "title": "Vector Graphics Rendering on Mobile Device", "normalizedTitle": "Vector Graphics Rendering on Mobile Device", "abstract": "In order to solve the lower performance of vector graphics rendering on mobile device. The path generation and drawing in OpenVG is researched. After analyzing the OpenVG rendering pipeline, accelerated rendering methods are presented in three major aspects: rasterizer, tessellate and mathematical function. Experimental results indicate that the time of tessellation and rasterization is reduced. When applied to VG samples, the algorithm results in improved performance and good visual quality graphics.", "abstracts": [ { "abstractType": "Regular", "content": "In order to solve the lower performance of vector graphics rendering on mobile device. The path generation and drawing in OpenVG is researched. After analyzing the OpenVG rendering pipeline, accelerated rendering methods are presented in three major aspects: rasterizer, tessellate and mathematical function. Experimental results indicate that the time of tessellation and rasterization is reduced. When applied to VG samples, the algorithm results in improved performance and good visual quality graphics.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "In order to solve the lower performance of vector graphics rendering on mobile device. The path generation and drawing in OpenVG is researched. After analyzing the OpenVG rendering pipeline, accelerated rendering methods are presented in three major aspects: rasterizer, tessellate and mathematical function. Experimental results indicate that the time of tessellation and rasterization is reduced. 
When applied to VG samples, the algorithm results in improved performance and good visual quality graphics.", "fno": "3501c008", "keywords": [ "Computer Graphics", "Mathematical Analysis", "Mobile Computing", "Rendering Computer Graphics", "Vector Graphics Rendering", "Mobile Device", "Open VG Rendering Pipeline", "Visual Quality Graphics", "Mathematical Function", "Rendering Computer Graphics", "Pipelines", "Mobile Computing", "Acceleration", "Mobile Communication", "Paints", "Computer Graphics", "Displays", "User Interfaces", "Libraries", "Mobile Device", "Vector Graphics", "Rendering", "Path", "Embedded System" ], "authors": [ { "affiliation": "Sch. of Comput. Sci. & Software Eng., Hangzhou Dianzi Univ., Hangzhou", "fullName": "Bi-shi He", "givenName": "Bi-shi", "surname": "He", "__typename": "ArticleAuthorType" }, { "affiliation": "Sch. of Comput. Sci. & Software Eng., Hangzhou Dianzi Univ., Hangzhou", "fullName": "Xiao-liang Xu", "givenName": "Xiao-liang", "surname": "Xu", "__typename": "ArticleAuthorType" }, { "affiliation": "Sch. of Comput. Sci. 
& Software Eng., Hangzhou Dianzi Univ., Hangzhou", "fullName": "Tao Zheng", "givenName": "Tao", "surname": "Zheng", "__typename": "ArticleAuthorType" } ], "idPrefix": "cmc", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "2009-01-01T00:00:00", "pubType": "proceedings", "pages": "8-11", "year": "2009", "issn": null, "isbn": "978-0-7695-3501-2", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "3501x023", "articleId": "12OmNCd2rC6", "__typename": "AdjacentArticleType" }, "next": { "fno": "3501c012", "articleId": "12OmNzw8j1V", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/fcst/2015/9295/0/9295a153", "title": "Diffusion Based Vector Graphics on Mobile Devices", "doi": null, "abstractUrl": "/proceedings-article/fcst/2015/9295a153/12OmNAlvI0g", "parentPublication": { "id": "proceedings/fcst/2015/9295/0", "title": "2015 Ninth International Conference on Frontier of Computer Science and Technology (FCST)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icitcs/2015/6537/0/07292953", "title": "ARINC661 Graphics Rendering Based on OpenVG", "doi": null, "abstractUrl": "/proceedings-article/icitcs/2015/07292953/12OmNvTjZSw", "parentPublication": { "id": "proceedings/icitcs/2015/6537/0", "title": "2015 5th International Conference on IT Convergence and Security (ICITCS)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/pg/1997/8028/0/80280051", "title": "Rendering in object interference detection on conventional graphics workstations", "doi": null, "abstractUrl": "/proceedings-article/pg/1997/80280051/12OmNxG1yWt", "parentPublication": { "id": "proceedings/pg/1997/8028/0", "title": "Computer Graphics and Applications, Pacific Conference on", "__typename": 
"ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/hpca/2018/3659/0/365901a362", "title": "Perception-Oriented 3D Rendering Approximation for Modern Graphics Processors", "doi": null, "abstractUrl": "/proceedings-article/hpca/2018/365901a362/12OmNyvGyj5", "parentPublication": { "id": "proceedings/hpca/2018/3659/0", "title": "2018 IEEE International Symposium on High Performance Computer Architecture (HPCA)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ieee-vis/2005/2766/0/01532784", "title": "Sort-middle multi-projector immediate-mode rendering in Chromium", "doi": null, "abstractUrl": "/proceedings-article/ieee-vis/2005/01532784/12OmNzIl3xh", "parentPublication": { "id": "proceedings/ieee-vis/2005/2766/0", "title": "Visualization Conference, IEEE", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ism/2015/0379/0/0379a421", "title": "A Joint Asymmetric Graphics Rendering and Video Encoding Approach for Optimizing Cloud Mobile 3D Display Gaming User Experience", "doi": null, "abstractUrl": "/proceedings-article/ism/2015/0379a421/12OmNzZmZxe", "parentPublication": { "id": "proceedings/ism/2015/0379/0", "title": "2015 IEEE International Symposium on Multimedia (ISM)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2010/04/ttg2010040531", "title": "Guest Editors' Introduction: Special Section on Volume Graphics and Point-Based Graphics", "doi": null, "abstractUrl": "/journal/tg/2010/04/ttg2010040531/13rRUxly9dP", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2016/02/07111373", "title": "Mobile Volume Rendering: Past, Present and Future", "doi": null, "abstractUrl": "/journal/tg/2016/02/07111373/13rRUyuNsx0", 
"parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvpr/2022/6946/0/694600f398", "title": "RendNet: Unified 2D/3D Recognizer with Latent Space Rendering", "doi": null, "abstractUrl": "/proceedings-article/cvpr/2022/694600f398/1H1mmRsoeVq", "parentPublication": { "id": "proceedings/cvpr/2022/6946/0", "title": "2022 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/5555/01/09942350", "title": "A Survey of Smooth Vector Graphics: Recent Advances in Representation, Creation, Rasterization and Image Vectorization", "doi": null, "abstractUrl": "/journal/tg/5555/01/09942350/1I8NUS7Puc8", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "12OmNCbU3aO", "title": "2013 12th International Conference on Document Analysis and Recognition (ICDAR)", "acronym": "icdar", "groupId": "1000219", "volume": "0", "displayVolume": "0", "year": "2013", "__typename": "ProceedingType" }, "article": { "id": "12OmNwoxSfW", "doi": "10.1109/ICDAR.2013.21", "title": "Vectorization of 3D-Characters by Integral Invariant Filtering of High-Resolution Triangular Meshes", "normalizedTitle": "Vectorization of 3D-Characters by Integral Invariant Filtering of High-Resolution Triangular Meshes", "abstract": "Motivated by the demand of today's Assyriologists we develop a system for automated detection and extraction of cuneiform script, which is one of the most important sources for ancient history. Traditional means of documentation are (i) photographs and (ii) manual drawings, which are increasingly replaced by shape acquisition using 3D-scanners resulting in (iii) high-resolution 3D-models. To utilize the full potential of the acquired 3D-data, we propose a filtering algorithm on 2D-manifolds using Multi-Scale Integral Invariants (MSII) to detect characters within a high-dimensional feature space. As MSII filtering is a local method it overcomes the drawbacks of global illumination methods using virtual light sources. This filtering technique allows for rendering false-color images of the tablets without shadowing effects making the tablets already easy to read. With an additional step of the processing pipeline of our software framework \\emph{GigaMesh}, we can extract vector drawings. These are the basis for character recognition as well as for future paleographic analysis. The vectorized characters are stored in the XML-based \\emph{Scalable Vector Graphics} (SVG) format. 
This results in a tremendous reduction of the triangular mesh data to a meaningful spline representation of the tablets' contents.", "abstracts": [ { "abstractType": "Regular", "content": "Motivated by the demand of today's Assyriologists we develop a system for automated detection and extraction of cuneiform script, which is one of the most important sources for ancient history. Traditional means of documentation are (i) photographs and (ii) manual drawings, which are increasingly replaced by shape acquisition using 3D-scanners resulting in (iii) high-resolution 3D-models. To utilize the full potential of the acquired 3D-data, we propose a filtering algorithm on 2D-manifolds using Multi-Scale Integral Invariants (MSII) to detect characters within a high-dimensional feature space. As MSII filtering is a local method it overcomes the drawbacks of global illumination methods using virtual light sources. This filtering technique allows for rendering false-color images of the tablets without shadowing effects making the tablets already easy to read. With an additional step of the processing pipeline of our software framework \\emph{GigaMesh}, we can extract vector drawings. These are the basis for character recognition as well as for future paleographic analysis. The vectorized characters are stored in the XML-based \\emph{Scalable Vector Graphics} (SVG) format. This results in a tremendous reduction of the triangular mesh data to a meaningful spline representation of the tablets' contents.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "Motivated by the demand of today's Assyriologists we develop a system for automated detection and extraction of cuneiform script, which is one of the most important sources for ancient history. Traditional means of documentation are (i) photographs and (ii) manual drawings, which are increasingly replaced by shape acquisition using 3D-scanners resulting in (iii) high-resolution 3D-models. 
To utilize the full potential of the acquired 3D-data, we propose a filtering algorithm on 2D-manifolds using Multi-Scale Integral Invariants (MSII) to detect characters within a high-dimensional feature space. As MSII filtering is a local method it overcomes the drawbacks of global illumination methods using virtual light sources. This filtering technique allows for rendering false-color images of the tablets without shadowing effects making the tablets already easy to read. With an additional step of the processing pipeline of our software framework \\emph{GigaMesh}, we can extract vector drawings. These are the basis for character recognition as well as for future paleographic analysis. The vectorized characters are stored in the XML-based \\emph{Scalable Vector Graphics} (SVG) format. This results in a tremendous reduction of the triangular mesh data to a meaningful spline representation of the tablets' contents.", "fno": "06628586", "keywords": [ "Vectors", "Manuals", "Feature Extraction", "Robustness", "Shape", "Correlation", "Splines Mathematics", "Integral Invariant Filtering" ], "authors": [ { "affiliation": null, "fullName": "Hubert Mara", "givenName": "Hubert", "surname": "Mara", "__typename": "ArticleAuthorType" }, { "affiliation": null, "fullName": "Susanne Kromker", "givenName": "Susanne", "surname": "Kromker", "__typename": "ArticleAuthorType" } ], "idPrefix": "icdar", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "2013-08-01T00:00:00", "pubType": "proceedings", "pages": "62-66", "year": "2013", "issn": "1520-5363", "isbn": "978-0-7695-4999-6", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "06628585", "articleId": "12OmNzh5z1H", "__typename": "AdjacentArticleType" }, "next": { "fno": "06628587", "articleId": "12OmNvHGrxn", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, 
"recommendedArticles": [ { "id": "proceedings/bibe/2014/7502/0/7502a311", "title": "Median Based Method for Baseline Wander Removal in Photoplethysmogram Signals", "doi": null, "abstractUrl": "/proceedings-article/bibe/2014/7502a311/12OmNAS9ztt", "parentPublication": { "id": "proceedings/bibe/2014/7502/0", "title": "2014 IEEE International Conference on Bioinformatics and Bioengineering (BIBE)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icpr/2012/2216/0/06460080", "title": "A non-rigid registration method for medical volume data using 3D Phase-Only Correlation", "doi": null, "abstractUrl": "/proceedings-article/icpr/2012/06460080/12OmNwwMf4S", "parentPublication": { "id": "proceedings/icpr/2012/2216/0", "title": "2012 21st International Conference on Pattern Recognition (ICPR 2012)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cw/2013/2246/0/2246a318", "title": "Skeleton-Based Anime Hair Modeling and Visualization", "doi": null, "abstractUrl": "/proceedings-article/cw/2013/2246a318/12OmNykCceO", "parentPublication": { "id": "proceedings/cw/2013/2246/0", "title": "2013 International Conference on Cyberworlds (CW)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/sibgrapi/2013/5099/0/5099a039", "title": "Applications of Conformal Geometric Algebra in Mesh Deformation", "doi": null, "abstractUrl": "/proceedings-article/sibgrapi/2013/5099a039/12OmNzsJ7DK", "parentPublication": { "id": "proceedings/sibgrapi/2013/5099/0", "title": "2013 XXVI Conference on Graphics, Patterns and Images", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2018/01/08017623", "title": "Functional Decomposition for Bundled Simplification of Trail Sets", "doi": null, "abstractUrl": "/journal/tg/2018/01/08017623/13rRUyYSWt2", "parentPublication": { "id": "trans/tg", "title": "IEEE 
Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icfhr/2018/5875/0/587500a363", "title": "Feature Descriptors for Spotting 3D Characters on Triangular Meshes", "doi": null, "abstractUrl": "/proceedings-article/icfhr/2018/587500a363/17D45WGGoLw", "parentPublication": { "id": "proceedings/icfhr/2018/5875/0", "title": "2018 16th International Conference on Frontiers in Handwriting Recognition (ICFHR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/5555/01/09767783", "title": "b/Surf: Interactive Bézier Splines on Surface Meshes", "doi": null, "abstractUrl": "/journal/tg/5555/01/09767783/1D4MIotOemQ", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icfhr/2020/9966/0/996600a246", "title": "Period Classification of 3D Cuneiform Tablets with Geometric Neural Networks", "doi": null, "abstractUrl": "/proceedings-article/icfhr/2020/996600a246/1p2VtWd1Z3a", "parentPublication": { "id": "proceedings/icfhr/2020/9966/0", "title": "2020 17th International Conference on Frontiers in Handwriting Recognition (ICFHR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "12OmNwD1pTx", "title": "High Performance Computing Systems and Applications, Annual International Symposium on", "acronym": "hpcs", "groupId": "1001127", "volume": "0", "displayVolume": "0", "year": "2002", "__typename": "ProceedingType" }, "article": { "id": "12OmNyqRnpd", "doi": "10.1109/HPCSA.2002.1019171", "title": "Distributed Rasterization using OpenGL", "normalizedTitle": "Distributed Rasterization using OpenGL", "abstract": "This work examines the facility of using a large distributed memory system for rasterization of computer graphics using the OpenGL and GLUT libraries. Issues examined include the performance increases achieved through parallel processing and the effects of different methods for dividing the framebuffer over multiple processors.", "abstracts": [ { "abstractType": "Regular", "content": "This work examines the facility of using a large distributed memory system for rasterization of computer graphics using the OpenGL and GLUT libraries. Issues examined include the performance increases achieved through parallel processing and the effects of different methods for dividing the framebuffer over multiple processors.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "This work examines the facility of using a large distributed memory system for rasterization of computer graphics using the OpenGL and GLUT libraries. 
Issues examined include the performance increases achieved through parallel processing and the effects of different methods for dividing the framebuffer over multiple processors.", "fno": "16260281", "keywords": [ "Rasterization", "Computer Graphics", "Distributed System", "Open GL" ], "authors": [ { "affiliation": "University of Guelph", "fullName": "David Calvert", "givenName": "David", "surname": "Calvert", "__typename": "ArticleAuthorType" }, { "affiliation": "University of Guelph", "fullName": "David Thompson", "givenName": "David", "surname": "Thompson", "__typename": "ArticleAuthorType" } ], "idPrefix": "hpcs", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "2002-06-01T00:00:00", "pubType": "proceedings", "pages": "281", "year": "2002", "issn": null, "isbn": "0-7695-1626-2", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "16260280", "articleId": "12OmNwwuDZf", "__typename": "AdjacentArticleType" }, "next": { "fno": "16260283", "articleId": "12OmNvDZEX1", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/uic-atc/2012/4843/0/4843a996", "title": "An Augmented Reality Environment for Learning OpenGL Programming", "doi": null, "abstractUrl": "/proceedings-article/uic-atc/2012/4843a996/12OmNAXxXep", "parentPublication": { "id": "proceedings/uic-atc/2012/4843/0", "title": "Ubiquitous, Autonomic and Trusted Computing, Symposia and Workshops on", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iscid/2011/4500/2/4500b170", "title": "Virtual Scene Modeling Technology Based on OpenGL and 3dsMAX", "doi": null, "abstractUrl": "/proceedings-article/iscid/2011/4500b170/12OmNCwCLvf", "parentPublication": { "id": "proceedings/iscid/2011/4500/2", "title": "Computational Intelligence and Design, International Symposium 
on", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/itcc/2000/0540/0/05400295", "title": "Simulation of Hardware Support for OpenGL Graphics Architecture", "doi": null, "abstractUrl": "/proceedings-article/itcc/2000/05400295/12OmNrGKet7", "parentPublication": { "id": "proceedings/itcc/2000/0540/0", "title": "Information Technology: Coding and Computing, International Conference on", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/e-science/2007/3064/0/04426929", "title": "Network OpenGL Fusion to Make Effective Presentation System", "doi": null, "abstractUrl": "/proceedings-article/e-science/2007/04426929/12OmNvlPkD2", "parentPublication": { "id": "proceedings/e-science/2007/3064/0", "title": "2007 3rd IEEE International Conference on e-Science and Grid Computing", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/csse/2008/3336/2/3336d008", "title": "Research on Constructing 3-D Pipeline Connection Model By Using OpenGL", "doi": null, "abstractUrl": "/proceedings-article/csse/2008/3336d008/12OmNwDACcc", "parentPublication": { "id": "proceedings/csse/2008/3336/6", "title": "Computer Science and Software Engineering, International Conference on", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/itcs/2010/4074/0/4074a198", "title": "The Realization of Flight Simulation System Based on OpenGL", "doi": null, "abstractUrl": "/proceedings-article/itcs/2010/4074a198/12OmNx76TKh", "parentPublication": { "id": "proceedings/itcs/2010/4074/0", "title": "Information Technology and Computer Science, International Conference on", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icitcs/2015/6537/0/07292938", "title": "Adding Advanced Debug Output Features to an Existing OpenGL ES 1.1 Implementation", "doi": null, "abstractUrl": 
"/proceedings-article/icitcs/2015/07292938/12OmNyfdOXK", "parentPublication": { "id": "proceedings/icitcs/2015/6537/0", "title": "2015 5th International Conference on IT Convergence and Security (ICITCS)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "mags/cg/2007/04/mcg2007040084", "title": "Climbing Longs Peak: The Steep Road to the Future of OpenGL", "doi": null, "abstractUrl": "/magazine/cg/2007/04/mcg2007040084/13rRUIIVleR", "parentPublication": { "id": "mags/cg", "title": "IEEE Computer Graphics and Applications", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2011/03/ttg2011030320", "title": "CGLX: A Scalable, High-Performance Visualization Framework for Networked Display Environments", "doi": null, "abstractUrl": "/journal/tg/2011/03/ttg2011030320/13rRUwgQpqJ", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "12OmNyoiYVr", "title": "2017 IEEE Conference on Computer Vision and Pattern Recognition (CVPR)", "acronym": "cvpr", "groupId": "1000147", "volume": "0", "displayVolume": "0", "year": "2017", "__typename": "ProceedingType" }, "article": { "id": "12OmNqNG3iJ", "doi": "10.1109/CVPR.2017.727", "title": "Snapshot Hyperspectral Light Field Imaging", "normalizedTitle": "Snapshot Hyperspectral Light Field Imaging", "abstract": "This paper presents the first snapshot hyperspectral light field imager in practice. Specifically, we design a novel hybrid camera system to obtain two complementary measurements that sample the angular and spectral dimensions respectively. To recover the full 5D hyperspectral light field from the severely undersampled measurements, we then propose an efficient computational reconstruction algorithm by exploiting the large correlations across the angular and spectral dimensions through self-learned dictionaries. Simulation on an elaborate hyperspectral light field dataset validates the effectiveness of the proposed approach. Hardware experimental results demonstrate that, for the first time to our knowledge, a 5D hyperspectral light field containing 9x9 angular views and 27 spectral bands can be acquired in a single shot.", "abstracts": [ { "abstractType": "Regular", "content": "This paper presents the first snapshot hyperspectral light field imager in practice. Specifically, we design a novel hybrid camera system to obtain two complementary measurements that sample the angular and spectral dimensions respectively. To recover the full 5D hyperspectral light field from the severely undersampled measurements, we then propose an efficient computational reconstruction algorithm by exploiting the large correlations across the angular and spectral dimensions through self-learned dictionaries. Simulation on an elaborate hyperspectral light field dataset validates the effectiveness of the proposed approach. 
Hardware experimental results demonstrate that, for the first time to our knowledge, a 5D hyperspectral light field containing 9x9 angular views and 27 spectral bands can be acquired in a single shot.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "This paper presents the first snapshot hyperspectral light field imager in practice. Specifically, we design a novel hybrid camera system to obtain two complementary measurements that sample the angular and spectral dimensions respectively. To recover the full 5D hyperspectral light field from the severely undersampled measurements, we then propose an efficient computational reconstruction algorithm by exploiting the large correlations across the angular and spectral dimensions through self-learned dictionaries. Simulation on an elaborate hyperspectral light field dataset validates the effectiveness of the proposed approach. Hardware experimental results demonstrate that, for the first time to our knowledge, a 5D hyperspectral light field containing 9x9 angular views and 27 spectral bands can be acquired in a single shot.", "fno": "0457g873", "keywords": [ "Cameras", "Hyperspectral Imaging", "Image Reconstruction", "Unsupervised Learning", "Angular Dimensions", "Spectral Dimensions", "5 D Hyperspectral Light Field", "Hybrid Camera System", "Computational Reconstruction Algorithm", "Hyperspectral Light Field Dataset", "Spectral Bands", "Snapshot Hyperspectral Light Field Imaging", "Self Learned Dictionaries", "Hyperspectral Imaging", "Cameras", "Dictionaries", "Image Resolution", "Image Reconstruction" ], "authors": [ { "affiliation": null, "fullName": "Zhiwei Xiong", "givenName": "Zhiwei", "surname": "Xiong", "__typename": "ArticleAuthorType" }, { "affiliation": null, "fullName": "Lizhi Wang", "givenName": "Lizhi", "surname": "Wang", "__typename": "ArticleAuthorType" }, { "affiliation": null, "fullName": "Huiqun Li", "givenName": "Huiqun", "surname": "Li", "__typename": "ArticleAuthorType" }, { 
"affiliation": null, "fullName": "Dong Liu", "givenName": "Dong", "surname": "Liu", "__typename": "ArticleAuthorType" }, { "affiliation": null, "fullName": "Feng Wu", "givenName": "Feng", "surname": "Wu", "__typename": "ArticleAuthorType" } ], "idPrefix": "cvpr", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "2017-07-01T00:00:00", "pubType": "proceedings", "pages": "6873-6881", "year": "2017", "issn": "1063-6919", "isbn": "978-1-5386-0457-1", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "0457g864", "articleId": "12OmNBigFtu", "__typename": "AdjacentArticleType" }, "next": { "fno": "0457g882", "articleId": "12OmNzXnNvU", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/iccv/2017/1032/0/1032a985", "title": "Catadioptric HyperSpectral Light Field Imaging", "doi": null, "abstractUrl": "/proceedings-article/iccv/2017/1032a985/12OmNAkWvyZ", "parentPublication": { "id": "proceedings/iccv/2017/1032/0", "title": "2017 IEEE International Conference on Computer Vision (ICCV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iccv/2015/8391/0/8391a307", "title": "RGB-Guided Hyperspectral Image Upsampling", "doi": null, "abstractUrl": "/proceedings-article/iccv/2015/8391a307/12OmNC8uRyl", "parentPublication": { "id": "proceedings/iccv/2015/8391/0", "title": "2015 IEEE International Conference on Computer Vision (ICCV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvpr/2011/0394/0/05995457", "title": "High-resolution hyperspectral imaging via matrix factorization", "doi": null, "abstractUrl": "/proceedings-article/cvpr/2011/05995457/12OmNrJiCPq", "parentPublication": { "id": "proceedings/cvpr/2011/0394/0", "title": "CVPR 2011", "__typename": "ParentPublication" }, 
"__typename": "RecommendedArticleType" }, { "id": "proceedings/icassp/2004/8484/3/01326590", "title": "Superresolution reconstruction of hyperspectral images", "doi": null, "abstractUrl": "/proceedings-article/icassp/2004/01326590/12OmNwc3wvk", "parentPublication": { "id": "proceedings/icassp/2004/8484/3", "title": "2004 IEEE International Conference on Acoustics, Speech, and Signal Processing", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvpr/2016/8851/0/8851c461", "title": "Do It Yourself Hyperspectral Imaging with Everyday Digital Cameras", "doi": null, "abstractUrl": "/proceedings-article/cvpr/2016/8851c461/12OmNyuPL0H", "parentPublication": { "id": "proceedings/cvpr/2016/8851/0", "title": "2016 IEEE Conference on Computer Vision and Pattern Recognition (CVPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tp/2019/05/08338146", "title": "Hyperspectral Light Field Stereo Matching", "doi": null, "abstractUrl": "/journal/tp/2019/05/08338146/13rRUwInvgy", "parentPublication": { "id": "trans/tp", "title": "IEEE Transactions on Pattern Analysis & Machine Intelligence", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tp/2017/10/07676344", "title": "Adaptive Nonlocal Sparse Representation for Dual-Camera Compressive Hyperspectral Imaging", "doi": null, "abstractUrl": "/journal/tp/2017/10/07676344/13rRUx0gegF", "parentPublication": { "id": "trans/tp", "title": "IEEE Transactions on Pattern Analysis & Machine Intelligence", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tp/2019/04/08320303", "title": "High-Speed Hyperspectral Video Acquisition By Combining Nyquist and Compressive Sampling", "doi": null, "abstractUrl": "/journal/tp/2019/04/08320303/13rRUyeCkbF", "parentPublication": { "id": "trans/tp", "title": "IEEE Transactions on Pattern Analysis & Machine Intelligence", 
"__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/bigmm/2019/5527/0/552700a266", "title": "Deep Residual Network of Spectral and Spatial Fusion for Hyperspectral Image Super-Resolution", "doi": null, "abstractUrl": "/proceedings-article/bigmm/2019/552700a266/1fHjKOrvGZq", "parentPublication": { "id": "proceedings/bigmm/2019/5527/0", "title": "2019 IEEE Fifth International Conference on Multimedia Big Data (BigMM)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iccvw/2019/5023/0/502300d933", "title": "Blind Unitary Transform Learning for Inverse Problems in Light-Field Imaging", "doi": null, "abstractUrl": "/proceedings-article/iccvw/2019/502300d933/1i5mns7BIAg", "parentPublication": { "id": "proceedings/iccvw/2019/5023/0", "title": "2019 IEEE/CVF International Conference on Computer Vision Workshop (ICCVW)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "12OmNxwENve", "title": "2018 IEEE Conference on Multimedia Information Processing and Retrieval (MIPR)", "acronym": "mipr", "groupId": "1825825", "volume": "0", "displayVolume": "0", "year": "2018", "__typename": "ProceedingType" }, "article": { "id": "12OmNrIJqDf", "doi": "10.1109/MIPR.2018.00073", "title": "Robust Surface Light Field Modeling", "normalizedTitle": "Robust Surface Light Field Modeling", "abstract": "Surface light field advances conventional light field rendering techniques by utilizing geometry information. Using surface light field, real-world objects with complex appearance could be faithfully represented. This capability could play an important role in many VR/AR applications. However, an accurate geometric model is needed for surface light field sampling and processing, which limits its wide usage since many objects of interests are difficult if not impossible to reconstruct with their usually very complex appearances. We propose a novel optimization framework to reduce the dependency of accurate geometry. The key insight is to treat surface light sampling as a multi-view multi-texture optimization problem. Our approach can deal with both model inaccuracy and texture to model misalignment, making it possible to create high-fidelity surface light field models without using high-precision special hardware.", "abstracts": [ { "abstractType": "Regular", "content": "Surface light field advances conventional light field rendering techniques by utilizing geometry information. Using surface light field, real-world objects with complex appearance could be faithfully represented. This capability could play an important role in many VR/AR applications. However, an accurate geometric model is needed for surface light field sampling and processing, which limits its wide usage since many objects of interests are difficult if not impossible to reconstruct with their usually very complex appearances. 
We propose a novel optimization framework to reduce the dependency of accurate geometry. The key insight is to treat surface light sampling as a multi-view multi-texture optimization problem. Our approach can deal with both model inaccuracy and texture to model misalignment, making it possible to create high-fidelity surface light field models without using high-precision special hardware.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "Surface light field advances conventional light field rendering techniques by utilizing geometry information. Using surface light field, real-world objects with complex appearance could be faithfully represented. This capability could play an important role in many VR/AR applications. However, an accurate geometric model is needed for surface light field sampling and processing, which limits its wide usage since many objects of interests are difficult if not impossible to reconstruct with their usually very complex appearances. We propose a novel optimization framework to reduce the dependency of accurate geometry. The key insight is to treat surface light sampling as a multi-view multi-texture optimization problem. 
Our approach can deal with both model inaccuracy and texture to model misalignment, making it possible to create high-fidelity surface light field models without using high-precision special hardware.", "fno": "185701a321", "keywords": [ "Geometry", "Image Reconstruction", "Image Texture", "Optimisation", "Rendering Computer Graphics", "Model Misalignment", "High Fidelity Surface Light Field Models", "Geometry Information", "Surface Light Field Sampling", "Robust Surface Light Field Modeling", "Geometric Model", "Light Field Rendering Techniques", "Multiview Multitexture Optimization Problem", "High Precision Special Hardware", "Geometry", "Face", "Rendering Computer Graphics", "Image Color Analysis", "Surface Treatment", "Optimization", "Image Coding", "Light Filed", "Image Based Rendering", "Surface Light Filed", "AR VR" ], "authors": [ { "affiliation": null, "fullName": "Wei Li", "givenName": "Wei", "surname": "Li", "__typename": "ArticleAuthorType" }, { "affiliation": null, "fullName": "Hui Qiao", "givenName": "Hui", "surname": "Qiao", "__typename": "ArticleAuthorType" }, { "affiliation": null, "fullName": "Chen Zhao", "givenName": "Chen", "surname": "Zhao", "__typename": "ArticleAuthorType" }, { "affiliation": null, "fullName": "Zhongqin Wu", "givenName": "Zhongqin", "surname": "Wu", "__typename": "ArticleAuthorType" }, { "affiliation": null, "fullName": "Ruigang Yang", "givenName": "Ruigang", "surname": "Yang", "__typename": "ArticleAuthorType" } ], "idPrefix": "mipr", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "2018-04-01T00:00:00", "pubType": "proceedings", "pages": "321-327", "year": "2018", "issn": null, "isbn": "978-1-5386-1857-8", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "185701a315", "articleId": "12OmNxRnvTk", "__typename": "AdjacentArticleType" }, "next": { "fno": "185701a328", "articleId": "12OmNx0RILV", 
"__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/cvprw/2017/0733/0/0733b735", "title": "Surface Normal Reconstruction from Specular Information in Light Field Data", "doi": null, "abstractUrl": "/proceedings-article/cvprw/2017/0733b735/12OmNAP1YZr", "parentPublication": { "id": "proceedings/cvprw/2017/0733/0", "title": "2017 IEEE Conference on Computer Vision and Pattern Recognition Workshops (CVPRW)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ismar/2012/4660/0/06402544", "title": "Real-time surface light-field capture for augmentation of planar specular surfaces", "doi": null, "abstractUrl": "/proceedings-article/ismar/2012/06402544/12OmNASILPn", "parentPublication": { "id": "proceedings/ismar/2012/4660/0", "title": "2012 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/pg/2003/2028/0/20280215", "title": "Feature-Based Surface Light Field Morphing", "doi": null, "abstractUrl": "/proceedings-article/pg/2003/20280215/12OmNAkWvlK", "parentPublication": { "id": "proceedings/pg/2003/2028/0", "title": "Computer Graphics and Applications, Pacific Conference on", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvprw/2012/1611/0/06239344", "title": "Geometry-corrected light field rendering for creating a holographic stereogram", "doi": null, "abstractUrl": "/proceedings-article/cvprw/2012/06239344/12OmNBh8gW6", "parentPublication": { "id": "proceedings/cvprw/2012/1611/0", "title": "2012 IEEE Computer Society Conference on Computer Vision and Pattern Recognition Workshops", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/th/2016/02/07358132", "title": "On Frictional Forces between the Finger and a Textured Surface during Active 
Touch", "doi": null, "abstractUrl": "/journal/th/2016/02/07358132/13rRUxZzAhO", "parentPublication": { "id": "trans/th", "title": "IEEE Transactions on Haptics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/3dv/2018/8425/0/842500a012", "title": "Surface Light Field Fusion", "doi": null, "abstractUrl": "/proceedings-article/3dv/2018/842500a012/17D45WODasr", "parentPublication": { "id": "proceedings/3dv/2018/8425/0", "title": "2018 International Conference on 3D Vision (3DV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iccv/2021/2812/0/281200g505", "title": "Learning Signed Distance Field for Multi-view Surface Reconstruction", "doi": null, "abstractUrl": "/proceedings-article/iccv/2021/281200g505/1BmFLjuiAKs", "parentPublication": { "id": "proceedings/iccv/2021/2812/0", "title": "2021 IEEE/CVF International Conference on Computer Vision (ICCV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/3dv/2019/3131/0/313100a482", "title": "Light Field Compression using Eigen Textures", "doi": null, "abstractUrl": "/proceedings-article/3dv/2019/313100a482/1ezRAUnTCpy", "parentPublication": { "id": "proceedings/3dv/2019/3131/0", "title": "2019 International Conference on 3D Vision (3DV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/3dv/2020/8128/0/812800a452", "title": "Learning Implicit Surface Light Fields", "doi": null, "abstractUrl": "/proceedings-article/3dv/2020/812800a452/1qyxkR2YxGM", "parentPublication": { "id": "proceedings/3dv/2020/8128/0", "title": "2020 International Conference on 3D Vision (3DV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tp/2022/11/09555381", "title": "SurRF: Unsupervised Multi-View Stereopsis by Learning Surface Radiance Field", "doi": null, "abstractUrl": 
"/journal/tp/2022/11/09555381/1xjQQdQGABG", "parentPublication": { "id": "trans/tp", "title": "IEEE Transactions on Pattern Analysis & Machine Intelligence", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "12OmNqJ8taQ", "title": "2014 IEEE Conference on Visual Analytics Science and Technology (VAST)", "acronym": "vast", "groupId": "1001630", "volume": "0", "displayVolume": "0", "year": "2014", "__typename": "ProceedingType" }, "article": { "id": "12OmNASraHn", "doi": "10.1109/VAST.2014.7042511", "title": "VisIRR: Visual analytics for information retrieval and recommendation with large-scale document data", "normalizedTitle": "VisIRR: Visual analytics for information retrieval and recommendation with large-scale document data", "abstract": "We present VisIRR, an interactive visual information retrieval and recommendation system for large-scale document data. Starting with a query, VisIRR visualizes the retrieved documents in a scatter plot along with their topic summary. Next, based on interactive personalized preference feedback on the documents, VisIRR collects and visualizes potentially relevant documents out of the entire corpus so that an integrated analysis of both retrieved and recommended documents can be performed seamlessly.", "abstracts": [ { "abstractType": "Regular", "content": "We present VisIRR, an interactive visual information retrieval and recommendation system for large-scale document data. Starting with a query, VisIRR visualizes the retrieved documents in a scatter plot along with their topic summary. Next, based on interactive personalized preference feedback on the documents, VisIRR collects and visualizes potentially relevant documents out of the entire corpus so that an integrated analysis of both retrieved and recommended documents can be performed seamlessly.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "We present VisIRR, an interactive visual information retrieval and recommendation system for large-scale document data. Starting with a query, VisIRR visualizes the retrieved documents in a scatter plot along with their topic summary. 
Next, based on interactive personalized preference feedback on the documents, VisIRR collects and visualizes potentially relevant documents out of the entire corpus so that an integrated analysis of both retrieved and recommended documents can be performed seamlessly.", "fno": "07042511", "keywords": [ "Data Visualization", "Alzheimers Disease", "Information Retrieval", "Visual Analytics", "Support Vector Machines", "Scatter Plot", "Recommendation", "Document Analysis", "Dimension Reduction", "Clustering", "Information Retrieval" ], "authors": [ { "affiliation": "Georgia Institute of Technology", "fullName": "Jaegul Choo", "givenName": "Jaegul", "surname": "Choo", "__typename": "ArticleAuthorType" }, { "affiliation": "Google Inc.", "fullName": "Changhyun Lee", "givenName": "Changhyun", "surname": "Lee", "__typename": "ArticleAuthorType" }, { "affiliation": "Georgia Institute of Technology", "fullName": "Hannah Kim", "givenName": "Hannah", "surname": "Kim", "__typename": "ArticleAuthorType" }, { "affiliation": "University of Maryland", "fullName": "Hanseung Lee", "givenName": "Hanseung", "surname": "Lee", "__typename": "ArticleAuthorType" }, { "affiliation": "Adobe Research", "fullName": "Zhicheng Liu", "givenName": "Zhicheng", "surname": "Liu", "__typename": "ArticleAuthorType" }, { "affiliation": "Georgia Institute of Technology", "fullName": "Ramakrishnan Kannan", "givenName": "Ramakrishnan", "surname": "Kannan", "__typename": "ArticleAuthorType" }, { "affiliation": "Georgia Institute of Technology", "fullName": "Charles D. Stolper", "givenName": "Charles D.", "surname": "Stolper", "__typename": "ArticleAuthorType" }, { "affiliation": "Georgia Institute of Technology", "fullName": "John Stasko", "givenName": "John", "surname": "Stasko", "__typename": "ArticleAuthorType" }, { "affiliation": "Georgia Tech Research Institute", "fullName": "Barry L. 
Drake", "givenName": "Barry L.", "surname": "Drake", "__typename": "ArticleAuthorType" }, { "affiliation": "Georgia Institute of Technology", "fullName": "Haesun Park", "givenName": "Haesun", "surname": "Park", "__typename": "ArticleAuthorType" } ], "idPrefix": "vast", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "2014-10-01T00:00:00", "pubType": "proceedings", "pages": "243-244", "year": "2014", "issn": null, "isbn": "978-1-4799-6227-3", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "07042510", "articleId": "12OmNBgz4CP", "__typename": "AdjacentArticleType" }, "next": { "fno": "07042512", "articleId": "12OmNwcl7DF", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/iv/2014/4103/0/4103a164", "title": "A Visual Analytics of Geometric Distances between Amino Acids and Surface Pockets of Proteins", "doi": null, "abstractUrl": "/proceedings-article/iv/2014/4103a164/12OmNBOll3Y", "parentPublication": { "id": "proceedings/iv/2014/4103/0", "title": "2014 18th International Conference on Information Visualisation (IV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ldav/2012/4733/0/06378977", "title": "Visual analysis of massive web session data", "doi": null, "abstractUrl": "/proceedings-article/ldav/2012/06378977/12OmNs0kyBw", "parentPublication": { "id": "proceedings/ldav/2012/4733/0", "title": "2012 IEEE Symposium on Large Data Analysis and Visualization (LDAV 2012)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/hicss/2012/4525/0/4525b855", "title": "Applied Visual Analytics for Exploring the National Health and Nutrition Examination Survey", "doi": null, "abstractUrl": "/proceedings-article/hicss/2012/4525b855/12OmNviZllM", "parentPublication": { "id": 
"proceedings/hicss/2012/4525/0", "title": "2012 45th Hawaii International Conference on System Sciences", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/kam/2009/3888/2/3888b361", "title": "Non-relevance Feedback for Document Retrieval", "doi": null, "abstractUrl": "/proceedings-article/kam/2009/3888b361/12OmNzvhvvm", "parentPublication": { "id": "proceedings/kam/2009/3888/2", "title": "Knowledge Acquisition and Modeling, International Symposium on", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icdm/2011/4408/0/4408a101", "title": "SolarMap: Multifaceted Visual Analytics for Topic Exploration", "doi": null, "abstractUrl": "/proceedings-article/icdm/2011/4408a101/12OmNzw8j1t", "parentPublication": { "id": "proceedings/icdm/2011/4408/0", "title": "2011 IEEE 11th International Conference on Data Mining", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "mags/cg/2008/01/mcg2008010018", "title": "An Information-Theoretic View of Visual Analytics", "doi": null, "abstractUrl": "/magazine/cg/2008/01/mcg2008010018/13rRUB6SpRW", "parentPublication": { "id": "mags/cg", "title": "IEEE Computer Graphics and Applications", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2012/12/ttg2012122839", "title": "Visual Classifier Training for Text Document Retrieval", "doi": null, "abstractUrl": "/journal/tg/2012/12/ttg2012122839/13rRUB6Sq0z", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "mags/cs/2013/04/mcs2013040066", "title": "Visual Document Retrieval: Supporting Text Search and Analysis with Visual Analytics", "doi": null, "abstractUrl": "/magazine/cs/2013/04/mcs2013040066/13rRUx0xPPu", "parentPublication": { "id": "mags/cs", "title": 
"Computing in Science & Engineering", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/5555/01/09939115", "title": "DocFlow: A Visual Analytics System for Question-based Document Retrieval and Categorization", "doi": null, "abstractUrl": "/journal/tg/5555/01/09939115/1I1KuH1xVF6", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/pacificvis/2019/9226/0/922600a148", "title": "An Interactive Visual Analytics System for Incremental Classification Based on Semi-supervised Topic Modeling", "doi": null, "abstractUrl": "/proceedings-article/pacificvis/2019/922600a148/1cMF8cnyXfi", "parentPublication": { "id": "proceedings/pacificvis/2019/9226/0", "title": "2019 IEEE Pacific Visualization Symposium (PacificVis)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "12OmNC1GueH", "title": "2012 21st International Conference on Pattern Recognition (ICPR 2012)", "acronym": "icpr", "groupId": "1000545", "volume": "0", "displayVolume": "0", "year": "2012", "__typename": "ProceedingType" }, "article": { "id": "12OmNBInLk9", "doi": "", "title": "Logo spotting for document categorization", "normalizedTitle": "Logo spotting for document categorization", "abstract": "Logo spotting is of a great interest because it enables to categorize the document images of a digital library of scanned documents according to their sources, without any costly semantic analysis of their textual transcript. In this paper, we present an approach for logo spotting, based on the matching of keypoints extracted both from the query document images and a given set of logos (gallery) using SIFT. In order to filter the matching points and keep only the most relevant, we compare the spatial distribution of the matching keypoints in the query image and in the logo gallery. We test our approach using a large collection of real world documents using a well-known benchmark database of logos and show that our approach achieves good performances compared to state-of-the-art approaches.", "abstracts": [ { "abstractType": "Regular", "content": "Logo spotting is of a great interest because it enables to categorize the document images of a digital library of scanned documents according to their sources, without any costly semantic analysis of their textual transcript. In this paper, we present an approach for logo spotting, based on the matching of keypoints extracted both from the query document images and a given set of logos (gallery) using SIFT. In order to filter the matching points and keep only the most relevant, we compare the spatial distribution of the matching keypoints in the query image and in the logo gallery. 
We test our approach using a large collection of real world documents using a well-known benchmark database of logos and show that our approach achieves good performances compared to state-of-the-art approaches.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "Logo spotting is of a great interest because it enables to categorize the document images of a digital library of scanned documents according to their sources, without any costly semantic analysis of their textual transcript. In this paper, we present an approach for logo spotting, based on the matching of keypoints extracted both from the query document images and a given set of logos (gallery) using SIFT. In order to filter the matching points and keep only the most relevant, we compare the spatial distribution of the matching keypoints in the query image and in the logo gallery. We test our approach using a large collection of real world documents using a well-known benchmark database of logos and show that our approach achieves good performances compared to state-of-the-art approaches.", "fno": "06460915", "keywords": [ "Digital Libraries", "Document Image Processing", "Image Retrieval", "Information Filtering", "Visual Databases", "Logo Spotting", "Keypoint Extraction", "Query Document Image Categorization", "SIFT", "Matching Point Filtering", "Matching Keypoint Spatial Distribution", "Logo Gallery", "Real World Documents", "Logos Database", "Scanned Document Digital Library", "Databases", "Feature Extraction", "Matched Filters", "Noise", "Histograms", "Image Segmentation", "Clustering Algorithms" ], "authors": [ { "affiliation": "Laboratory L3I, Faculty of Science and Technology, La Rochelle University, France", "fullName": "Viet Phuong Le", "givenName": "Viet Phuong", "surname": "Le", "__typename": "ArticleAuthorType" }, { "affiliation": "Laboratory L3I, Faculty of Science and Technology, La Rochelle University, France", "fullName": "Muriel Visani", "givenName": "Muriel", "surname": 
"Visani", "__typename": "ArticleAuthorType" }, { "affiliation": "College of Information and Communication Technology, Can Tho University, Vietnam", "fullName": "Cao De Tran", "givenName": "Cao", "surname": "De Tran", "__typename": "ArticleAuthorType" }, { "affiliation": "Laboratory L3I, Faculty of Science and Technology, La Rochelle University, France", "fullName": "Jean-Marc Ogier", "givenName": "Jean-Marc", "surname": "Ogier", "__typename": "ArticleAuthorType" } ], "idPrefix": "icpr", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "2012-11-01T00:00:00", "pubType": "proceedings", "pages": "3484-3487", "year": "2012", "issn": "1051-4651", "isbn": "978-1-4673-2216-4", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "06460914", "articleId": "12OmNvJXeF2", "__typename": "AdjacentArticleType" }, "next": { "fno": "06460916", "articleId": "12OmNzaQo7r", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/icdar/2013/4999/0/06628626", "title": "Improving Logo Spotting and Matching for Document Categorization by a Post-Filter Based on Homography", "doi": null, "abstractUrl": "/proceedings-article/icdar/2013/06628626/12OmNBPc8z3", "parentPublication": { "id": "proceedings/icdar/2013/4999/0", "title": "2013 12th International Conference on Document Analysis and Recognition (ICDAR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icdar/2009/3725/0/3725b335", "title": "Logo Detection in Document Images Based on Boundary Extension of Feature Rectangles", "doi": null, "abstractUrl": "/proceedings-article/icdar/2009/3725b335/12OmNqGRGmg", "parentPublication": { "id": "proceedings/icdar/2009/3725/0", "title": "2009 10th International Conference on Document Analysis and Recognition", "__typename": "ParentPublication" }, 
"__typename": "RecommendedArticleType" }, { "id": "proceedings/das/2014/3244/0/3244a324", "title": "A Complete Logo Detection/Recognition System for Document Images", "doi": null, "abstractUrl": "/proceedings-article/das/2014/3244a324/12OmNro0I9i", "parentPublication": { "id": "proceedings/das/2014/3244/0", "title": "2014 11th IAPR International Workshop on Document Analysis Systems (DAS)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icdar/2007/2822/2/28220864", "title": "Automatic Document Logo Detection", "doi": null, "abstractUrl": "/proceedings-article/icdar/2007/28220864/12OmNs5rl1s", "parentPublication": { "id": "proceedings/icdar/2007/2822/2", "title": "Ninth International Conference on Document Analysis and Recognition (ICDAR 2007)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icdar/2009/3725/0/3725a606", "title": "Logo Matching for Document Image Retrieval", "doi": null, "abstractUrl": "/proceedings-article/icdar/2009/3725a606/12OmNwJgAFJ", "parentPublication": { "id": "proceedings/icdar/2009/3725/0", "title": "2009 10th International Conference on Document Analysis and Recognition", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icpr/2014/5209/0/5209d056", "title": "Document Retrieval Based on Logo Spotting Using Key-Point Matching", "doi": null, "abstractUrl": "/proceedings-article/icpr/2014/5209d056/12OmNxecRSp", "parentPublication": { "id": "proceedings/icpr/2014/5209/0", "title": "2014 22nd International Conference on Pattern Recognition (ICPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icdar/2009/3725/0/3725a111", "title": "Logo Spotting by a Bag-of-words Approach for Document Categorization", "doi": null, "abstractUrl": "/proceedings-article/icdar/2009/3725a111/12OmNyQ7FFW", "parentPublication": { "id": 
"proceedings/icdar/2009/3725/0", "title": "2009 10th International Conference on Document Analysis and Recognition", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icme/2006/0366/0/04036781", "title": "A Robust Method for TV Logo Tracking in Video Streams", "doi": null, "abstractUrl": "/proceedings-article/icme/2006/04036781/12OmNzIl3xX", "parentPublication": { "id": "proceedings/icme/2006/0366/0", "title": "2006 IEEE International Conference on Multimedia and Expo", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icme/2007/1016/0/04284995", "title": "A Reliable Logo and Replay Detector for Sports Video", "doi": null, "abstractUrl": "/proceedings-article/icme/2007/04284995/12OmNzaQonu", "parentPublication": { "id": "proceedings/icme/2007/1016/0", "title": "2007 International Conference on Multimedia & Expo", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icfhr/2018/5875/0/587500a416", "title": "Signature and Logo Detection using Deep CNN for Document Image Retrieval", "doi": null, "abstractUrl": "/proceedings-article/icfhr/2018/587500a416/17D45WaTkn3", "parentPublication": { "id": "proceedings/icfhr/2018/5875/0", "title": "2018 16th International Conference on Frontiers in Handwriting Recognition (ICFHR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "12OmNxvO07z", "title": "Web Mining and Web-based Application, Pacific-Asia Conference on", "acronym": "wmwa", "groupId": "1002868", "volume": "0", "displayVolume": "0", "year": "2009", "__typename": "ProceedingType" }, "article": { "id": "12OmNBmf3bO", "doi": "10.1109/WMWA.2009.22", "title": "Web Document Categorization Algorithm Using LDE and MA", "normalizedTitle": "Web Document Categorization Algorithm Using LDE and MA", "abstract": "Document categorization is one of the most crucial techniques to assign the documents of a corpus to a set of previously fixed categories.To efficiently deal with document categorization problem, an efficient document categorization algorithm based on local discriminant embedding(LDE) and memetic algorithm (MA) is proposed in this paper. Extensive experiments on Reuter-21578 demonstrate that the proposed algorithm performs much better than other conventional document categorization algorithms.", "abstracts": [ { "abstractType": "Regular", "content": "Document categorization is one of the most crucial techniques to assign the documents of a corpus to a set of previously fixed categories.To efficiently deal with document categorization problem, an efficient document categorization algorithm based on local discriminant embedding(LDE) and memetic algorithm (MA) is proposed in this paper. Extensive experiments on Reuter-21578 demonstrate that the proposed algorithm performs much better than other conventional document categorization algorithms.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "Document categorization is one of the most crucial techniques to assign the documents of a corpus to a set of previously fixed categories.To efficiently deal with document categorization problem, an efficient document categorization algorithm based on local discriminant embedding(LDE) and memetic algorithm (MA) is proposed in this paper. 
Extensive experiments on Reuter-21578 demonstrate that the proposed algorithm performs much better than other conventional document categorization algorithms.", "fno": "3646a197", "keywords": [], "authors": [ { "affiliation": null, "fullName": "Xia Sun", "givenName": "Xia", "surname": "Sun", "__typename": "ArticleAuthorType" }, { "affiliation": null, "fullName": "Ziqiang Wang", "givenName": "Ziqiang", "surname": "Wang", "__typename": "ArticleAuthorType" } ], "idPrefix": "wmwa", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "2009-06-01T00:00:00", "pubType": "proceedings", "pages": "197-200", "year": "2009", "issn": null, "isbn": "978-0-7695-3646-0", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "3646a192", "articleId": "12OmNynsbyH", "__typename": "AdjacentArticleType" }, "next": { "fno": "3646a201", "articleId": "12OmNAlvI6R", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/icpr/2002/1695/4/01047440", "title": "A comparative study of centroid-based, neighborhood-based and statistical approaches for effective document categorization", "doi": null, "abstractUrl": "/proceedings-article/icpr/2002/01047440/12OmNAWpyte", "parentPublication": { "id": "proceedings/icpr/2002/1695/4", "title": "Pattern Recognition, International Conference on", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icm/2011/4522/2/4522b065", "title": "Categorical Document Frequency Based Feature Selection for Text Categorization", "doi": null, "abstractUrl": "/proceedings-article/icm/2011/4522b065/12OmNBziB96", "parentPublication": { "id": "proceedings/icm/2011/4522/2", "title": "Information Technology, Computer Engineering and Management Sciences, International Conference of", "__typename": "ParentPublication" }, "__typename": 
"RecommendedArticleType" }, { "id": "proceedings/icpr/2002/1695/4/169540235", "title": "A Comparative Study of Centroid-Based, Neighborhood-Based and Statistical Approaches for Effective Document Categorization", "doi": null, "abstractUrl": "/proceedings-article/icpr/2002/169540235/12OmNC8dgaV", "parentPublication": { "id": "proceedings/icpr/2002/1695/4", "title": "Pattern Recognition, International Conference on", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/alpit/2007/2930/0/2930a009", "title": "An Efficient Document Categorization Model Based on LSA and BPNN", "doi": null, "abstractUrl": "/proceedings-article/alpit/2007/2930a009/12OmNCctf9E", "parentPublication": { "id": "proceedings/alpit/2007/2930/0", "title": "Advanced Language Processing and Web Information Technology, International Conference on", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/his/2004/2291/0/22910086", "title": "An Efficient Feature Selection Using Multi-Criteria in Text Categorization", "doi": null, "abstractUrl": "/proceedings-article/his/2004/22910086/12OmNrHSD1x", "parentPublication": { "id": "proceedings/his/2004/2291/0", "title": "Hybrid Intelligent Systems, International Conference on", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/snpd/2007/2909/3/290930659", "title": "A PSO-Based Web Document Classification Algorithm", "doi": null, "abstractUrl": "/proceedings-article/snpd/2007/290930659/12OmNwB2dVB", "parentPublication": { "id": "proceedings/snpd/2007/2909/3", "title": "Eighth ACIS International Conference on Software Engineering, Artificial Intelligence, Networking, and Parallel/Distributed Computing (SNPD 2007)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/grc/2013/1282/0/06740405", "title": "Dimension reduction based on categorical fuzzy correlation degree for 
document categorization", "doi": null, "abstractUrl": "/proceedings-article/grc/2013/06740405/12OmNwE9OEL", "parentPublication": { "id": "proceedings/grc/2013/1282/0", "title": "2013 IEEE International Conference on Granular Computing (GrC)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icpr/2004/2128/2/212820558", "title": "Applying the Conjugate Gradient Method for Text Document Categorization", "doi": null, "abstractUrl": "/proceedings-article/icpr/2004/212820558/12OmNwErpB5", "parentPublication": { "id": "proceedings/icpr/2004/2128/2", "title": "Pattern Recognition, International Conference on", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/paccs/2009/3614/0/3614a571", "title": "An Efficient LDE-Based Document Classification Algorithm", "doi": null, "abstractUrl": "/proceedings-article/paccs/2009/3614a571/12OmNxj23aY", "parentPublication": { "id": "proceedings/paccs/2009/3614/0", "title": "2009 Pacific-Asia Conference on Circuits, Communications and Systems (PACCS 2009)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iitaw/2008/3505/0/3505a601", "title": "Kernel Discriminant Analysis Algorithm for Document Categorization", "doi": null, "abstractUrl": "/proceedings-article/iitaw/2008/3505a601/12OmNyiUBoZ", "parentPublication": { "id": "proceedings/iitaw/2008/3505/0", "title": "2008 International Symposium on Intelligent Information Technology Application Workshops", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "12OmNzcxYUS", "title": "2008 International Conference on Computer Science and Information Technology", "acronym": "iccsit", "groupId": "1002437", "volume": "0", "displayVolume": "0", "year": "2008", "__typename": "ProceedingType" }, "article": { "id": "12OmNBqdr2Y", "doi": "10.1109/ICCSIT.2008.76", "title": "A New Retrieval Ranking Method based on Document Retrieval Expected Value in Chinese Document", "normalizedTitle": "A New Retrieval Ranking Method based on Document Retrieval Expected Value in Chinese Document", "abstract": "Through the analysis of the information on the contents of the document which contained in title, abstract and keywords, find out which documents are more relativity with user's retrieval expectation, this paper adopted \"Document Retrieval Expected Value\" as be the indicator, builds the mathematical model for it, and then takes advantage of it to do the quantitative calculation for all documents retrieved from the document retrieval engines, such as Weipu, CNKI, etc, finally, sorts all the document retrieval expected values with the descending sorting algorithm, more accurate documents will be displayed on the front, and it will meet the user's demands for documents better.", "abstracts": [ { "abstractType": "Regular", "content": "Through the analysis of the information on the contents of the document which contained in title, abstract and keywords, find out which documents are more relativity with user's retrieval expectation, this paper adopted \"Document Retrieval Expected Value\" as be the indicator, builds the mathematical model for it, and then takes advantage of it to do the quantitative calculation for all documents retrieved from the document retrieval engines, such as Weipu, CNKI, etc, finally, sorts all the document retrieval expected values with the descending sorting algorithm, more accurate documents will be displayed on the front, and it will meet the user's demands for documents better.", 
"__typename": "ArticleAbstractType" } ], "normalizedAbstract": "Through the analysis of the information on the contents of the document which contained in title, abstract and keywords, find out which documents are more relativity with user's retrieval expectation, this paper adopted \"Document Retrieval Expected Value\" as be the indicator, builds the mathematical model for it, and then takes advantage of it to do the quantitative calculation for all documents retrieved from the document retrieval engines, such as Weipu, CNKI, etc, finally, sorts all the document retrieval expected values with the descending sorting algorithm, more accurate documents will be displayed on the front, and it will meet the user's demands for documents better.", "fno": "3308a367", "keywords": [], "authors": [ { "affiliation": null, "fullName": "Tao Wang", "givenName": "Tao", "surname": "Wang", "__typename": "ArticleAuthorType" }, { "affiliation": null, "fullName": "Yan Jiang", "givenName": "Yan", "surname": "Jiang", "__typename": "ArticleAuthorType" }, { "affiliation": null, "fullName": "Mei Chen", "givenName": "Mei", "surname": "Chen", "__typename": "ArticleAuthorType" }, { "affiliation": null, "fullName": "Hanhu Wang", "givenName": "Hanhu", "surname": "Wang", "__typename": "ArticleAuthorType" } ], "idPrefix": "iccsit", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "2008-08-01T00:00:00", "pubType": "proceedings", "pages": "367-371", "year": "2008", "issn": null, "isbn": "978-0-7695-3308-7", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "3308a362", "articleId": "12OmNrIrPkW", "__typename": "AdjacentArticleType" }, "next": { "fno": "3308a375", "articleId": "12OmNBcAGNe", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/isitc/2007/3045/0/30450315", "title": "A Resolving of Word 
Sense Ambiguity Using Two-level Document Ranking Method in Information Retrieval", "doi": null, "abstractUrl": "/proceedings-article/isitc/2007/30450315/12OmNAXxXf4", "parentPublication": { "id": "proceedings/isitc/2007/3045/0", "title": "Information Technology Convergence, International Symposium on", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icdar/2013/4999/0/06628804", "title": "Multi-modal Information Integration for Document Retrieval", "doi": null, "abstractUrl": "/proceedings-article/icdar/2013/06628804/12OmNBA9oAP", "parentPublication": { "id": "proceedings/icdar/2013/4999/0", "title": "2013 12th International Conference on Document Analysis and Recognition (ICDAR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/dexa/2005/2424/0/24241036", "title": "ExtMiner: Combining Multiple Ranking and Clustering Algorithms for Structured Document Retrieval", "doi": null, "abstractUrl": "/proceedings-article/dexa/2005/24241036/12OmNqJ8tcO", "parentPublication": { "id": "proceedings/dexa/2005/2424/0", "title": "16th International Workshop on Database and Expert Systems Applications (DEXA'05)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/waimw/2006/2705/0/04027182", "title": "A Semantic Topic Identification System for Document Retrieval on the Web", "doi": null, "abstractUrl": "/proceedings-article/waimw/2006/04027182/12OmNro0HXx", "parentPublication": { "id": "proceedings/waimw/2006/2705/0", "title": "2006 Seventh International Conference on Web-Age Information Management Workshops", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ride/2002/1480/0/14800034", "title": "Enhancive Index for Structured Document Retrieval", "doi": null, "abstractUrl": "/proceedings-article/ride/2002/14800034/12OmNxzMnLI", "parentPublication": { "id": 
"proceedings/ride/2002/1480/0", "title": "Research Issues in Data Engineering, International Workshop on", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/dexa/1999/0281/0/02810527", "title": "DOCPROS: A Knowledge-Based Personal Document Management System", "doi": null, "abstractUrl": "/proceedings-article/dexa/1999/02810527/12OmNyeWdB4", "parentPublication": { "id": "proceedings/dexa/1999/0281/0", "title": "Proceedings. Tenth International Workshop on Database and Expert Systems Applications. DEXA 99", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icsc/2012/4859/0/4859a126", "title": "Semantic-Based Composite Document Ranking", "doi": null, "abstractUrl": "/proceedings-article/icsc/2012/4859a126/12OmNylKAY8", "parentPublication": { "id": "proceedings/icsc/2012/4859/0", "title": "2012 IEEE Sixth International Conference on Semantic Computing", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ismse/2004/2217/0/22170218", "title": "Models for Extensible Multimedia Document Retrieval", "doi": null, "abstractUrl": "/proceedings-article/ismse/2004/22170218/12OmNzahc4w", "parentPublication": { "id": "proceedings/ismse/2004/2217/0", "title": "Multimedia Software Engineering, International Symposium on", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/aici/2009/3816/3/3816c377", "title": "A Study on Pseudo Labeled Document Constructed for Document Re-ranking", "doi": null, "abstractUrl": "/proceedings-article/aici/2009/3816c377/12OmNzsJ7uI", "parentPublication": { "id": "proceedings/aici/2009/3816/3", "title": "2009 International Conference on Artificial Intelligence and Computational Intelligence", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icsc/2019/6783/0/08665507", "title": "Combining Parts of Speech, Term 
Proximity, and Query Expansion for Document Retrieval", "doi": null, "abstractUrl": "/proceedings-article/icsc/2019/08665507/18qccOYSvUA", "parentPublication": { "id": "proceedings/icsc/2019/6783/0", "title": "2019 IEEE 13th International Conference on Semantic Computing (ICSC)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "12OmNzvQHK2", "title": "2008 International Seminar on Future Biomedical Information Engineering (FBIE 2008)", "acronym": "fbie", "groupId": "1002779", "volume": "0", "displayVolume": "0", "year": "2008", "__typename": "ProceedingType" }, "article": { "id": "12OmNx7G5Sj", "doi": "10.1109/FBIE.2008.83", "title": "Research on Medical Document Categorization", "normalizedTitle": "Research on Medical Document Categorization", "abstract": "Medical document categorization is the process of automatically assigning one or more predefined category labels to medical documents. Document indexing plays a very important role in the process of classification. This paper proposes an improved method of computing term weights which is called tfidfie (term frequency, inverted document frequency and inverted entropy). In comparison with the tfidf (term frequency and inverted document frequency) function, the tfidfie function adds an information entropy factor, H, which represents the distribution of documents in the training set in which the term occurs. Then, we discuss the effects of training set in medical document categorization. An imbalanced training set decreases the performance of classifier. Considering the characteristics of medical documents, the medical classifiers are constructed by the methods of Naïve Bayes and Rocchio respectively. The experiment results show that tfidfie improves the classification performance and Naïve Bayes outperforms Rocchio.", "abstracts": [ { "abstractType": "Regular", "content": "Medical document categorization is the process of automatically assigning one or more predefined category labels to medical documents. Document indexing plays a very important role in the process of classification. This paper proposes an improved method of computing term weights which is called tfidfie (term frequency, inverted document frequency and inverted entropy). 
In comparison with the tfidf (term frequency and inverted document frequency) function, the tfidfie function adds an information entropy factor, H, which represents the distribution of documents in the training set in which the term occurs. Then, we discuss the effects of training set in medical document categorization. An imbalanced training set decreases the performance of classifier. Considering the characteristics of medical documents, the medical classifiers are constructed by the methods of Naïve Bayes and Rocchio respectively. The experiment results show that tfidfie improves the classification performance and Naïve Bayes outperforms Rocchio.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "Medical document categorization is the process of automatically assigning one or more predefined category labels to medical documents. Document indexing plays a very important role in the process of classification. This paper proposes an improved method of computing term weights which is called tfidfie (term frequency, inverted document frequency and inverted entropy). In comparison with the tfidf (term frequency and inverted document frequency) function, the tfidfie function adds an information entropy factor, H, which represents the distribution of documents in the training set in which the term occurs. Then, we discuss the effects of training set in medical document categorization. An imbalanced training set decreases the performance of classifier. Considering the characteristics of medical documents, the medical classifiers are constructed by the methods of Naïve Bayes and Rocchio respectively. 
The experiment results show that tfidfie improves the classification performance and Naïve Bayes outperforms Rocchio.", "fno": "3561a437", "keywords": [ "Medical Information", "Document Categorization", "Information Entropy", "Document Indexing", "Naive Bayes", "Rocchio" ], "authors": [ { "affiliation": null, "fullName": "Qirui Zhang", "givenName": "Qirui", "surname": "Zhang", "__typename": "ArticleAuthorType" }, { "affiliation": null, "fullName": "Yonggang Xue", "givenName": "Yonggang", "surname": "Xue", "__typename": "ArticleAuthorType" }, { "affiliation": null, "fullName": "Huaying Zhou", "givenName": "Huaying", "surname": "Zhou", "__typename": "ArticleAuthorType" }, { "affiliation": null, "fullName": "Jinghua Tan", "givenName": "Jinghua", "surname": "Tan", "__typename": "ArticleAuthorType" } ], "idPrefix": "fbie", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "2008-12-01T00:00:00", "pubType": "proceedings", "pages": "437-440", "year": "2008", "issn": null, "isbn": "978-0-7695-3561-6", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "3561a433", "articleId": "12OmNviHKcZ", "__typename": "AdjacentArticleType" }, "next": { "fno": "3561a441", "articleId": "12OmNrK9q2K", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/fskd/2009/3735/7/05360054", "title": "A Text Categorization Method Based on Local Document Frequency", "doi": null, "abstractUrl": "/proceedings-article/fskd/2009/05360054/12OmNAZfxFF", "parentPublication": { "id": "proceedings/fskd/2009/3735/7", "title": "Fuzzy Systems and Knowledge Discovery, Fourth International Conference on", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iccsee/2012/4647/2/4647b528", "title": "A Transfer Learning Algorithm for Document Categorization Based on Clustering", 
"doi": null, "abstractUrl": "/proceedings-article/iccsee/2012/4647b528/12OmNBsue8E", "parentPublication": { "id": null, "title": null, "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icm/2011/4522/2/4522b065", "title": "Categorical Document Frequency Based Feature Selection for Text Categorization", "doi": null, "abstractUrl": "/proceedings-article/icm/2011/4522b065/12OmNBziB96", "parentPublication": { "id": "proceedings/icm/2011/4522/2", "title": "Information Technology, Computer Engineering and Management Sciences, International Conference of", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/paccs/2009/3614/0/3614a494", "title": "Machine Learning Methods for Medical Text Categorization", "doi": null, "abstractUrl": "/proceedings-article/paccs/2009/3614a494/12OmNC4eSFJ", "parentPublication": { "id": "proceedings/paccs/2009/3614/0", "title": "2009 Pacific-Asia Conference on Circuits, Communications and Systems (PACCS 2009)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iih-msp/2009/3762/0/3762a266", "title": "Meaningful Inner Link Objects for Automatic Text Categorization", "doi": null, "abstractUrl": "/proceedings-article/iih-msp/2009/3762a266/12OmNCgrCYF", "parentPublication": { "id": "proceedings/iih-msp/2009/3762/0", "title": "Intelligent Information Hiding and Multimedia Signal Processing, International Conference on", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/skg/2011/4515/0/4515a083", "title": "R-tfidf, a Variety of tf-idf Term Weighting Strategy in Document Categorization", "doi": null, "abstractUrl": "/proceedings-article/skg/2011/4515a083/12OmNqJ8tiG", "parentPublication": { "id": "proceedings/skg/2011/4515/0", "title": "Semantics, Knowledge and Grid, International Conference on", "__typename": "ParentPublication" }, "__typename": 
"RecommendedArticleType" }, { "id": "proceedings/icpr/2004/2128/2/212820558", "title": "Applying the Conjugate Gradient Method for Text Document Categorization", "doi": null, "abstractUrl": "/proceedings-article/icpr/2004/212820558/12OmNwErpB5", "parentPublication": { "id": "proceedings/icpr/2004/2128/2", "title": "Pattern Recognition, International Conference on", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cicn/2011/4587/0/4587a251", "title": "Categorizing the Document Using Multi Class Classification in Data Mining", "doi": null, "abstractUrl": "/proceedings-article/cicn/2011/4587a251/12OmNwqfsWY", "parentPublication": { "id": "proceedings/cicn/2011/4587/0", "title": "Computational Intelligence and Communication Networks, International Conference on", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icdar/2009/3725/0/3725a111", "title": "Logo Spotting by a Bag-of-words Approach for Document Categorization", "doi": null, "abstractUrl": "/proceedings-article/icdar/2009/3725a111/12OmNyQ7FFW", "parentPublication": { "id": "proceedings/icdar/2009/3725/0", "title": "2009 10th International Conference on Document Analysis and Recognition", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tk/2004/10/k1279", "title": "Efficient Phrase-Based Document Indexing for Web Document Clustering", "doi": null, "abstractUrl": "/journal/tk/2004/10/k1279/13rRUxDIthx", "parentPublication": { "id": "trans/tk", "title": "IEEE Transactions on Knowledge & Data Engineering", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "12OmNx4Q6Er", "title": "2008 International Symposium on Intelligent Information Technology Application Workshops", "acronym": "iitaw", "groupId": "1001518", "volume": "0", "displayVolume": "0", "year": "2008", "__typename": "ProceedingType" }, "article": { "id": "12OmNyiUBoZ", "doi": "10.1109/IITA.Workshops.2008.43", "title": "Kernel Discriminant Analysis Algorithm for Document Categorization", "normalizedTitle": "Kernel Discriminant Analysis Algorithm for Document Categorization", "abstract": "Document Categorization is one of the most crucial techniques to organize the documents in a supervised manner. To efficiently resolve document classification problems, a novel document classification algorithm based on kernel discriminant analysis (KDA) is proposed in this paper. The high-dimensional document sets are first mapped into lower-dimensional space with KDA, then the SVM is applied to classify the documents into semantically different classes. Experimental results demonstrate the effectiveness and efficiency of the proposed KDA algorithm.", "abstracts": [ { "abstractType": "Regular", "content": "Document Categorization is one of the most crucial techniques to organize the documents in a supervised manner. To efficiently resolve document classification problems, a novel document classification algorithm based on kernel discriminant analysis (KDA) is proposed in this paper. The high-dimensional document sets are first mapped into lower-dimensional space with KDA, then the SVM is applied to classify the documents into semantically different classes. Experimental results demonstrate the effectiveness and efficiency of the proposed KDA algorithm.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "Document Categorization is one of the most crucial techniques to organize the documents in a supervised manner. 
To efficiently resolve document classification problems, a novel document classification algorithm based on kernel discriminant analysis (KDA) is proposed in this paper. The high-dimensional document sets are first mapped into lower-dimensional space with KDA, then the SVM is applied to classify the documents into semantically different classes. Experimental results demonstrate the effectiveness and efficiency of the proposed KDA algorithm.", "fno": "3505a601", "keywords": [], "authors": [ { "affiliation": null, "fullName": "Ziqiang Wang", "givenName": "Ziqiang", "surname": "Wang", "__typename": "ArticleAuthorType" }, { "affiliation": null, "fullName": "Xu Qian", "givenName": "Xu", "surname": "Qian", "__typename": "ArticleAuthorType" } ], "idPrefix": "iitaw", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "2008-12-01T00:00:00", "pubType": "proceedings", "pages": "601-604", "year": "2008", "issn": null, "isbn": "978-0-7695-3505-0", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "3505a597", "articleId": "12OmNwMXnng", "__typename": "AdjacentArticleType" }, "next": { "fno": "3505a605", "articleId": "12OmNyugyPN", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/taai/2012/4976/0/06395020", "title": "Feature Reduction for Text Categorization Using Cluster-Based Discriminant Coefficient", "doi": null, "abstractUrl": "/proceedings-article/taai/2012/06395020/12OmNAgoV9b", "parentPublication": { "id": "proceedings/taai/2012/4976/0", "title": "2012 Conference on Technologies and Applications of Artificial Intelligence (TAAI)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/imccc/2011/4519/0/4519a330", "title": "Kernel Complete Discriminant Analysis Algorithm for Radar Target Recognition Using HRRPs", "doi": null, 
"abstractUrl": "/proceedings-article/imccc/2011/4519a330/12OmNCesrdu", "parentPublication": { "id": "proceedings/imccc/2011/4519/0", "title": "Instrumentation, Measurement, Computer, Communication and Control, International Conference on", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvprw/2006/2646/0/26460047", "title": "Wavelet Kernel Construction for Kernel Discriminant Analysis on Face Recognition", "doi": null, "abstractUrl": "/proceedings-article/cvprw/2006/26460047/12OmNqFrGJn", "parentPublication": { "id": "proceedings/cvprw/2006/2646/0", "title": "2006 Conference on Computer Vision and Pattern Recognition Workshop (CVPRW'06)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cinc/2009/3645/2/3645b100", "title": "Document Classification Algorithm Using Kernel LPP", "doi": null, "abstractUrl": "/proceedings-article/cinc/2009/3645b100/12OmNvTTcjI", "parentPublication": { "id": "cinc/2009/3645/2", "title": "Computational Intelligence and Natural Computing, International Conference on", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icia/2006/0528/0/04097864", "title": "Data-Dependent Kernel Discriminant Analysis for Feature Extraction and Classification", "doi": null, "abstractUrl": "/proceedings-article/icia/2006/04097864/12OmNvqmUCn", "parentPublication": { "id": "proceedings/icia/2006/0528/0", "title": "2006 International Conference on Information Acquisition", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cbmi/2009/3662/0/3662a001", "title": "Kernel Discriminant Analysis Using Triangular Kernel for Semantic Scene Classification", "doi": null, "abstractUrl": "/proceedings-article/cbmi/2009/3662a001/12OmNx4Q6Hi", "parentPublication": { "id": "proceedings/cbmi/2009/3662/0", "title": "Content-Based Multimedia Indexing, International Workshop on", 
"__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/fbie/2008/3561/0/3561a437", "title": "Research on Medical Document Categorization", "doi": null, "abstractUrl": "/proceedings-article/fbie/2008/3561a437/12OmNx7G5Sj", "parentPublication": { "id": "proceedings/fbie/2008/3561/0", "title": "2008 International Seminar on Future Biomedical Information Engineering (FBIE 2008)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iccvw/2009/4442/0/05457703", "title": "Visual category recognition using Spectral Regression and Kernel Discriminant Analysis", "doi": null, "abstractUrl": "/proceedings-article/iccvw/2009/05457703/12OmNxWcHgg", "parentPublication": { "id": "proceedings/iccvw/2009/4442/0", "title": "2009 IEEE 12th International Conference on Computer Vision Workshops, ICCV Workshops", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/isbim/2008/3560/2/3560b112", "title": "An Efficient Document Categorization Algorithm Based on LDA and SFL", "doi": null, "abstractUrl": "/proceedings-article/isbim/2008/3560b112/12OmNzahcgi", "parentPublication": { "id": "proceedings/isbim/2008/3560/2", "title": "Business and Information Management, International Seminar on", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "12OmNynsbCp", "title": "12th International Workshop on Database and Expert Systems Applications", "acronym": "dexa", "groupId": "1000180", "volume": "0", "displayVolume": "0", "year": "2001", "__typename": "ProceedingType" }, "article": { "id": "12OmNzaQoaP", "doi": "10.1109/DEXA.2001.953074", "title": "Document Categorization and Retrieval Using Semantic Microfeatures and Growing Cell Structures", "normalizedTitle": "Document Categorization and Retrieval Using Semantic Microfeatures and Growing Cell Structures", "abstract": "Abstract: This paper presents a new approach to document categorization and retrieval using Growing Cell Structures. Semantic microfeatures together with other information are adopted for document representation. The main advantage over traditional information retrieval systems which adopt index terms to index and retrieve documents is that the new approach considers the semantic relationships among documents. This paper describes how to construct Document Microfeature Weight Vectors and use Growing Cell Structures clustering them. Some experimental results are also presented by this paper.", "abstracts": [ { "abstractType": "Regular", "content": "Abstract: This paper presents a new approach to document categorization and retrieval using Growing Cell Structures. Semantic microfeatures together with other information are adopted for document representation. The main advantage over traditional information retrieval systems which adopt index terms to index and retrieve documents is that the new approach considers the semantic relationships among documents. This paper describes how to construct Document Microfeature Weight Vectors and use Growing Cell Structures clustering them. Some experimental results are also presented by this paper.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "Abstract: This paper presents a new approach to document categorization and retrieval using Growing Cell Structures. 
Semantic microfeatures together with other information are adopted for document representation. The main advantage over traditional information retrieval systems which adopt index terms to index and retrieve documents is that the new approach considers the semantic relationships among documents. This paper describes how to construct Document Microfeature Weight Vectors and use Growing Cell Structures clustering them. Some experimental results are also presented by this paper.", "fno": "12300270", "keywords": [], "authors": [ { "affiliation": "Middlesex University", "fullName": "Wantao Deng", "givenName": "Wantao", "surname": "Deng", "__typename": "ArticleAuthorType" }, { "affiliation": "Middlesex University", "fullName": "Wendy Wu", "givenName": "Wendy", "surname": "Wu", "__typename": "ArticleAuthorType" } ], "idPrefix": "dexa", "isOpenAccess": false, "showRecommendedArticles": false, "showBuyMe": true, "hasPdf": true, "pubDate": "2001-09-01T00:00:00", "pubType": "proceedings", "pages": "0270", "year": "2001", "issn": null, "isbn": "0-7695-1230-5", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "12300265", "articleId": "12OmNyQpgXB", "__typename": "AdjacentArticleType" }, "next": { "fno": "12300275", "articleId": "12OmNx4yvww", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [], "articleVideos": [] }
{ "proceeding": { "id": "1BrAyMVuURG", "title": "2021 IEEE Fourth International Conference on Artificial Intelligence and Knowledge Engineering (AIKE)", "acronym": "aike", "groupId": "1828385", "volume": "0", "displayVolume": "0", "year": "2021", "__typename": "ProceedingType" }, "article": { "id": "1BrADc8Hd5K", "doi": "10.1109/AIKE52691.2021.00011", "title": "Towards Intelligent Legal Advisors for Document Retrieval and Question-Answering in German Legal Documents", "normalizedTitle": "Towards Intelligent Legal Advisors for Document Retrieval and Question-Answering in German Legal Documents", "abstract": "The legal system is one of the most important pillars of human society. While digitization is integrated into many areas of everyday life, the legal system is still very traditionally positioned. Recent advances in storing and processing large number of documents initiated the work of intelligent legal advisors. While first approaches in the English language suggest an enormous potential to generate knowledge from legal documents, there are no approaches in the German legal system. We present an intelligent legal advisor based on semantic document retrieval, to improve knowledge extraction from German legal documents. In addition, we set up a question-answering system. We implemented a BERT and a BM25-model for German document retrieval in legal documents. The approach is validated on a data set consisting of German question-answer pairs.", "abstracts": [ { "abstractType": "Regular", "content": "The legal system is one of the most important pillars of human society. While digitization is integrated into many areas of everyday life, the legal system is still very traditionally positioned. Recent advances in storing and processing large number of documents initiated the work of intelligent legal advisors. 
While first approaches in the English language suggest an enormous potential to generate knowledge from legal documents, there are no approaches in the German legal system. We present an intelligent legal advisor based on semantic document retrieval, to improve knowledge extraction from German legal documents. In addition, we set up a question-answering system. We implemented a BERT and a BM25-model for German document retrieval in legal documents. The approach is validated on a data set consisting of German question-answer pairs.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "The legal system is one of the most important pillars of human society. While digitization is integrated into many areas of everyday life, the legal system is still very traditionally positioned. Recent advances in storing and processing large number of documents initiated the work of intelligent legal advisors. While first approaches in the English language suggest an enormous potential to generate knowledge from legal documents, there are no approaches in the German legal system. We present an intelligent legal advisor based on semantic document retrieval, to improve knowledge extraction from German legal documents. In addition, we set up a question-answering system. We implemented a BERT and a BM25-model for German document retrieval in legal documents. 
The approach is validated on a data set consisting of German question-answer pairs.", "fno": "373600a029", "keywords": [ "Document Handling", "Information Retrieval", "Law Administration", "Query Processing", "Question Answering Information Retrieval", "Intelligent Legal Advisor", "German Legal Documents", "German Legal System", "Semantic Document Retrieval", "Question Answering System", "German Document Retrieval", "German Question Answer Pairs", "BERT", "BM 25 Model", "English Language", "Knowledge Engineering", "Law", "Semantic Search", "Conferences", "Bit Error Rate", "NLP", "Knowledge Extraction", "Question Answering", "Semantic Search", "Document Retrieval", "German Language" ], "authors": [ { "affiliation": "Bielefeld University of Applied Sciences,Center for Applied Data Science,Gütersloh,Germany", "fullName": "Christoph Hoppe", "givenName": "Christoph", "surname": "Hoppe", "__typename": "ArticleAuthorType" }, { "affiliation": "Bielefeld University of Applied Sciences,Center for Applied Data Science,Gütersloh,Germany", "fullName": "David Pelkmann", "givenName": "David", "surname": "Pelkmann", "__typename": "ArticleAuthorType" }, { "affiliation": "Bielefeld University of Applied Sciences,Center for Applied Data Science,Gütersloh,Germany", "fullName": "Nico Migenda", "givenName": "Nico", "surname": "Migenda", "__typename": "ArticleAuthorType" }, { "affiliation": "Bielefeld University of Applied Sciences,Faculty of Business,Bielefeld,Germany", "fullName": "Daniel Hötte", "givenName": "Daniel", "surname": "Hötte", "__typename": "ArticleAuthorType" }, { "affiliation": "Bielefeld University of Applied Sciences,Center for Applied Data Science,Gütersloh,Germany", "fullName": "Wolfram Schenck", "givenName": "Wolfram", "surname": "Schenck", "__typename": "ArticleAuthorType" } ], "idPrefix": "aike", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "2021-12-01T00:00:00", "pubType": "proceedings", "pages": "29-32", 
"year": "2021", "issn": null, "isbn": "978-1-6654-3736-3", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "373600a025", "articleId": "1BrAAr8vJK0", "__typename": "AdjacentArticleType" }, "next": { "fno": "373600a033", "articleId": "1BrABRTQFAQ", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/ithingscpscom/2011/4580/0/4580a537", "title": "Multi-faceted Navigation of Legal Documents", "doi": null, "abstractUrl": "/proceedings-article/ithingscpscom/2011/4580a537/12OmNyXMQni", "parentPublication": { "id": "proceedings/ithingscpscom/2011/4580/0", "title": "International Conference on Internet of Things and International Conference on Cyber, Physical and Social Computing", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ialp/2009/3904/0/3904a152", "title": "An Experimental Study of Vietnamese Question Answering System", "doi": null, "abstractUrl": "/proceedings-article/ialp/2009/3904a152/12OmNylKAMm", "parentPublication": { "id": "proceedings/ialp/2009/3904/0", "title": "Asian Language Processing, International Conference on", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cloud/2017/1993/0/1993a684", "title": "A Question and Answering System for Management of Cloud Service Level Agreements", "doi": null, "abstractUrl": "/proceedings-article/cloud/2017/1993a684/12OmNyxXlnB", "parentPublication": { "id": "proceedings/cloud/2017/1993/0", "title": "2017 IEEE 10th International Conference on Cloud Computing (CLOUD)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/dsc/2021/1815/0/181500a328", "title": "A Manifold Learning Method to Passage Retrieval for Open-Domain Question Answering", "doi": null, "abstractUrl": "/proceedings-article/dsc/2021/181500a328/1CuhZdVDnxK", 
"parentPublication": { "id": "proceedings/dsc/2021/1815/0", "title": "2021 IEEE Sixth International Conference on Data Science in Cyberspace (DSC)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/re/2022/7000/0/700000a039", "title": "Automated Question Answering for Improved Understanding of Compliance Requirements: A Multi-Document Study", "doi": null, "abstractUrl": "/proceedings-article/re/2022/700000a039/1HBKpek92TK", "parentPublication": { "id": "proceedings/re/2022/7000/0", "title": "2022 IEEE 30th International Requirements Engineering Conference (RE)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/istm/2022/7116/0/711600a007", "title": "Interpretable Text Classification in Legal Contract Documents using Tsetlin Machines", "doi": null, "abstractUrl": "/proceedings-article/istm/2022/711600a007/1HJzGIZp65G", "parentPublication": { "id": "proceedings/istm/2022/7116/0", "title": "2022 International Symposium on the Tsetlin Machine (ISTM)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/big-data/2022/8045/0/10020873", "title": "Named Entity Recognition in Long Documents: An End-to-end Case Study in the Legal Domain", "doi": null, "abstractUrl": "/proceedings-article/big-data/2022/10020873/1KfR08WiKA0", "parentPublication": { "id": "proceedings/big-data/2022/8045/0", "title": "2022 IEEE International Conference on Big Data (Big Data)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/big-data/2022/8045/0/10020945", "title": "A Business Workflow For Providing Open-Domain Question Answering Reader Systems on The Wikipedia Dataset", "doi": null, "abstractUrl": "/proceedings-article/big-data/2022/10020945/1KfTaPHtWjm", "parentPublication": { "id": "proceedings/big-data/2022/8045/0", "title": "2022 IEEE International Conference on Big Data (Big Data)", 
"__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/big-data/2019/0858/0/09006511", "title": "Japanese Mistakable Legal Term Correction using Infrequency-aware BERT Classifier", "doi": null, "abstractUrl": "/proceedings-article/big-data/2019/09006511/1hJsDJhiRPO", "parentPublication": { "id": "proceedings/big-data/2019/0858/0", "title": "2019 IEEE International Conference on Big Data (Big Data)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/isctis/2021/1441/0/144100a324", "title": "Named entity recognition of legal documents based on cascade model", "doi": null, "abstractUrl": "/proceedings-article/isctis/2021/144100a324/1yEZGDyRoRO", "parentPublication": { "id": "proceedings/isctis/2021/1441/0", "title": "2021 International Symposium on Computer Technology and Information Science (ISCTIS)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "1ikdZO7kJFe", "title": "2019 15th International Conference on eScience (eScience)", "acronym": "escience", "groupId": "1001511", "volume": "0", "displayVolume": "0", "year": "2019", "__typename": "ProceedingType" }, "article": { "id": "1ike1bwk62I", "doi": "10.1109/eScience.2019.00072", "title": "Iterative Document Retrieval via Deep Learning Approaches for Biomedical Question Answering", "normalizedTitle": "Iterative Document Retrieval via Deep Learning Approaches for Biomedical Question Answering", "abstract": "The ever expanding nature of the scientific literature makes finding answers in them increasingly more challenging for researchers. With the goal to ease this challenge, we have developed a biomedical question answering system called Bio-AnswerFinder which uses a greedy iterative document retrieval approach to find candidate documents in which the answer supporting sentences are searched. To improve the performance of the baseline retrieval approach, neural network based keyword selection and importance ranking approaches are introduced. Together with two ensemble approaches and a non-iterative word embedding based nearest neighbor approach, seven retrieval approaches are evaluated using Bio-AnswerFinder on hundred test questions with manual inspection. The test results revealed that the iterative keyword ranking approach more than doubled MRR@10 score over the baseline having the best Precision@1 score and a close second MRR@10 score to the ensemble of keyword selection and ranking iterative retrieval approaches.", "abstracts": [ { "abstractType": "Regular", "content": "The ever expanding nature of the scientific literature makes finding answers in them increasingly more challenging for researchers. 
With the goal to ease this challenge, we have developed a biomedical question answering system called Bio-AnswerFinder which uses a greedy iterative document retrieval approach to find candidate documents in which the answer supporting sentences are searched. To improve the performance of the baseline retrieval approach, neural network based keyword selection and importance ranking approaches are introduced. Together with two ensemble approaches and a non-iterative word embedding based nearest neighbor approach, seven retrieval approaches are evaluated using Bio-AnswerFinder on hundred test questions with manual inspection. The test results revealed that the iterative keyword ranking approach more than doubled MRR@10 score over the baseline having the best Precision@1 score and a close second MRR@10 score to the ensemble of keyword selection and ranking iterative retrieval approaches.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "The ever expanding nature of the scientific literature makes finding answers in them increasingly more challenging for researchers. With the goal to ease this challenge, we have developed a biomedical question answering system called Bio-AnswerFinder which uses a greedy iterative document retrieval approach to find candidate documents in which the answer supporting sentences are searched. To improve the performance of the baseline retrieval approach, neural network based keyword selection and importance ranking approaches are introduced. Together with two ensemble approaches and a non-iterative word embedding based nearest neighbor approach, seven retrieval approaches are evaluated using Bio-AnswerFinder on hundred test questions with manual inspection. 
The test results revealed that the iterative keyword ranking approach more than doubled MRR@10 score over the baseline having the best Precision@1 score and a close second MRR@10 score to the ensemble of keyword selection and ranking iterative retrieval approaches.", "fno": "245100a533", "keywords": [ "Iterative Methods", "Learning Artificial Intelligence", "Medical Information Systems", "Query Processing", "Question Answering Information Retrieval", "Text Analysis", "Deep Learning Approaches", "Biomedical Question Answering System", "Bio Answer Finder", "Greedy Iterative Document Retrieval Approach", "Candidate Documents", "Answer Supporting Sentences", "Baseline Retrieval Approach", "Neural Network Based Keyword Selection", "Ensemble Approaches", "Noniterative Word", "Test Questions", "Iterative Keyword Ranking Approach", "Non Iterative Word Embedding Based Nearest Neighbor Approach", "Biomedical Question Answering Iterative Document Retrieval Deep Learning" ], "authors": [ { "affiliation": "UC San Diego", "fullName": "Ibrahim Burak Ozyurt", "givenName": "Ibrahim Burak", "surname": "Ozyurt", "__typename": "ArticleAuthorType" }, { "affiliation": "UC San Diego", "fullName": "Jeffrey Grethe", "givenName": "Jeffrey", "surname": "Grethe", "__typename": "ArticleAuthorType" } ], "idPrefix": "escience", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "2019-09-01T00:00:00", "pubType": "proceedings", "pages": "533-538", "year": "2019", "issn": null, "isbn": "978-1-7281-2451-3", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "245100a530", "articleId": "1ike2wiWU1i", "__typename": "AdjacentArticleType" }, "next": { "fno": "245100a539", "articleId": "1ike0UOgdhe", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/cvpr/2017/0457/0/0457g146", "title": "Knowledge Acquisition 
for Visual Question Answering via Iterative Querying", "doi": null, "abstractUrl": "/proceedings-article/cvpr/2017/0457g146/12OmNB9t6pc", "parentPublication": { "id": "proceedings/cvpr/2017/0457/0", "title": "2017 IEEE Conference on Computer Vision and Pattern Recognition (CVPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icdar/2015/1805/0/07333738", "title": "A multiple instances approach to improving keyword spotting on historical Mongolian document images", "doi": null, "abstractUrl": "/proceedings-article/icdar/2015/07333738/12OmNqGitRF", "parentPublication": { "id": "proceedings/icdar/2015/1805/0", "title": "2015 13th International Conference on Document Analysis and Recognition (ICDAR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icita/2005/2316/1/231610514", "title": "Automated Question Answering: Review of the Main Approaches", "doi": null, "abstractUrl": "/proceedings-article/icita/2005/231610514/12OmNvjyxxX", "parentPublication": { "id": "proceedings/icita/2005/2316/1", "title": "Proceedings. 
Third International Conference on Information Technology and Applications", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cis/2010/4297/0/4297a634", "title": "Graph-Based Answer Passage Ranking for Question Answering", "doi": null, "abstractUrl": "/proceedings-article/cis/2010/4297a634/12OmNx57HHo", "parentPublication": { "id": "proceedings/cis/2010/4297/0", "title": "2010 International Conference on Computational Intelligence and Security", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/enc/2008/3439/0/3439a132", "title": "Evaluating Causal Questions for Question Answering", "doi": null, "abstractUrl": "/proceedings-article/enc/2008/3439a132/12OmNxveNQT", "parentPublication": { "id": "proceedings/enc/2008/3439/0", "title": "2008 Mexican International Conference on Computer Science", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/umc/2008/3427/0/3427a083", "title": "A Web Image Retrieval Re-ranking Scheme with Cross-Modal Association Rules", "doi": null, "abstractUrl": "/proceedings-article/umc/2008/3427a083/12OmNyRxFqs", "parentPublication": { "id": "proceedings/umc/2008/3427/0", "title": "Ubiquitous Multimedia Computing, International Symposium on", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/grc/2009/4830/0/05255043", "title": "Study on question answering system for biomedical domain", "doi": null, "abstractUrl": "/proceedings-article/grc/2009/05255043/12OmNzVoBBy", "parentPublication": { "id": "proceedings/grc/2009/4830/0", "title": "2009 IEEE International Conference on Granular Computing", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cis/2018/0169/0/016900a179", "title": "A SVM and Co-seMLP Integrated Method for Document-Based Question Answering", "doi": null, "abstractUrl": 
"/proceedings-article/cis/2018/016900a179/17D45XacGj5", "parentPublication": { "id": "proceedings/cis/2018/0169/0", "title": "2018 14th International Conference on Computational Intelligence and Security (CIS)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icdmw/2019/4896/0/489600a049", "title": "An Online Retrieval Question Answering System for Featured Snippets Triggering", "doi": null, "abstractUrl": "/proceedings-article/icdmw/2019/489600a049/1gAwU0LlExq", "parentPublication": { "id": "proceedings/icdmw/2019/4896/0", "title": "2019 International Conference on Data Mining Workshops (ICDMW)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icaml/2019/3908/0/390800a175", "title": "Question Answering System in Bengali Using Semantic Search", "doi": null, "abstractUrl": "/proceedings-article/icaml/2019/390800a175/1hrLJaiIu9q", "parentPublication": { "id": "proceedings/icaml/2019/3908/0", "title": "2019 International Conference on Applied Machine Learning (ICAML)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "12OmNx8wTfL", "title": "ICPR 2008 19th International Conference on Pattern Recognition", "acronym": "icpr", "groupId": "1000545", "volume": "0", "displayVolume": "0", "year": "2008", "__typename": "ProceedingType" }, "article": { "id": "12OmNvk7K6y", "doi": "10.1109/ICPR.2008.4761257", "title": "Feature selection via decision tree surrogate splits", "normalizedTitle": "Feature selection via decision tree surrogate splits", "abstract": "CARTpsilas ldquovariable rankingrdquo provides a quick estimate of the importance of an individual feature in a decision tree, and it is based on surrogate splits. We extend this estimate to arbitrary subsets. We have applied our estimate (called ldquodIrdquo) to three datasets. The performance of dI as an importance estimate is very dependent on the underlying performance of the tree used to generate the surrogate splits.", "abstracts": [ { "abstractType": "Regular", "content": "CARTpsilas ldquovariable rankingrdquo provides a quick estimate of the importance of an individual feature in a decision tree, and it is based on surrogate splits. We extend this estimate to arbitrary subsets. We have applied our estimate (called ldquodIrdquo) to three datasets. The performance of dI as an importance estimate is very dependent on the underlying performance of the tree used to generate the surrogate splits.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "CARTpsilas ldquovariable rankingrdquo provides a quick estimate of the importance of an individual feature in a decision tree, and it is based on surrogate splits. We extend this estimate to arbitrary subsets. We have applied our estimate (called ldquodIrdquo) to three datasets. 
The performance of dI as an importance estimate is very dependent on the underlying performance of the tree used to generate the surrogate splits.", "fno": "04761257", "keywords": [ "Decision Trees", "Importance Sampling", "Feature Selection", "Decision Tree Surrogate Splits", "CART Variable Ranking", "Importance Estimation", "Decision Trees", "Impurities", "Biomedical Measurements", "Entropy", "Phase Locked Loops", "Laboratories", "Genetic Algorithms", "Training Data" ], "authors": [ { "affiliation": "Sandia National Laboratories, Biosystems Research Department, P.O. Box 969, MS 9951, Livermore, CA 94551-0969, USA", "fullName": "Clayton Springer", "givenName": "Clayton", "surname": "Springer", "__typename": "ArticleAuthorType" }, { "affiliation": "Sandia National Laboratories, Biosystems Research Department, P.O. Box 969, MS 9951, Livermore, CA 94551-0969, USA", "fullName": "W. Philip Kegelmeyer", "givenName": "W. Philip", "surname": "Kegelmeyer", "__typename": "ArticleAuthorType" } ], "idPrefix": "icpr", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "2008-12-01T00:00:00", "pubType": "proceedings", "pages": "", "year": "2008", "issn": "1051-4651", "isbn": "978-1-4244-2174-9", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "04761256", "articleId": "12OmNzxyiBd", "__typename": "AdjacentArticleType" }, "next": { "fno": "04761258", "articleId": "12OmNyGbIh0", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/ictai/2012/0227/1/06495117", "title": "Data Selection Using Decision Tree for SVM Classification", "doi": null, "abstractUrl": "/proceedings-article/ictai/2012/06495117/12OmNAlvHK9", "parentPublication": { "id": "proceedings/ictai/2012/0227/1", "title": "2012 IEEE 24th International Conference on Tools with Artificial Intelligence (ICTAI 2012)", "__typename": 
"ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icdm/2013/5108/0/5108b319", "title": "Adaptive Model Tree for Streaming Data", "doi": null, "abstractUrl": "/proceedings-article/icdm/2013/5108b319/12OmNB9t6qX", "parentPublication": { "id": "proceedings/icdm/2013/5108/0", "title": "2013 IEEE International Conference on Data Mining (ICDM)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iacc/2017/1560/0/07976918", "title": "Enhancing Structure Learning of Markov Network Using Alternating Decision Trees", "doi": null, "abstractUrl": "/proceedings-article/iacc/2017/07976918/12OmNqAU6z0", "parentPublication": { "id": "proceedings/iacc/2017/1560/0", "title": "2017 IEEE 7th International Advance Computing Conference (IACC)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ipdpsw/2016/3682/0/3682b413", "title": "Cache-Aware Approximate Computing for Decision Tree Learning", "doi": null, "abstractUrl": "/proceedings-article/ipdpsw/2016/3682b413/12OmNqIQSaJ", "parentPublication": { "id": "proceedings/ipdpsw/2016/3682/0", "title": "2016 IEEE International Parallel and Distributed Processing Symposium Workshops (IPDPSW)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/his/2005/2457/0/24570212", "title": "Feature Selection with Decision Tree Criterion", "doi": null, "abstractUrl": "/proceedings-article/his/2005/24570212/12OmNvpew4h", "parentPublication": { "id": "proceedings/his/2005/2457/0", "title": "Hybrid Intelligent Systems, International Conference on", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/grc/2014/5464/0/06982798", "title": "Minimization of decision tree depth for multi-label decision tables", "doi": null, "abstractUrl": "/proceedings-article/grc/2014/06982798/12OmNyen1l1", "parentPublication": { "id": 
"proceedings/grc/2014/5464/0", "title": "2014 IEEE International Conference on Granular Computing (GrC)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ictai/2012/0227/1/06495049", "title": "A Surrogate Based Multiobjective Evolution Strategy with Different Models for Local Search and Pre-selection", "doi": null, "abstractUrl": "/proceedings-article/ictai/2012/06495049/12OmNzmclCg", "parentPublication": { "id": "proceedings/ictai/2012/0227/1", "title": "2012 IEEE 24th International Conference on Tools with Artificial Intelligence (ICTAI 2012)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/aike/2018/9555/0/955500a168", "title": "Tuning Hyperparameters of Decision Tree Classifiers Using Computationally Efficient Schemes", "doi": null, "abstractUrl": "/proceedings-article/aike/2018/955500a168/17D45WwsQ6r", "parentPublication": { "id": "proceedings/aike/2018/9555/0", "title": "2018 IEEE First International Conference on Artificial Intelligence and Knowledge Engineering (AIKE)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/5555/01/09937064", "title": "Visual Exploration of Machine Learning Model Behavior with Hierarchical Surrogate Rule Sets", "doi": null, "abstractUrl": "/journal/tg/5555/01/09937064/1I05Bh36uZy", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tk/2023/02/09507351", "title": "Interactive Reinforcement Learning for Feature Selection With Decision Tree in the Loop", "doi": null, "abstractUrl": "/journal/tk/2023/02/09507351/1vNfn33QIww", "parentPublication": { "id": "trans/tk", "title": "IEEE Transactions on Knowledge & Data Engineering", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "1JvaHn7VZao", "title": "2022 IEEE International Conference on Agents (ICA)", "acronym": "ica", "groupId": "9999063", "volume": "0", "displayVolume": "0", "year": "2022", "__typename": "ProceedingType" }, "article": { "id": "1JvaJ44YwXm", "doi": "10.1109/ICA55837.2022.00008", "title": "GORITE: A BDI Realisation of Behavior Trees", "normalizedTitle": "GORITE: A BDI Realisation of Behavior Trees", "abstract": "Behavior trees are becoming increasingly used in both the computer games and robotics industries as a mechanism for non-programmers to specify individual entity (non-playing character or robot) behaviors. The tree representation that is employed is amenable to graphical representation and the availability of graphical editors has contributed significantly to the uptake of the approach. Working with graphical representations of behavior makes development by non-programmers easier. However, existing behavior tree representations are limited in their ability to represent team behavior and complex entity reasoning. Agent frameworks exist that address these issues and while they have been successfully employed in military war gaming applications, the underlying behavior representation that they use are fundamentally different to behavior trees. In this paper, we introduce GORITE, a BDI agent framework in which agent behavior is specified using goal-based process models which are representationally similar to behavior trees. Unlike behavior trees, process models can be used to represent both individual and team behaviors. Furthermore, GORITE provides support to enable an agent (or team of agents) to reason about the goals that it intends to pursue as well as the goals that it is currently pursuing. 
A simple example is used to demonstrate how team behavior can be modelled using GORITE process models.", "abstracts": [ { "abstractType": "Regular", "content": "Behavior trees are becoming increasingly used in both the computer games and robotics industries as a mechanism for non-programmers to specify individual entity (non-playing character or robot) behaviors. The tree representation that is employed is amenable to graphical representation and the availability of graphical editors has contributed significantly to the uptake of the approach. Working with graphical representations of behavior makes development by non-programmers easier. However, existing behavior tree representations are limited in their ability to represent team behavior and complex entity reasoning. Agent frameworks exist that address these issues and while they have been successfully employed in military war gaming applications, the underlying behavior representation that they use are fundamentally different to behavior trees. In this paper, we introduce GORITE, a BDI agent framework in which agent behavior is specified using goal-based process models which are representationally similar to behavior trees. Unlike behavior trees, process models can be used to represent both individual and team behaviors. Furthermore, GORITE provides support to enable an agent (or team of agents) to reason about the goals that it intends to pursue as well as the goals that it is currently pursuing. A simple example is used to demonstrate how team behavior can be modelled using GORITE process models.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "Behavior trees are becoming increasingly used in both the computer games and robotics industries as a mechanism for non-programmers to specify individual entity (non-playing character or robot) behaviors. 
The tree representation that is employed is amenable to graphical representation and the availability of graphical editors has contributed significantly to the uptake of the approach. Working with graphical representations of behavior makes development by non-programmers easier. However, existing behavior tree representations are limited in their ability to represent team behavior and complex entity reasoning. Agent frameworks exist that address these issues and while they have been successfully employed in military war gaming applications, the underlying behavior representation that they use are fundamentally different to behavior trees. In this paper, we introduce GORITE, a BDI agent framework in which agent behavior is specified using goal-based process models which are representationally similar to behavior trees. Unlike behavior trees, process models can be used to represent both individual and team behaviors. Furthermore, GORITE provides support to enable an agent (or team of agents) to reason about the goals that it intends to pursue as well as the goals that it is currently pursuing. 
A simple example is used to demonstrate how team behavior can be modelled using GORITE process models.", "fno": "693600a006", "keywords": [ "Computer Games", "Control Engineering Computing", "Military Computing", "Multi Agent Systems", "Robots", "Software Agents", "Trees Mathematics", "Agent Behavior", "Behavior Tree Representations", "Behavior Trees", "Computer Games", "Graphical Representations", "Individual Team Behaviors", "Military War Gaming Applications", "Robotics Industries", "Team Behavior", "Tree Representation", "Underlying Behavior Representation", "Industries", "Video Games", "Service Robots", "Cognition", "Behavioral Sciences", "Behavior Trees", "Multi Agent Systems", "BDI" ], "authors": [ { "affiliation": "University of South Australia,UniSA STEM,Adelaide,Australia", "fullName": "Lui Cirocco", "givenName": "Lui", "surname": "Cirocco", "__typename": "ArticleAuthorType" }, { "affiliation": "School of Engineering and Technology, Central Queensland University,Brisbane,Australia", "fullName": "Dennis Jarvis", "givenName": "Dennis", "surname": "Jarvis", "__typename": "ArticleAuthorType" }, { "affiliation": "School of Engineering and Technology, Central Queensland University,Brisbane,Australia", "fullName": "Jacqueline Jarvis", "givenName": "Jacqueline", "surname": "Jarvis", "__typename": "ArticleAuthorType" }, { "affiliation": "RealThing AI,Melbourne,Australia", "fullName": "Ralph Rönnquist", "givenName": "Ralph", "surname": "Rönnquist", "__typename": "ArticleAuthorType" } ], "idPrefix": "ica", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "2022-11-01T00:00:00", "pubType": "proceedings", "pages": "6-11", "year": "2022", "issn": null, "isbn": "978-1-6654-6936-4", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "693600a001", "articleId": "1JvaJcUf9JK", "__typename": "AdjacentArticleType" }, "next": { "fno": "693600a012", 
"articleId": "1JvaJk7AYJq", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/aswec/2007/2778/0/27780211", "title": "Timed Behavior Trees and Their Application to Verifying Real-Time Systems", "doi": null, "abstractUrl": "/proceedings-article/aswec/2007/27780211/12OmNqJ8tdA", "parentPublication": { "id": "proceedings/aswec/2007/2778/0", "title": "Software Engineering Conference, Australian", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icita/2005/2316/1/231610035", "title": "A Metamodel for the Behavior Trees Modelling Technique", "doi": null, "abstractUrl": "/proceedings-article/icita/2005/231610035/12OmNwDACA8", "parentPublication": { "id": "proceedings/icita/2005/2316/1", "title": "Proceedings. Third International Conference on Information Technology and Applications", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cgames/2012/1120/0/S8006", "title": "Comparing behavior trees and emotional behavior networks for NPCs", "doi": null, "abstractUrl": "/proceedings-article/cgames/2012/S8006/12OmNzSh17d", "parentPublication": { "id": "proceedings/cgames/2012/1120/0", "title": "2012 17th International Conference on Computer Games (CGAMES)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/annsim/2022/5288/0/09859389", "title": "Automated Model Discovery For Steering Behavior Simulation", "doi": null, "abstractUrl": "/proceedings-article/annsim/2022/09859389/1G4EPUoI9ZS", "parentPublication": { "id": "proceedings/annsim/2022/5288/0", "title": "2022 Annual Modeling and Simulation Conference (ANNSIM)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iiai-aai/2022/9755/0/975500a673", "title": "Deep Reinforcement Learning for Multi-agent Simulation using a partial floor field cutout", "doi": 
null, "abstractUrl": "/proceedings-article/iiai-aai/2022/975500a673/1GU6YWZgSl2", "parentPublication": { "id": "proceedings/iiai-aai/2022/9755/0", "title": "2022 12th International Congress on Advanced Applied Informatics (IIAI-AAI)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ismar-adjunct/2022/5365/0/536500a397", "title": "Modeling and optimizing the voice assistant behavior in Virtual Reality", "doi": null, "abstractUrl": "/proceedings-article/ismar-adjunct/2022/536500a397/1J7WhI0xeBq", "parentPublication": { "id": "proceedings/ismar-adjunct/2022/5365/0", "title": "2022 IEEE International Symposium on Mixed and Augmented Reality Adjunct (ISMAR-Adjunct)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/dsd/2022/7404/0/740400a429", "title": "Partial Evaluation in Junction Trees", "doi": null, "abstractUrl": "/proceedings-article/dsd/2022/740400a429/1JF8f4QsPDy", "parentPublication": { "id": "proceedings/dsd/2022/7404/0", "title": "2022 25th Euromicro Conference on Digital System Design (DSD)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/aiam/2022/6399/0/639900a327", "title": "A Evolutionary Behavior Tree AI for Neural MMO Challenge", "doi": null, "abstractUrl": "/proceedings-article/aiam/2022/639900a327/1LRlIdHomo8", "parentPublication": { "id": "proceedings/aiam/2022/6399/0", "title": "2022 4th International Conference on Artificial Intelligence and Advanced Manufacturing (AIAM)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/hpcc-dss-smartcity-dependsys/2022/1993/0/199300c189", "title": "Research on the intelligent countermeasure based on the multi-aircraft cooperative combat behavior tree", "doi": null, "abstractUrl": "/proceedings-article/hpcc-dss-smartcity-dependsys/2022/199300c189/1LSPpmUPtf2", "parentPublication": { "id": 
"proceedings/hpcc-dss-smartcity-dependsys/2022/1993/0", "title": "2022 IEEE 24th Int Conf on High Performance Computing & Communications; 8th Int Conf on Data Science & Systems; 20th Int Conf on Smart City; 8th Int Conf on Dependability in Sensor, Cloud & Big Data Systems & Application (HPCC/DSS/SmartCity/DependSys)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/ts/5555/01/10106642", "title": "Behavior Trees and State Machines in Robotics Applications", "doi": null, "abstractUrl": "/journal/ts/5555/01/10106642/1MwAu1wj4Oc", "parentPublication": { "id": "trans/ts", "title": "IEEE Transactions on Software Engineering", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "1KfQshha0dW", "title": "2022 IEEE International Conference on Big Data (Big Data)", "acronym": "big-data", "groupId": "10020192", "volume": "0", "displayVolume": "0", "year": "2022", "__typename": "ProceedingType" }, "article": { "id": "1KfRJiz7RvO", "doi": "10.1109/BigData55660.2022.10021089", "title": "Exploring the Target Distribution for Surrogate-Based Black-Box Attacks", "normalizedTitle": "Exploring the Target Distribution for Surrogate-Based Black-Box Attacks", "abstract": "Deep Neural Networks are shown to be prone to adversarial attacks. In the black-box setting, where no information about the target is available, surrogate-based black-box attacks train a surrogate on samples queried from the target to imitate the black-box&#x2019;s behavior. The trained surrogate is then attacked to generate adversarial examples. Existing surrogate-based attacks suffer from low success rates because they fail to accurately capture the target&#x2019;s behavior, i.e., their surrogates only mimic the target&#x2019;s outputs for a given set of inputs. Moreover, their attack strategy relies on noisy estimations of high dimensional gradients w.r.t. the inputs (i.e., surrogate&#x2019;s gradients) to generate adversarial examples. Ideally, a successful surrogate-based attack should possess two properties: (1) Train and employ a surrogate that accurately imitates the target behavior for every pair of input and output, i.e., the joint distribution of the target over its input and outputs; and (2) Generate adversarial examples by directly manipulating the class-dependent factors of the input, i.e., factors that affect the target&#x2019;s output, rather than relying on noisy estimations of gradients. We propose a novel surrogate-based attack framework with a surrogate architecture that learns the target distribution over its inputs and outputs while disentangling the class-dependent factors from class-irrelevant ones. 
The framework is equipped with a novel attack strategy that fully utilizes the target distribution captured by the surrogate while generating adversarial examples by directly manipulating the class-dependent factors. Extensive experiments demonstrate the efficacy of our attack in generating highly successful adversarial examples compared to state-of-the-art methods.", "abstracts": [ { "abstractType": "Regular", "content": "Deep Neural Networks are shown to be prone to adversarial attacks. In the black-box setting, where no information about the target is available, surrogate-based black-box attacks train a surrogate on samples queried from the target to imitate the black-box&#x2019;s behavior. The trained surrogate is then attacked to generate adversarial examples. Existing surrogate-based attacks suffer from low success rates because they fail to accurately capture the target&#x2019;s behavior, i.e., their surrogates only mimic the target&#x2019;s outputs for a given set of inputs. Moreover, their attack strategy relies on noisy estimations of high dimensional gradients w.r.t. the inputs (i.e., surrogate&#x2019;s gradients) to generate adversarial examples. Ideally, a successful surrogate-based attack should possess two properties: (1) Train and employ a surrogate that accurately imitates the target behavior for every pair of input and output, i.e., the joint distribution of the target over its input and outputs; and (2) Generate adversarial examples by directly manipulating the class-dependent factors of the input, i.e., factors that affect the target&#x2019;s output, rather than relying on noisy estimations of gradients. We propose a novel surrogate-based attack framework with a surrogate architecture that learns the target distribution over its inputs and outputs while disentangling the class-dependent factors from class-irrelevant ones. 
The framework is equipped with a novel attack strategy that fully utilizes the target distribution captured by the surrogate while generating adversarial examples by directly manipulating the class-dependent factors. Extensive experiments demonstrate the efficacy of our attack in generating highly successful adversarial examples compared to state-of-the-art methods.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "Deep Neural Networks are shown to be prone to adversarial attacks. In the black-box setting, where no information about the target is available, surrogate-based black-box attacks train a surrogate on samples queried from the target to imitate the black-box’s behavior. The trained surrogate is then attacked to generate adversarial examples. Existing surrogate-based attacks suffer from low success rates because they fail to accurately capture the target’s behavior, i.e., their surrogates only mimic the target’s outputs for a given set of inputs. Moreover, their attack strategy relies on noisy estimations of high dimensional gradients w.r.t. the inputs (i.e., surrogate’s gradients) to generate adversarial examples. Ideally, a successful surrogate-based attack should possess two properties: (1) Train and employ a surrogate that accurately imitates the target behavior for every pair of input and output, i.e., the joint distribution of the target over its input and outputs; and (2) Generate adversarial examples by directly manipulating the class-dependent factors of the input, i.e., factors that affect the target’s output, rather than relying on noisy estimations of gradients. We propose a novel surrogate-based attack framework with a surrogate architecture that learns the target distribution over its inputs and outputs while disentangling the class-dependent factors from class-irrelevant ones. 
The framework is equipped with a novel attack strategy that fully utilizes the target distribution captured by the surrogate while generating adversarial examples by directly manipulating the class-dependent factors. Extensive experiments demonstrate the efficacy of our attack in generating highly successful adversarial examples compared to state-of-the-art methods.", "fno": "10021089", "keywords": [ "Computer Crime", "Deep Learning Artificial Intelligence", "Adversarial Attacks", "Adversarial Examples", "Black Box Setting", "Class Dependent Factors", "Deep Neural Networks", "Surrogate Architecture", "Surrogate Based Black Box Attacks", "Target Behavior", "Target Distribution", "Deep Learning", "Neural Networks", "Closed Box", "Estimation", "Big Data", "Behavioral Sciences", "Noise Measurement", "Black Box Adversarial Attack", "Surrogate Based Attacks", "Model Stealing", "VAE", "Disentanglement" ], "authors": [ { "affiliation": "Arizona State University,Computer Science & Engineering,Tempe,AZ,USA", "fullName": "Raha Moraffah", "givenName": "Raha", "surname": "Moraffah", "__typename": "ArticleAuthorType" }, { "affiliation": "Arizona State University,Computer Science & Engineering,Tempe,AZ,USA", "fullName": "Paras Sheth", "givenName": "Paras", "surname": "Sheth", "__typename": "ArticleAuthorType" }, { "affiliation": "Arizona State University,Computer Science & Engineering,Tempe,AZ,USA", "fullName": "Huan Liu", "givenName": "Huan", "surname": "Liu", "__typename": "ArticleAuthorType" } ], "idPrefix": "big-data", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "2022-12-01T00:00:00", "pubType": "proceedings", "pages": "1310-1315", "year": "2022", "issn": null, "isbn": "978-1-6654-8045-1", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "10020729", "articleId": "1KfQW8NLtLi", "__typename": "AdjacentArticleType" }, "next": { "fno": "10021014", 
"articleId": "1KfRlGUwPKg", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/cvpr/2022/6946/0/694600p5334", "title": "Exploring Effective Data for Surrogate Training Towards Black-box Attack", "doi": null, "abstractUrl": "/proceedings-article/cvpr/2022/694600p5334/1H0NDB6hmhi", "parentPublication": { "id": "proceedings/cvpr/2022/6946/0", "title": "2022 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvpr/2022/6946/0/694600p5074", "title": "Boosting Black-Box Attack with Partially Transferred Conditional Adversarial Distribution", "doi": null, "abstractUrl": "/proceedings-article/cvpr/2022/694600p5074/1H1mrSsvCM0", "parentPublication": { "id": "proceedings/cvpr/2022/6946/0", "title": "2022 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icpr/2022/9062/0/09956476", "title": "Boundary Defense Against Black-box Adversarial Attacks", "doi": null, "abstractUrl": "/proceedings-article/icpr/2022/09956476/1IHqhuWNa9i", "parentPublication": { "id": "proceedings/icpr/2022/9062/0", "title": "2022 26th International Conference on Pattern Recognition (ICPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/bd/5555/01/09973348", "title": "Physical Black-box Adversarial Attacks through Transformations", "doi": null, "abstractUrl": "/journal/bd/5555/01/09973348/1IUAu0pLbOw", "parentPublication": { "id": "trans/bd", "title": "IEEE Transactions on Big Data", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tp/5555/01/10017370", "title": "Generalizable Black-Box Adversarial Attack With Meta Learning", "doi": null, "abstractUrl": "/journal/tp/5555/01/10017370/1JYYYm8YqIg", 
"parentPublication": { "id": "trans/tp", "title": "IEEE Transactions on Pattern Analysis & Machine Intelligence", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tp/5555/01/09999043", "title": "Simultaneously Optimizing Perturbations and Positions for Black-Box Adversarial Patch Attacks", "doi": null, "abstractUrl": "/journal/tp/5555/01/09999043/1JqCywy5p8Q", "parentPublication": { "id": "trans/tp", "title": "IEEE Transactions on Pattern Analysis & Machine Intelligence", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icdm/2022/5099/0/509900a368", "title": "Query-Efficient Target-Agnostic Black-Box Attack", "doi": null, "abstractUrl": "/proceedings-article/icdm/2022/509900a368/1KpCHSKNYQw", "parentPublication": { "id": "proceedings/icdm/2022/5099/0", "title": "2022 IEEE International Conference on Data Mining (ICDM)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ispa-bdcloud-socialcom-sustaincom/2022/6497/0/649700a692", "title": "Your Voice is Not Yours? 
Black-Box Adversarial Attacks Against Speaker Recognition Systems", "doi": null, "abstractUrl": "/proceedings-article/ispa-bdcloud-socialcom-sustaincom/2022/649700a692/1LKwn1YcC1G", "parentPublication": { "id": "proceedings/ispa-bdcloud-socialcom-sustaincom/2022/6497/0", "title": "2022 IEEE Intl Conf on Parallel & Distributed Processing with Applications, Big Data & Cloud Computing, Sustainable Computing & Communications, Social Computing & Networking (ISPA/BDCloud/SocialCom/SustainCom)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/hpcc-dss-smartcity-dependsys/2022/1993/0/199300a672", "title": "A Data-free Black-box Attack for Generating Transferable Adversarial Examples", "doi": null, "abstractUrl": "/proceedings-article/hpcc-dss-smartcity-dependsys/2022/199300a672/1LSPsGsGqZy", "parentPublication": { "id": "proceedings/hpcc-dss-smartcity-dependsys/2022/1993/0", "title": "2022 IEEE 24th Int Conf on High Performance Computing & Communications; 8th Int Conf on Data Science & Systems; 20th Int Conf on Smart City; 8th Int Conf on Dependability in Sensor, Cloud & Big Data Systems & Application (HPCC/DSS/SmartCity/DependSys)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tk/5555/01/10114977", "title": "Adversarial Attacks for Black-Box Recommender Systems Via Copying Transferable Cross-Domain User Profiles", "doi": null, "abstractUrl": "/journal/tk/5555/01/10114977/1MQv6V5jSU0", "parentPublication": { "id": "trans/tk", "title": "IEEE Transactions on Knowledge & Data Engineering", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "1HpCpsLj9YY", "title": "2022 IEEE International Performance, Computing, and Communications Conference (IPCCC)", "acronym": "ipccc", "groupId": "1000548", "volume": "0", "displayVolume": "0", "year": "2022", "__typename": "ProceedingType" }, "article": { "id": "1HpCusekVfW", "doi": "10.1109/IPCCC55026.2022.9894322", "title": "Exploring Adversarial Attacks on Neural Networks: An Explainable Approach", "normalizedTitle": "Exploring Adversarial Attacks on Neural Networks: An Explainable Approach", "abstract": "Deep Learning (DL) is being applied in various domains, especially in safety-critical applications such as autonomous driving. Consequently, it is of great significance to ensure the robustness of these methods and thus counteract uncertain behaviors caused by adversarial attacks. In this paper, we use gradient heatmaps to analyze the response characteristics of the VGG-16 model when the input images are mixed with adversarial noise and statistically similar Gaussian random noise. In particular, we compare the network response layer by layer to determine where errors occurred. Several interesting findings are derived. First, compared to Gaussian random noise, intentionally generated adversarial noise causes severe behavior deviation by distracting the area of concentration in the networks. Second, in many cases, adversarial examples only need to compromise a few intermediate blocks to mislead the final decision. Third, our experiments revealed that specific blocks are more vulnerable and easier to exploit by adversarial examples. Finally, we demonstrate that the layers Block4_conv1 and Block5_ cov1 of the VGG-16 model are more susceptible to adversarial attacks. 
Our work could potentially provide useful insights into developing more reliable Deep Neural Network (DNN) models.", "abstracts": [ { "abstractType": "Regular", "content": "Deep Learning (DL) is being applied in various domains, especially in safety-critical applications such as autonomous driving. Consequently, it is of great significance to ensure the robustness of these methods and thus counteract uncertain behaviors caused by adversarial attacks. In this paper, we use gradient heatmaps to analyze the response characteristics of the VGG-16 model when the input images are mixed with adversarial noise and statistically similar Gaussian random noise. In particular, we compare the network response layer by layer to determine where errors occurred. Several interesting findings are derived. First, compared to Gaussian random noise, intentionally generated adversarial noise causes severe behavior deviation by distracting the area of concentration in the networks. Second, in many cases, adversarial examples only need to compromise a few intermediate blocks to mislead the final decision. Third, our experiments revealed that specific blocks are more vulnerable and easier to exploit by adversarial examples. Finally, we demonstrate that the layers Block4_conv1 and Block5_ cov1 of the VGG-16 model are more susceptible to adversarial attacks. Our work could potentially provide useful insights into developing more reliable Deep Neural Network (DNN) models.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "Deep Learning (DL) is being applied in various domains, especially in safety-critical applications such as autonomous driving. Consequently, it is of great significance to ensure the robustness of these methods and thus counteract uncertain behaviors caused by adversarial attacks. 
In this paper, we use gradient heatmaps to analyze the response characteristics of the VGG-16 model when the input images are mixed with adversarial noise and statistically similar Gaussian random noise. In particular, we compare the network response layer by layer to determine where errors occurred. Several interesting findings are derived. First, compared to Gaussian random noise, intentionally generated adversarial noise causes severe behavior deviation by distracting the area of concentration in the networks. Second, in many cases, adversarial examples only need to compromise a few intermediate blocks to mislead the final decision. Third, our experiments revealed that specific blocks are more vulnerable and easier to exploit by adversarial examples. Finally, we demonstrate that the layers Block4_conv1 and Block5_ cov1 of the VGG-16 model are more susceptible to adversarial attacks. Our work could potentially provide useful insights into developing more reliable Deep Neural Network (DNN) models.", "fno": "09894322", "keywords": [ "Deep Learning Artificial Intelligence", "Random Noise", "Safety Critical Software", "Security Of Data", "Adversarial Attacks", "Adversarial Examples", "Adversarial Noise", "Autonomous Driving", "Explainable Approach", "Layers Block 4 Conv", "Network Response Layer", "Neural Networks", "Reliable Deep Neural Network Models", "Safety Critical Applications", "Severe Behavior Deviation", "Statistically Similar Gaussian Random Noise", "Uncertain Behaviors", "VGG 16 Model", "Deep Learning", "Heating Systems", "Analytical Models", "Computational Modeling", "Neural Networks", "Robustness", "Behavioral Sciences" ], "authors": [ { "affiliation": "Embry-Riddle Aeronautical University,FL 32114,USA", "fullName": "Justus Renkhoff", "givenName": "Justus", "surname": "Renkhoff", "__typename": "ArticleAuthorType" }, { "affiliation": "Embry-Riddle Aeronautical University,FL 32114,USA", "fullName": "Wenkai Tan", "givenName": "Wenkai", "surname": "Tan", 
"__typename": "ArticleAuthorType" }, { "affiliation": "University of Colorado Boulder,CO 80309,USA", "fullName": "Alvaro Velasquez", "givenName": "Alvaro", "surname": "Velasquez", "__typename": "ArticleAuthorType" }, { "affiliation": "Purdue University,IN 47907,USA", "fullName": "William Yichen Wang", "givenName": "William Yichen", "surname": "Wang", "__typename": "ArticleAuthorType" }, { "affiliation": "Embry-Riddle Aeronautical University,FL 32114,USA", "fullName": "Yongxin Liu", "givenName": "Yongxin", "surname": "Liu", "__typename": "ArticleAuthorType" }, { "affiliation": "University of Tennessee at Martin,TN 38237,USA", "fullName": "Jian Wang", "givenName": "Jian", "surname": "Wang", "__typename": "ArticleAuthorType" }, { "affiliation": "Bowling Green State University,OH 43403,USA", "fullName": "Shuteng Niu", "givenName": "Shuteng", "surname": "Niu", "__typename": "ArticleAuthorType" }, { "affiliation": "Trier University of Applied Sciences,Germany", "fullName": "Lejla Begic Fazlic", "givenName": "Lejla Begic", "surname": "Fazlic", "__typename": "ArticleAuthorType" }, { "affiliation": "Trier University of Applied Sciences,Germany", "fullName": "Guido Dartmann", "givenName": "Guido", "surname": "Dartmann", "__typename": "ArticleAuthorType" }, { "affiliation": "Embry-Riddle Aeronautical University,FL 32114,USA", "fullName": "Houbing Song", "givenName": "Houbing", "surname": "Song", "__typename": "ArticleAuthorType" } ], "idPrefix": "ipccc", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "2022-11-01T00:00:00", "pubType": "proceedings", "pages": "41-42", "year": "2022", "issn": null, "isbn": "978-1-6654-8018-5", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "09894316", "articleId": "1HpCsOOqHSM", "__typename": "AdjacentArticleType" }, "next": { "fno": "09894342", "articleId": "1HpCwnzwIRG", "__typename": "AdjacentArticleType" }, 
"__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "trans/ai/5555/01/09829929", "title": "Feature Fusion Based Adversarial Example Detection against Second-Round Adversarial Attacks", "doi": null, "abstractUrl": "/journal/ai/5555/01/09829929/1F0cjcVJ6mI", "parentPublication": { "id": "trans/ai", "title": "IEEE Transactions on Artificial Intelligence", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/mlccim/2022/9858/0/985800a333", "title": "Adagard Accelerated Gradient with Weight Decay for Adversarial Attacks", "doi": null, "abstractUrl": "/proceedings-article/mlccim/2022/985800a333/1IAKukalWO4", "parentPublication": { "id": "proceedings/mlccim/2022/9858/0", "title": "2022 International Conference on Machine Learning, Cloud Computing and Intelligent Mining (MLCCIM)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icdmw/2022/4609/0/460900a451", "title": "Multi-view Representation Learning from Malware to Defend Against Adversarial Variants", "doi": null, "abstractUrl": "/proceedings-article/icdmw/2022/460900a451/1KBqXleDboY", "parentPublication": { "id": "proceedings/icdmw/2022/4609/0", "title": "2022 IEEE International Conference on Data Mining Workshops (ICDMW)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/big-data/2022/8045/0/10021089", "title": "Exploring the Target Distribution for Surrogate-Based Black-Box Attacks", "doi": null, "abstractUrl": "/proceedings-article/big-data/2022/10021089/1KfRJiz7RvO", "parentPublication": { "id": "proceedings/big-data/2022/8045/0", "title": "2022 IEEE International Conference on Big Data (Big Data)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ispcem/2022/9271/0/927100a329", "title": "Analyse of Influence of Adversarial Samples on Neural Network Attacks with Different Complexities", "doi": 
null, "abstractUrl": "/proceedings-article/ispcem/2022/927100a329/1LHd0LYGYFi", "parentPublication": { "id": "proceedings/ispcem/2022/9271/0", "title": "2022 2nd International Signal Processing, Communications and Engineering Management Conference (ISPCEM)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvpr/2019/3293/0/329300l1342", "title": "Trust Region Based Adversarial Attack on Neural Networks", "doi": null, "abstractUrl": "/proceedings-article/cvpr/2019/329300l1342/1gyrOU5SLvy", "parentPublication": { "id": "proceedings/cvpr/2019/3293/0", "title": "2019 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvprw/2020/9360/0/09150846", "title": "Noise is Inside Me! Generating Adversarial Perturbations with Noise Derived from Natural Filters", "doi": null, "abstractUrl": "/proceedings-article/cvprw/2020/09150846/1lPHdKRRqgg", "parentPublication": { "id": "proceedings/cvprw/2020/9360/0", "title": "2020 IEEE/CVF Conference on Computer Vision and Pattern Recognition Workshops (CVPRW)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icdm/2020/8316/0/831600b412", "title": "Generalizing Universal Adversarial Attacks Beyond Additive Perturbations", "doi": null, "abstractUrl": "/proceedings-article/icdm/2020/831600b412/1r54IaN7xVm", "parentPublication": { "id": "proceedings/icdm/2020/8316/0", "title": "2020 IEEE International Conference on Data Mining (ICDM)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tq/2023/01/09612007", "title": "Exploring the Effect of Randomness on Transferability of Adversarial Samples Against Deep Neural Networks", "doi": null, "abstractUrl": "/journal/tq/2023/01/09612007/1yrDczhero4", "parentPublication": { "id": "trans/tq", "title": "IEEE Transactions on Dependable and 
Secure Computing", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ictai/2021/0898/0/089800b247", "title": "Stochastic sparse adversarial attacks", "doi": null, "abstractUrl": "/proceedings-article/ictai/2021/089800b247/1zw60hg1IHe", "parentPublication": { "id": "proceedings/ictai/2021/0898/0", "title": "2021 IEEE 33rd International Conference on Tools with Artificial Intelligence (ICTAI)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "12OmNCeaPZB", "title": "Proceedings of 1st International Conference on Image Processing", "acronym": "icip", "groupId": "1000349", "volume": "1", "displayVolume": "1", "year": "1994", "__typename": "ProceedingType" }, "article": { "id": "12OmNA14A7M", "doi": "10.1109/ICIP.1994.413338", "title": "Motion estimation and compensation under varying illumination", "normalizedTitle": "Motion estimation and compensation under varying illumination", "abstract": "In this paper we propose a new approach to motion-compensated filtering of image sequences that contain time-varying illumination. There are two contributions in this paper. First, we propose a new method for the estimation of dense 2-D motion that is robust to time-varying illumination often present in images. We define the structural model that is based on the assumption of intensity gradient constancy along motion trajectories. This is in contrast to the usual hypothesis of the intensity constancy. Secondly, we apply the proposed approach to motion-compensated temporal interpolation. We compare the image reconstruction error obtained using the new approach with the error obtained for standard models.<>", "abstracts": [ { "abstractType": "Regular", "content": "In this paper we propose a new approach to motion-compensated filtering of image sequences that contain time-varying illumination. There are two contributions in this paper. First, we propose a new method for the estimation of dense 2-D motion that is robust to time-varying illumination often present in images. We define the structural model that is based on the assumption of intensity gradient constancy along motion trajectories. This is in contrast to the usual hypothesis of the intensity constancy. Secondly, we apply the proposed approach to motion-compensated temporal interpolation. 
We compare the image reconstruction error obtained using the new approach with the error obtained for standard models.<>", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "In this paper we propose a new approach to motion-compensated filtering of image sequences that contain time-varying illumination. There are two contributions in this paper. First, we propose a new method for the estimation of dense 2-D motion that is robust to time-varying illumination often present in images. We define the structural model that is based on the assumption of intensity gradient constancy along motion trajectories. This is in contrast to the usual hypothesis of the intensity constancy. Secondly, we apply the proposed approach to motion-compensated temporal interpolation. We compare the image reconstruction error obtained using the new approach with the error obtained for standard models.", "fno": "00413338", "keywords": [ "Motion Estimation", "Motion Compensation", "Image Sequences", "Interpolation", "Image Reconstruction", "Filtering Theory", "Motion Estimation", "Motion Compensation", "Dense 2 D Motion", "Filtering", "Image Sequences", "Time Varying Illumination", "Structural Model", "Motion Trajectories", "Intensity Gradient Constancy", "Temporal Interpolation", "Image Reconstruction Error", "Motion Estimation", "Lighting", "Image Sequences", "Robustness", "Equations", "Filtering", "Image Reconstruction", "Interpolation", "Business", "Marine Vehicles" ], "authors": [ { "affiliation": "Inst. Nat. de la Recherche Sci., INRS Telecommun., Ile des Soeurs, Que., Canada", "fullName": "P. Treves", "givenName": "P.", "surname": "Treves", "__typename": "ArticleAuthorType" }, { "affiliation": "Inst. Nat. de la Recherche Sci., INRS Telecommun., Ile des Soeurs, Que., Canada", "fullName": "J. 
Konrad", "givenName": "J.", "surname": "Konrad", "__typename": "ArticleAuthorType" } ], "idPrefix": "icip", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "1994-01-01T00:00:00", "pubType": "proceedings", "pages": "373,374,375,376,377", "year": "1994", "issn": null, "isbn": null, "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "00413337", "articleId": "12OmNyfdOS2", "__typename": "AdjacentArticleType" }, "next": { "fno": "00413339", "articleId": "12OmNyYDDyy", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/icassp/1991/0003/0/00150890", "title": "Estimation of motion fields from image sequences with illumination variation", "doi": null, "abstractUrl": "/proceedings-article/icassp/1991/00150890/12OmNAQany0", "parentPublication": { "id": "proceedings/icassp/1991/0003/0", "title": "Acoustics, Speech, and Signal Processing, IEEE International Conference on", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icisce/2016/2535/0/2535a083", "title": "A Novel Illumination Compensation Method with Enhanced Retinex", "doi": null, "abstractUrl": "/proceedings-article/icisce/2016/2535a083/12OmNBpVPTp", "parentPublication": { "id": "proceedings/icisce/2016/2535/0", "title": "2016 3rd International Conference on Information Science and Control Engineering (ICISCE)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/worv/2013/5646/0/06521924", "title": "Color-based detection robust to varying illumination spectrum", "doi": null, "abstractUrl": "/proceedings-article/worv/2013/06521924/12OmNqJq4EZ", "parentPublication": { "id": "proceedings/worv/2013/5646/0", "title": "2013 IEEE Workshop on Robot Vision (WORV 2013)", "__typename": "ParentPublication" }, "__typename": 
"RecommendedArticleType" }, { "id": "proceedings/wacv-motion/2005/2271/2/227120140", "title": "Ego-Motion Estimation and 3D Model Refinement in Scenes with Varying Illumination", "doi": null, "abstractUrl": "/proceedings-article/wacv-motion/2005/227120140/12OmNvjQ95r", "parentPublication": { "id": "proceedings/wacv-motion/2005/2271/2", "title": "Applications of Computer Vision and the IEEE Workshop on Motion and Video Computing, IEEE Workshop on", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iccv/1993/3870/0/00378199", "title": "Incremental image sequence enhancement with implicit motion compensation", "doi": null, "abstractUrl": "/proceedings-article/iccv/1993/00378199/12OmNwHQB7b", "parentPublication": { "id": "proceedings/iccv/1993/3870/0", "title": "1993 (4th) International Conference on Computer Vision", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icip/1997/8183/3/81833614", "title": "Motion compensation in color video with illumination variations", "doi": null, "abstractUrl": "/proceedings-article/icip/1997/81833614/12OmNyKa6gP", "parentPublication": { "id": "proceedings/icip/1997/8183/3", "title": "Proceedings of International Conference on Image Processing", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iccv/1995/7042/0/70420720", "title": "Color constancy under varying illumination", "doi": null, "abstractUrl": "/proceedings-article/iccv/1995/70420720/12OmNzlUKQX", "parentPublication": { "id": "proceedings/iccv/1995/7042/0", "title": "Computer Vision, IEEE International Conference on", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tp/2010/09/ttp2010091646", "title": "Range Flow in Varying Illumination: Algorithms and Comparisons", "doi": null, "abstractUrl": "/journal/tp/2010/09/ttp2010091646/13rRUxCitKw", "parentPublication": { "id": "trans/tp", 
"title": "IEEE Transactions on Pattern Analysis & Machine Intelligence", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/isspit/2018/7568/0/08642637", "title": "Spectral Illumination Correction: Achieving Relative Color Constancy Under the Spectral Domain", "doi": null, "abstractUrl": "/proceedings-article/isspit/2018/08642637/17QjJeZzD2N", "parentPublication": { "id": "proceedings/isspit/2018/7568/0", "title": "2018 IEEE International Symposium on Signal Processing and Information Technology (ISSPIT)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iccvw/2021/0191/0/019100b194", "title": "Generative Models for Multi-Illumination Color Constancy", "doi": null, "abstractUrl": "/proceedings-article/iccvw/2021/019100b194/1yNioSxeNTa", "parentPublication": { "id": "proceedings/iccvw/2021/0191/0", "title": "2021 IEEE/CVF International Conference on Computer Vision Workshops (ICCVW)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "12OmNBSBkfu", "title": "2016 6th International Conference on Digital Home (ICDH)", "acronym": "icdh", "groupId": "1802037", "volume": "0", "displayVolume": "0", "year": "2016", "__typename": "ProceedingType" }, "article": { "id": "12OmNAYoKx1", "doi": "10.1109/ICDH.2016.017", "title": "Video Denoising Based on Spatial-Temporal Filtering", "normalizedTitle": "Video Denoising Based on Spatial-Temporal Filtering", "abstract": "Video noise reduction based on temporal spatial recursive filter isproposed in this paper. In the proposed model the recursive time-weighted average is applied to the areas where the motion has notbeen detected. The new model is able to be adaptive in each areadepending on whether the area is static or movable. More precisely, more noise removal will be done in the static areas, and lessremoval in the motion areas. In the new model, noise reduction iscarried out in three stages. At first, noise is filtered withspatial filter. Secondly the temporal filter is applied for morenoise repression. Thirdly motion detector and recursive timeaveraging are applied to improve the temporal filter's output. Thequantitative and qualitative performance in various noise levelsdemonstrates the superiority of the proposed scheme as compared withsome well-known methods.", "abstracts": [ { "abstractType": "Regular", "content": "Video noise reduction based on temporal spatial recursive filter isproposed in this paper. In the proposed model the recursive time-weighted average is applied to the areas where the motion has notbeen detected. The new model is able to be adaptive in each areadepending on whether the area is static or movable. More precisely, more noise removal will be done in the static areas, and lessremoval in the motion areas. In the new model, noise reduction iscarried out in three stages. At first, noise is filtered withspatial filter. Secondly the temporal filter is applied for morenoise repression. 
Thirdly motion detector and recursive timeaveraging are applied to improve the temporal filter's output. Thequantitative and qualitative performance in various noise levelsdemonstrates the superiority of the proposed scheme as compared withsome well-known methods.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "Video noise reduction based on temporal spatial recursive filter isproposed in this paper. In the proposed model the recursive time-weighted average is applied to the areas where the motion has notbeen detected. The new model is able to be adaptive in each areadepending on whether the area is static or movable. More precisely, more noise removal will be done in the static areas, and lessremoval in the motion areas. In the new model, noise reduction iscarried out in three stages. At first, noise is filtered withspatial filter. Secondly the temporal filter is applied for morenoise repression. Thirdly motion detector and recursive timeaveraging are applied to improve the temporal filter's output. 
Thequantitative and qualitative performance in various noise levelsdemonstrates the superiority of the proposed scheme as compared withsome well-known methods.", "fno": "4400a034", "keywords": [ "Noise Reduction", "Video Sequences", "Gaussian Noise", "Noise Measurement", "Adaptation Models", "Visualization", "Speckle", "Spatial Filter", "Temporal Filter", "Weighted Averaging", "Static Area", "Motion Area" ], "authors": [ { "affiliation": null, "fullName": "Ali Abdullah Yahya", "givenName": "Ali Abdullah", "surname": "Yahya", "__typename": "ArticleAuthorType" }, { "affiliation": null, "fullName": "Jieqing Tan", "givenName": "Jieqing", "surname": "Tan", "__typename": "ArticleAuthorType" }, { "affiliation": null, "fullName": "Benyue Su", "givenName": "Benyue", "surname": "Su", "__typename": "ArticleAuthorType" }, { "affiliation": null, "fullName": "Kui Liu", "givenName": "Kui", "surname": "Liu", "__typename": "ArticleAuthorType" } ], "idPrefix": "icdh", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "2016-12-01T00:00:00", "pubType": "proceedings", "pages": "34-37", "year": "2016", "issn": null, "isbn": "978-1-5090-4400-9", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "4400a028", "articleId": "12OmNzcPAgl", "__typename": "AdjacentArticleType" }, "next": { "fno": "4400a038", "articleId": "12OmNxWcH9F", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/icig/2013/5050/0/5050a064", "title": "Improved Video Denoising Algorithm Based on Spatial-Temporal Combination", "doi": null, "abstractUrl": "/proceedings-article/icig/2013/5050a064/12OmNApLGKy", "parentPublication": { "id": "proceedings/icig/2013/5050/0", "title": "2013 Seventh International Conference on Image and Graphics (ICIG)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { 
"id": "proceedings/uksim/2010/4016/0/4016a284", "title": "Color Image Denoising with Multi-channel Spatial Color Filtering", "doi": null, "abstractUrl": "/proceedings-article/uksim/2010/4016a284/12OmNvHY2EH", "parentPublication": { "id": "proceedings/uksim/2010/4016/0", "title": "Computer Modeling and Simulation, International Conference on", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/avss/2003/1971/0/19710334", "title": "Combined Wavelet Domain and Temporal Video Denoising", "doi": null, "abstractUrl": "/proceedings-article/avss/2003/19710334/12OmNvUaNqa", "parentPublication": { "id": "proceedings/avss/2003/1971/0", "title": "Proceedings of the IEEE Conference on Advanced Video and Signal Based Surveillance, 2003.", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icassp/1991/0003/0/00151031", "title": "Nonlinear model-based spatio-temporal filtering of image sequences", "doi": null, "abstractUrl": "/proceedings-article/icassp/1991/00151031/12OmNy1SFHg", "parentPublication": { "id": "proceedings/icassp/1991/0003/0", "title": "Acoustics, Speech, and Signal Processing, IEEE International Conference on", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/isda/2008/3382/3/04696528", "title": "The Study on Video Enhancement in the Low-Light Environment by Spatio-temporal Filtering", "doi": null, "abstractUrl": "/proceedings-article/isda/2008/04696528/12OmNylboyQ", "parentPublication": { "id": "proceedings/isda/2008/3382/3", "title": "Intelligent Systems Design and Applications, International Conference on", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icme/2007/1016/0/04284986", "title": "Motion Adaptive Spatio-Temporal Gaussian Noise Reduction Filter for Double-Shot Images", "doi": null, "abstractUrl": "/proceedings-article/icme/2007/04284986/12OmNzdoN2x", 
"parentPublication": { "id": "proceedings/icme/2007/1016/0", "title": "2007 International Conference on Multimedia & Expo", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/sibgrapi/2018/9264/0/926400a064", "title": "Video Denoising Quality Assessment for Different Noise Distributions", "doi": null, "abstractUrl": "/proceedings-article/sibgrapi/2018/926400a064/17D45VTRorm", "parentPublication": { "id": "proceedings/sibgrapi/2018/9264/0", "title": "2018 31st SIBGRAPI Conference on Graphics, Patterns and Images (SIBGRAPI)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/avss/2022/6382/0/09959170", "title": "SAR Image Denoising in High Dynamic Range with Speckle and Thermal Noise Refinement Modeling", "doi": null, "abstractUrl": "/proceedings-article/avss/2022/09959170/1Iz5f7tXFgA", "parentPublication": { "id": "proceedings/avss/2022/6382/0", "title": "2022 18th IEEE International Conference on Advanced Video and Signal Based Surveillance (AVSS)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvprw/2019/2506/0/250600b917", "title": "Kalman Filtering of Patches for Frame-Recursive Video Denoising", "doi": null, "abstractUrl": "/proceedings-article/cvprw/2019/250600b917/1iTvkw5WkrC", "parentPublication": { "id": "proceedings/cvprw/2019/2506/0", "title": "2019 IEEE/CVF Conference on Computer Vision and Pattern Recognition Workshops (CVPRW)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ismar/2021/0158/0/015800a041", "title": "STGAE: Spatial-Temporal Graph Auto-Encoder for Hand Motion Denoising", "doi": null, "abstractUrl": "/proceedings-article/ismar/2021/015800a041/1yeDbjxQPss", "parentPublication": { "id": "proceedings/ismar/2021/0158/0", "title": "2021 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)", "__typename": "ParentPublication" }, 
"__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "12OmNzIUg0M", "title": "2013 Seventh International Conference on Image and Graphics (ICIG)", "acronym": "icig", "groupId": "1001790", "volume": "0", "displayVolume": "0", "year": "2013", "__typename": "ProceedingType" }, "article": { "id": "12OmNApLGKy", "doi": "10.1109/ICIG.2013.19", "title": "Improved Video Denoising Algorithm Based on Spatial-Temporal Combination", "normalizedTitle": "Improved Video Denoising Algorithm Based on Spatial-Temporal Combination", "abstract": "An algorithm of video denoising based on spatial-temporal combination is proposed to improve the video quality. This algorithm can adaptively distinguish the still regions from the motion regions of video frames by bilateral motion detection of multiple frames, where temporal bilateral Kalman filtering is applied to the still regions and spatial bilateral adaptive Nonlocal means (ANL) filtering is applied to the motion regions. Experimental results show that by making full use of the spatial-temporal information of video, the proposed algorithm can significantly improve the peak signal-to-noise ratio (PSNR) and the subjective video quality without movement ghosting. Besides, the speed of video denoising can be accelerated by bilateral filtering.", "abstracts": [ { "abstractType": "Regular", "content": "An algorithm of video denoising based on spatial-temporal combination is proposed to improve the video quality. This algorithm can adaptively distinguish the still regions from the motion regions of video frames by bilateral motion detection of multiple frames, where temporal bilateral Kalman filtering is applied to the still regions and spatial bilateral adaptive Nonlocal means (ANL) filtering is applied to the motion regions. Experimental results show that by making full use of the spatial-temporal information of video, the proposed algorithm can significantly improve the peak signal-to-noise ratio (PSNR) and the subjective video quality without movement ghosting. 
Besides, the speed of video denoising can be accelerated by bilateral filtering.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "An algorithm of video denoising based on spatial-temporal combination is proposed to improve the video quality. This algorithm can adaptively distinguish the still regions from the motion regions of video frames by bilateral motion detection of multiple frames, where temporal bilateral Kalman filtering is applied to the still regions and spatial bilateral adaptive Nonlocal means (ANL) filtering is applied to the motion regions. Experimental results show that by making full use of the spatial-temporal information of video, the proposed algorithm can significantly improve the peak signal-to-noise ratio (PSNR) and the subjective video quality without movement ghosting. Besides, the speed of video denoising can be accelerated by bilateral filtering.", "fno": "5050a064", "keywords": [ "Noise Reduction", "Video Sequences", "Kalman Filters", "Motion Detection", "PSNR", "Signal Processing Algorithms", "Bilateral ANL Filtering", "Spatial Temporal Combination", "Bilateral Motion Detection", "Bilateral Kalman Filtering" ], "authors": [ { "affiliation": null, "fullName": "Wang Hong-Zhi", "givenName": "Wang", "surname": "Hong-Zhi", "__typename": "ArticleAuthorType" }, { "affiliation": null, "fullName": "Chen Ling", "givenName": "Chen", "surname": "Ling", "__typename": "ArticleAuthorType" }, { "affiliation": null, "fullName": "Xu Shu-Liang", "givenName": "Xu", "surname": "Shu-Liang", "__typename": "ArticleAuthorType" } ], "idPrefix": "icig", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "2013-07-01T00:00:00", "pubType": "proceedings", "pages": "64-67", "year": "2013", "issn": null, "isbn": "978-0-7695-5050-3", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "5050a060", "articleId": "12OmNxzMnK8", 
"__typename": "AdjacentArticleType" }, "next": { "fno": "5050a068", "articleId": "12OmNvFpEzz", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/icdh/2016/4400/0/4400a034", "title": "Video Denoising Based on Spatial-Temporal Filtering", "doi": null, "abstractUrl": "/proceedings-article/icdh/2016/4400a034/12OmNAYoKx1", "parentPublication": { "id": "proceedings/icdh/2016/4400/0", "title": "2016 6th International Conference on Digital Home (ICDH)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icacc/2013/5033/0/06686367", "title": "A Novel Method of Medical Image Denoising Using Bilateral and NLm Filtering", "doi": null, "abstractUrl": "/proceedings-article/icacc/2013/06686367/12OmNBqMDzU", "parentPublication": { "id": "proceedings/icacc/2013/5033/0", "title": "2013 Third International Conference on Advances in Computing and Communications (ICACC)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icme/2011/348/0/06011845", "title": "An overcomplete pyramid representation for improved gsm image denoising", "doi": null, "abstractUrl": "/proceedings-article/icme/2011/06011845/12OmNCw3z7R", "parentPublication": { "id": "proceedings/icme/2011/348/0", "title": "2011 IEEE International Conference on Multimedia and Expo", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cicn/2011/4587/0/4587a421", "title": "Image Denoising via Combination Anisotropic Diffusion and Bilateral Filtering", "doi": null, "abstractUrl": "/proceedings-article/cicn/2011/4587a421/12OmNvkplaN", "parentPublication": { "id": "proceedings/cicn/2011/4587/0", "title": "Computational Intelligence and Communication Networks, International Conference on", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iccvw/2011/0063/0/06130216", 
"title": "High-quality video denoising for motion-based exposure control", "doi": null, "abstractUrl": "/proceedings-article/iccvw/2011/06130216/12OmNwpoFL1", "parentPublication": { "id": "proceedings/iccvw/2011/0063/0", "title": "2011 IEEE International Conference on Computer Vision Workshops (ICCV Workshops)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icpr/2008/2174/0/04761412", "title": "Video denoising via discrete regularization on graphs", "doi": null, "abstractUrl": "/proceedings-article/icpr/2008/04761412/12OmNxSNvsx", "parentPublication": { "id": "proceedings/icpr/2008/2174/0", "title": "ICPR 2008 19th International Conference on Pattern Recognition", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icgciot/2015/7910/0/07380477", "title": "Bilateral Filter for Image Denoising", "doi": null, "abstractUrl": "/proceedings-article/icgciot/2015/07380477/12OmNzA6GOU", "parentPublication": { "id": "proceedings/icgciot/2015/7910/0", "title": "2015 International Conference on Green Computing and Internet of Things (ICGCIoT)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2019/06/08344461", "title": "Robust and High Fidelity Mesh Denoising", "doi": null, "abstractUrl": "/journal/tg/2019/06/08344461/13rRUxcbnHm", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/sibgrapi/2018/9264/0/926400a064", "title": "Video Denoising Quality Assessment for Different Noise Distributions", "doi": null, "abstractUrl": "/proceedings-article/sibgrapi/2018/926400a064/17D45VTRorm", "parentPublication": { "id": "proceedings/sibgrapi/2018/9264/0", "title": "2018 31st SIBGRAPI Conference on Graphics, Patterns and Images (SIBGRAPI)", "__typename": "ParentPublication" }, 
"__typename": "RecommendedArticleType" }, { "id": "proceedings/cvprw/2019/2506/0/250600b917", "title": "Kalman Filtering of Patches for Frame-Recursive Video Denoising", "doi": null, "abstractUrl": "/proceedings-article/cvprw/2019/250600b917/1iTvkw5WkrC", "parentPublication": { "id": "proceedings/cvprw/2019/2506/0", "title": "2019 IEEE/CVF Conference on Computer Vision and Pattern Recognition Workshops (CVPRW)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "12OmNAWH9tO", "title": "Proceedings of International Conference on Acoustics, Speech and Signal Processing (CASSP'02)", "acronym": "icassp", "groupId": "1000002", "volume": "4", "displayVolume": "4", "year": "2002", "__typename": "ProceedingType" }, "article": { "id": "12OmNrkBwnC", "doi": "10.1109/ICASSP.2002.5745375", "title": "Image sequence restoration in the presence of pathological motion and severe artifacts", "normalizedTitle": "Image sequence restoration in the presence of pathological motion and severe artifacts", "abstract": "Incorrect motion vectors represent the main reason why current image sequence restoration schemes fail. We present a scheme that 1) identifies areas that are likely to contain wrong motion vectors, 2) finds artifacts within these areas, and 3) restores these artifacts: Although temporal information is commonly used in nowadays restoration systems [7, 12], in this particular case the artifact restoration cannot rely on it due to the uncertain motion information. Hence, the restoration needs to rely on spatial information alone. The novel spatial restoration algorithm that we introduce here, performs a non-linear interpolation that preserves the edges surrounding the artifact area. In this way, the general structure of the image is reconstructed. The restoration algorithm is demonstrated on both artificial and real life examples, and the advantages of the proposed edge-based restoration are highlighted. In addition, results are shown for the proposed complete image sequence restoration scheme.", "abstracts": [ { "abstractType": "Regular", "content": "Incorrect motion vectors represent the main reason why current image sequence restoration schemes fail. 
We present a scheme that 1) identifies areas that are likely to contain wrong motion vectors, 2) finds artifacts within these areas, and 3) restores these artifacts: Although temporal information is commonly used in nowadays restoration systems [7, 12], in this particular case the artifact restoration cannot rely on it due to the uncertain motion information. Hence, the restoration needs to rely on spatial information alone. The novel spatial restoration algorithm that we introduce here, performs a non-linear interpolation that preserves the edges surrounding the artifact area. In this way, the general structure of the image is reconstructed. The restoration algorithm is demonstrated on both artificial and real life examples, and the advantages of the proposed edge-based restoration are highlighted. In addition, results are shown for the proposed complete image sequence restoration scheme.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "Incorrect motion vectors represent the main reason why current image sequence restoration schemes fail. We present a scheme that 1) identifies areas that are likely to contain wrong motion vectors, 2) finds artifacts within these areas, and 3) restores these artifacts: Although temporal information is commonly used in nowadays restoration systems [7, 12], in this particular case the artifact restoration cannot rely on it due to the uncertain motion information. Hence, the restoration needs to rely on spatial information alone. The novel spatial restoration algorithm that we introduce here, performs a non-linear interpolation that preserves the edges surrounding the artifact area. In this way, the general structure of the image is reconstructed. The restoration algorithm is demonstrated on both artificial and real life examples, and the advantages of the proposed edge-based restoration are highlighted. 
In addition, results are shown for the proposed complete image sequence restoration scheme.", "fno": "05745375", "keywords": [ "Image Restoration", "Motion Segmentation", "Image Edge Detection", "Image Segmentation", "Robustness", "Image Reconstruction" ], "authors": [ { "affiliation": "Information and Communication Theory Group, ITS Faculty, Delft University of Technology, The Netherlands", "fullName": "A. Rareş", "givenName": "A.", "surname": "Rareş", "__typename": "ArticleAuthorType" }, { "affiliation": "Information and Communication Theory Group, ITS Faculty, Delft University of Technology, The Netherlands", "fullName": "M.J.T. Reinders", "givenName": "M.J.T.", "surname": "Reinders", "__typename": "ArticleAuthorType" }, { "affiliation": "Information and Communication Theory Group, ITS Faculty, Delft University of Technology, The Netherlands", "fullName": "J. Biemond", "givenName": "J.", "surname": "Biemond", "__typename": "ArticleAuthorType" } ], "idPrefix": "icassp", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "2002-05-01T00:00:00", "pubType": "proceedings", "pages": "IV-3365-IV-3368", "year": "2002", "issn": "1520-6149", "isbn": null, "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "05745374", "articleId": "12OmNBqv2gg", "__typename": "AdjacentArticleType" }, "next": { "fno": "05745376", "articleId": "12OmNzFdt97", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/icpr/2002/1695/1/169510360", "title": "Application of Rigid Motion Geometry to Film Restoration", "doi": null, "abstractUrl": "/proceedings-article/icpr/2002/169510360/12OmNC4wtwb", "parentPublication": { "id": "proceedings/icpr/2002/1695/1", "title": "Proceedings of 16th International Conference on Pattern Recognition", "__typename": "ParentPublication" }, "__typename": 
"RecommendedArticleType" }, { "id": "proceedings/icassp/1991/0003/0/00151029", "title": "Restoration of subband coded images", "doi": null, "abstractUrl": "/proceedings-article/icassp/1991/00151029/12OmNvpNIo7", "parentPublication": { "id": "proceedings/icassp/1991/0003/0", "title": "Acoustics, Speech, and Signal Processing, IEEE International Conference on", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvpr/2009/3992/0/05206615", "title": "Compensation of motion artifacts in MRI via graph-based optimization", "doi": null, "abstractUrl": "/proceedings-article/cvpr/2009/05206615/12OmNvvLi50", "parentPublication": { "id": "proceedings/cvpr/2009/3992/0", "title": "2009 IEEE Conference on Computer Vision and Pattern Recognition", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icoip/2010/4252/2/4252b714", "title": "Unified Restoration Method for Different Degraded Images", "doi": null, "abstractUrl": "/proceedings-article/icoip/2010/4252b714/12OmNyKa63B", "parentPublication": { "id": null, "title": null, "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/bigmm/2018/5321/0/08499072", "title": "Residual-Based Video Restoration for HEVC Intra Coding", "doi": null, "abstractUrl": "/proceedings-article/bigmm/2018/08499072/17D45XDIXOM", "parentPublication": { "id": "proceedings/bigmm/2018/5321/0", "title": "2018 IEEE Fourth International Conference on Multimedia Big Data (BigMM)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iccs/2021/9445/0/944500a280", "title": "A Comparative Framework for Blocking Artifacts Removal of compressed Images using Fuzzy Logic", "doi": null, "abstractUrl": "/proceedings-article/iccs/2021/944500a280/1DSyyMZpNQs", "parentPublication": { "id": "proceedings/iccs/2021/9445/0", "title": "2021 International Conference on Computing Sciences 
(ICCS)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvprw/2019/2506/0/250600b954", "title": "EDVR: Video Restoration With Enhanced Deformable Convolutional Networks", "doi": null, "abstractUrl": "/proceedings-article/cvprw/2019/250600b954/1iTvuTmogco", "parentPublication": { "id": "proceedings/cvprw/2019/2506/0", "title": "2019 IEEE/CVF Conference on Computer Vision and Pattern Recognition Workshops (CVPRW)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/fg/2020/3079/0/307900a085", "title": "ATFaceGAN: Single Face Image Restoration and Recognition from Atmospheric Turbulence", "doi": null, "abstractUrl": "/proceedings-article/fg/2020/307900a085/1kecHT7g7JK", "parentPublication": { "id": "proceedings/fg/2020/3079/0/", "title": "2020 15th IEEE International Conference on Automatic Face and Gesture Recognition (FG 2020) (FG)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icci*cc/2019/1419/0/09146084", "title": "Image Restoration Based on Structure and Texture Decomposition", "doi": null, "abstractUrl": "/proceedings-article/icci*cc/2019/09146084/1lFJafT7Nss", "parentPublication": { "id": "proceedings/icci*cc/2019/1419/0", "title": "2019 IEEE 18th International Conference on Cognitive Informatics & Cognitive Computing (ICCI*CC)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icpr/2021/8808/0/09413095", "title": "A NoGAN approach for image and video restoration and compression artifact removal", "doi": null, "abstractUrl": "/proceedings-article/icpr/2021/09413095/1tmjmMPah7G", "parentPublication": { "id": "proceedings/icpr/2021/8808/0", "title": "2020 25th International Conference on Pattern Recognition (ICPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "12OmNCbCrVT", "title": "2014 IEEE Conference on Computer Vision and Pattern Recognition (CVPR)", "acronym": "cvpr", "groupId": "1000147", "volume": "0", "displayVolume": "0", "year": "2014", "__typename": "ProceedingType" }, "article": { "id": "12OmNzuIjov", "doi": "10.1109/CVPR.2014.418", "title": "Diffuse Mirrors: 3D Reconstruction from Diffuse Indirect Illumination Using Inexpensive Time-of-Flight Sensors", "normalizedTitle": "Diffuse Mirrors: 3D Reconstruction from Diffuse Indirect Illumination Using Inexpensive Time-of-Flight Sensors", "abstract": "The functional difference between a diffuse wall and a mirror is well understood: one scatters back into all directions, and the other one preserves the directionality of reflected light. The temporal structure of the light, however, is left intact by both: assuming simple surface reflection, photons that arrive first are reflected first. In this paper, we exploit this insight to recover objects outside the line of sight from second-order diffuse reflections, effectively turning walls into mirrors. We formulate the reconstruction task as a linear inverse problem on the transient response of a scene, which we acquire using an affordable setup consisting of a modulated light source and a time-of-flight image sensor. By exploiting sparsity in the reconstruction domain, we achieve resolutions in the order of a few centimeters for object shape (depth and laterally) and albedo. Our method is robust to ambient light and works for large room-sized scenes. It is drastically faster and less expensive than previous approaches using femtosecond lasers and streak cameras, and does not require any moving parts.", "abstracts": [ { "abstractType": "Regular", "content": "The functional difference between a diffuse wall and a mirror is well understood: one scatters back into all directions, and the other one preserves the directionality of reflected light. 
The temporal structure of the light, however, is left intact by both: assuming simple surface reflection, photons that arrive first are reflected first. In this paper, we exploit this insight to recover objects outside the line of sight from second-order diffuse reflections, effectively turning walls into mirrors. We formulate the reconstruction task as a linear inverse problem on the transient response of a scene, which we acquire using an affordable setup consisting of a modulated light source and a time-of-flight image sensor. By exploiting sparsity in the reconstruction domain, we achieve resolutions in the order of a few centimeters for object shape (depth and laterally) and albedo. Our method is robust to ambient light and works for large room-sized scenes. It is drastically faster and less expensive than previous approaches using femtosecond lasers and streak cameras, and does not require any moving parts.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "The functional difference between a diffuse wall and a mirror is well understood: one scatters back into all directions, and the other one preserves the directionality of reflected light. The temporal structure of the light, however, is left intact by both: assuming simple surface reflection, photons that arrive first are reflected first. In this paper, we exploit this insight to recover objects outside the line of sight from second-order diffuse reflections, effectively turning walls into mirrors. We formulate the reconstruction task as a linear inverse problem on the transient response of a scene, which we acquire using an affordable setup consisting of a modulated light source and a time-of-flight image sensor. By exploiting sparsity in the reconstruction domain, we achieve resolutions in the order of a few centimeters for object shape (depth and laterally) and albedo. Our method is robust to ambient light and works for large room-sized scenes. 
It is drastically faster and less expensive than previous approaches using femtosecond lasers and streak cameras, and does not require any moving parts.", "fno": "5118d222", "keywords": [ "Image Reconstruction", "Transient Analysis", "Geometry", "Mathematical Model", "Sensors", "Cameras", "Lighting" ], "authors": [ { "affiliation": null, "fullName": "Felix Heide", "givenName": "Felix", "surname": "Heide", "__typename": "ArticleAuthorType" }, { "affiliation": null, "fullName": "Lei Xiao", "givenName": "Lei", "surname": "Xiao", "__typename": "ArticleAuthorType" }, { "affiliation": null, "fullName": "Wolfgang Heidrich", "givenName": "Wolfgang", "surname": "Heidrich", "__typename": "ArticleAuthorType" }, { "affiliation": null, "fullName": "Matthias B. Hullin", "givenName": "Matthias B.", "surname": "Hullin", "__typename": "ArticleAuthorType" } ], "idPrefix": "cvpr", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "2014-06-01T00:00:00", "pubType": "proceedings", "pages": "3222-3229", "year": "2014", "issn": "1063-6919", "isbn": "978-1-4799-5118-5", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "5118d214", "articleId": "12OmNwKGArc", "__typename": "AdjacentArticleType" }, "next": { "fno": "5118d230", "articleId": "12OmNxGAKZx", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/wacv/2016/0641/0/07477643", "title": "Unifying diffuse and specular reflections for the photometric stereo problem", "doi": null, "abstractUrl": "/proceedings-article/wacv/2016/07477643/12OmNAsBFHt", "parentPublication": { "id": "proceedings/wacv/2016/0641/0", "title": "2016 IEEE Winter Conference on Applications of Computer Vision (WACV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iccv/2017/1032/0/1032f180", "title": "What is Around the 
Camera?", "doi": null, "abstractUrl": "/proceedings-article/iccv/2017/1032f180/12OmNCdBDIs", "parentPublication": { "id": "proceedings/iccv/2017/1032/0", "title": "2017 IEEE International Conference on Computer Vision (ICCV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icip/1995/7310/2/73102354", "title": "3-D shape recovery of hybrid reflectance surface using indirect diffuse illumination", "doi": null, "abstractUrl": "/proceedings-article/icip/1995/73102354/12OmNCyTysb", "parentPublication": { "id": "proceedings/icip/1995/7310/2", "title": "Image Processing, International Conference on", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iccp/2012/1662/0/06215216", "title": "Diffuse structured light", "doi": null, "abstractUrl": "/proceedings-article/iccp/2012/06215216/12OmNwM6A58", "parentPublication": { "id": "proceedings/iccp/2012/1662/0", "title": "2012 IEEE International Conference on Computational Photography (ICCP)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icpr/2008/2174/0/04761299", "title": "Specularity removal and relighting of 3D object model for virtual exhibition", "doi": null, "abstractUrl": "/proceedings-article/icpr/2008/04761299/12OmNyuPKTC", "parentPublication": { "id": "proceedings/icpr/2008/2174/0", "title": "ICPR 2008 19th International Conference on Pattern Recognition", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iccp/2018/2526/0/08368461", "title": "Acquiring and characterizing plane-to-ray indirect light transport", "doi": null, "abstractUrl": "/proceedings-article/iccp/2018/08368461/12OmNzkMlWO", "parentPublication": { "id": "proceedings/iccp/2018/2526/0", "title": "2018 IEEE International Conference on Computational Photography (ICCP)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { 
"id": "proceedings/cvpr/2018/6420/0/642000g267", "title": "Inferring Light Fields from Shadows", "doi": null, "abstractUrl": "/proceedings-article/cvpr/2018/642000g267/17D45XvMcaB", "parentPublication": { "id": "proceedings/cvpr/2018/6420/0", "title": "2018 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvpr/2022/6946/0/694600r7855", "title": "All-photon Polarimetric Time-of-Flight Imaging", "doi": null, "abstractUrl": "/proceedings-article/cvpr/2022/694600r7855/1H0OrvG2d1u", "parentPublication": { "id": "proceedings/cvpr/2022/6946/0", "title": "2022 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2021/04/08877764", "title": "Programmable Non-Epipolar Indirect Light Transport: Capture and Analysis", "doi": null, "abstractUrl": "/journal/tg/2021/04/08877764/1emy95qb1NS", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvpr/2021/4509/0/450900e789", "title": "Monocular Reconstruction of Neural Face Reflectance Fields", "doi": null, "abstractUrl": "/proceedings-article/cvpr/2021/450900e789/1yeICvosFFK", "parentPublication": { "id": "proceedings/cvpr/2021/4509/0", "title": "2021 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "12OmNy9Prj1", "title": "2017 IEEE International Conference on Computer Vision Workshop (ICCVW)", "acronym": "iccvw", "groupId": "1800041", "volume": "0", "displayVolume": "0", "year": "2017", "__typename": "ProceedingType" }, "article": { "id": "12OmNwF0C4S", "doi": "10.1109/ICCVW.2017.190", "title": "Dense Face Alignment", "normalizedTitle": "Dense Face Alignment", "abstract": "Face alignment is a classic problem in the computer vision field. Previous works mostly focus on sparse alignment with a limited number of facial landmark points, i.e., facial landmark detection. In this paper, for the first time, we aim at providing a very dense 3D alignment for large-pose face images. To achieve this, we train a CNN to estimate the 3D face shape, which not only aligns limited facial landmarks but also fits face contours and SIFT feature points. Moreover, we also address the bottleneck of training CNN with multiple datasets, due to different landmark markups on different datasets, such as 5, 34, 68. Experimental results show our method not only provides high-quality, dense 3D face fitting but also outperforms the state-of-the-art facial landmark detection methods on challenging datasets. Our model can run at real time during testing and it's available at http:///cvlab.cse.msu.edu/project-pifa.html.", "abstracts": [ { "abstractType": "Regular", "content": "Face alignment is a classic problem in the computer vision field. Previous works mostly focus on sparse alignment with a limited number of facial landmark points, i.e., facial landmark detection. In this paper, for the first time, we aim at providing a very dense 3D alignment for large-pose face images. To achieve this, we train a CNN to estimate the 3D face shape, which not only aligns limited facial landmarks but also fits face contours and SIFT feature points. 
Moreover, we also address the bottleneck of training CNN with multiple datasets, due to different landmark markups on different datasets, such as 5, 34, 68. Experimental results show our method not only provides high-quality, dense 3D face fitting but also outperforms the state-of-the-art facial landmark detection methods on challenging datasets. Our model can run at real time during testing and it's available at http://cvlab.cse.msu.edu/project-pifa.html.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "Face alignment is a classic problem in the computer vision field. Previous works mostly focus on sparse alignment with a limited number of facial landmark points, i.e., facial landmark detection. In this paper, for the first time, we aim at providing a very dense 3D alignment for large-pose face images. To achieve this, we train a CNN to estimate the 3D face shape, which not only aligns limited facial landmarks but also fits face contours and SIFT feature points. Moreover, we also address the bottleneck of training CNN with multiple datasets, due to different landmark markups on different datasets, such as 5, 34, 68. Experimental results show our method not only provides high-quality, dense 3D face fitting but also outperforms the state-of-the-art facial landmark detection methods on challenging datasets. 
Our model can run at real time during testing and it's available at http://cvlab.cse.msu.edu/project-pifa.html.", "fno": "1034b619", "keywords": [ "Face", "Three Dimensional Displays", "Shape", "Solid Modeling", "Two Dimensional Displays", "Labeling", "Training" ], "authors": [ { "affiliation": null, "fullName": "Yaojie Liu", "givenName": "Yaojie", "surname": "Liu", "__typename": "ArticleAuthorType" }, { "affiliation": null, "fullName": "Amin Jourabloo", "givenName": "Amin", "surname": "Jourabloo", "__typename": "ArticleAuthorType" }, { "affiliation": null, "fullName": "William Ren", "givenName": "William", "surname": "Ren", "__typename": "ArticleAuthorType" }, { "affiliation": null, "fullName": "Xiaoming Liu", "givenName": "Xiaoming", "surname": "Liu", "__typename": "ArticleAuthorType" } ], "idPrefix": "iccvw", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "2017-10-01T00:00:00", "pubType": "proceedings", "pages": "1619-1628", "year": "2017", "issn": "2473-9944", "isbn": "978-1-5386-1034-3", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "1034b609", "articleId": "12OmNxG1yH3", "__typename": "AdjacentArticleType" }, "next": { "fno": "1034b629", "articleId": "12OmNyPQ4zL", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/cvpr/2016/8851/0/07780823", "title": "Large-Pose Face Alignment via CNN-Based Dense 3D Model Fitting", "doi": null, "abstractUrl": "/proceedings-article/cvpr/2016/07780823/12OmNqBbHze", "parentPublication": { "id": "proceedings/cvpr/2016/8851/0", "title": "2016 IEEE Conference on Computer Vision and Pattern Recognition (CVPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iccv/2017/1032/0/08237691", "title": "Faster than Real-Time Facial Alignment: A 3D Spatial Transformer Network Approach 
in Unconstrained Poses", "doi": null, "abstractUrl": "/proceedings-article/iccv/2017/08237691/12OmNvAiSzK", "parentPublication": { "id": "proceedings/iccv/2017/1032/0", "title": "2017 IEEE International Conference on Computer Vision (ICCV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iccvw/2017/1034/0/1034b599", "title": "FacePoseNet: Making a Case for Landmark-Free Face Alignment", "doi": null, "abstractUrl": "/proceedings-article/iccvw/2017/1034b599/12OmNwpoFCL", "parentPublication": { "id": "proceedings/iccvw/2017/1034/0", "title": "2017 IEEE International Conference on Computer Vision Workshop (ICCVW)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/fg/2018/2335/0/233501a399", "title": "Cascade Multi-View Hourglass Model for Robust 3D Face Alignment", "doi": null, "abstractUrl": "/proceedings-article/fg/2018/233501a399/12OmNxaw5bt", "parentPublication": { "id": "proceedings/fg/2018/2335/0", "title": "2018 13th IEEE International Conference on Automatic Face & Gesture Recognition (FG 2018)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvpr/2016/8851/0/8851a146", "title": "Face Alignment Across Large Poses: A 3D Solution", "doi": null, "abstractUrl": "/proceedings-article/cvpr/2016/8851a146/12OmNzV70Ix", "parentPublication": { "id": "proceedings/cvpr/2016/8851/0", "title": "2016 IEEE Conference on Computer Vision and Pattern Recognition (CVPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iccv/2017/1032/0/1032b021", "title": "How Far are We from Solving the 2D & 3D Face Alignment Problem? 
(and a Dataset of 230,000 3D Facial Landmarks)", "doi": null, "abstractUrl": "/proceedings-article/iccv/2017/1032b021/12OmNzX6ctJ", "parentPublication": { "id": "proceedings/iccv/2017/1032/0", "title": "2017 IEEE International Conference on Computer Vision (ICCV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tp/2019/01/08122025", "title": "Face Alignment in Full Pose Range: A 3D Total Solution", "doi": null, "abstractUrl": "/journal/tp/2019/01/08122025/17D45XDIXWd", "parentPublication": { "id": "trans/tp", "title": "IEEE Transactions on Pattern Analysis & Machine Intelligence", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/mlbdbi/2019/5094/0/509400a057", "title": "A Survey for 2D and 3D Face Alignment", "doi": null, "abstractUrl": "/proceedings-article/mlbdbi/2019/509400a057/1gjRJeUDXjO", "parentPublication": { "id": "proceedings/mlbdbi/2019/5094/0", "title": "2019 International Conference on Machine Learning, Big Data and Business Intelligence (MLBDBI)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iccvw/2019/5023/0/502300d082", "title": "The 2nd 3D Face Alignment in the Wild Challenge (3DFAW-Video): Dense Reconstruction From Video", "doi": null, "abstractUrl": "/proceedings-article/iccvw/2019/502300d082/1i5muuh7S6s", "parentPublication": { "id": "proceedings/iccvw/2019/5023/0", "title": "2019 IEEE/CVF International Conference on Computer Vision Workshop (ICCVW)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvpr/2020/7168/0/716800f202", "title": "RetinaFace: Single-Shot Multi-Level Face Localisation in the Wild", "doi": null, "abstractUrl": "/proceedings-article/cvpr/2020/716800f202/1m3o3o90ONa", "parentPublication": { "id": "proceedings/cvpr/2020/7168/0", "title": "2020 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)", 
"__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "12OmNyxXlsZ", "title": "Pattern Recognition, International Conference on", "acronym": "icpr", "groupId": "1000545", "volume": "1", "displayVolume": "0", "year": "2006", "__typename": "ProceedingType" }, "article": { "id": "12OmNy7h3cU", "doi": "10.1109/ICPR.2006.1048", "title": "Shape Alignment by Learning a Landmark-PDM Coupled Model", "normalizedTitle": "Shape Alignment by Learning a Landmark-PDM Coupled Model", "abstract": "This paper revisits the model-based approaches for groupwise shape alignment. The key contribution is modeling the landmarks instead of considering them as nodes sliding along the shape contour. The shape group is thus modeled by a landmark-PDM coupled model instead of a constrained Point Distribution Model (PDM). This coupled model is estimated by a stable four-stage estimation algorithm. There are two significant achievements. First, shapes are aligned in a fully unsupervised manner ? both the number and location of landmarks are automatically decided. Second, extremely noisy and largely deformed shapes can be robustly aligned. These are demonstrated using both synthesized and real data.", "abstracts": [ { "abstractType": "Regular", "content": "This paper revisits the model-based approaches for groupwise shape alignment. The key contribution is modeling the landmarks instead of considering them as nodes sliding along the shape contour. The shape group is thus modeled by a landmark-PDM coupled model instead of a constrained Point Distribution Model (PDM). This coupled model is estimated by a stable four-stage estimation algorithm. There are two significant achievements. First, shapes are aligned in a fully unsupervised manner ? both the number and location of landmarks are automatically decided. Second, extremely noisy and largely deformed shapes can be robustly aligned. 
These are demonstrated using both synthesized and real data.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "This paper revisits the model-based approaches for groupwise shape alignment. The key contribution is modeling the landmarks instead of considering them as nodes sliding along the shape contour. The shape group is thus modeled by a landmark-PDM coupled model instead of a constrained Point Distribution Model (PDM). This coupled model is estimated by a stable four-stage estimation algorithm. There are two significant achievements. First, shapes are aligned in a fully unsupervised manner — both the number and location of landmarks are automatically decided. Second, extremely noisy and largely deformed shapes can be robustly aligned. These are demonstrated using both synthesized and real data.", "fno": "252110959", "keywords": [], "authors": [ { "affiliation": "Chinese University of Hong Kong", "fullName": "Yi-Feng Jiang", "givenName": "Yi-Feng", "surname": "Jiang", "__typename": "ArticleAuthorType" }, { "affiliation": "Chinese University of Hong Kong", "fullName": "Jun Xie", "givenName": "Jun", "surname": "Xie", "__typename": "ArticleAuthorType" }, { "affiliation": "Chinese University of Hong Kong", "fullName": "Hung Tat Tsui", "givenName": "Hung", "surname": "Tat Tsui", "__typename": "ArticleAuthorType" } ], "idPrefix": "icpr", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "2006-08-01T00:00:00", "pubType": "proceedings", "pages": "959-962", "year": "2006", "issn": "1051-4651", "isbn": "0-7695-2521-0", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "252110955", "articleId": "12OmNzBOikI", "__typename": "AdjacentArticleType" }, "next": { "fno": "252110963", "articleId": "12OmNC4eSzL", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": 
"proceedings/icpr/2010/4109/0/4109d971", "title": "Initialization and Pose Alignment in Active Shape Model", "doi": null, "abstractUrl": "/proceedings-article/icpr/2010/4109d971/12OmNAo45Lh", "parentPublication": { "id": "proceedings/icpr/2010/4109/0", "title": "Pattern Recognition, International Conference on", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/socpar/2009/3879/0/3879a381", "title": "Face Sketch Multiple Features Detection Using Simultaneously Shape and Landmark Movement", "doi": null, "abstractUrl": "/proceedings-article/socpar/2009/3879a381/12OmNAsTgOW", "parentPublication": { "id": "proceedings/socpar/2009/3879/0", "title": "Soft Computing and Pattern Recognition, International Conference of", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iccv/2001/1143/1/114310566", "title": "Automated 3D PDM Construction Using Deformable Models", "doi": null, "abstractUrl": "/proceedings-article/iccv/2001/114310566/12OmNB9bved", "parentPublication": { "id": "proceedings/iccv/2001/1143/1", "title": "Computer Vision, IEEE International Conference on", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvpr/2013/4989/0/4989b878", "title": "PDM-ENLOR: Learning Ensemble of Local PDM-Based Regressions", "doi": null, "abstractUrl": "/proceedings-article/cvpr/2013/4989b878/12OmNvAiSJx", "parentPublication": { "id": "proceedings/cvpr/2013/4989/0", "title": "2013 IEEE Conference on Computer Vision and Pattern Recognition (CVPR)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iccv/2001/1143/1/00937567", "title": "Automated 3D PDM construction using deformable models", "doi": null, "abstractUrl": "/proceedings-article/iccv/2001/00937567/12OmNvFHfC0", "parentPublication": { "id": "proceedings/iccv/2001/1143/1", "title": "Computer Vision, IEEE International Conference 
on", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvpr/2004/2158/1/01315025", "title": "Shape correspondence through landmark sliding", "doi": null, "abstractUrl": "/proceedings-article/cvpr/2004/01315025/12OmNvlPkCB", "parentPublication": { "id": "proceedings/cvpr/2004/2158/1", "title": "Proceedings of the 2004 IEEE Computer Society Conference on Computer Vision and Pattern Recognition, 2004. CVPR 2004.", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icassp/1999/5041/6/00757607", "title": "Gesture image sequence interpretation using the multi-PDM method and hidden Markov model", "doi": null, "abstractUrl": "/proceedings-article/icassp/1999/00757607/12OmNxwENGf", "parentPublication": { "id": "proceedings/icassp/1999/5041/6", "title": "1999 IEEE International Conference on Acoustics, Speech, and Signal Processing. Proceedings. ICASSP99", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/wacv/2015/6683/0/6683a929", "title": "Person Re-identification Using the Silhouette Shape Described by a Point Distribution Model", "doi": null, "abstractUrl": "/proceedings-article/wacv/2015/6683a929/12OmNy6qfN4", "parentPublication": { "id": "proceedings/wacv/2015/6683/0", "title": "2015 IEEE Winter Conference on Applications of Computer Vision (WACV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tp/2010/04/ttp2010040579", "title": "Nonstationary Shape Activities: Dynamic Models for Landmark Shape Change and Applications", "doi": null, "abstractUrl": "/journal/tp/2010/04/ttp2010040579/13rRUNvgzjx", "parentPublication": { "id": "trans/tp", "title": "IEEE Transactions on Pattern Analysis & Machine Intelligence", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "12OmNzvQHKd", "title": "2016 International Conference on Micro-Electronics and Telecommunication Engineering (ICMETE)", "acronym": "icmete", "groupId": "1817244", "volume": "0", "displayVolume": "0", "year": "2016", "__typename": "ProceedingType" }, "article": { "id": "12OmNzahc5j", "doi": "10.1109/ICMETE.2016.128", "title": "Detection and Grading Severity of Caries in Dental X-ray Images", "normalizedTitle": "Detection and Grading Severity of Caries in Dental X-ray Images", "abstract": "It is significant to analyze the dental images in order to improve and quantify medical images for correct diagnosis. Caries or cavity is one of the most prevalent diseases of the teeth. Dentists are putting the best effort to identify the problem at an earlier stage. The proposed method used in this paper is focused on the challenges faced during the root canal edge extraction from dental radiographic images, which is a major problem besides cavity detection and extraction. The image processing techniques helps to identify the caries that provide dentists with the precise results of the area affected by the caries. The proposed methodology consists of preprocessing of bitewing radiographic images using top hat bottom hat transformation followed by the sharpening filter for edge enhancement. This combinational approach provides qualitative and quantitative assessment to dentists on the presence of cavity. The caries are extracted by some morphological tools to grade the severity on the basis of some metric values. Preparatory experiments show the significance of the proposed method to extract cavity and grade its effect on the tooth.", "abstracts": [ { "abstractType": "Regular", "content": "It is significant to analyze the dental images in order to improve and quantify medical images for correct diagnosis. Caries or cavity is one of the most prevalent diseases of the teeth. Dentists are putting the best effort to identify the problem at an earlier stage. 
The proposed method used in this paper is focused on the challenges faced during the root canal edge extraction from dental radiographic images, which is a major problem besides cavity detection and extraction. The image processing techniques helps to identify the caries that provide dentists with the precise results of the area affected by the caries. The proposed methodology consists of preprocessing of bitewing radiographic images using top hat bottom hat transformation followed by the sharpening filter for edge enhancement. This combinational approach provides qualitative and quantitative assessment to dentists on the presence of cavity. The caries are extracted by some morphological tools to grade the severity on the basis of some metric values. Preparatory experiments show the significance of the proposed method to extract cavity and grade its effect on the tooth.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "It is significant to analyze the dental images in order to improve and quantify medical images for correct diagnosis. Caries or cavity is one of the most prevalent diseases of the teeth. Dentists are putting the best effort to identify the problem at an earlier stage. The proposed method used in this paper is focused on the challenges faced during the root canal edge extraction from dental radiographic images, which is a major problem besides cavity detection and extraction. The image processing techniques helps to identify the caries that provide dentists with the precise results of the area affected by the caries. The proposed methodology consists of preprocessing of bitewing radiographic images using top hat bottom hat transformation followed by the sharpening filter for edge enhancement. This combinational approach provides qualitative and quantitative assessment to dentists on the presence of cavity. The caries are extracted by some morphological tools to grade the severity on the basis of some metric values. 
Preparatory experiments show the significance of the proposed method to extract cavity and grade its effect on the tooth.", "fno": "07938944", "keywords": [ "Dentistry", "Diagnostic Radiography", "Edge Detection", "Medical Disorders", "Medical Image Processing", "Caries Detection", "Caries Grading Severity", "Dental X Ray Images", "Diseases", "Teeth", "Root Canal Edge Extraction", "Dental Radiographic Images", "Bitewing Radiographic Images", "Top Hat Bottom Hat Transformation", "Edge Enhancement", "Dentistry", "Teeth", "Cavity Resonators", "X Ray Imaging", "Radiography", "Lesions", "Standards", "Dental Radiographs", "Top Hat Bottom Hat Transformation", "Morphology" ], "authors": [ { "affiliation": null, "fullName": "Anupama Bhan", "givenName": "Anupama", "surname": "Bhan", "__typename": "ArticleAuthorType" }, { "affiliation": null, "fullName": "Garima Vyas", "givenName": "Garima", "surname": "Vyas", "__typename": "ArticleAuthorType" }, { "affiliation": null, "fullName": "Sourav Mishra", "givenName": "Sourav", "surname": "Mishra", "__typename": "ArticleAuthorType" }, { "affiliation": null, "fullName": "Pulkit Pandey", "givenName": "Pulkit", "surname": "Pandey", "__typename": "ArticleAuthorType" } ], "idPrefix": "icmete", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "2016-09-01T00:00:00", "pubType": "proceedings", "pages": "375-378", "year": "2016", "issn": null, "isbn": "978-1-5090-3411-6", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "07938942", "articleId": "12OmNyKJij4", "__typename": "AdjacentArticleType" }, "next": { "fno": "07938945", "articleId": "12OmNxy4MWO", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/bibe/2015/7983/0/07367726", "title": "Proposal for an image-based software system for dental implant positioning", "doi": null, "abstractUrl": 
"/proceedings-article/bibe/2015/07367726/12OmNASILTm", "parentPublication": { "id": "proceedings/bibe/2015/7983/0", "title": "2015 IEEE 15th International Conference on Bioinformatics and Bioengineering (BIBE)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/synasc/2014/8447/0/07034730", "title": "Enhancing Dental Radiographic Images in Spline-Type Spaces", "doi": null, "abstractUrl": "/proceedings-article/synasc/2014/07034730/12OmNAfPIOV", "parentPublication": { "id": "proceedings/synasc/2014/8447/0", "title": "2014 16th International Symposium on Symbolic and Numeric Algorithms for Scientific Computing (SYNASC)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/bibe/2017/1324/0/132401a327", "title": "Near-Infrared Transillumination Guides Administration of Dental 2D Radiography and CBCT Imaging", "doi": null, "abstractUrl": "/proceedings-article/bibe/2017/132401a327/12OmNvyjGh4", "parentPublication": { "id": "proceedings/bibe/2017/1324/0", "title": "2017 IEEE 17th International Conference on Bioinformatics and Bioengineering (BIBE)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icmtma/2017/4868/0/07832280", "title": "Dental Tissue Engineering on Human Dental Pulp Stem Cells Based on Tooth Development", "doi": null, "abstractUrl": "/proceedings-article/icmtma/2017/07832280/12OmNwpXROH", "parentPublication": { "id": "proceedings/icmtma/2017/4868/0", "title": "2017 9th International Conference on Measuring Technology and Mechatronics Automation (ICMTMA)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ldav/2014/5215/0/07013207", "title": "Visual analysis of large dental imaging data in caries research", "doi": null, "abstractUrl": "/proceedings-article/ldav/2014/07013207/12OmNx76TD4", "parentPublication": { "id": "proceedings/ldav/2014/5215/0", "title": 
"2014 IEEE 4th Symposium on Large Data Analysis and Visualization (LDAV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icmete/2016/3411/0/07938993", "title": "Feature Line Profile Based Automatic Detection of Dental Caries in Bitewing Radiography", "doi": null, "abstractUrl": "/proceedings-article/icmete/2016/07938993/12OmNzuZUBc", "parentPublication": { "id": "proceedings/icmete/2016/3411/0", "title": "2016 International Conference on Micro-Electronics and Telecommunication Engineering (ICMETE)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/compsac/2018/2666/1/266601a492", "title": "Automated Dental Image Analysis by Deep Learning on Small Dataset", "doi": null, "abstractUrl": "/proceedings-article/compsac/2018/266601a492/17D45WwsQ4H", "parentPublication": { "id": "proceedings/compsac/2018/2666/2", "title": "2018 IEEE 42nd Annual Computer Software and Applications Conference (COMPSAC)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/wacv/2023/9346/0/934600f583", "title": "Self-Supervised Learning with Masked Image Modeling for Teeth Numbering, Detection of Dental Restorations, and Instance Segmentation in Dental Panoramic Radiographs", "doi": null, "abstractUrl": "/proceedings-article/wacv/2023/934600f583/1L8qxxnly2k", "parentPublication": { "id": "proceedings/wacv/2023/9346/0", "title": "2023 IEEE/CVF Winter Conference on Applications of Computer Vision (WACV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/aiccsa/2019/5052/0/09035278", "title": "An Automatic Dental Decay Treatment Prediction using a Deep Convolutional Neural Network on X-Ray Images", "doi": null, "abstractUrl": "/proceedings-article/aiccsa/2019/09035278/1ifhuvx19ny", "parentPublication": { "id": "proceedings/aiccsa/2019/5052/0", "title": "2019 IEEE/ACS 16th International Conference 
on Computer Systems and Applications (AICCSA)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/bibe/2020/9574/0/957400a315", "title": "Estimating Hard-tissue Conditions from Dental Images via Machine Learning", "doi": null, "abstractUrl": "/proceedings-article/bibe/2020/957400a315/1pBMota9P68", "parentPublication": { "id": "proceedings/bibe/2020/9574/0", "title": "2020 IEEE 20th International Conference on Bioinformatics and Bioengineering (BIBE)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "12OmNzvQHKd", "title": "2016 International Conference on Micro-Electronics and Telecommunication Engineering (ICMETE)", "acronym": "icmete", "groupId": "1817244", "volume": "0", "displayVolume": "0", "year": "2016", "__typename": "ProceedingType" }, "article": { "id": "12OmNzuZUBc", "doi": "10.1109/ICMETE.2016.59", "title": "Feature Line Profile Based Automatic Detection of Dental Caries in Bitewing Radiography", "normalizedTitle": "Feature Line Profile Based Automatic Detection of Dental Caries in Bitewing Radiography", "abstract": "Dental caries is a bacterial infection that causes tooth decay and is amongst the most common incessant maladies of individuals around the world. Teeth are defenseless to this infection all through their lifetime especially when care is not taken for proper oral hygiene. It is significant to analyze the dental images in order to improve and quantify medical images for correct diagnosis. Caries or cavity is one of the most prevalent diseases of the teeth. Dentists are putting the best effort to identify the problem at an earlier stage. The proposed method used in this paper is focused on the challenges faced during the cavity detection which sometimes is very tedious task due to small lesions not visible to human eye. The image processing techniques helps to identify the caries that provide dentists with the precise results of the area affected by the caries. The proposed methodology consists of preprocessing of bitewing radiographic images followed by edge recognition, thresholding and connected component labelling. This combinational approach provides qualitative and quantitative assessment to dentists on the presence of cavity. The caries are detected by connected component and mask overlap helps to highlight the affected area to grade the severity which is tested on the basis of line intensity profiles. 
Preparatory experiments show the significance of the proposed method to extract cavity and grade its effect on the tooth.", "abstracts": [ { "abstractType": "Regular", "content": "Dental caries is a bacterial infection that causes tooth decay and is amongst the most common incessant maladies of individuals around the world. Teeth are defenseless to this infection all through their lifetime especially when care is not taken for proper oral hygiene. It is significant to analyze the dental images in order to improve and quantify medical images for correct diagnosis. Caries or cavity is one of the most prevalent diseases of the teeth. Dentists are putting the best effort to identify the problem at an earlier stage. The proposed method used in this paper is focused on the challenges faced during the cavity detection which sometimes is very tedious task due to small lesions not visible to human eye. The image processing techniques helps to identify the caries that provide dentists with the precise results of the area affected by the caries. The proposed methodology consists of preprocessing of bitewing radiographic images followed by edge recognition, thresholding and connected component labelling. This combinational approach provides qualitative and quantitative assessment to dentists on the presence of cavity. The caries are detected by connected component and mask overlap helps to highlight the affected area to grade the severity which is tested on the basis of line intensity profiles. Preparatory experiments show the significance of the proposed method to extract cavity and grade its effect on the tooth.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "Dental caries is a bacterial infection that causes tooth decay and is amongst the most common incessant maladies of individuals around the world. Teeth are defenseless to this infection all through their lifetime especially when care is not taken for proper oral hygiene. 
It is significant to analyze the dental images in order to improve and quantify medical images for correct diagnosis. Caries or cavity is one of the most prevalent diseases of the teeth. Dentists are putting the best effort to identify the problem at an earlier stage. The proposed method used in this paper is focused on the challenges faced during the cavity detection which sometimes is very tedious task due to small lesions not visible to human eye. The image processing techniques helps to identify the caries that provide dentists with the precise results of the area affected by the caries. The proposed methodology consists of preprocessing of bitewing radiographic images followed by edge recognition, thresholding and connected component labelling. This combinational approach provides qualitative and quantitative assessment to dentists on the presence of cavity. The caries are detected by connected component and mask overlap helps to highlight the affected area to grade the severity which is tested on the basis of line intensity profiles. 
Preparatory experiments show the significance of the proposed method to extract cavity and grade its effect on the tooth.", "fno": "07938993", "keywords": [ "Dentistry", "Diagnostic Radiography", "Diseases", "Edge Detection", "Medical Image Processing", "Microorganisms", "Object Detection", "Feature Line Profile Based Automatic Detection", "Dental Caries", "Bitewing Radiography", "Bacterial Infection", "Tooth Decay", "Oral Hygiene", "Diseases", "Cavity Detection", "Image Processing Techniques", "Edge Recognition", "Thresholding", "Connected Component Labelling", "Dentistry", "Teeth", "Radiography", "X Rays", "Cavity Resonators", "Labeling", "Image Edge Detection", "Bitewing Radiographs", "Otsu Thresholding", "Connected Component Labelling", "Morphology" ], "authors": [ { "affiliation": null, "fullName": "Anupama Bhan", "givenName": "Anupama", "surname": "Bhan", "__typename": "ArticleAuthorType" }, { "affiliation": null, "fullName": "Ayush Goyal", "givenName": "Ayush", "surname": "Goyal", "__typename": "ArticleAuthorType" }, { "affiliation": null, "fullName": "Harsh", "givenName": null, "surname": "Harsh", "__typename": "ArticleAuthorType" }, { "affiliation": null, "fullName": "Naveen Chauhan", "givenName": "Naveen", "surname": "Chauhan", "__typename": "ArticleAuthorType" }, { "affiliation": null, "fullName": "Ching-Wei Wang", "givenName": "Ching-Wei", "surname": "Wang", "__typename": "ArticleAuthorType" } ], "idPrefix": "icmete", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "2016-09-01T00:00:00", "pubType": "proceedings", "pages": "635-640", "year": "2016", "issn": null, "isbn": "978-1-5090-3411-6", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "07938992", "articleId": "12OmNzIUfLs", "__typename": "AdjacentArticleType" }, "next": { "fno": "07938994", "articleId": "12OmNyL0Tro", "__typename": "AdjacentArticleType" }, "__typename": 
"AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/robot/1991/2163/0/00131930", "title": "Robotic system for dental subtraction radiography", "doi": null, "abstractUrl": "/proceedings-article/robot/1991/00131930/12OmNASraZG", "parentPublication": { "id": "proceedings/robot/1991/2163/0", "title": "Proceedings. 1991 IEEE International Conference on Robotics and Automation", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/sbec/2016/2132/0/07458971", "title": "Utilizing Biochemical Analyses as a Predictor for Structural Alterations of Fibroblasts Exposed to Adhesives in Combination with Nifedipine and Periodontal Pathogens", "doi": null, "abstractUrl": "/proceedings-article/sbec/2016/07458971/12OmNBEGYGL", "parentPublication": { "id": "proceedings/sbec/2016/2132/0", "title": "2016 32nd Southern Biomedical Engineering Conference (SBEC)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/bibe/2017/1324/0/132401a327", "title": "Near-Infrared Transillumination Guides Administration of Dental 2D Radiography and CBCT Imaging", "doi": null, "abstractUrl": "/proceedings-article/bibe/2017/132401a327/12OmNvyjGh4", "parentPublication": { "id": "proceedings/bibe/2017/1324/0", "title": "2017 IEEE 17th International Conference on Bioinformatics and Bioengineering (BIBE)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ldav/2014/5215/0/07013207", "title": "Visual analysis of large dental imaging data in caries research", "doi": null, "abstractUrl": "/proceedings-article/ldav/2014/07013207/12OmNx76TD4", "parentPublication": { "id": "proceedings/ldav/2014/5215/0", "title": "2014 IEEE 4th Symposium on Large Data Analysis and Visualization (LDAV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ism/2012/4875/0/4875a092", "title": "Automatic Classification of 
Teeth in Bitewing Dental Images Using OLPP", "doi": null, "abstractUrl": "/proceedings-article/ism/2012/4875a092/12OmNyr8YlK", "parentPublication": { "id": "proceedings/ism/2012/4875/0", "title": "2012 IEEE International Symposium on Multimedia", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icccnt/2013/3926/0/06726480", "title": "An effective numbering and classification system for dental panoramic radiographs", "doi": null, "abstractUrl": "/proceedings-article/icccnt/2013/06726480/12OmNzaQoCq", "parentPublication": { "id": "proceedings/icccnt/2013/3926/0", "title": "2013 Fourth International Conference on Computing, Communications and Networking Technologies (ICCCNT)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icmete/2016/3411/0/07938944", "title": "Detection and Grading Severity of Caries in Dental X-ray Images", "doi": null, "abstractUrl": "/proceedings-article/icmete/2016/07938944/12OmNzahc5j", "parentPublication": { "id": "proceedings/icmete/2016/3411/0", "title": "2016 International Conference on Micro-Electronics and Telecommunication Engineering (ICMETE)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icme/2006/0366/0/04036687", "title": "Virtual Dental Patient: a System for Virtual Teeth Drilling", "doi": null, "abstractUrl": "/proceedings-article/icme/2006/04036687/12OmNzw8jhA", "parentPublication": { "id": "proceedings/icme/2006/0366/0", "title": "2006 IEEE International Conference on Multimedia and Expo", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/sibgrapi/2020/9274/0/927400a164", "title": "A study on tooth segmentation and numbering using end-to-end deep neural networks", "doi": null, "abstractUrl": "/proceedings-article/sibgrapi/2020/927400a164/1p2VzkB4pji", "parentPublication": { "id": "proceedings/sibgrapi/2020/9274/0", "title": 
"2020 33rd SIBGRAPI Conference on Graphics, Patterns and Images (SIBGRAPI)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ucc/2020/2394/0/239400a322", "title": "Gingivitis detection by Fractional Fourier Entropy and Biogeography-based Optimization", "doi": null, "abstractUrl": "/proceedings-article/ucc/2020/239400a322/1pZ10jzBjaw", "parentPublication": { "id": "proceedings/ucc/2020/2394/0", "title": "2020 IEEE/ACM 13th International Conference on Utility and Cloud Computing (UCC)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }
{ "proceeding": { "id": "12OmNxH9X7w", "title": "Information Visualization, IEEE Symposium on", "acronym": "ieee-infovis", "groupId": "1000371", "volume": "0", "displayVolume": "0", "year": "2005", "__typename": "ProceedingType" }, "article": { "id": "12OmNxE2n28", "doi": "10.1109/INFOVIS.2005.25", "title": "Multivariate Glyphs for Multi-Object Clusters", "normalizedTitle": "Multivariate Glyphs for Multi-Object Clusters", "abstract": "Aggregating items can simplify the display of huge quantities of data values at the cost of losing information about the attribute values of the individual items. We propose a distribution glyph, in both two- and three-dimensional forms, which specifically addresses the concept of how the aggregated data is distributed over the possible range of values. It is capable of displaying distribution, variability and extent information for up to four attributes at a time of multivariate, clustered data. User studies validate the concept, showing that both glyphs are just as good as raw data and the 3D glyph is better for answering some questions.", "abstracts": [ { "abstractType": "Regular", "content": "Aggregating items can simplify the display of huge quantities of data values at the cost of losing information about the attribute values of the individual items. We propose a distribution glyph, in both two- and three-dimensional forms, which specifically addresses the concept of how the aggregated data is distributed over the possible range of values. It is capable of displaying distribution, variability and extent information for up to four attributes at a time of multivariate, clustered data. 
User studies validate the concept, showing that both glyphs are just as good as raw data and the 3D glyph is better for answering some questions.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "Aggregating items can simplify the display of huge quantities of data values at the cost of losing information about the attribute values of the individual items. We propose a distribution glyph, in both two- and three-dimensional forms, which specifically addresses the concept of how the aggregated data is distributed over the possible range of values. It is capable of displaying distribution, variability and extent information for up to four attributes at a time of multivariate, clustered data. User studies validate the concept, showing that both glyphs are just as good as raw data and the 3D glyph is better for answering some questions.", "fno": "27900019", "keywords": [ "Information Visualization", "Multivariate Visualization", "Distribution", "Aggregated Data" ], "authors": [ { "affiliation": "Johns Hopkins University", "fullName": "Eleanor Boyle Chlan", "givenName": "Eleanor Boyle", "surname": "Chlan", "__typename": "ArticleAuthorType" }, { "affiliation": "University of Maryland,Baltimore County", "fullName": "Penny Rheingans", "givenName": "Penny", "surname": "Rheingans", "__typename": "ArticleAuthorType" } ], "idPrefix": "ieee-infovis", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "2005-10-01T00:00:00", "pubType": "proceedings", "pages": "19", "year": "2005", "issn": "1522-404x", "isbn": "0-7803-9464-x", "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "27900018", "articleId": "12OmNAThXUt", "__typename": "AdjacentArticleType" }, "next": { "fno": "27900020", "articleId": "12OmNzkMlUx", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": 
"proceedings/infvis/2005/9464/0/01532140", "title": "Multivariate glyphs for multi-object clusters", "doi": null, "abstractUrl": "/proceedings-article/infvis/2005/01532140/12OmNqyDjpz", "parentPublication": { "id": "proceedings/infvis/2005/9464/0", "title": "IEEE Symposium on Information Visualization (InfoVis 05)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/pacificvis/2014/2874/0/2874a017", "title": "Non-overlapping Aggregated Multivariate Glyphs for Moving Objects", "doi": null, "abstractUrl": "/proceedings-article/pacificvis/2014/2874a017/12OmNy2agTd", "parentPublication": { "id": "proceedings/pacificvis/2014/2874/0", "title": "2014 IEEE Pacific Visualization Symposium (PacificVis)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/ieee-infovis/2005/2790/0/01532140", "title": "Multivariate glyphs for multi-object clusters", "doi": null, "abstractUrl": "/proceedings-article/ieee-infovis/2005/01532140/12OmNyjccAB", "parentPublication": { "id": "proceedings/ieee-infovis/2005/2790/0", "title": "Information Visualization, IEEE Symposium on", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2017/07/07445239", "title": "A Systematic Review of Experimental Studies on Data Glyphs", "doi": null, "abstractUrl": "/journal/tg/2017/07/07445239/13rRUNvgz4m", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2014/12/06875973", "title": "The Influence of Contour on Similarity Perception of Star Glyphs", "doi": null, "abstractUrl": "/journal/tg/2014/12/06875973/13rRUwhHcQV", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": 
"trans/tg/1996/03/v0266", "title": "Glyphs for Visualizing Uncertainty in Vector Fields", "doi": null, "abstractUrl": "/journal/tg/1996/03/v0266/13rRUxly8SN", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iv/2018/7202/0/720200a058", "title": "Visualizing Multidimensional Data in Treemaps with Adaptive Glyphs", "doi": null, "abstractUrl": "/proceedings-article/iv/2018/720200a058/17D45XeKgvR", "parentPublication": { "id": "proceedings/iv/2018/7202/0", "title": "2018 22nd International Conference Information Visualisation (IV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/5555/01/09930144", "title": "Out of the Plane: Flower Vs. Star Glyphs to Support High-Dimensional Exploration in Two-Dimensional Embeddings", "doi": null, "abstractUrl": "/journal/tg/5555/01/09930144/1HMOX2J2VMY", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iv/2019/2838/0/283800a157", "title": "Evaluation of Effectiveness of Glyphs to Enhance ChronoView", "doi": null, "abstractUrl": "/proceedings-article/iv/2019/283800a157/1cMF9mvWMFO", "parentPublication": { "id": "proceedings/iv/2019/2838/0", "title": "2019 23rd International Conference Information Visualisation (IV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tg/2021/09/09067088", "title": "AgentVis: Visual Analysis of Agent Behavior With Hierarchical Glyphs", "doi": null, "abstractUrl": "/journal/tg/2021/09/09067088/1j1lyTz50k0", "parentPublication": { "id": "trans/tg", "title": "IEEE Transactions on Visualization & Computer Graphics", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], 
"articleVideos": [] }
{ "proceeding": { "id": "12OmNvAiSpb", "title": "Computer Vision, IEEE International Conference on", "acronym": "iccv", "groupId": "1000149", "volume": "1", "displayVolume": "2", "year": "2001", "__typename": "ProceedingType" }, "article": { "id": "12OmNxwncaw", "doi": "10.1109/ICCV.2001.937525", "title": "Smarter presentations: exploiting homography in camera-projector systems", "normalizedTitle": "Smarter presentations: exploiting homography in camera-projector systems", "abstract": "Standard presentation systems consisting of a laptop connected to a projector suffer from two problems: (1) the projected image appears distorted (keystoned) unless the projector is precisely aligned to the projection screen; (2) the speaker is forced to interact with the computer rather than the audience. This paper shows how the addition of an uncalibrated camera, aimed at the screen, solves both problems. Although the locations, orientations and optical parameters of the camera and projector are unknown, the projector-camera system calibrates itself by exploiting the homography between the projected slide and the camera image. Significant improvements are possible over passively calibrating systems since the projector actively manipulates the environment by placing feature points into the scene. For instance, using a low-resolution (160/spl times/120) camera, we can achieve an accuracy of /spl plusmn/3 pixels in a 1024/spl times/768 presentation slide. The camera-projector system infers models for the projector-to-camera and projector-to-screen mappings in order to provide two major benefits.", "abstracts": [ { "abstractType": "Regular", "content": "Standard presentation systems consisting of a laptop connected to a projector suffer from two problems: (1) the projected image appears distorted (keystoned) unless the projector is precisely aligned to the projection screen; (2) the speaker is forced to interact with the computer rather than the audience. 
This paper shows how the addition of an uncalibrated camera, aimed at the screen, solves both problems. Although the locations, orientations and optical parameters of the camera and projector are unknown, the projector-camera system calibrates itself by exploiting the homography between the projected slide and the camera image. Significant improvements are possible over passively calibrating systems since the projector actively manipulates the environment by placing feature points into the scene. For instance, using a low-resolution (160/spl times/120) camera, we can achieve an accuracy of /spl plusmn/3 pixels in a 1024/spl times/768 presentation slide. The camera-projector system infers models for the projector-to-camera and projector-to-screen mappings in order to provide two major benefits.", "__typename": "ArticleAbstractType" } ], "normalizedAbstract": "Standard presentation systems consisting of a laptop connected to a projector suffer from two problems: (1) the projected image appears distorted (keystoned) unless the projector is precisely aligned to the projection screen; (2) the speaker is forced to interact with the computer rather than the audience. This paper shows how the addition of an uncalibrated camera, aimed at the screen, solves both problems. Although the locations, orientations and optical parameters of the camera and projector are unknown, the projector-camera system calibrates itself by exploiting the homography between the projected slide and the camera image. Significant improvements are possible over passively calibrating systems since the projector actively manipulates the environment by placing feature points into the scene. For instance, using a low-resolution (160/spl times/120) camera, we can achieve an accuracy of /spl plusmn/3 pixels in a 1024/spl times/768 presentation slide. 
The camera-projector system infers models for the projector-to-camera and projector-to-screen mappings in order to provide two major benefits.", "fno": "00937525", "keywords": [ "Technical Presentation", "Business Graphics", "Smarter Presentations", "Homography", "Camera Projector Systems", "Orientations", "Optical Parameters", "Projector Camera System", "Optical Distortion", "Cameras", "Portable Computers", "Mice", "Robots", "Layout", "Geometry", "Hardware", "Java", "Application Software" ], "authors": [ { "affiliation": "Just Res., Pittsburgh, PA, USA", "fullName": "R. Sukthankar", "givenName": "R.", "surname": "Sukthankar", "__typename": "ArticleAuthorType" }, { "affiliation": null, "fullName": "R.G. Stockton", "givenName": "R.G.", "surname": "Stockton", "__typename": "ArticleAuthorType" }, { "affiliation": null, "fullName": "M.D. Mullin", "givenName": "M.D.", "surname": "Mullin", "__typename": "ArticleAuthorType" } ], "idPrefix": "iccv", "isOpenAccess": false, "showRecommendedArticles": true, "showBuyMe": true, "hasPdf": true, "pubDate": "2001-01-01T00:00:00", "pubType": "proceedings", "pages": "247,248,249,250,251,252,253", "year": "2001", "issn": null, "isbn": null, "notes": null, "notesType": null, "__typename": "ArticleType" }, "webExtras": [], "adjacentArticles": { "previous": { "fno": "114310135", "articleId": "12OmNwdL7td", "__typename": "AdjacentArticleType" }, "next": { "fno": "114310142", "articleId": "12OmNyYm2FK", "__typename": "AdjacentArticleType" }, "__typename": "AdjacentArticlesType" }, "recommendedArticles": [ { "id": "proceedings/wacv/2017/4822/0/07926707", "title": "Automatic Calibration of a Multiple-Projector Spherical Fish Tank VR Display", "doi": null, "abstractUrl": "/proceedings-article/wacv/2017/07926707/12OmNAoDhTe", "parentPublication": { "id": "proceedings/wacv/2017/4822/0", "title": "2017 IEEE Winter Conference on Applications of Computer Vision (WACV)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" 
}, { "id": "proceedings/cvpr/2001/1272/2/127220504", "title": "A Self-Correcting Projector", "doi": null, "abstractUrl": "/proceedings-article/cvpr/2001/127220504/12OmNB8Cj43", "parentPublication": { "id": "proceedings/cvpr/2001/1272/2", "title": "Proceedings of the 2001 IEEE Computer Society Conference on Computer Vision and Pattern Recognition. CVPR 2001", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icpr/2008/2174/0/04761601", "title": "Calibration of projector-camera systems from virtual mutual projection", "doi": null, "abstractUrl": "/proceedings-article/icpr/2008/04761601/12OmNBp52Hx", "parentPublication": { "id": "proceedings/icpr/2008/2174/0", "title": "ICPR 2008 19th International Conference on Pattern Recognition", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icpr/2010/4109/0/4109a307", "title": "Adaptive Image Projection onto Non-planar Screen Using Projector-Camera Systems", "doi": null, "abstractUrl": "/proceedings-article/icpr/2010/4109a307/12OmNs0C9zQ", "parentPublication": { "id": "proceedings/icpr/2010/4109/0", "title": "Pattern Recognition, International Conference on", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iccv/2001/1143/1/114310247", "title": "Smarter Presentations: Exploiting Homography in Camera-Projector Systems", "doi": null, "abstractUrl": "/proceedings-article/iccv/2001/114310247/12OmNwHyZXS", "parentPublication": { "id": "proceedings/iccv/2001/1143/1", "title": "Computer Vision, IEEE International Conference on", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iciap/2007/2877/0/28770093", "title": "Calibration and Image Generation of Mobile Projector-Camera Systems", "doi": null, "abstractUrl": "/proceedings-article/iciap/2007/28770093/12OmNwtWfIL", "parentPublication": { "id": "proceedings/iciap/2007/2877/0", 
"title": "2007 14th International Conference on Image Analysis and Processing - ICIAP 2007", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/cvprw/2014/4308/0/4308a449", "title": "Projection Center Calibration for a Co-located Projector Camera System", "doi": null, "abstractUrl": "/proceedings-article/cvprw/2014/4308a449/12OmNypIYA4", "parentPublication": { "id": "proceedings/cvprw/2014/4308/0", "title": "2014 IEEE Conference on Computer Vision and Pattern Recognition Workshops (CVPRW)", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/icpr/2010/4109/0/4109a320", "title": "Active Calibration of Camera-Projector Systems Based on Planar Homography", "doi": null, "abstractUrl": "/proceedings-article/icpr/2010/4109a320/12OmNzDehgc", "parentPublication": { "id": "proceedings/icpr/2010/4109/0", "title": "Pattern Recognition, International Conference on", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "proceedings/iccv/2003/1950/2/195020774", "title": "Autocalibration of a Projector-Screen-Camera System: Theory and Algorithm for Screen-to-Camera Homography Estimation", "doi": null, "abstractUrl": "/proceedings-article/iccv/2003/195020774/12OmNzXFoCs", "parentPublication": { "id": "proceedings/iccv/2003/1950/2", "title": "Computer Vision, IEEE International Conference on", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" }, { "id": "trans/tp/2005/12/i1845", "title": "Autocalibration of a Projector-Camera System", "doi": null, "abstractUrl": "/journal/tp/2005/12/i1845/13rRUxASuiM", "parentPublication": { "id": "trans/tp", "title": "IEEE Transactions on Pattern Analysis & Machine Intelligence", "__typename": "ParentPublication" }, "__typename": "RecommendedArticleType" } ], "articleVideos": [] }