Add files using upload-large-folder tool
Browse filesThis view is limited to 50 files because it contains too many changes. See raw diff
- data_without_website/A_Certified_Unlearning_Approach_without_Access_to_Source_Data.json +50 -0
- data_without_website/A_Provably_Robust_Algorithm_for_Differentially_Private_Clustered_Federated_Learning.json +27 -0
- data_without_website/A_teacher-teacher_framework_for_clinical_language_representation_learning.json +50 -0
- data_without_website/AdMiT__Adaptive_Multi-Source_Tuning_in_Dynamic_Environments.json +66 -0
- data_without_website/AgentClinic__a_multimodal_agent_benchmark_to_evaluate_AI_in_simulated_clinical_environments.json +58 -0
- data_without_website/Align_Voting_Behavior_with_Public_Statements_for_Legislator_Representation_Learning.json +63 -0
- data_without_website/Approximate_Envy-Freeness_in_Graphical_Cake_Cutting.json +42 -0
- data_without_website/BALSA__Benchmarking_Active_Learning_Strategies_for_Autonomous_laboratories.json +28 -0
- data_without_website/Bi-Directional_Distribution_Alignment_for_Transductive_Zero-Shot_Learning.json +26 -0
- data_without_website/CCIL__Continuity-Based_Data_Augmentation_for_Corrective_Imitation_Learning.json +54 -0
- data_without_website/Can_LLMs_Enhance_Performance_Prediction_for_Deep_Learning_Models_.json +42 -0
- data_without_website/Computational_Design_of_Flexible_Planar_Microstructures.json +62 -0
- data_without_website/Consistent_Plug-in_Classifiers_for_Complex_Objectives_and_Constraints.json +46 -0
- data_without_website/Contextual_Vision_Transformers_for_Robust_Representation_Learning.json +42 -0
- data_without_website/Cross-Modal_Alignment_via_Variational_Copula_Modelling.json +54 -0
- data_without_website/D-MiSo__Editing_Dynamic_3D_Scenes_using_Multi-Gaussians_Soup_1.json +54 -0
- data_without_website/Decentralized_Noncooperative_Games_with_Coupled_Decision-Dependent_Distributions.json +42 -0
- data_without_website/Denoising_Vision_Transformers.json +58 -0
- data_without_website/Direct_Preference_Optimization__Your_Language_Model_is_Secretly_a_Reward_Model.json +58 -0
- data_without_website/Edge_Rewiring_Goes_Neural__Boosting_Network_Resilience_via_Policy_Gradient.json +50 -0
- data_without_website/Embracing_the_chaos__analysis_and_diagnosis_of_numerical_instability_in_variational_flows.json +42 -0
- data_without_website/Enhancing_Quality_of_Compressed_Images_by_Mitigating_Enhancement_Bias_Towards_Compression_Domain.json +62 -0
- data_without_website/Enhancing_Tampered_Text_Detection_through_Frequency_Feature_Fusion_and_Decomposition.json +66 -0
- data_without_website/Equivariant_Energy-Guided_SDE_for_Inverse_Molecular_Design.json +58 -0
- data_without_website/Exploring_the_Effectiveness_of_Diffusion_Models_in_One-Shot_Federated_Learning.json +46 -0
- data_without_website/Feature_Grinding__Efficient_Backdoor_Sanitation_in_Deep_Neural_Networks.json +32 -0
- data_without_website/FedCSL__A_Scalable_and_Accurate_Approach_to_Federated_Causal_Structure_Learning.json +49 -0
- data_without_website/Generalizing_to_Unseen_Elements__A_Survey_on_Knowledge_Extrapolation_for_Knowledge_Graphs.json +58 -0
- data_without_website/Geometry-Informed_Neural_Networks.json +54 -0
- data_without_website/Interleaving_Retrieval_with_Chain-of-Thought_Reasoning_for_Knowledge-Intensive_Multi-Step_Questions.json +45 -0
- data_without_website/It_Helps_to_Take_a_Second_Opinion__Teaching_Smaller_LLMs_To_Deliberate_Mutually_via_Selective_Rationale_Optimisation.json +50 -0
- data_without_website/KEFI__Kernel-based_Feature_Identification_for_Generalizable_Classification.json +42 -0
- data_without_website/Language-agnostic_BERT_Sentence_Embedding_1.json +54 -0
- data_without_website/Learning_Symmetric_Locomotion_using_Cumulative_Fatigue_for_Reinforcement_Learning.json +32 -0
- data_without_website/Learning_to_Noise__Application-Agnostic_Data_Sharing_with_Local_Differential_Privacy.json +58 -0
- data_without_website/Learning_without_Forgetting_for_Vision-Language_Models.json +58 -0
- data_without_website/LoCA__Location-Aware_Cosine_Adaptation_for_Parameter-Efficient_Fine-Tuning.json +66 -0
- data_without_website/Long_Context_Transfer_from_Language_to_Vision.json +74 -0
- data_without_website/Matching_Pairs__Attributing_Fine-Tuned_Models_to_their_Pre-Trained_Large_Language_Models.json +58 -0
- data_without_website/Meta_Navigator__Search_for_a_Good_Adaptation_Policy_for_Few-Shot_Learning.json +58 -0
- data_without_website/Mixed-modality_Representation_Learning_and_Pre-training_for_Joint_Table-and-Text_Retrieval_in_OpenQA.json +52 -0
- data_without_website/Multi-Granularity_Hand_Action_Detection.json +66 -0
- data_without_website/Nearly_Optimal_Approximation_of_Matrix_Functions_by_the_Lanczos_Method.json +54 -0
- data_without_website/Neural_Collapse_meets_Differential_Privacy__Curious_behaviors_of_NoisyGD_with_Near-Perfect_Representation_Learning.json +50 -0
- data_without_website/No-Regret_and_Incentive-Compatible_Combinatorial_Online_Prediction.json +27 -0
- data_without_website/Non-Smooth_Weakly-Convex_Finite-sum_Coupled_Compositional_Optimization.json +46 -0
- data_without_website/Online_false_discovery_rate_control_for_anomaly_detection_in_time_series.json +50 -0
- data_without_website/OpenCoS__Contrastive_Semi-supervised_Learning_for_Handling_Open-set_Unlabeled_Data.json +50 -0
- data_without_website/Optimal_Rates_for_Random_Order_Online_Optimization.json +46 -0
- data_without_website/Optimal_transport-based_conformal_prediction.json +46 -0
data_without_website/A_Certified_Unlearning_Approach_without_Access_to_Source_Data.json
ADDED
|
@@ -0,0 +1,50 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
{
|
| 2 |
+
"id": "8lt5776GLB",
|
| 3 |
+
"title": "A Certified Unlearning Approach without Access to Source Data",
|
| 4 |
+
"track": "main",
|
| 5 |
+
"author": "Umit Yigit Basaran;Sk Miraj Ahmed;Amit Roy-Chowdhury;Basak Guler",
|
| 6 |
+
"pdf": "https://openreview.net/pdf?id=8lt5776GLB",
|
| 7 |
+
"keyword": "",
|
| 8 |
+
"abstract": "With the growing adoption of data privacy regulations, the ability to erase private or copyrighted information from trained models has become a crucial requirement. Traditional unlearning methods often assume access to the complete training dataset, which is unrealistic in scenarios where the source data is no longer available. To address this challenge, we propose a certified unlearning framework that enables effective data removal without access to the original training data samples. Our approach utilizes a surrogate dataset that approximates the statistical properties of the source data, allowing for controlled noise scaling based on the statistical distance between the two. While our theoretical guarantees assume knowledge of the exact statistical distance, practical implementations typically approximate this distance, resulting in potentially weaker but still meaningful privacy guarantees. This ensures strong guarantees on the model's behavior post-unlearning while maintaining its overall utility. We establish theoretical bounds, introduce practical noise calibration techniques, and validate our method through extensive experiments on both synthetic and real-world datasets. The results demonstrate the effectiveness and reliability of our approach in privacy-sensitive settings.",
|
| 9 |
+
"conference": {
|
| 10 |
+
"name": "ICML",
|
| 11 |
+
"year": 2025
|
| 12 |
+
},
|
| 13 |
+
"template": null,
|
| 14 |
+
"category": "10. Trustworthy and Ethical AI",
|
| 15 |
+
"is_done": true,
|
| 16 |
+
"timestamp": "2025-08-05T12:03:12.374943",
|
| 17 |
+
"rule_paper_possible_url": null,
|
| 18 |
+
"github_base": null,
|
| 19 |
+
"llm_believed_url": null,
|
| 20 |
+
"rule_base_possible_url": null,
|
| 21 |
+
"confirmed_url": null,
|
| 22 |
+
"Internet_fail": null,
|
| 23 |
+
"html_fail": null,
|
| 24 |
+
"citation_data": {
|
| 25 |
+
"original_title": "A Certified Unlearning Approach without Access to Source Data",
|
| 26 |
+
"matched_title": "A Certified Unlearning Approach without Access to Source Data",
|
| 27 |
+
"citation_count": 0,
|
| 28 |
+
"similarity": 1.0,
|
| 29 |
+
"source": "semantic_scholar",
|
| 30 |
+
"year": 2025,
|
| 31 |
+
"authors": [
|
| 32 |
+
{
|
| 33 |
+
"authorId": "2315741886",
|
| 34 |
+
"name": "U. Basaran"
|
| 35 |
+
},
|
| 36 |
+
{
|
| 37 |
+
"authorId": "2366166608",
|
| 38 |
+
"name": "Sk Miraj Ahmed"
|
| 39 |
+
},
|
| 40 |
+
{
|
| 41 |
+
"authorId": "2331507565",
|
| 42 |
+
"name": "Amit K. Roy-Chowdhury"
|
| 43 |
+
},
|
| 44 |
+
{
|
| 45 |
+
"authorId": "2325667",
|
| 46 |
+
"name": "Basak Guler"
|
| 47 |
+
}
|
| 48 |
+
]
|
| 49 |
+
}
|
| 50 |
+
}
|
data_without_website/A_Provably_Robust_Algorithm_for_Differentially_Private_Clustered_Federated_Learning.json
ADDED
|
@@ -0,0 +1,27 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
{
|
| 2 |
+
"id": "rBAnJed1iY",
|
| 3 |
+
"title": "A Provably Robust Algorithm for Differentially Private Clustered Federated Learning",
|
| 4 |
+
"track": "main",
|
| 5 |
+
"author": "Saber Malekmohammadi;Afaf Taik;Golnoosh Farnadi",
|
| 6 |
+
"pdf": "https://openreview.net/pdf?id=rBAnJed1iY",
|
| 7 |
+
"keyword": "",
|
| 8 |
+
"abstract": "Federated Learning (FL), which is a decentralized machine learning (ML) approach, often incorporates differential privacy (DP) to enhance data privacy guarantees. However, differentially private federated learning (DPFL) introduces performance disparities across clients, particularly affecting minority groups. Some recent works have attempted to address large data heterogeneity in vanilla FL settings through clustering clients, but these methods remain sensitive and prone to errors further exacerbated by the DP noise, making them inappropriate for DPFL settings. We propose an algorithm for differentially private clustered FL, which is robust to the DP noise in the system and identifies clients’ clusters correctly. To this end, we propose to cluster clients based on both their model updates and training loss values. Furthermore, when clustering clients’ model updates, our proposed approach addresses the server’s uncertainties by employing large batch sizes as well as Gaussian Mixture Models (GMM) to reduce the impact of DP and stochastic noise and avoid potential clustering errors. This idea is efficient especially in privacy-sensitive scenarios with more DP noise. We provide theoretical analysis justifying our approach, and evaluate it extensively across diverse data distributions and privacy budgets. Our experimental results show its effectiveness in addressing large data heterogeneity in DPFL systems with a small computational cost.",
|
| 9 |
+
"conference": {
|
| 10 |
+
"name": "ICLR",
|
| 11 |
+
"year": 2025
|
| 12 |
+
},
|
| 13 |
+
"template": null,
|
| 14 |
+
"category": "10. Trustworthy and Ethical AI",
|
| 15 |
+
"is_done": true,
|
| 16 |
+
"timestamp": "2025-08-05T17:08:35.661763",
|
| 17 |
+
"rule_paper_possible_url": null,
|
| 18 |
+
"github_base": null,
|
| 19 |
+
"llm_believed_url": null,
|
| 20 |
+
"rule_base_possible_url": null,
|
| 21 |
+
"confirmed_url": null,
|
| 22 |
+
"Internet_fail": null,
|
| 23 |
+
"html_fail": null,
|
| 24 |
+
"citation_data": {
|
| 25 |
+
"citation_count": 0
|
| 26 |
+
}
|
| 27 |
+
}
|
data_without_website/A_teacher-teacher_framework_for_clinical_language_representation_learning.json
ADDED
|
@@ -0,0 +1,50 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
{
|
| 2 |
+
"id": "zDaD8zv8tG",
|
| 3 |
+
"title": "A teacher-teacher framework for clinical language representation learning",
|
| 4 |
+
"track": "main",
|
| 5 |
+
"author": "Feiqing Huang;Shenghan Zhang;Sara Morini Sweet;Tianxi Cai",
|
| 6 |
+
"pdf": "https://openreview.net/pdf?id=zDaD8zv8tG",
|
| 7 |
+
"keyword": "",
|
| 8 |
+
"abstract": "In recent years, there has been a proliferation of ready-to-use large language models (LLMs) designed for various applications, both general-purpose and domain-specific. Instead of advocating for the development of a new model or continuous pretraining of an existing one, this paper introduces a pragmatic teacher-teacher framework to facilitate mutual learning between two pre-existing models.\nBy leveraging two teacher models possessing complementary knowledge, we introduce a LIghtweight kNowledge alignmEnt (LINE) module aimed at harmonizing their knowledge within a unified representation space. This framework is particularly valuable in clinical settings, where stringent regulations and privacy considerations dictate the handling of detailed clinical notes. Our trained LINE module excels in capturing critical information from clinical notes, leveraging highly de-identified data. Validation and downstream tasks further demonstrate the effectiveness of the proposed framework.",
|
| 9 |
+
"conference": {
|
| 10 |
+
"name": "NIPS",
|
| 11 |
+
"year": 2024
|
| 12 |
+
},
|
| 13 |
+
"template": null,
|
| 14 |
+
"category": "06. Natural Language Understanding and Semantics",
|
| 15 |
+
"is_done": true,
|
| 16 |
+
"timestamp": "2025-08-05T18:24:55.174404",
|
| 17 |
+
"rule_paper_possible_url": null,
|
| 18 |
+
"github_base": null,
|
| 19 |
+
"llm_believed_url": null,
|
| 20 |
+
"rule_base_possible_url": null,
|
| 21 |
+
"confirmed_url": null,
|
| 22 |
+
"Internet_fail": null,
|
| 23 |
+
"html_fail": null,
|
| 24 |
+
"citation_data": {
|
| 25 |
+
"original_title": "A teacher-teacher framework for clinical language representation learning",
|
| 26 |
+
"matched_title": "A teacher-teacher framework for clinical language representation learning",
|
| 27 |
+
"citation_count": 0,
|
| 28 |
+
"similarity": 1.0,
|
| 29 |
+
"source": "semantic_scholar",
|
| 30 |
+
"year": 2024,
|
| 31 |
+
"authors": [
|
| 32 |
+
{
|
| 33 |
+
"authorId": "2293666754",
|
| 34 |
+
"name": "Feiqing Huang"
|
| 35 |
+
},
|
| 36 |
+
{
|
| 37 |
+
"authorId": "2345708717",
|
| 38 |
+
"name": "Shenghan Zhang"
|
| 39 |
+
},
|
| 40 |
+
{
|
| 41 |
+
"authorId": "2249640781",
|
| 42 |
+
"name": "Sara Morini Sweet"
|
| 43 |
+
},
|
| 44 |
+
{
|
| 45 |
+
"authorId": "2305801877",
|
| 46 |
+
"name": "Tianxi Cai"
|
| 47 |
+
}
|
| 48 |
+
]
|
| 49 |
+
}
|
| 50 |
+
}
|
data_without_website/AdMiT__Adaptive_Multi-Source_Tuning_in_Dynamic_Environments.json
ADDED
|
@@ -0,0 +1,66 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
{
|
| 2 |
+
"id": "33745",
|
| 3 |
+
"title": "AdMiT: Adaptive Multi-Source Tuning in Dynamic Environments",
|
| 4 |
+
"track": "main",
|
| 5 |
+
"author": "Xiangyu Chang; Fahim Faisal Niloy; Sk Miraj Ahmed; Srikanth V. Krishnamurthy; Basak Guler; Ananthram Swami; Samet Oymak; Amit Roy-Chowdhury",
|
| 6 |
+
"pdf": "https://openaccess.thecvf.com/content/CVPR2025/papers/Chang_AdMiT_Adaptive_Multi-Source_Tuning_in_Dynamic_Environments_CVPR_2025_paper.pdf",
|
| 7 |
+
"keyword": "",
|
| 8 |
+
"abstract": "Incorporating transformer models into edge devices poses a significant challenge due to the computational demands of adapting these large models across diverse applications. Parameter-efficient tuning (PET) methods (e.g. LoRA, Adapter, Visual Prompt Tuning, etc.) allow for targeted adaptation by modifying only small parts of the transformer model. However, adapting to dynamic unlabeled target distributions at the test time remains complex. To address this, we introduce AdMiT: Adaptive Multi -Source Tuning in Dynamic Environments. AdMiT innovates by pre-training a set of PET modules, each optimized for different source distributions or tasks, and dynamically selecting and integrating a sparse subset of relevant modules when encountering a new, few-shot, unlabeled target distribution. This integration leverages Kernel Mean Embedding (KME)-based matching to align the target distribution with relevant source knowledge efficiently, without requiring additional routing networks or hyperparameter tuning. AdMiT achieves adaptation with a single inference step, making it particularly suitable for resource-constrained edge deployments. Furthermore, AdMiT preserves privacy by performing an adaptation locally on each edge device, without the need for data exchange. Our theoretical analysis establishes guarantees for AdMiT's generalization, while extensive benchmarks demonstrate that AdMiT consistently outperforms other PET methods across a range of tasks, achieving robust and efficient adaptation.",
|
| 9 |
+
"conference": {
|
| 10 |
+
"name": "CVPR",
|
| 11 |
+
"year": 2025
|
| 12 |
+
},
|
| 13 |
+
"template": null,
|
| 14 |
+
"category": "01. Deep Learning Architectures and Methods",
|
| 15 |
+
"is_done": true,
|
| 16 |
+
"timestamp": "2025-08-04T02:43:12.198897",
|
| 17 |
+
"rule_paper_possible_url": null,
|
| 18 |
+
"github_base": null,
|
| 19 |
+
"llm_believed_url": null,
|
| 20 |
+
"rule_base_possible_url": null,
|
| 21 |
+
"confirmed_url": null,
|
| 22 |
+
"Internet_fail": null,
|
| 23 |
+
"html_fail": null,
|
| 24 |
+
"citation_data": {
|
| 25 |
+
"original_title": "AdMiT: Adaptive Multi-Source Tuning in Dynamic Environments",
|
| 26 |
+
"matched_title": "AdMiT: Adaptive Multi-Source Tuning in Dynamic Environments",
|
| 27 |
+
"citation_count": 0,
|
| 28 |
+
"similarity": 1.0,
|
| 29 |
+
"source": "semantic_scholar",
|
| 30 |
+
"year": 2025,
|
| 31 |
+
"authors": [
|
| 32 |
+
{
|
| 33 |
+
"authorId": "2218798948",
|
| 34 |
+
"name": "Xiangyu Chang"
|
| 35 |
+
},
|
| 36 |
+
{
|
| 37 |
+
"authorId": "2088927944",
|
| 38 |
+
"name": "Fahim Faisal Niloy"
|
| 39 |
+
},
|
| 40 |
+
{
|
| 41 |
+
"authorId": "2353192334",
|
| 42 |
+
"name": "Sk Miraj"
|
| 43 |
+
},
|
| 44 |
+
{
|
| 45 |
+
"authorId": "2334032435",
|
| 46 |
+
"name": "Srikanth V. Krishnamurthy"
|
| 47 |
+
},
|
| 48 |
+
{
|
| 49 |
+
"authorId": "2325667",
|
| 50 |
+
"name": "Basak Guler"
|
| 51 |
+
},
|
| 52 |
+
{
|
| 53 |
+
"authorId": "144231976",
|
| 54 |
+
"name": "A. Swami"
|
| 55 |
+
},
|
| 56 |
+
{
|
| 57 |
+
"authorId": "3103394",
|
| 58 |
+
"name": "Samet Oymak"
|
| 59 |
+
},
|
| 60 |
+
{
|
| 61 |
+
"authorId": "2331507565",
|
| 62 |
+
"name": "Amit K. Roy-Chowdhury"
|
| 63 |
+
}
|
| 64 |
+
]
|
| 65 |
+
}
|
| 66 |
+
}
|
data_without_website/AgentClinic__a_multimodal_agent_benchmark_to_evaluate_AI_in_simulated_clinical_environments.json
ADDED
|
@@ -0,0 +1,58 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
{
|
| 2 |
+
"id": "ak7r4He1qH",
|
| 3 |
+
"title": "AgentClinic: a multimodal agent benchmark to evaluate AI in simulated clinical environments",
|
| 4 |
+
"track": "main",
|
| 5 |
+
"author": "Samuel Schmidgall;Rojin Ziaei;Carl William Harris;Ji Woong Kim;Eduardo Pontes Reis;Jeffrey K Jopling;Michael Moor",
|
| 6 |
+
"pdf": "https://openreview.net/pdf?id=ak7r4He1qH",
|
| 7 |
+
"keyword": "",
|
| 8 |
+
"abstract": "Evaluating large language models~(LLM) in clinical scenarios is crucial to assessing their potential clinical utility. Existing benchmarks rely heavily on static question-answering, which does not accurately depict the complex, sequential nature of clinical decision-making. Here, we introduce AgentClinic, a multimodal agent benchmark for evaluating LLMs in simulated clinical environments that include patient interactions, multimodal data collection under incomplete information, and the usage of various tools, resulting in an in-depth evaluation across nine medical specialties and seven languages.\nWe find that solving MedQA problems in the sequential decision-making format of AgentClinic is considerably more challenging, resulting in diagnostic accuracies that can drop to below a tenth of the original accuracy. Overall, we observe that agents sourced from Claude-3.5 outperform other LLM backbones in most settings. Nevertheless, we see stark differences in the LLMs’ ability to make use of tools, such as experiential learning, adaptive retrieval, and reflection cycles. Strikingly, Llama-3 shows up to 92\\% relative improvements with the notebook tool that allows for writing and editing notes that persist across cases. To further scrutinize our clinical simulations, we leverage real-world electronic health records, perform a clinical reader study, perturb agents with biases, and explore novel patient-centric metrics that this interactive environment firstly enables.",
|
| 9 |
+
"conference": {
|
| 10 |
+
"name": "ICLR",
|
| 11 |
+
"year": 2025
|
| 12 |
+
},
|
| 13 |
+
"template": null,
|
| 14 |
+
"category": "09. Multimodal Learning",
|
| 15 |
+
"is_done": true,
|
| 16 |
+
"timestamp": "2025-08-05T16:45:38.610514",
|
| 17 |
+
"rule_paper_possible_url": null,
|
| 18 |
+
"github_base": null,
|
| 19 |
+
"llm_believed_url": null,
|
| 20 |
+
"rule_base_possible_url": null,
|
| 21 |
+
"confirmed_url": null,
|
| 22 |
+
"Internet_fail": null,
|
| 23 |
+
"html_fail": null,
|
| 24 |
+
"citation_data": {
|
| 25 |
+
"original_title": "AgentClinic: a multimodal agent benchmark to evaluate AI in simulated clinical environments",
|
| 26 |
+
"matched_title": "AgentClinic: a multimodal agent benchmark to evaluate AI in simulated clinical environments",
|
| 27 |
+
"citation_count": 65,
|
| 28 |
+
"similarity": 1.0,
|
| 29 |
+
"source": "semantic_scholar",
|
| 30 |
+
"year": 2024,
|
| 31 |
+
"authors": [
|
| 32 |
+
{
|
| 33 |
+
"authorId": "1741847434",
|
| 34 |
+
"name": "Samuel Schmidgall"
|
| 35 |
+
},
|
| 36 |
+
{
|
| 37 |
+
"authorId": "2212401663",
|
| 38 |
+
"name": "Rojin Ziaei"
|
| 39 |
+
},
|
| 40 |
+
{
|
| 41 |
+
"authorId": "2283934409",
|
| 42 |
+
"name": "Carl Harris"
|
| 43 |
+
},
|
| 44 |
+
{
|
| 45 |
+
"authorId": "2366017883",
|
| 46 |
+
"name": "Eduardo Reis"
|
| 47 |
+
},
|
| 48 |
+
{
|
| 49 |
+
"authorId": "2301156454",
|
| 50 |
+
"name": "Jeffrey Jopling"
|
| 51 |
+
},
|
| 52 |
+
{
|
| 53 |
+
"authorId": "2301157095",
|
| 54 |
+
"name": "Michael Moor"
|
| 55 |
+
}
|
| 56 |
+
]
|
| 57 |
+
}
|
| 58 |
+
}
|
data_without_website/Align_Voting_Behavior_with_Public_Statements_for_Legislator_Representation_Learning.json
ADDED
|
@@ -0,0 +1,63 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
{
|
| 2 |
+
"id": "2021.acl-long.99",
|
| 3 |
+
"title": "Align Voting Behavior with Public Statements for Legislator Representation Learning",
|
| 4 |
+
"track": "main",
|
| 5 |
+
"author": "Xinyi Mou; Zhongyu Wei; Lei Chen; Shangyi Ning; Yancheng He; Changjian Jiang; Xuanjing Huang",
|
| 6 |
+
"pdf": "https://aclanthology.org/2021.acl-long.99.pdf",
|
| 7 |
+
"keyword": "",
|
| 8 |
+
"abstract": "Ideology of legislators is typically estimated by ideal point models from historical records of votes. It represents legislators and legislation as points in a latent space and shows promising results for modeling voting behavior. However, it fails to capture more specific attitudes of legislators toward emerging issues and is unable to model newly-elected legislators without voting histories. In order to mitigate these two problems, we explore to incorporate both voting behavior and public statements on Twitter to jointly model legislators. In addition, we propose a novel task, namely hashtag usage prediction to model the ideology of legislators on Twitter. In practice, we construct a heterogeneous graph for the legislative context and use relational graph neural networks to learn the representation of legislators with the guidance of historical records of their voting and hashtag usage. Experiment results indicate that our model yields significant improvements for the task of roll call vote prediction. Further analysis further demonstrates that legislator representation we learned captures nuances in statements.",
|
| 9 |
+
"conference": {
|
| 10 |
+
"name": "ACL",
|
| 11 |
+
"year": 2021
|
| 12 |
+
},
|
| 13 |
+
"Internet_problem": "https://github.com/xymou/Align-Voting-Behavior-with-",
|
| 14 |
+
"github_base": "https://github.com/sloria/TextBlob",
|
| 15 |
+
"github_base1": "https://github.com/xymou",
|
| 16 |
+
"template": null,
|
| 17 |
+
"category": "09. Multimodal Learning",
|
| 18 |
+
"is_done": true,
|
| 19 |
+
"timestamp": "2025-08-07T03:37:48.661292",
|
| 20 |
+
"log": {
|
| 21 |
+
"timestamp": "2025-08-07T03:37:48.661292",
|
| 22 |
+
"stage": "special situation",
|
| 23 |
+
"note": "论文没有项目主页但找到了GitHub相关信息"
|
| 24 |
+
},
|
| 25 |
+
"citation_data": {
|
| 26 |
+
"original_title": "Align Voting Behavior with Public Statements for Legislator Representation Learning",
|
| 27 |
+
"matched_title": "Align Voting Behavior with Public Statements for Legislator Representation Learning",
|
| 28 |
+
"citation_count": 20,
|
| 29 |
+
"similarity": 1.0,
|
| 30 |
+
"source": "semantic_scholar",
|
| 31 |
+
"year": 2021,
|
| 32 |
+
"authors": [
|
| 33 |
+
{
|
| 34 |
+
"authorId": "2134027736",
|
| 35 |
+
"name": "Xinyi Mou"
|
| 36 |
+
},
|
| 37 |
+
{
|
| 38 |
+
"authorId": "2118602528",
|
| 39 |
+
"name": "Zhongyu Wei"
|
| 40 |
+
},
|
| 41 |
+
{
|
| 42 |
+
"authorId": "2146073369",
|
| 43 |
+
"name": "Lei Chen"
|
| 44 |
+
},
|
| 45 |
+
{
|
| 46 |
+
"authorId": "3504801",
|
| 47 |
+
"name": "Shan-shan Ning"
|
| 48 |
+
},
|
| 49 |
+
{
|
| 50 |
+
"authorId": "49990515",
|
| 51 |
+
"name": "Yancheng He"
|
| 52 |
+
},
|
| 53 |
+
{
|
| 54 |
+
"authorId": "2115484286",
|
| 55 |
+
"name": "Changjiang Jiang"
|
| 56 |
+
},
|
| 57 |
+
{
|
| 58 |
+
"authorId": "1790227",
|
| 59 |
+
"name": "Xuanjing Huang"
|
| 60 |
+
}
|
| 61 |
+
]
|
| 62 |
+
}
|
| 63 |
+
}
|
data_without_website/Approximate_Envy-Freeness_in_Graphical_Cake_Cutting.json
ADDED
|
@@ -0,0 +1,42 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
{
|
| 2 |
+
"id": "paper326",
|
| 3 |
+
"title": "Approximate Envy-Freeness in Graphical Cake Cutting",
|
| 4 |
+
"track": "main",
|
| 5 |
+
"author": "Sheung Man Yuen; Warut Suksompong",
|
| 6 |
+
"pdf": "https://www.ijcai.org/proceedings/2023/0326.pdf",
|
| 7 |
+
"keyword": "",
|
| 8 |
+
"abstract": "We study the problem of fairly allocating a divisible resource in the form of a graph, also known as graphical cake cutting. Unlike for the canonical interval cake, a connected envy-free allocation is not guaranteed to exist for a graphical cake. We focus on the existence and computation of connected allocations with low envy. For general graphs, we show that there is always a 1/2-additive-envy-free allocation and, if the agents' valuations are identical, a (2+\\epsilon)-multiplicative-envy-free allocation for any \\epsilon > 0. In the case of star graphs, we obtain a multiplicative factor of 3+\\epsilon for arbitrary valuations and 2 for identical valuations. We also derive guarantees when each agent can receive more than one connected piece. All of our results come with efficient algorithms for computing the respective allocations.",
|
| 9 |
+
"conference": {
|
| 10 |
+
"name": "IJCAI",
|
| 11 |
+
"year": 2023
|
| 12 |
+
},
|
| 13 |
+
"template": null,
|
| 14 |
+
"category": "03. ML Theory and Optimization",
|
| 15 |
+
"is_done": true,
|
| 16 |
+
"timestamp": "2025-08-05T15:43:47.535032",
|
| 17 |
+
"rule_paper_possible_url": null,
|
| 18 |
+
"github_base": null,
|
| 19 |
+
"llm_believed_url": null,
|
| 20 |
+
"rule_base_possible_url": null,
|
| 21 |
+
"confirmed_url": null,
|
| 22 |
+
"Internet_fail": null,
|
| 23 |
+
"html_fail": null,
|
| 24 |
+
"citation_data": {
|
| 25 |
+
"original_title": "Approximate Envy-Freeness in Graphical Cake Cutting",
|
| 26 |
+
"matched_title": "Approximate Envy-Freeness in Graphical Cake Cutting",
|
| 27 |
+
"citation_count": 3,
|
| 28 |
+
"similarity": 1.0,
|
| 29 |
+
"source": "semantic_scholar",
|
| 30 |
+
"year": 2023,
|
| 31 |
+
"authors": [
|
| 32 |
+
{
|
| 33 |
+
"authorId": "145778453",
|
| 34 |
+
"name": "S. Yuen"
|
| 35 |
+
},
|
| 36 |
+
{
|
| 37 |
+
"authorId": "2294597",
|
| 38 |
+
"name": "Warut Suksompong"
|
| 39 |
+
}
|
| 40 |
+
]
|
| 41 |
+
}
|
| 42 |
+
}
|
data_without_website/BALSA__Benchmarking_Active_Learning_Strategies_for_Autonomous_laboratories.json
ADDED
|
@@ -0,0 +1,28 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
{
|
| 2 |
+
"id": "PHkUNcno9n",
|
| 3 |
+
"title": "BALSA: Benchmarking Active Learning Strategies for Autonomous laboratories",
|
| 4 |
+
"track": "main",
|
| 5 |
+
"author": "Po-Yen Tung;Yangtao Chen;Peng Bo;Hao Zhang;Wenjie Du;Stefan Bauer;Ye Wei",
|
| 6 |
+
"pdf": "https://openreview.net/pdf?id=PHkUNcno9n",
|
| 7 |
+
"keyword": "",
|
| 8 |
+
"abstract": "Accelerating scientific discoveries holds significant potential to address some of the most pressing challenges facing society, from mitigating climate change to combating public health crises, such as the growing antibiotics resistance. The vast and complex nature of design parameter spaces makes identifying promising candidates both time-consuming and resource-intensive, rendering conventional exhaustive searches impractical. However, recent advancements in data-driven methods, particularly within the framework of \"active learning,\" have led to more efficient strategies for scientific discovery. By iteratively identifying and labeling the most informative data points, these methods function in a closed loop, guiding experiments or simulations to accelerate the identification of optimal candidates while reducing the demand for data labeling. Despite these advancements, the lack of standardized benchmarks in this emerging field of autonomous scientific discovery impedes progress and limits its potential translational impact. To address this, we introduce BALSA: a comprehensive benchmark specifically designed for evaluating various search algorithms applied in autonomous laboratories within the active learning framework. BALSA offers a standardized evaluation protocol, provides a metric to characterize high-dimensional objective functions, and includes reference implementations of recent methodologies, with a focus on minimizing the data required to reach optimal results. It provides not only a suite of synthetic functions or controlled simulators but also real-world active learning tasks in biology and materials science — each presenting unique challenges for autonomous laboratory tasks.",
|
| 9 |
+
"conference": {
|
| 10 |
+
"name": "ICLR",
|
| 11 |
+
"year": 2025
|
| 12 |
+
},
|
| 13 |
+
"Internet_problem": "https://github.com/anonymized",
|
| 14 |
+
"template": null,
|
| 15 |
+
"category": "11. AI for Science",
|
| 16 |
+
"is_done": true,
|
| 17 |
+
"timestamp": "2025-08-05T04:02:15.766664",
|
| 18 |
+
"rule_paper_possible_url": null,
|
| 19 |
+
"github_base": null,
|
| 20 |
+
"llm_believed_url": null,
|
| 21 |
+
"rule_base_possible_url": null,
|
| 22 |
+
"confirmed_url": null,
|
| 23 |
+
"Internet_fail": null,
|
| 24 |
+
"html_fail": null,
|
| 25 |
+
"citation_data": {
|
| 26 |
+
"citation_count": 0
|
| 27 |
+
}
|
| 28 |
+
}
|
data_without_website/Bi-Directional_Distribution_Alignment_for_Transductive_Zero-Shot_Learning.json
ADDED
|
@@ -0,0 +1,26 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
{
|
| 2 |
+
"id": "22461",
|
| 3 |
+
"title": "Bi-Directional Distribution Alignment for Transductive Zero-Shot Learning",
|
| 4 |
+
"track": "main",
|
| 5 |
+
"author": "Zhicai Wang; Yanbin Hao; Tingting Mu; Ouxiang Li; Shuo Wang; Xiangnan He",
|
| 6 |
+
"pdf": "https://openaccess.thecvf.com/content/CVPR2023/papers/Wang_Bi-Directional_Distribution_Alignment_for_Transductive_Zero-Shot_Learning_CVPR_2023_paper.pdf",
|
| 7 |
+
"keyword": "",
|
| 8 |
+
"abstract": "It is well-known that zero-shot learning (ZSL) can suffer severely from the problem of domain shift, where the true and learned data distributions for the unseen classes do not match. Although transductive ZSL (TZSL) attempts to improve this by allowing the use of unlabelled examples from the unseen classes, there is still a high level of distribution shift. We propose a novel TZSL model (named as Bi-VAEGAN), which largely improves the shift by a strengthened distribution alignment between the visual and auxiliary spaces. The key proposal of the model design includes (1) a bi-directional distribution alignment, (2) a simple but effective L_2-norm based feature normalization approach, and (3) a more sophisticated unseen class prior estimation approach. In benchmark evaluation using four datasets, Bi-VAEGAN achieves the new state of the arts under both the standard and generalized TZSL settings. Code could be found at https://github.com/Zhicaiwww/Bi-VAEGAN.",
|
| 9 |
+
"conference": {
|
| 10 |
+
"name": "CVPR",
|
| 11 |
+
"year": 2023
|
| 12 |
+
},
|
| 13 |
+
"github_base": "https://github.com/Zhicaiwww/Bi-VAEGAN",
|
| 14 |
+
"template": null,
|
| 15 |
+
"category": "04. Probabilistic Methods and Causal Inference",
|
| 16 |
+
"is_done": true,
|
| 17 |
+
"timestamp": "2025-08-07T08:57:40.259749",
|
| 18 |
+
"log": {
|
| 19 |
+
"timestamp": "2025-08-07T08:57:40.259749",
|
| 20 |
+
"stage": "special situation",
|
| 21 |
+
"note": "论文没有项目主页但找到了GitHub相关信息"
|
| 22 |
+
},
|
| 23 |
+
"citation_data": {
|
| 24 |
+
"citation_count": 0
|
| 25 |
+
}
|
| 26 |
+
}
|
data_without_website/CCIL__Continuity-Based_Data_Augmentation_for_Corrective_Imitation_Learning.json
ADDED
|
@@ -0,0 +1,54 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
{
|
| 2 |
+
"id": "LQ6LQ8f4y8",
|
| 3 |
+
"title": "CCIL: Continuity-Based Data Augmentation for Corrective Imitation Learning",
|
| 4 |
+
"track": "main",
|
| 5 |
+
"author": "Liyiming Ke;Yunchu Zhang;Abhay Deshpande;Siddhartha Srinivasa;Abhishek Gupta",
|
| 6 |
+
"pdf": "https://openreview.net/pdf?id=LQ6LQ8f4y8",
|
| 7 |
+
"keyword": "",
|
| 8 |
+
"abstract": "We present a new technique to enhance the robustness of imitation learning methods by generating corrective data to account for compounding error and disturbances. While existing methods rely on interactive expert labeling, additional offline datasets, or domain-specific invariances, our approach requires minimal additional assumptions beyond expert data. The key insight is to leverage local continuity in the environment dynamics. Our method first constructs a dynamics model from the expert demonstration, enforcing local Lipschitz continuity while skipping the discontinuous regions. In the locally continuous regions, this model allows us to generate corrective labels within the neighborhood of the demonstrations but beyond the actual set of states and actions in the dataset. Training on this augmented data enhances the agent's ability to recover from perturbations and deal with compounding error. We demonstrate the effectiveness of our generated labels through experiments in a variety of robotics domains that have distinct forms of continuity and discontinuity, including classic control, drone flying, high-dimensional navigation, locomotion, and tabletop manipulation.",
|
| 9 |
+
"conference": {
|
| 10 |
+
"name": "ICLR",
|
| 11 |
+
"year": 2024
|
| 12 |
+
},
|
| 13 |
+
"template": null,
|
| 14 |
+
"category": "02. Reinforcement Learning and Control",
|
| 15 |
+
"is_done": true,
|
| 16 |
+
"timestamp": "2025-08-04T05:59:40.731368",
|
| 17 |
+
"rule_paper_possible_url": null,
|
| 18 |
+
"github_base": null,
|
| 19 |
+
"llm_believed_url": null,
|
| 20 |
+
"rule_base_possible_url": null,
|
| 21 |
+
"confirmed_url": null,
|
| 22 |
+
"Internet_fail": null,
|
| 23 |
+
"html_fail": null,
|
| 24 |
+
"citation_data": {
|
| 25 |
+
"original_title": "CCIL: Continuity-Based Data Augmentation for Corrective Imitation Learning",
|
| 26 |
+
"matched_title": "CCIL: Continuity-based Data Augmentation for Corrective Imitation Learning",
|
| 27 |
+
"citation_count": 13,
|
| 28 |
+
"similarity": 1.0,
|
| 29 |
+
"source": "semantic_scholar",
|
| 30 |
+
"year": 2023,
|
| 31 |
+
"authors": [
|
| 32 |
+
{
|
| 33 |
+
"authorId": "3383717",
|
| 34 |
+
"name": "Liyiming Ke"
|
| 35 |
+
},
|
| 36 |
+
{
|
| 37 |
+
"authorId": "2260344636",
|
| 38 |
+
"name": "Yunchu Zhang"
|
| 39 |
+
},
|
| 40 |
+
{
|
| 41 |
+
"authorId": "40755038",
|
| 42 |
+
"name": "Abhay Deshpande"
|
| 43 |
+
},
|
| 44 |
+
{
|
| 45 |
+
"authorId": "1752197",
|
| 46 |
+
"name": "S. Srinivasa"
|
| 47 |
+
},
|
| 48 |
+
{
|
| 49 |
+
"authorId": "2260595777",
|
| 50 |
+
"name": "Abhishek Gupta"
|
| 51 |
+
}
|
| 52 |
+
]
|
| 53 |
+
}
|
| 54 |
+
}
|
data_without_website/Can_LLMs_Enhance_Performance_Prediction_for_Deep_Learning_Models_.json
ADDED
|
@@ -0,0 +1,42 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
{
|
| 2 |
+
"id": "Txxz9fBPcJ",
|
| 3 |
+
"title": "Can LLMs Enhance Performance Prediction for Deep Learning Models?",
|
| 4 |
+
"track": "main",
|
| 5 |
+
"author": "Karthick Panner Selvam;Phitchaya Mangpo Phothilimthana;Sami Abu-El-Haija;Bryan Perozzi;Mats Brorsson",
|
| 6 |
+
"pdf": "https://openreview.net/pdf?id=Txxz9fBPcJ",
|
| 7 |
+
"keyword": "",
|
| 8 |
+
"abstract": "Accurate performance prediction of Deep Learning (DL) models is essential for efficient resource allocation and optimizations in various stages of the DL system stack. While existing approaches can achieve high prediction accuracy, they lack ability to quickly adapt to new hardware environments or emerging workloads. \nThis paper leverages both Graph Neural Networks (GNNs) and Large Language Models (LLMs) to enhance the accuracy and adaptability of DL performance prediction. Our intuition is that GNNs are adept at capturing the structural information of DL models, naturally represented as graphs, while LLMs provide generalization and the ability to quickly adapt to various tasks thanks to extensive pre-training data.\nWe empirically demonstrate that using GNN-derived graph embeddings as inputs to an LLM outperforms traditional representations, including high-level text summary and lossless semi-structured text (e.g., JSON), for this task. Furthermore, we propose a structured pre-training strategy to enable model adaptation to new hardware environments, significantly reducing the need for extensive retraining. Our experiments validate the effectiveness of this approach, showing an 8.8 percentage-point improvement in accuracy over a state-of-the-art GNN baseline. Notably, when adapted to new hardware with few samples, our method achieves a remarkable 30--70 percentage-point increase in accuracy compared to the GNN baseline.",
|
| 9 |
+
"conference": {
|
| 10 |
+
"name": "ICLR",
|
| 11 |
+
"year": 2025
|
| 12 |
+
},
|
| 13 |
+
"template": null,
|
| 14 |
+
"category": "09. Multimodal Learning",
|
| 15 |
+
"is_done": true,
|
| 16 |
+
"timestamp": "2025-08-05T17:17:02.516716",
|
| 17 |
+
"rule_paper_possible_url": null,
|
| 18 |
+
"github_base": null,
|
| 19 |
+
"llm_believed_url": null,
|
| 20 |
+
"rule_base_possible_url": null,
|
| 21 |
+
"confirmed_url": null,
|
| 22 |
+
"Internet_fail": null,
|
| 23 |
+
"html_fail": null,
|
| 24 |
+
"citation_data": {
|
| 25 |
+
"original_title": "Can LLMs Enhance Performance Prediction for Deep Learning Models?",
|
| 26 |
+
"matched_title": "Performance Metrics Analysis for Deep Learning Models",
|
| 27 |
+
"citation_count": 0,
|
| 28 |
+
"similarity": 0.7008547008547008,
|
| 29 |
+
"source": "semantic_scholar",
|
| 30 |
+
"year": 2024,
|
| 31 |
+
"authors": [
|
| 32 |
+
{
|
| 33 |
+
"authorId": "2246664411",
|
| 34 |
+
"name": "Simple Sharma"
|
| 35 |
+
},
|
| 36 |
+
{
|
| 37 |
+
"authorId": "2054910479",
|
| 38 |
+
"name": "Supriya P. Panda"
|
| 39 |
+
}
|
| 40 |
+
]
|
| 41 |
+
}
|
| 42 |
+
}
|
data_without_website/Computational_Design_of_Flexible_Planar_Microstructures.json
ADDED
|
@@ -0,0 +1,62 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
{
|
| 2 |
+
"id": "",
|
| 3 |
+
"title": "Computational Design of Flexible Planar Microstructures",
|
| 4 |
+
"track": "main",
|
| 5 |
+
"author": "Zhan Zhang, Christopher Brandt, Jean Jouve, Yue Wang, Tian Chen, Mark Pauly, Julian Panetta",
|
| 6 |
+
"pdf": "https://asia.siggraph.org/2023/?post_type=page&p=14494&id=papers_822&sess=sess161",
|
| 7 |
+
"keyword": "",
|
| 8 |
+
"abstract": "[image] Thank you for being a part of SIGGRAPH Asia 2023. We look forward to seeing you in Tokyo this 3 – 6 December. Find out more 12 – 15 December 2023 ICC Sydney, Australia #SIGGRAPHAsia #SIGGRAPHAsia2023",
|
| 9 |
+
"conference": {
|
| 10 |
+
"name": "SIGGRAPHASIA",
|
| 11 |
+
"year": 2023
|
| 12 |
+
},
|
| 13 |
+
"template": null,
|
| 14 |
+
"category": "05. 3D Vision and Computational Graphics",
|
| 15 |
+
"is_done": true,
|
| 16 |
+
"timestamp": "2025-08-04T06:17:26.897648",
|
| 17 |
+
"rule_paper_possible_url": null,
|
| 18 |
+
"github_base": null,
|
| 19 |
+
"llm_believed_url": null,
|
| 20 |
+
"rule_base_possible_url": null,
|
| 21 |
+
"confirmed_url": null,
|
| 22 |
+
"Internet_fail": null,
|
| 23 |
+
"html_fail": null,
|
| 24 |
+
"citation_data": {
|
| 25 |
+
"original_title": "Computational Design of Flexible Planar Microstructures",
|
| 26 |
+
"matched_title": "Computational Design of Flexible Planar Microstructures",
|
| 27 |
+
"citation_count": 11,
|
| 28 |
+
"similarity": 1.0,
|
| 29 |
+
"source": "semantic_scholar",
|
| 30 |
+
"year": 2023,
|
| 31 |
+
"authors": [
|
| 32 |
+
{
|
| 33 |
+
"authorId": "2270152402",
|
| 34 |
+
"name": "Zhan Zhang"
|
| 35 |
+
},
|
| 36 |
+
{
|
| 37 |
+
"authorId": "2269741569",
|
| 38 |
+
"name": "Christopher Brandt"
|
| 39 |
+
},
|
| 40 |
+
{
|
| 41 |
+
"authorId": "2310344322",
|
| 42 |
+
"name": "Jean Jouve"
|
| 43 |
+
},
|
| 44 |
+
{
|
| 45 |
+
"authorId": "2244614278",
|
| 46 |
+
"name": "Yue Wang"
|
| 47 |
+
},
|
| 48 |
+
{
|
| 49 |
+
"authorId": "2244648132",
|
| 50 |
+
"name": "Tian Chen"
|
| 51 |
+
},
|
| 52 |
+
{
|
| 53 |
+
"authorId": "2269740558",
|
| 54 |
+
"name": "Mark Pauly"
|
| 55 |
+
},
|
| 56 |
+
{
|
| 57 |
+
"authorId": "2071297763",
|
| 58 |
+
"name": "Julian Panetta"
|
| 59 |
+
}
|
| 60 |
+
]
|
| 61 |
+
}
|
| 62 |
+
}
|
data_without_website/Consistent_Plug-in_Classifiers_for_Complex_Objectives_and_Constraints.json
ADDED
|
@@ -0,0 +1,46 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
{
|
| 2 |
+
"id": "18816",
|
| 3 |
+
"title": "Consistent Plug-in Classifiers for Complex Objectives and Constraints",
|
| 4 |
+
"track": "main",
|
| 5 |
+
"author": "Shiv Kumar Tavker; Harish Guruprasad Ramaswamy; Harikrishna Narasimhan",
|
| 6 |
+
"pdf": "https://papers.nips.cc/paper_files/paper/2020/file/eab1bceaa6c5823d7ed86cfc7a8bd824-Paper.pdf",
|
| 7 |
+
"keyword": "",
|
| 8 |
+
"abstract": "We present a statistically consistent algorithm for constrained classification problems where the objective (e.g. F-measure, G-mean) and the constraints (e.g. demographic parity, coverage) are defined by general functions of the confusion matrix. The key idea is to reduce the problem into a sequence of plug-in classifier learning problems, which is done by formulating an optimization problem over the intersection of the set of achievable confusion matrices and the set of feasible matrices. For objective and constraints that are convex functions of the confusion matrix, our algorithm requires $O(1/\\epsilon^2)$ calls to the plug-in routine, which improves on the $O(1/\\epsilon^3)$ rate achieved by Narasimhan (2018). We demonstrate empirically that our algorithm performs at least as well as the state-of-the-art methods for these problems.",
|
| 9 |
+
"conference": {
|
| 10 |
+
"name": "NIPS",
|
| 11 |
+
"year": 2020
|
| 12 |
+
},
|
| 13 |
+
"template": null,
|
| 14 |
+
"category": "03. ML Theory and Optimization",
|
| 15 |
+
"is_done": true,
|
| 16 |
+
"timestamp": "2025-08-05T14:12:24.015749",
|
| 17 |
+
"rule_paper_possible_url": null,
|
| 18 |
+
"github_base": null,
|
| 19 |
+
"llm_believed_url": null,
|
| 20 |
+
"rule_base_possible_url": null,
|
| 21 |
+
"confirmed_url": null,
|
| 22 |
+
"Internet_fail": null,
|
| 23 |
+
"html_fail": null,
|
| 24 |
+
"citation_data": {
|
| 25 |
+
"original_title": "Consistent Plug-in Classifiers for Complex Objectives and Constraints",
|
| 26 |
+
"matched_title": "Consistent Plug-in Classifiers for Complex Objectives and Constraints",
|
| 27 |
+
"citation_count": 7,
|
| 28 |
+
"similarity": 1.0,
|
| 29 |
+
"source": "semantic_scholar",
|
| 30 |
+
"year": 2020,
|
| 31 |
+
"authors": [
|
| 32 |
+
{
|
| 33 |
+
"authorId": "2031410386",
|
| 34 |
+
"name": "Shiv Kumar Tavker"
|
| 35 |
+
},
|
| 36 |
+
{
|
| 37 |
+
"authorId": "2508897",
|
| 38 |
+
"name": "H. G. Ramaswamy"
|
| 39 |
+
},
|
| 40 |
+
{
|
| 41 |
+
"authorId": "1727387",
|
| 42 |
+
"name": "H. Narasimhan"
|
| 43 |
+
}
|
| 44 |
+
]
|
| 45 |
+
}
|
| 46 |
+
}
|
data_without_website/Contextual_Vision_Transformers_for_Robust_Representation_Learning.json
ADDED
|
@@ -0,0 +1,42 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
{
|
| 2 |
+
"id": "Pzir15nPfc",
|
| 3 |
+
"title": "Contextual Vision Transformers for Robust Representation Learning",
|
| 4 |
+
"track": "main",
|
| 5 |
+
"author": "Yujia Bao;Theofanis Karaletsos",
|
| 6 |
+
"pdf": "https://openreview.net/pdf?id=Pzir15nPfc",
|
| 7 |
+
"keyword": "",
|
| 8 |
+
"abstract": "We introduce Contextual Vision Transformers (ContextViT), a method designed to generate robust image representations for datasets experiencing shifts in latent factors across various groups. Derived from the concept of in-context learning, ContextViT incorporates an additional context token to encapsulate group-specific information. This integration allows the model to adjust the image representation in accordance with the group-specific context. Specifically, for a given input image, ContextViT maps images with identical group membership into this context token, which is appended to the input image tokens. Additionally, we introduce a context inference network to predict such tokens on-the-fly, given a batch of samples from the group. This enables ContextViT to adapt to new testing distributions during inference time. We demonstrate the efficacy of ContextViT across a wide range of applications. In supervised fine-tuning, we show that augmenting pre-trained ViTs with our proposed context conditioning mechanism results in consistent improvements in out-of-distribution generalization on iWildCam and FMoW. We also investigate self-supervised representation learning with ContextViT. Our experiments on the Camelyon17 pathology imaging benchmark and the JUMP-CP microscopy imaging benchmark demonstrate that ContextViT excels in learning stable image featurizations amidst distribution shift, consistently outperforming its ViT counterpart.",
|
| 9 |
+
"conference": {
|
| 10 |
+
"name": "ICLR",
|
| 11 |
+
"year": 2024
|
| 12 |
+
},
|
| 13 |
+
"template": null,
|
| 14 |
+
"category": "01. Deep Learning Architectures and Methods",
|
| 15 |
+
"is_done": true,
|
| 16 |
+
"timestamp": "2025-08-04T04:42:03.955369",
|
| 17 |
+
"rule_paper_possible_url": null,
|
| 18 |
+
"github_base": null,
|
| 19 |
+
"llm_believed_url": null,
|
| 20 |
+
"rule_base_possible_url": null,
|
| 21 |
+
"confirmed_url": null,
|
| 22 |
+
"Internet_fail": null,
|
| 23 |
+
"html_fail": null,
|
| 24 |
+
"citation_data": {
|
| 25 |
+
"original_title": "Contextual Vision Transformers for Robust Representation Learning",
|
| 26 |
+
"matched_title": "Contextual Vision Transformers for Robust Representation Learning",
|
| 27 |
+
"citation_count": 14,
|
| 28 |
+
"similarity": 1.0,
|
| 29 |
+
"source": "semantic_scholar",
|
| 30 |
+
"year": 2023,
|
| 31 |
+
"authors": [
|
| 32 |
+
{
|
| 33 |
+
"authorId": "145854784",
|
| 34 |
+
"name": "Yu Bao"
|
| 35 |
+
},
|
| 36 |
+
{
|
| 37 |
+
"authorId": "3302673",
|
| 38 |
+
"name": "Theofanis Karaletsos"
|
| 39 |
+
}
|
| 40 |
+
]
|
| 41 |
+
}
|
| 42 |
+
}
|
data_without_website/Cross-Modal_Alignment_via_Variational_Copula_Modelling.json
ADDED
|
@@ -0,0 +1,54 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
{
|
| 2 |
+
"id": "PnQJ24n1qq",
|
| 3 |
+
"title": "Cross-Modal Alignment via Variational Copula Modelling",
|
| 4 |
+
"track": "main",
|
| 5 |
+
"author": "Feng Wu;Tsai Hor Chan;Fuying Wang;Guosheng Yin;Lequan Yu",
|
| 6 |
+
"pdf": "https://openreview.net/pdf?id=PnQJ24n1qq",
|
| 7 |
+
"keyword": "",
|
| 8 |
+
"abstract": "Various data modalities are common in real-world applications. In healthcare, for example, electronic health records, medical images, and clinical notes provide comprehensive information for diagnosis and treatment.\n Thus, it is essential to develop multimodal learning methods that aggregate information from multiple modalities to generate meaningful representations for downstream tasks.\n The key challenge here is how to appropriately align the representations of the respective modalities and fuse them into a joint distribution.\n Existing methods mainly focus on fusing the representations via concatenation or the Kronecker product, which oversimplifies the interaction structure between modalities, prompting the need to model more complex interactions.\n Moreover, the notion of joint distribution of the latent representation that incorporates higher-order interactions between modalities is also underexplored.\n Copula is a powerful statistical structure in modelling the interactions between variables, as it bridges the joint distribution and marginal distributions of multiple variables.\n In this paper, we propose a novel copula modelling-driven multimodal learning framework, which focuses on learning the joint distribution of various modalities to capture the complex interaction among them.\n The key idea is interpreting the copula model as a tool to align the marginal distributions of the modalities efficiently. \n By assuming a Gaussian mixture distribution for each modality and a copula model on the joint distribution, our model can also generate accurate representations for missing modalities.\n Extensive experiments on public MIMIC datasets demonstrate the superior performance of our model over other competitors.\n Ablation studies also validate the effectiveness of the copula alignment strategy and the robustness of our model over different choices of the copula family. 
\n Code is anonymously available at https://anonymous.4open.science/r/CM2-C1FD/README.md.",
|
| 9 |
+
"conference": {
|
| 10 |
+
"name": "ICLR",
|
| 11 |
+
"year": 2025
|
| 12 |
+
},
|
| 13 |
+
"template": null,
|
| 14 |
+
"category": "09. Multimodal Learning",
|
| 15 |
+
"is_done": true,
|
| 16 |
+
"timestamp": "2025-08-05T03:47:55.840755",
|
| 17 |
+
"rule_paper_possible_url": null,
|
| 18 |
+
"github_base": null,
|
| 19 |
+
"llm_believed_url": null,
|
| 20 |
+
"rule_base_possible_url": null,
|
| 21 |
+
"confirmed_url": null,
|
| 22 |
+
"Internet_fail": null,
|
| 23 |
+
"html_fail": null,
|
| 24 |
+
"citation_data": {
|
| 25 |
+
"original_title": "Cross-Modal Alignment via Variational Copula Modelling",
|
| 26 |
+
"matched_title": "Brain-Machine Cross-Modal Alignment via Sample Relational Learning for Visual Classification",
|
| 27 |
+
"citation_count": 0,
|
| 28 |
+
"similarity": 0.5594405594405595,
|
| 29 |
+
"source": "semantic_scholar",
|
| 30 |
+
"year": 2025,
|
| 31 |
+
"authors": [
|
| 32 |
+
{
|
| 33 |
+
"authorId": "2238551546",
|
| 34 |
+
"name": "Dongjun Liu"
|
| 35 |
+
},
|
| 36 |
+
{
|
| 37 |
+
"authorId": "2274931426",
|
| 38 |
+
"name": "Weichen Dai"
|
| 39 |
+
},
|
| 40 |
+
{
|
| 41 |
+
"authorId": "2189788900",
|
| 42 |
+
"name": "Honggang Liu"
|
| 43 |
+
},
|
| 44 |
+
{
|
| 45 |
+
"authorId": "2238446276",
|
| 46 |
+
"name": "Hangjie Yi"
|
| 47 |
+
},
|
| 48 |
+
{
|
| 49 |
+
"authorId": "2281101705",
|
| 50 |
+
"name": "Wangzeng Kong"
|
| 51 |
+
}
|
| 52 |
+
]
|
| 53 |
+
}
|
| 54 |
+
}
|
data_without_website/D-MiSo__Editing_Dynamic_3D_Scenes_using_Multi-Gaussians_Soup_1.json
ADDED
|
@@ -0,0 +1,54 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
{
|
| 2 |
+
"id": "3og0FT85B2",
|
| 3 |
+
"title": "D-MiSo: Editing Dynamic 3D Scenes using Multi-Gaussians Soup",
|
| 4 |
+
"track": "main",
|
| 5 |
+
"author": "Joanna Waczynska;Piotr Borycki;Joanna Kaleta;Slawomir Tadeja;Przemysław Spurek",
|
| 6 |
+
"pdf": "https://openreview.net/pdf?id=3og0FT85B2",
|
| 7 |
+
"keyword": "",
|
| 8 |
+
"abstract": "Over the past years, we have observed an abundance of approaches for modeling dynamic 3D scenes using Gaussian Splatting (GS). These solutions use GS to represent the scene's structure and the neural network to model dynamics. Such approaches allow fast rendering and extracting each element of such a dynamic scene. However, modifying such objects over time is challenging. SC-GS (Sparse Controlled Gaussian Splatting) enhanced with Deformed Control Points partially solves this issue. However, this approach necessitates selecting elements that need to be kept fixed, as well as centroids that should be adjusted throughout editing. Moreover, this task poses additional difficulties regarding the re-productivity of such editing. To address this, we propose Dynamic Multi-Gaussian Soup (D-MiSo), which allows us to model the mesh-inspired representation of dynamic GS. Additionally, we propose a strategy of linking parameterized Gaussian splats, forming a Triangle Soup with the estimated mesh. Consequently, we can separately construct new trajectories for the 3D objects composing the scene. Thus, we can make the scene's dynamic editable over time or while maintaining partial dynamics.",
|
| 9 |
+
"conference": {
|
| 10 |
+
"name": "NIPS",
|
| 11 |
+
"year": 2024
|
| 12 |
+
},
|
| 13 |
+
"template": null,
|
| 14 |
+
"category": "05. 3D Vision and Computational Graphics",
|
| 15 |
+
"is_done": true,
|
| 16 |
+
"timestamp": "2025-08-07T08:26:39.992643",
|
| 17 |
+
"rule_paper_possible_url": null,
|
| 18 |
+
"github_base": null,
|
| 19 |
+
"llm_believed_url": null,
|
| 20 |
+
"rule_base_possible_url": null,
|
| 21 |
+
"confirmed_url": null,
|
| 22 |
+
"Internet_fail": null,
|
| 23 |
+
"html_fail": null,
|
| 24 |
+
"citation_data": {
|
| 25 |
+
"original_title": "D-MiSo: Editing Dynamic 3D Scenes using Multi-Gaussians Soup",
|
| 26 |
+
"matched_title": "D-MiSo: Editing Dynamic 3D Scenes using Multi-Gaussians Soup",
|
| 27 |
+
"citation_count": 9,
|
| 28 |
+
"similarity": 1.0,
|
| 29 |
+
"source": "semantic_scholar",
|
| 30 |
+
"year": 2024,
|
| 31 |
+
"authors": [
|
| 32 |
+
{
|
| 33 |
+
"authorId": "2282467788",
|
| 34 |
+
"name": "Joanna Waczy'nska"
|
| 35 |
+
},
|
| 36 |
+
{
|
| 37 |
+
"authorId": "2282467710",
|
| 38 |
+
"name": "Piotr Borycki"
|
| 39 |
+
},
|
| 40 |
+
{
|
| 41 |
+
"authorId": "2315301619",
|
| 42 |
+
"name": "Joanna Kaleta"
|
| 43 |
+
},
|
| 44 |
+
{
|
| 45 |
+
"authorId": "153296977",
|
| 46 |
+
"name": "S. Tadeja"
|
| 47 |
+
},
|
| 48 |
+
{
|
| 49 |
+
"authorId": "1790922",
|
| 50 |
+
"name": "P. Spurek"
|
| 51 |
+
}
|
| 52 |
+
]
|
| 53 |
+
}
|
| 54 |
+
}
|
data_without_website/Decentralized_Noncooperative_Games_with_Coupled_Decision-Dependent_Distributions.json
ADDED
|
@@ -0,0 +1,42 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
{
|
| 2 |
+
"id": "KqgSzXbufw",
|
| 3 |
+
"title": "Decentralized Noncooperative Games with Coupled Decision-Dependent Distributions",
|
| 4 |
+
"track": "main",
|
| 5 |
+
"author": "Wenjing Yan;Xuanyu Cao",
|
| 6 |
+
"pdf": "https://openreview.net/pdf?id=KqgSzXbufw",
|
| 7 |
+
"keyword": "",
|
| 8 |
+
"abstract": "Distribution variations in machine learning, driven by the dynamic nature of deployment environments, significantly impact the performance of learning models. This paper explores endogenous distribution shifts in learning systems, where deployed models influence environments and subsequently alter data distributions. This phenomenon is formulated by a decision-dependent distribution mapping within the recently proposed framework of performative prediction (PP) Perdomo et al. (2020). We investigate the performative effect in a decentralized noncooperative game, where players aim to minimize private cost functions while simultaneously managing coupled inequality constraints. Under performativity, we examine two equilibrium concepts for the studied game: performative stable equilibrium (PSE) and Nash equilibrium (NE), and establish sufficient conditions for their existence and uniqueness. Notably, we provide the first upper bound on the distance between the PSE and NE in the literature, which is challenging to evaluate due to the absence of strong convexity on the joint cost function. Furthermore, we develop a decentralized stochastic primal-dual algorithm for efficiently computing the PSE point. By carefully bounding the performative effect in theoretical analysis, we prove that the proposed algorithm achieves sublinear convergence rates for both performative regrets and constraint violation and maintains the same order of convergence rate as the case without performativity. Numerical experiments validate the effectiveness of our algorithm and theoretical results.",
|
| 9 |
+
"conference": {
|
| 10 |
+
"name": "NIPS",
|
| 11 |
+
"year": 2024
|
| 12 |
+
},
|
| 13 |
+
"template": null,
|
| 14 |
+
"category": "03. ML Theory and Optimization",
|
| 15 |
+
"is_done": true,
|
| 16 |
+
"timestamp": "2025-08-05T18:39:42.103391",
|
| 17 |
+
"rule_paper_possible_url": null,
|
| 18 |
+
"github_base": null,
|
| 19 |
+
"llm_believed_url": null,
|
| 20 |
+
"rule_base_possible_url": null,
|
| 21 |
+
"confirmed_url": null,
|
| 22 |
+
"Internet_fail": null,
|
| 23 |
+
"html_fail": null,
|
| 24 |
+
"citation_data": {
|
| 25 |
+
"original_title": "Decentralized Noncooperative Games with Coupled Decision-Dependent Distributions",
|
| 26 |
+
"matched_title": "Decentralized Noncooperative Games with Coupled Decision-Dependent Distributions",
|
| 27 |
+
"citation_count": 0,
|
| 28 |
+
"similarity": 1.0,
|
| 29 |
+
"source": "semantic_scholar",
|
| 30 |
+
"year": 2024,
|
| 31 |
+
"authors": [
|
| 32 |
+
{
|
| 33 |
+
"authorId": "2244757240",
|
| 34 |
+
"name": "Wenjing Yan"
|
| 35 |
+
},
|
| 36 |
+
{
|
| 37 |
+
"authorId": "2244624171",
|
| 38 |
+
"name": "Xuanyu Cao"
|
| 39 |
+
}
|
| 40 |
+
]
|
| 41 |
+
}
|
| 42 |
+
}
|
data_without_website/Denoising_Vision_Transformers.json
ADDED
|
@@ -0,0 +1,58 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
{
|
| 2 |
+
"id": "1633",
|
| 3 |
+
"title": "Denoising Vision Transformers",
|
| 4 |
+
"track": "main",
|
| 5 |
+
"author": "Jiawei Yang*; Katie Z Luo; Jiefeng Li; Congyue Deng; Leonidas Guibas; Dilip Krishnan; Kilian Weinberger; Yonglong Tian; Yue Wang",
|
| 6 |
+
"pdf": "https://www.ecva.net/papers/eccv_2024/papers_ECCV/papers/11504.pdf",
|
| 7 |
+
"keyword": "",
|
| 8 |
+
"abstract": "\"We study a crucial yet often overlooked issue inherent to Vision Transformers (ViTs): feature maps of these models exhibit grid-like artifacts (“Original features” in fig:teaser), which hurt the performance of ViTs in downstream dense prediction tasks such as semantic segmentation, depth prediction, and object discovery. We trace this issue down to the positional embeddings at the input stage. To mitigate this, we propose a two-stage denoising approach, termed (). In the first stage, we separate the clean features from those contaminated by positional artifacts by enforcing cross-view feature consistency with neural fields on a per-image basis. This per-image optimization process extracts artifact-free features from raw ViT outputs, providing clean feature estimates for offline applications. In the second stage, we train a lightweight transformer block to predict clean features from raw ViT outputs, leveraging the derived estimates of the clean features as supervision. Our method, , does not require re-training the existing pre-trained ViTs, and is immediately applicable to any Vision Transformer architecture. We evaluate our method on a variety of representative ViTs (DINO, DeiT-III, EVA02, CLIP, DINOv2, DINOv2-reg) and demonstrate that consistently improves existing state-of-the-art general-purpose models in semantic and geometric tasks across multiple datasets (fig:teaser, right, tab:denser esults, tab : objd et, tab : objd iscovery).W ehopeourstudywillencourageare−evaluationof V iT design, especiallyregardingth\"",
|
| 9 |
+
"conference": {
|
| 10 |
+
"name": "ECCV",
|
| 11 |
+
"year": 2024
|
| 12 |
+
},
|
| 13 |
+
"template": null,
|
| 14 |
+
"category": "01. Deep Learning Architectures and Methods",
|
| 15 |
+
"is_done": true,
|
| 16 |
+
"timestamp": "2025-08-05T15:52:55.846767",
|
| 17 |
+
"rule_paper_possible_url": null,
|
| 18 |
+
"github_base": null,
|
| 19 |
+
"llm_believed_url": null,
|
| 20 |
+
"rule_base_possible_url": null,
|
| 21 |
+
"confirmed_url": null,
|
| 22 |
+
"Internet_fail": null,
|
| 23 |
+
"html_fail": null,
|
| 24 |
+
"citation_data": {
|
| 25 |
+
"original_title": "Denoising Vision Transformers",
|
| 26 |
+
"matched_title": "Denoising Vision Transformers",
|
| 27 |
+
"citation_count": 17,
|
| 28 |
+
"similarity": 1.0,
|
| 29 |
+
"source": "semantic_scholar",
|
| 30 |
+
"year": 2024,
|
| 31 |
+
"authors": [
|
| 32 |
+
{
|
| 33 |
+
"authorId": "2265462798",
|
| 34 |
+
"name": "Jiawei Yang"
|
| 35 |
+
},
|
| 36 |
+
{
|
| 37 |
+
"authorId": "2265578504",
|
| 38 |
+
"name": "Katie Z Luo"
|
| 39 |
+
},
|
| 40 |
+
{
|
| 41 |
+
"authorId": "2265671757",
|
| 42 |
+
"name": "Jie Li"
|
| 43 |
+
},
|
| 44 |
+
{
|
| 45 |
+
"authorId": "7446832",
|
| 46 |
+
"name": "Kilian Q. Weinberger"
|
| 47 |
+
},
|
| 48 |
+
{
|
| 49 |
+
"authorId": "2307043887",
|
| 50 |
+
"name": "Yonglong Tian"
|
| 51 |
+
},
|
| 52 |
+
{
|
| 53 |
+
"authorId": "2265585917",
|
| 54 |
+
"name": "Yue Wang"
|
| 55 |
+
}
|
| 56 |
+
]
|
| 57 |
+
}
|
| 58 |
+
}
|
data_without_website/Direct_Preference_Optimization__Your_Language_Model_is_Secretly_a_Reward_Model.json
ADDED
|
@@ -0,0 +1,58 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
{
|
| 2 |
+
"id": "HPuSIXJaa9",
|
| 3 |
+
"title": "Direct Preference Optimization: Your Language Model is Secretly a Reward Model",
|
| 4 |
+
"track": "main",
|
| 5 |
+
"author": "Rafael Rafailov;Archit Sharma;Eric Mitchell;Christopher D Manning;Stefano Ermon;Chelsea Finn",
|
| 6 |
+
"pdf": "https://openreview.net/pdf?id=HPuSIXJaa9",
|
| 7 |
+
"keyword": "",
|
| 8 |
+
"abstract": "While large-scale unsupervised language models (LMs) learn broad world knowledge and some reasoning skills, achieving precise control of their behavior is difficult due to the completely unsupervised nature of their training. Existing methods for gaining such steerability collect human labels of the relative quality of model generations and fine-tune the unsupervised LM to align with these preferences, often with reinforcement learning from human feedback (RLHF). However, RLHF is a complex and often unstable procedure, first fitting a reward model that reflects the human preferences, and then fine-tuning the large unsupervised LM using reinforcement learning to maximize this estimated reward without drifting too far from the original model. In this paper, we leverage a mapping between reward functions and optimal policies to show that this constrained reward maximization problem can be optimized exactly with a single stage of policy training, essentially solving a classification problem on the human preference data. The resulting algorithm, which we call Direct Preference Optimization (DPO), is stable, performant, and computationally lightweight, eliminating the need for fitting a reward model, sampling from the LM during fine-tuning, or performing significant hyperparameter tuning. Our experiments show that DPO can fine-tune LMs to align with human preferences as well as or better than existing methods. Notably, fine-tuning with DPO exceeds RLHF's ability to control sentiment of generations and improves response quality in summarization and single-turn dialogue while being substantially simpler to implement and train.",
|
| 9 |
+
"conference": {
|
| 10 |
+
"name": "NIPS",
|
| 11 |
+
"year": 2023
|
| 12 |
+
},
|
| 13 |
+
"template": null,
|
| 14 |
+
"category": "02. Reinforcement Learning and Control",
|
| 15 |
+
"is_done": true,
|
| 16 |
+
"timestamp": "2025-08-04T09:25:34.203251",
|
| 17 |
+
"rule_paper_possible_url": null,
|
| 18 |
+
"github_base": null,
|
| 19 |
+
"llm_believed_url": null,
|
| 20 |
+
"rule_base_possible_url": null,
|
| 21 |
+
"confirmed_url": null,
|
| 22 |
+
"Internet_fail": null,
|
| 23 |
+
"html_fail": null,
|
| 24 |
+
"citation_data": {
|
| 25 |
+
"original_title": "Direct Preference Optimization: Your Language Model is Secretly a Reward Model",
|
| 26 |
+
"matched_title": "Direct Preference Optimization: Your Language Model is Secretly a Reward Model",
|
| 27 |
+
"citation_count": 4500,
|
| 28 |
+
"similarity": 1.0,
|
| 29 |
+
"source": "semantic_scholar",
|
| 30 |
+
"year": 2023,
|
| 31 |
+
"authors": [
|
| 32 |
+
{
|
| 33 |
+
"authorId": "102801230",
|
| 34 |
+
"name": "Rafael Rafailov"
|
| 35 |
+
},
|
| 36 |
+
{
|
| 37 |
+
"authorId": "50465276",
|
| 38 |
+
"name": "Archit Sharma"
|
| 39 |
+
},
|
| 40 |
+
{
|
| 41 |
+
"authorId": "49688913",
|
| 42 |
+
"name": "E. Mitchell"
|
| 43 |
+
},
|
| 44 |
+
{
|
| 45 |
+
"authorId": "2490652",
|
| 46 |
+
"name": "Stefano Ermon"
|
| 47 |
+
},
|
| 48 |
+
{
|
| 49 |
+
"authorId": "144783904",
|
| 50 |
+
"name": "Christopher D. Manning"
|
| 51 |
+
},
|
| 52 |
+
{
|
| 53 |
+
"authorId": "46881670",
|
| 54 |
+
"name": "Chelsea Finn"
|
| 55 |
+
}
|
| 56 |
+
]
|
| 57 |
+
}
|
| 58 |
+
}
|
data_without_website/Edge_Rewiring_Goes_Neural__Boosting_Network_Resilience_via_Policy_Gradient.json
ADDED
|
@@ -0,0 +1,50 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
{
|
| 2 |
+
"id": "eVzy-BWKY6Z",
|
| 3 |
+
"title": "Edge Rewiring Goes Neural: Boosting Network Resilience via Policy Gradient",
|
| 4 |
+
"track": "main",
|
| 5 |
+
"author": "Shanchao Yang;MA KAILI;Baoxiang Wang;Hongyuan Zha",
|
| 6 |
+
"pdf": "https://openreview.net/pdf?id=eVzy-BWKY6Z",
|
| 7 |
+
"keyword": "",
|
| 8 |
+
"abstract": "Improving the resilience of a network protects the system from natural disasters and malicious attacks.\nThis is typically achieved by introducing new edges, which however may reach beyond the maximum number of connections a node could sustain.\nMany studies then resort to the degree-preserving operation of rewiring, which swaps existing edges $AC, BD$ to new edges $AB, CD$.\nA significant line of studies focuses on this technique for theoretical and practical results while leaving three limitations: network utility loss, local optimality, and transductivity. \nIn this paper, we propose ResiNet, a reinforcement learning (RL)-based framework to discover Resilient Network topologies against various disasters and attacks. \nResiNet is objective agnostic which allows the utility to be balanced by incorporating it into the objective function.\nThe local optimality, typically seen in greedy algorithms, is addressed by casting the cumulative resilience gain into a sequential decision process of step-wise rewiring.\nThe transductivity, which refers to the necessity to run a computationally intensive optimization for each input graph, is lifted by our variant of RL with auto-regressive permutation-invariant variable action space.\nResiNet is armed by our technical innovation, Filtration enhanced GNN (FireGNN), which distinguishes graphs with minor differences.\nIt is thus possible for ResiNet to capture local structure changes and adapt its decision among consecutive graphs, which is known to be infeasible for GNN.\nExtensive experiments demonstrate that with a small number of rewiring operations, ResiNet achieves a near-optimal resilience gain on multiple graphs while balancing the utility, with a large margin compared to existing approaches.",
|
| 9 |
+
"conference": {
|
| 10 |
+
"name": "ICLR",
|
| 11 |
+
"year": 2022
|
| 12 |
+
},
|
| 13 |
+
"template": null,
|
| 14 |
+
"category": "02. Reinforcement Learning and Control",
|
| 15 |
+
"is_done": true,
|
| 16 |
+
"timestamp": "2025-08-04T03:46:28.151658",
|
| 17 |
+
"rule_paper_possible_url": null,
|
| 18 |
+
"github_base": null,
|
| 19 |
+
"llm_believed_url": null,
|
| 20 |
+
"rule_base_possible_url": null,
|
| 21 |
+
"confirmed_url": null,
|
| 22 |
+
"Internet_fail": null,
|
| 23 |
+
"html_fail": null,
|
| 24 |
+
"citation_data": {
|
| 25 |
+
"original_title": "Edge Rewiring Goes Neural: Boosting Network Resilience via Policy Gradient",
|
| 26 |
+
"matched_title": "Edge Rewiring Goes Neural: Boosting Network Resilience via Policy Gradient",
|
| 27 |
+
"citation_count": 0,
|
| 28 |
+
"similarity": 1.0,
|
| 29 |
+
"source": "semantic_scholar",
|
| 30 |
+
"year": 2021,
|
| 31 |
+
"authors": [
|
| 32 |
+
{
|
| 33 |
+
"authorId": "2268669",
|
| 34 |
+
"name": "Shanchao Yang"
|
| 35 |
+
},
|
| 36 |
+
{
|
| 37 |
+
"authorId": "47737190",
|
| 38 |
+
"name": "Kaili Ma"
|
| 39 |
+
},
|
| 40 |
+
{
|
| 41 |
+
"authorId": "2156645203",
|
| 42 |
+
"name": "Baoxiang Wang"
|
| 43 |
+
},
|
| 44 |
+
{
|
| 45 |
+
"authorId": "145203884",
|
| 46 |
+
"name": "H. Zha"
|
| 47 |
+
}
|
| 48 |
+
]
|
| 49 |
+
}
|
| 50 |
+
}
|
data_without_website/Embracing_the_chaos__analysis_and_diagnosis_of_numerical_instability_in_variational_flows.json
ADDED
|
@@ -0,0 +1,42 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
{
|
| 2 |
+
"id": "87Nu9SagB7",
|
| 3 |
+
"title": "Embracing the chaos: analysis and diagnosis of numerical instability in variational flows",
|
| 4 |
+
"track": "main",
|
| 5 |
+
"author": "Zuheng Xu;Trevor Campbell",
|
| 6 |
+
"pdf": "https://openreview.net/pdf?id=87Nu9SagB7",
|
| 7 |
+
"keyword": "",
|
| 8 |
+
"abstract": "In this paper, we investigate the impact of numerical instability on the reliability of sampling, density evaluation, and evidence lower bound (ELBO) estimation in variational flows. We first empirically demonstrate that common flows can exhibit a catastrophic accumulation of error: the numerical flow map deviates significantly from the exact map---which affects sampling---and the numerical inverse flow map does not accurately recover the initial input---which affects density and ELBO computations. Surprisingly though, we find that results produced by flows are often accurate enough for applications despite the presence of serious numerical instability. In this work, we treat variational flows as chaotic dynamical systems, and leverage shadowing theory to elucidate this behavior via theoretical guarantees on the error of sampling, density evaluation, and ELBO estimation. Finally, we develop and empirically test a diagnostic procedure that can be used to validate results produced by numerically unstable flows in practice.",
|
| 9 |
+
"conference": {
|
| 10 |
+
"name": "NIPS",
|
| 11 |
+
"year": 2023
|
| 12 |
+
},
|
| 13 |
+
"template": null,
|
| 14 |
+
"category": "04. Probabilistic Methods and Causal Inference",
|
| 15 |
+
"is_done": true,
|
| 16 |
+
"timestamp": "2025-08-04T09:42:38.456385",
|
| 17 |
+
"rule_paper_possible_url": null,
|
| 18 |
+
"github_base": null,
|
| 19 |
+
"llm_believed_url": null,
|
| 20 |
+
"rule_base_possible_url": null,
|
| 21 |
+
"confirmed_url": null,
|
| 22 |
+
"Internet_fail": null,
|
| 23 |
+
"html_fail": null,
|
| 24 |
+
"citation_data": {
|
| 25 |
+
"original_title": "Embracing the chaos: analysis and diagnosis of numerical instability in variational flows",
|
| 26 |
+
"matched_title": "Embracing the chaos: analysis and diagnosis of numerical instability in variational flows",
|
| 27 |
+
"citation_count": 3,
|
| 28 |
+
"similarity": 1.0,
|
| 29 |
+
"source": "semantic_scholar",
|
| 30 |
+
"year": 2023,
|
| 31 |
+
"authors": [
|
| 32 |
+
{
|
| 33 |
+
"authorId": "1576258157",
|
| 34 |
+
"name": "Zuheng Xu"
|
| 35 |
+
},
|
| 36 |
+
{
|
| 37 |
+
"authorId": "145215008",
|
| 38 |
+
"name": "Trevor Campbell"
|
| 39 |
+
}
|
| 40 |
+
]
|
| 41 |
+
}
|
| 42 |
+
}
|
data_without_website/Enhancing_Quality_of_Compressed_Images_by_Mitigating_Enhancement_Bias_Towards_Compression_Domain.json
ADDED
|
@@ -0,0 +1,62 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
{
|
| 2 |
+
"id": "29494",
|
| 3 |
+
"title": "Enhancing Quality of Compressed Images by Mitigating Enhancement Bias Towards Compression Domain",
|
| 4 |
+
"track": "main",
|
| 5 |
+
"author": "Qunliang Xing; Mai Xu; Shengxi Li; Xin Deng; Meisong Zheng; Huaida Liu; Ying Chen",
|
| 6 |
+
"pdf": "https://openaccess.thecvf.com/content/CVPR2024/papers/Xing_Enhancing_Quality_of_Compressed_Images_by_Mitigating_Enhancement_Bias_Towards_CVPR_2024_paper.pdf",
|
| 7 |
+
"keyword": "",
|
| 8 |
+
"abstract": "Existing quality enhancement methods for compressed images focus on aligning the enhancement domain with the raw domain to yield realistic images. However these methods exhibit a pervasive enhancement bias towards the compression domain inadvertently regarding it as more realistic than the raw domain. This bias makes enhanced images closely resemble their compressed counterparts thus degrading their perceptual quality. In this paper we propose a simple yet effective method to mitigate this bias and enhance the quality of compressed images. Our method employs a conditional discriminator with the compressed image as a key condition and then incorporates a domain-divergence regularization to actively distance the enhancement domain from the compression domain. Through this dual strategy our method enables the discrimination against the compression domain and brings the enhancement domain closer to the raw domain. Comprehensive quality evaluations confirm the superiority of our method over other state-of-the-art methods without incurring inference overheads.",
|
| 9 |
+
"conference": {
|
| 10 |
+
"name": "CVPR",
|
| 11 |
+
"year": 2024
|
| 12 |
+
},
|
| 13 |
+
"template": null,
|
| 14 |
+
"category": "07. Generative Model",
|
| 15 |
+
"is_done": true,
|
| 16 |
+
"timestamp": "2025-08-04T08:18:39.063865",
|
| 17 |
+
"rule_paper_possible_url": null,
|
| 18 |
+
"github_base": null,
|
| 19 |
+
"llm_believed_url": null,
|
| 20 |
+
"rule_base_possible_url": null,
|
| 21 |
+
"confirmed_url": null,
|
| 22 |
+
"Internet_fail": null,
|
| 23 |
+
"html_fail": null,
|
| 24 |
+
"citation_data": {
|
| 25 |
+
"original_title": "Enhancing Quality of Compressed Images by Mitigating Enhancement Bias Towards Compression Domain",
|
| 26 |
+
"matched_title": "Enhancing Quality of Compressed Images by Mitigating Enhancement Bias Towards Compression Domain",
|
| 27 |
+
"citation_count": 3,
|
| 28 |
+
"similarity": 1.0,
|
| 29 |
+
"source": "semantic_scholar",
|
| 30 |
+
"year": 2024,
|
| 31 |
+
"authors": [
|
| 32 |
+
{
|
| 33 |
+
"authorId": "73768108",
|
| 34 |
+
"name": "Qunliang Xing"
|
| 35 |
+
},
|
| 36 |
+
{
|
| 37 |
+
"authorId": "2271850373",
|
| 38 |
+
"name": "Mai Xu"
|
| 39 |
+
},
|
| 40 |
+
{
|
| 41 |
+
"authorId": "2272184472",
|
| 42 |
+
"name": "Shengxi Li"
|
| 43 |
+
},
|
| 44 |
+
{
|
| 45 |
+
"authorId": "2277041332",
|
| 46 |
+
"name": "Xin Deng"
|
| 47 |
+
},
|
| 48 |
+
{
|
| 49 |
+
"authorId": "46934753",
|
| 50 |
+
"name": "Mei Zheng"
|
| 51 |
+
},
|
| 52 |
+
{
|
| 53 |
+
"authorId": "2145489433",
|
| 54 |
+
"name": "Huaida Liu"
|
| 55 |
+
},
|
| 56 |
+
{
|
| 57 |
+
"authorId": "2287862884",
|
| 58 |
+
"name": "Ying Chen"
|
| 59 |
+
}
|
| 60 |
+
]
|
| 61 |
+
}
|
| 62 |
+
}
|
data_without_website/Enhancing_Tampered_Text_Detection_through_Frequency_Feature_Fusion_and_Decomposition.json
ADDED
|
@@ -0,0 +1,66 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
{
|
| 2 |
+
"id": "2184",
|
| 3 |
+
"title": "Enhancing Tampered Text Detection through Frequency Feature Fusion and Decomposition",
|
| 4 |
+
"track": "main",
|
| 5 |
+
"author": "Zhongxi Chen; Shen Chen; Taiping Yao*; Ke Sun; Shouhong Ding; Xianming Lin*; Liujuan Cao; Rongrong Ji",
|
| 6 |
+
"pdf": "https://www.ecva.net/papers/eccv_2024/papers_ECCV/papers/04834.pdf",
|
| 7 |
+
"keyword": "",
|
| 8 |
+
"abstract": "\"Document image tampering poses a grave risk to the veracity of information, with potential consequences ranging from misinformation dissemination to financial and identity fraud. Current detection methods use frequency information to uncover tampering that is invisible to the naked eye. However, these methods often fail to integrate this information effectively, thereby compromising RGB detection capabilities and missing the high-frequency details necessary to detect subtle tampering. To address these gaps, we introduce a Feature Fusion and Decomposition Network (FFDN) that combines a Visual Enhancement Module (VEM) with a Wavelet-like Frequency Enhancement (WFE). Specifically, the VEM makes tampering traces visible while preserving the integrity of original RGB features using zero-initialized convolutions. Meanwhile, the WFE decomposes the features to explicitly retain high-frequency details that are often overlooked during downsampling, focusing on small but critical tampering clues. Rigorous testing on the DocTamper dataset confirms FFDN’s preeminence, significantly outperforming existing state-of-the-art methods in detecting tampering.\"",
|
| 9 |
+
"conference": {
|
| 10 |
+
"name": "ECCV",
|
| 11 |
+
"year": 2024
|
| 12 |
+
},
|
| 13 |
+
"template": null,
|
| 14 |
+
"category": "05. 3D Vision and Computational Graphics",
|
| 15 |
+
"is_done": true,
|
| 16 |
+
"timestamp": "2025-08-05T15:43:55.308236",
|
| 17 |
+
"rule_paper_possible_url": null,
|
| 18 |
+
"github_base": null,
|
| 19 |
+
"llm_believed_url": null,
|
| 20 |
+
"rule_base_possible_url": null,
|
| 21 |
+
"confirmed_url": null,
|
| 22 |
+
"Internet_fail": null,
|
| 23 |
+
"html_fail": null,
|
| 24 |
+
"citation_data": {
|
| 25 |
+
"original_title": "Enhancing Tampered Text Detection through Frequency Feature Fusion and Decomposition",
|
| 26 |
+
"matched_title": "Enhancing Tampered Text Detection Through Frequency Feature Fusion and Decomposition",
|
| 27 |
+
"citation_count": 2,
|
| 28 |
+
"similarity": 1.0,
|
| 29 |
+
"source": "semantic_scholar",
|
| 30 |
+
"year": 2024,
|
| 31 |
+
"authors": [
|
| 32 |
+
{
|
| 33 |
+
"authorId": "2218874050",
|
| 34 |
+
"name": "Zhongxi Chen"
|
| 35 |
+
},
|
| 36 |
+
{
|
| 37 |
+
"authorId": "2118529267",
|
| 38 |
+
"name": "Shen Chen"
|
| 39 |
+
},
|
| 40 |
+
{
|
| 41 |
+
"authorId": "51300955",
|
| 42 |
+
"name": "Taiping Yao"
|
| 43 |
+
},
|
| 44 |
+
{
|
| 45 |
+
"authorId": "2315991836",
|
| 46 |
+
"name": "Ke Sun"
|
| 47 |
+
},
|
| 48 |
+
{
|
| 49 |
+
"authorId": "2305300611",
|
| 50 |
+
"name": "Shouhong Ding"
|
| 51 |
+
},
|
| 52 |
+
{
|
| 53 |
+
"authorId": "2284323842",
|
| 54 |
+
"name": "Xianming Lin"
|
| 55 |
+
},
|
| 56 |
+
{
|
| 57 |
+
"authorId": "2279282523",
|
| 58 |
+
"name": "Liujuan Cao"
|
| 59 |
+
},
|
| 60 |
+
{
|
| 61 |
+
"authorId": "2325146889",
|
| 62 |
+
"name": "Rongrong Ji"
|
| 63 |
+
}
|
| 64 |
+
]
|
| 65 |
+
}
|
| 66 |
+
}
|
data_without_website/Equivariant_Energy-Guided_SDE_for_Inverse_Molecular_Design.json
ADDED
|
@@ -0,0 +1,58 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
{
|
| 2 |
+
"id": "r0otLtOwYW",
|
| 3 |
+
"title": "Equivariant Energy-Guided SDE for Inverse Molecular Design",
|
| 4 |
+
"track": "main",
|
| 5 |
+
"author": "Fan Bao;Min Zhao;Zhongkai Hao;Peiyao Li;Chongxuan Li;Jun Zhu",
|
| 6 |
+
"pdf": "https://openreview.net/pdf?id=r0otLtOwYW",
|
| 7 |
+
"keyword": "",
|
| 8 |
+
"abstract": "Inverse molecular design is critical in material science and drug discovery, where the generated molecules should satisfy certain desirable properties. In this paper, we propose equivariant energy-guided stochastic differential equations (EEGSDE), a flexible framework for controllable 3D molecule generation under the guidance of an energy function in diffusion models. Formally, we show that EEGSDE naturally exploits the geometric symmetry in 3D molecular conformation, as long as the energy function is invariant to orthogonal transformations. Empirically, under the guidance of designed energy functions, EEGSDE significantly improves the baseline on QM9, in inverse molecular design targeted to quantum properties and molecular structures. Furthermore, EEGSDE is able to generate molecules with multiple target properties by combining the corresponding energy functions linearly.",
|
| 9 |
+
"conference": {
|
| 10 |
+
"name": "ICLR",
|
| 11 |
+
"year": 2023
|
| 12 |
+
},
|
| 13 |
+
"template": null,
|
| 14 |
+
"category": "11. AI for Science",
|
| 15 |
+
"is_done": true,
|
| 16 |
+
"timestamp": "2025-08-04T04:30:47.378756",
|
| 17 |
+
"rule_paper_possible_url": null,
|
| 18 |
+
"github_base": null,
|
| 19 |
+
"llm_believed_url": null,
|
| 20 |
+
"rule_base_possible_url": null,
|
| 21 |
+
"confirmed_url": null,
|
| 22 |
+
"Internet_fail": null,
|
| 23 |
+
"html_fail": null,
|
| 24 |
+
"citation_data": {
|
| 25 |
+
"original_title": "Equivariant Energy-Guided SDE for Inverse Molecular Design",
|
| 26 |
+
"matched_title": "Equivariant Energy-Guided SDE for Inverse Molecular Design",
|
| 27 |
+
"citation_count": 71,
|
| 28 |
+
"similarity": 1.0,
|
| 29 |
+
"source": "semantic_scholar",
|
| 30 |
+
"year": 2022,
|
| 31 |
+
"authors": [
|
| 32 |
+
{
|
| 33 |
+
"authorId": "2071898125",
|
| 34 |
+
"name": "Fan Bao"
|
| 35 |
+
},
|
| 36 |
+
{
|
| 37 |
+
"authorId": "2152529520",
|
| 38 |
+
"name": "Min Zhao"
|
| 39 |
+
},
|
| 40 |
+
{
|
| 41 |
+
"authorId": "95460807",
|
| 42 |
+
"name": "Zhongkai Hao"
|
| 43 |
+
},
|
| 44 |
+
{
|
| 45 |
+
"authorId": "2112727548",
|
| 46 |
+
"name": "Pei‐Yun Li"
|
| 47 |
+
},
|
| 48 |
+
{
|
| 49 |
+
"authorId": "2399563",
|
| 50 |
+
"name": "Chongxuan Li"
|
| 51 |
+
},
|
| 52 |
+
{
|
| 53 |
+
"authorId": "2155220672",
|
| 54 |
+
"name": "Jun Zhu"
|
| 55 |
+
}
|
| 56 |
+
]
|
| 57 |
+
}
|
| 58 |
+
}
|
data_without_website/Exploring_the_Effectiveness_of_Diffusion_Models_in_One-Shot_Federated_Learning.json
ADDED
|
@@ -0,0 +1,46 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
{
|
| 2 |
+
"id": "trCvBb8guo",
|
| 3 |
+
"title": "Exploring the Effectiveness of Diffusion Models in One-Shot Federated Learning",
|
| 4 |
+
"track": "main",
|
| 5 |
+
"author": "Matias Mendieta;Guangyu Sun;Chen Chen",
|
| 6 |
+
"pdf": "https://openreview.net/pdf?id=trCvBb8guo",
|
| 7 |
+
"keyword": "",
|
| 8 |
+
"abstract": "Federated learning (FL) enables multiple clients to train models collectively while preserving data privacy. However, FL faces challenges in terms of communication cost and data heterogeneity. One-shot federated learning has emerged as a solution by reducing communication rounds, improving efficiency, and providing better security against eavesdropping attacks. Nevertheless, data heterogeneity remains a significant challenge, impacting performance. This work explores the effectiveness of diffusion models in one-shot FL, demonstrating their applicability in addressing data heterogeneity and improving FL performance. Additionally, we investigate the utility of our diffusion model approach, FedDiff, compared to other one-shot FL methods under differential privacy (DP). Furthermore, to improve generated sample quality under DP settings, we propose a simple Fourier Magnitude Filtering (FMF) method, enhancing the effectiveness of the generated data for global model training. Code will be made publicly available.",
|
| 9 |
+
"conference": {
|
| 10 |
+
"name": "ICLR",
|
| 11 |
+
"year": 2024
|
| 12 |
+
},
|
| 13 |
+
"template": null,
|
| 14 |
+
"category": "07. Generative Model",
|
| 15 |
+
"is_done": true,
|
| 16 |
+
"timestamp": "2025-08-04T05:37:07.937312",
|
| 17 |
+
"rule_paper_possible_url": null,
|
| 18 |
+
"github_base": null,
|
| 19 |
+
"llm_believed_url": null,
|
| 20 |
+
"rule_base_possible_url": null,
|
| 21 |
+
"confirmed_url": null,
|
| 22 |
+
"Internet_fail": null,
|
| 23 |
+
"html_fail": null,
|
| 24 |
+
"citation_data": {
|
| 25 |
+
"original_title": "Exploring the Effectiveness of Diffusion Models in One-Shot Federated Learning",
|
| 26 |
+
"matched_title": "Navigating Heterogeneity and Privacy in One-Shot Federated Learning with Diffusion Models",
|
| 27 |
+
"citation_count": 5,
|
| 28 |
+
"similarity": 0.5212121212121212,
|
| 29 |
+
"source": "semantic_scholar",
|
| 30 |
+
"year": 2024,
|
| 31 |
+
"authors": [
|
| 32 |
+
{
|
| 33 |
+
"authorId": "1422036273",
|
| 34 |
+
"name": "Mat'ias Mendieta"
|
| 35 |
+
},
|
| 36 |
+
{
|
| 37 |
+
"authorId": "2186913881",
|
| 38 |
+
"name": "Guangyu Sun"
|
| 39 |
+
},
|
| 40 |
+
{
|
| 41 |
+
"authorId": "2298470649",
|
| 42 |
+
"name": "Chen Chen"
|
| 43 |
+
}
|
| 44 |
+
]
|
| 45 |
+
}
|
| 46 |
+
}
|
data_without_website/Feature_Grinding__Efficient_Backdoor_Sanitation_in_Deep_Neural_Networks.json
ADDED
|
@@ -0,0 +1,32 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
{
|
| 2 |
+
"id": "lGRG9TxQ3x",
|
| 3 |
+
"title": "Feature Grinding: Efficient Backdoor Sanitation in Deep Neural Networks",
|
| 4 |
+
"track": "main",
|
| 5 |
+
"author": "Nils Lukas;Charles Zhang;Florian Kerschbaum",
|
| 6 |
+
"pdf": "https://openreview.net/pdf?id=lGRG9TxQ3x",
|
| 7 |
+
"keyword": "",
|
| 8 |
+
"abstract": "Training deep neural networks (DNNs) is expensive and for this reason, third parties provide computational resources to train models. This makes DNNs vulnerable to backdoor attacks, in which the third party maliciously injects hidden functionalities in the model at training time. Removing a backdoor is challenging because although the defender has access to a clean, labeled dataset, they only have limited computational resources which are a fraction of the resources required to train a model from scratch. We propose Feature Grinding as an efficient, randomized backdoor sanitation technique against seven contemporary backdoors on CIFAR-10 and ImageNet. Feature Grinding requires at most six percent of the model's training time on CIFAR-10 and at most two percent on ImageNet for sanitizing the surveyed backdoors. We compare Feature Grinding with five other sanitation methods and find that it is often the most effective at decreasing the backdoor's success rate while preserving a high model accuracy. Our experiments include an ablation study over multiple parameters for each backdoor attack and sanitation technique to ensure a fair evaluation of all methods. Models suspected of containing a backdoor can be Feature Grinded using limited resources, which makes it a practical defense against backdoors that can be incorporated into any standard training procedure.",
|
| 9 |
+
"conference": {
|
| 10 |
+
"name": "ICLR",
|
| 11 |
+
"year": 2022
|
| 12 |
+
},
|
| 13 |
+
"template": null,
|
| 14 |
+
"category": "10. Trustworthy and Ethical AI",
|
| 15 |
+
"is_done": true,
|
| 16 |
+
"timestamp": "2025-08-04T03:40:23.319978",
|
| 17 |
+
"rule_paper_possible_url": null,
|
| 18 |
+
"github_base": null,
|
| 19 |
+
"llm_believed_url": null,
|
| 20 |
+
"rule_base_possible_url": null,
|
| 21 |
+
"confirmed_url": null,
|
| 22 |
+
"Internet_fail": null,
|
| 23 |
+
"html_fail": null,
|
| 24 |
+
"citation_data": {
|
| 25 |
+
"original_title": "Feature Grinding: Efficient Backdoor Sanitation in Deep Neural Networks",
|
| 26 |
+
"matched_title": "Feature Grinding: Efficient Backdoor Sanitation in Deep Neural Networks",
|
| 27 |
+
"citation_count": 0,
|
| 28 |
+
"similarity": 1.0,
|
| 29 |
+
"source": "kimi_llm",
|
| 30 |
+
"raw_response": "0"
|
| 31 |
+
}
|
| 32 |
+
}
|
data_without_website/FedCSL__A_Scalable_and_Accurate_Approach_to_Federated_Causal_Structure_Learning.json
ADDED
|
@@ -0,0 +1,49 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
{
|
| 2 |
+
"id": "article-29113",
|
| 3 |
+
"title": "FedCSL: A Scalable and Accurate Approach to Federated Causal Structure Learning",
|
| 4 |
+
"track": "main",
|
| 5 |
+
"author": "Xianjie Guo; Kui Yu; Lin Liu; Jiuyong Li",
|
| 6 |
+
"pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/29113/30105",
|
| 7 |
+
"keyword": "",
|
| 8 |
+
"abstract": "As an emerging research direction, federated causal structure learning (CSL) aims at learning causal relationships from decentralized data across multiple clients while preserving data privacy. Existing federated CSL algorithms suffer from scalability and accuracy issues, since they require computationally expensive CSL algorithms to be executed at each client. Furthermore, in real-world scenarios, the number of samples held by each client varies significantly, and existing methods still assign equal weights to the learned structural information from each client, which severely harms the learning accuracy of those methods. To address these two limitations, we propose FedCSL, a scalable and accurate method for federated CSL. Specifically, FedCSL consists of two novel strategies: (1) a federated local-to-global learning strategy that enables FedCSL to scale to high-dimensional data for tackling the scalability issue, and (2) a novel weighted aggregation strategy that does not rely on any complex encryption techniques while preserving data privacy for tackling the accuracy issue. Extensive experiments on benchmark datasets, high-dimensional synthetic datasets and a real-world dataset verify the efficacy of the proposed FedCSL method. The source code is available at https://github.com/Xianjie-Guo/FedCSL.",
|
| 9 |
+
"conference": {
|
| 10 |
+
"name": "AAAI",
|
| 11 |
+
"year": 2024
|
| 12 |
+
},
|
| 13 |
+
"github_base": "https://github.com/Xianjie-Guo/FedCSL",
|
| 14 |
+
"template": null,
|
| 15 |
+
"category": "04. Probabilistic Methods and Causal Inference",
|
| 16 |
+
"is_done": true,
|
| 17 |
+
"timestamp": "2025-08-07T08:02:11.547601",
|
| 18 |
+
"log": {
|
| 19 |
+
"timestamp": "2025-08-07T08:02:11.547601",
|
| 20 |
+
"stage": "special situation",
|
| 21 |
+
"note": "论文没有项目主页但找到了GitHub相关信息"
|
| 22 |
+
},
|
| 23 |
+
"citation_data": {
|
| 24 |
+
"original_title": "FedCSL: A Scalable and Accurate Approach to Federated Causal Structure Learning",
|
| 25 |
+
"matched_title": "FedCSL: A Scalable and Accurate Approach to Federated Causal Structure Learning",
|
| 26 |
+
"citation_count": 8,
|
| 27 |
+
"similarity": 1.0,
|
| 28 |
+
"source": "semantic_scholar",
|
| 29 |
+
"year": 2024,
|
| 30 |
+
"authors": [
|
| 31 |
+
{
|
| 32 |
+
"authorId": "1387835221",
|
| 33 |
+
"name": "Xianjie Guo"
|
| 34 |
+
},
|
| 35 |
+
{
|
| 36 |
+
"authorId": "2256160802",
|
| 37 |
+
"name": "Kui Yu"
|
| 38 |
+
},
|
| 39 |
+
{
|
| 40 |
+
"authorId": "2146017365",
|
| 41 |
+
"name": "Lin Liu"
|
| 42 |
+
},
|
| 43 |
+
{
|
| 44 |
+
"authorId": "2257372048",
|
| 45 |
+
"name": "Jiuyong Li"
|
| 46 |
+
}
|
| 47 |
+
]
|
| 48 |
+
}
|
| 49 |
+
}
|
data_without_website/Generalizing_to_Unseen_Elements__A_Survey_on_Knowledge_Extrapolation_for_Knowledge_Graphs.json
ADDED
|
@@ -0,0 +1,58 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
{
|
| 2 |
+
"id": "paper737",
|
| 3 |
+
"title": "Generalizing to Unseen Elements: A Survey on Knowledge Extrapolation for Knowledge Graphs",
|
| 4 |
+
"track": "Survey Track",
|
| 5 |
+
"author": "Mingyang Chen; Wen Zhang; Yuxia Geng; Zezhong Xu; Jeff Z. Pan; Huajun Chen",
|
| 6 |
+
"pdf": "https://www.ijcai.org/proceedings/2023/0737.pdf",
|
| 7 |
+
"keyword": "",
|
| 8 |
+
"abstract": "Knowledge graphs (KGs) have become valuable knowledge resources in various applications, and knowledge graph embedding (KGE) methods have garnered increasing attention in recent years. However, conventional KGE methods still face challenges when it comes to handling unseen entities or relations during model testing. To address this issue, much effort has been devoted to various fields of KGs. In this paper, we use a set of general terminologies to unify these methods and refer to them collectively as Knowledge Extrapolation. We comprehensively summarize these methods, classified by our proposed taxonomy, and describe their interrelationships. Additionally, we introduce benchmarks and provide comparisons of these methods based on aspects that are not captured by the taxonomy. Finally, we suggest potential directions for future research.",
|
| 9 |
+
"conference": {
|
| 10 |
+
"name": "IJCAI",
|
| 11 |
+
"year": 2023
|
| 12 |
+
},
|
| 13 |
+
"template": null,
|
| 14 |
+
"category": "06. Natural Language Understanding and Semantics",
|
| 15 |
+
"is_done": true,
|
| 16 |
+
"timestamp": "2025-08-05T15:44:08.542240",
|
| 17 |
+
"rule_paper_possible_url": null,
|
| 18 |
+
"github_base": null,
|
| 19 |
+
"llm_believed_url": null,
|
| 20 |
+
"rule_base_possible_url": null,
|
| 21 |
+
"confirmed_url": null,
|
| 22 |
+
"Internet_fail": null,
|
| 23 |
+
"html_fail": null,
|
| 24 |
+
"citation_data": {
|
| 25 |
+
"original_title": "Generalizing to Unseen Elements: A Survey on Knowledge Extrapolation for Knowledge Graphs",
|
| 26 |
+
"matched_title": "Generalizing to Unseen Elements: A Survey on Knowledge Extrapolation for Knowledge Graphs",
|
| 27 |
+
"citation_count": 22,
|
| 28 |
+
"similarity": 1.0,
|
| 29 |
+
"source": "semantic_scholar",
|
| 30 |
+
"year": 2023,
|
| 31 |
+
"authors": [
|
| 32 |
+
{
|
| 33 |
+
"authorId": "48622851",
|
| 34 |
+
"name": "Mingyang Chen"
|
| 35 |
+
},
|
| 36 |
+
{
|
| 37 |
+
"authorId": "2155281129",
|
| 38 |
+
"name": "Wen Zhang"
|
| 39 |
+
},
|
| 40 |
+
{
|
| 41 |
+
"authorId": "152683218",
|
| 42 |
+
"name": "Yuxia Geng"
|
| 43 |
+
},
|
| 44 |
+
{
|
| 45 |
+
"authorId": "2136396192",
|
| 46 |
+
"name": "Zezhong Xu"
|
| 47 |
+
},
|
| 48 |
+
{
|
| 49 |
+
"authorId": "9416872",
|
| 50 |
+
"name": "Jeff Z. Pan"
|
| 51 |
+
},
|
| 52 |
+
{
|
| 53 |
+
"authorId": "14499025",
|
| 54 |
+
"name": "Hua-zeng Chen"
|
| 55 |
+
}
|
| 56 |
+
]
|
| 57 |
+
}
|
| 58 |
+
}
|
data_without_website/Geometry-Informed_Neural_Networks.json
ADDED
|
@@ -0,0 +1,54 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
{
|
| 2 |
+
"id": "zpX0teJu9Z",
|
| 3 |
+
"title": "Geometry-Informed Neural Networks",
|
| 4 |
+
"track": "main",
|
| 5 |
+
"author": "Arturs Berzins;Andreas Radler;Eric Volkmann;Sebastian Sanokowski;Sepp Hochreiter;Johannes Brandstetter",
|
| 6 |
+
"pdf": "https://openreview.net/pdf?id=zpX0teJu9Z",
|
| 7 |
+
"keyword": "",
|
| 8 |
+
"abstract": "Geometry is a ubiquitous tool in computer graphics, design, and engineering. However, the lack of large shape datasets limits the application of state-of-the-art supervised learning methods and motivates the exploration of alternative learning strategies. To this end, we introduce geometry-informed neural networks (GINNs) - a framework for training shape-generative neural fields *without data* by leveraging user-specified design requirements in the form of objectives and constraints. By adding *diversity* as an explicit constraint, GINNs avoid mode-collapse and can generate multiple diverse solutions, often required in geometry tasks. Experimentally, we apply GINNs to several introductory problems and a realistic 3D engineering design problem, showing control over geometrical and topological properties, such as surface smoothness or the number of holes. These results demonstrate the potential of training shape-generative models without data, paving the way for new generative design approaches without large datasets.",
|
| 9 |
+
"conference": {
|
| 10 |
+
"name": "ICLR",
|
| 11 |
+
"year": 2025
|
| 12 |
+
},
|
| 13 |
+
"template": null,
|
| 14 |
+
"category": "07. Generative Model",
|
| 15 |
+
"is_done": true,
|
| 16 |
+
"timestamp": "2025-08-05T16:50:32.138353",
|
| 17 |
+
"rule_paper_possible_url": null,
|
| 18 |
+
"github_base": null,
|
| 19 |
+
"llm_believed_url": null,
|
| 20 |
+
"rule_base_possible_url": null,
|
| 21 |
+
"confirmed_url": null,
|
| 22 |
+
"Internet_fail": null,
|
| 23 |
+
"html_fail": null,
|
| 24 |
+
"citation_data": {
|
| 25 |
+
"original_title": "Geometry-Informed Neural Networks",
|
| 26 |
+
"matched_title": "Geometry-Informed Neural Networks",
|
| 27 |
+
"citation_count": 4,
|
| 28 |
+
"similarity": 1.0,
|
| 29 |
+
"source": "semantic_scholar",
|
| 30 |
+
"year": 2024,
|
| 31 |
+
"authors": [
|
| 32 |
+
{
|
| 33 |
+
"authorId": "145611078",
|
| 34 |
+
"name": "Arturs Berzins"
|
| 35 |
+
},
|
| 36 |
+
{
|
| 37 |
+
"authorId": "2175785624",
|
| 38 |
+
"name": "Andreas Radler"
|
| 39 |
+
},
|
| 40 |
+
{
|
| 41 |
+
"authorId": "2268317793",
|
| 42 |
+
"name": "Sebastian Sanokowski"
|
| 43 |
+
},
|
| 44 |
+
{
|
| 45 |
+
"authorId": "3308557",
|
| 46 |
+
"name": "Sepp Hochreiter"
|
| 47 |
+
},
|
| 48 |
+
{
|
| 49 |
+
"authorId": "78843496",
|
| 50 |
+
"name": "Johannes Brandstetter"
|
| 51 |
+
}
|
| 52 |
+
]
|
| 53 |
+
}
|
| 54 |
+
}
|
data_without_website/Interleaving_Retrieval_with_Chain-of-Thought_Reasoning_for_Knowledge-Intensive_Multi-Step_Questions.json
ADDED
|
@@ -0,0 +1,45 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
{
|
| 2 |
+
"id": "2023.acl-long.557",
|
| 3 |
+
"title": "Interleaving Retrieval with Chain-of-Thought Reasoning for Knowledge-Intensive Multi-Step Questions",
|
| 4 |
+
"track": "main",
|
| 5 |
+
"author": "Harsh Trivedi; Niranjan Balasubramanian; Tushar Khot; Ashish Sabharwal",
|
| 6 |
+
"pdf": "https://aclanthology.org/2023.acl-long.557.pdf",
|
| 7 |
+
"keyword": "",
|
| 8 |
+
"abstract": "Prompting-based large language models (LLMs) are surprisingly powerful at generating natural language reasoning steps or Chains-of-Thoughts (CoT) for multi-step question answering (QA). They struggle, however, when the necessary knowledge is either unavailable to the LLM or not up-to-date within its parameters. While using the question to retrieve relevant text from an external knowledge source helps LLMs, we observe that this one-step retrieve-and-read approach is insufficient for multi-step QA. Here, what to retrieve depends on what has already been derived, which in turn may depend on what was previously retrieved. To address this, we propose IRCoT, a new approach for multi-step QA that interleaves retrieval with steps (sentences) in a CoT, guiding the retrieval with CoT and in turn using retrieved results to improve CoT. Using IRCoT with GPT3 substantially improves retrieval (up to 21 points) as well as downstream QA (up to 15 points) on four datasets: HotpotQA, 2WikiMultihopQA, MuSiQue, and IIRC. We observe similar substantial gains in out-of-distribution (OOD) settings as well as with much smaller models such as Flan-T5-large without additional training. IRCoT reduces model hallucination, resulting in factually more accurate CoT reasoning.",
|
| 9 |
+
"conference": {
|
| 10 |
+
"name": "ACL",
|
| 11 |
+
"year": 2023
|
| 12 |
+
},
|
| 13 |
+
"github_base": "https://github.com/stonybrooknlp/ircot",
|
| 14 |
+
"llm_believed_url": "https://github.com/stonybrooknlp/ircot",
|
| 15 |
+
"template": null,
|
| 16 |
+
"category": "09. Multimodal Learning",
|
| 17 |
+
"is_done": true,
|
| 18 |
+
"timestamp": "2025-08-05T05:24:42.295086",
|
| 19 |
+
"citation_data": {
|
| 20 |
+
"original_title": "Interleaving Retrieval with Chain-of-Thought Reasoning for Knowledge-Intensive Multi-Step Questions",
|
| 21 |
+
"matched_title": "Interleaving Retrieval with Chain-of-Thought Reasoning for Knowledge-Intensive Multi-Step Questions",
|
| 22 |
+
"citation_count": 481,
|
| 23 |
+
"similarity": 1.0,
|
| 24 |
+
"source": "semantic_scholar",
|
| 25 |
+
"year": 2022,
|
| 26 |
+
"authors": [
|
| 27 |
+
{
|
| 28 |
+
"authorId": "6365809",
|
| 29 |
+
"name": "H. Trivedi"
|
| 30 |
+
},
|
| 31 |
+
{
|
| 32 |
+
"authorId": "35217367",
|
| 33 |
+
"name": "Niranjan Balasubramanian"
|
| 34 |
+
},
|
| 35 |
+
{
|
| 36 |
+
"authorId": "2236429",
|
| 37 |
+
"name": "Tushar Khot"
|
| 38 |
+
},
|
| 39 |
+
{
|
| 40 |
+
"authorId": "48229640",
|
| 41 |
+
"name": "Ashish Sabharwal"
|
| 42 |
+
}
|
| 43 |
+
]
|
| 44 |
+
}
|
| 45 |
+
}
|
data_without_website/It_Helps_to_Take_a_Second_Opinion__Teaching_Smaller_LLMs_To_Deliberate_Mutually_via_Selective_Rationale_Optimisation.json
ADDED
|
@@ -0,0 +1,50 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
{
|
| 2 |
+
"id": "NHxwxc3ql6",
|
| 3 |
+
"title": "It Helps to Take a Second Opinion: Teaching Smaller LLMs To Deliberate Mutually via Selective Rationale Optimisation",
|
| 4 |
+
"track": "main",
|
| 5 |
+
"author": "Sohan Patnaik;Milan Aggarwal;Sumit Bhatia;Balaji Krishnamurthy",
|
| 6 |
+
"pdf": "https://openreview.net/pdf?id=NHxwxc3ql6",
|
| 7 |
+
"keyword": "",
|
| 8 |
+
"abstract": "Very large language models (LLMs) such as GPT-4 have shown the ability to handle complex tasks by generating and self-refining step-by-step rationales. Smaller language models (SLMs), typically with < 13B parameters, have been improved by using the data generated from very-large LMs through knowledge distillation. However, various practical constraints such as API costs, copyright, legal and ethical policies restrict using large (often opaque) models to train smaller models for commercial use. Limited success has been achieved at improving the ability of an SLM to explore the space of possible rationales and evaluate them by itself through self-deliberation. To address this, we propose COALITION, a trainable framework that facilitates interaction between two variants of the same SLM and trains them to generate and refine rationales optimized for the end-task. The variants exhibit different behaviors to produce a set of diverse candidate rationales during the generation and refinement steps. The model is then trained via Selective Rationale Optimization (SRO) to prefer generating rationale candidates that maximize the likelihood of producing the ground-truth answer. During inference, COALITION employs a controller to select the suitable variant for generating and refining the rationales. On five different datasets covering mathematical problems, commonsense reasoning, and natural language inference, COALITION outperforms several baselines by up to 5%. Our ablation studies reveal that cross-communication between the two variants performs better than using the single model to self-refine the rationales. We also demonstrate the applicability of COALITION for LMs of varying scales (4B to 14B parameters) and model families (Mistral, Llama, Qwen, Phi). We release the code for this work here.",
|
| 9 |
+
"conference": {
|
| 10 |
+
"name": "ICLR",
|
| 11 |
+
"year": 2025
|
| 12 |
+
},
|
| 13 |
+
"template": null,
|
| 14 |
+
"category": "01. Deep Learning Architectures and Methods",
|
| 15 |
+
"is_done": true,
|
| 16 |
+
"timestamp": "2025-08-05T17:19:53.732179",
|
| 17 |
+
"rule_paper_possible_url": null,
|
| 18 |
+
"github_base": null,
|
| 19 |
+
"llm_believed_url": null,
|
| 20 |
+
"rule_base_possible_url": null,
|
| 21 |
+
"confirmed_url": null,
|
| 22 |
+
"Internet_fail": null,
|
| 23 |
+
"html_fail": null,
|
| 24 |
+
"citation_data": {
|
| 25 |
+
"original_title": "It Helps to Take a Second Opinion: Teaching Smaller LLMs To Deliberate Mutually via Selective Rationale Optimisation",
|
| 26 |
+
"matched_title": "It Helps to Take a Second Opinion: Teaching Smaller LLMs to Deliberate Mutually via Selective Rationale Optimisation",
|
| 27 |
+
"citation_count": 0,
|
| 28 |
+
"similarity": 1.0,
|
| 29 |
+
"source": "semantic_scholar",
|
| 30 |
+
"year": 2025,
|
| 31 |
+
"authors": [
|
| 32 |
+
{
|
| 33 |
+
"authorId": "2121632423",
|
| 34 |
+
"name": "Sohan Patnaik"
|
| 35 |
+
},
|
| 36 |
+
{
|
| 37 |
+
"authorId": "6657914",
|
| 38 |
+
"name": "Milan Aggarwal"
|
| 39 |
+
},
|
| 40 |
+
{
|
| 41 |
+
"authorId": "2282468460",
|
| 42 |
+
"name": "Sumita Bhatia"
|
| 43 |
+
},
|
| 44 |
+
{
|
| 45 |
+
"authorId": "2265756284",
|
| 46 |
+
"name": "Balaji Krishnamurthy"
|
| 47 |
+
}
|
| 48 |
+
]
|
| 49 |
+
}
|
| 50 |
+
}
|
data_without_website/KEFI__Kernel-based_Feature_Identification_for_Generalizable_Classification.json
ADDED
|
@@ -0,0 +1,42 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
{
|
| 2 |
+
"id": "L6r9t0HtqQ",
|
| 3 |
+
"title": "KEFI: Kernel-based Feature Identification for Generalizable Classification",
|
| 4 |
+
"track": "main",
|
| 5 |
+
"author": "Long Tung Vuong;Chuanxia Zheng;Manh Luong;Thanh-Toan Do;Trung Le;Dinh Phung",
|
| 6 |
+
"pdf": "https://openreview.net/pdf?id=L6r9t0HtqQ",
|
| 7 |
+
"keyword": "",
|
| 8 |
+
"abstract": "To achieve satisfactory generalization performance on previously unseen domains, existing domain generalization (DG) methods often assume fixed domain-invariant features from a set of training domains for good generalization on new domains. However, this assumption can be overly strict, especially when the source domains lack shared information or when the target domains utilize information from selective source domains in a compositional manner. This leads to the natural question of how we utilize information from the source domain to the target domain in an appropriate way. In response to this challenge, we propose an innovative framework that includes an attribute-based feature extractor that captures from the source domains semantically meaningful components referred to as \\textit{attributes} and a \\textit{Kernel-based Attribute Identifier} that leverages kernel learning theory to define the decision boundaries for these attributes collected from the source domains. This dynamic learning approach empowers the classifier to effectively identify the learned attributes in the domains it has not encountered before. We empirically validate our method on well-established DG benchmarks,\nachieving competitive results compared to state-of-the-art techniques.",
|
| 9 |
+
"conference": {
|
| 10 |
+
"name": "ICLR",
|
| 11 |
+
"year": 2024
|
| 12 |
+
},
|
| 13 |
+
"template": null,
|
| 14 |
+
"category": "03. ML Theory and Optimization",
|
| 15 |
+
"is_done": true,
|
| 16 |
+
"timestamp": "2025-08-04T05:21:56.851574",
|
| 17 |
+
"rule_paper_possible_url": null,
|
| 18 |
+
"github_base": null,
|
| 19 |
+
"llm_believed_url": null,
|
| 20 |
+
"rule_base_possible_url": null,
|
| 21 |
+
"confirmed_url": null,
|
| 22 |
+
"Internet_fail": null,
|
| 23 |
+
"html_fail": null,
|
| 24 |
+
"citation_data": {
|
| 25 |
+
"original_title": "KEFI: Kernel-based Feature Identification for Generalizable Classification",
|
| 26 |
+
"matched_title": "On feature combination for multiclass object classification",
|
| 27 |
+
"citation_count": 918,
|
| 28 |
+
"similarity": 0.5801526717557252,
|
| 29 |
+
"source": "semantic_scholar",
|
| 30 |
+
"year": 2009,
|
| 31 |
+
"authors": [
|
| 32 |
+
{
|
| 33 |
+
"authorId": "2871555",
|
| 34 |
+
"name": "Peter Gehler"
|
| 35 |
+
},
|
| 36 |
+
{
|
| 37 |
+
"authorId": "2388416",
|
| 38 |
+
"name": "Sebastian Nowozin"
|
| 39 |
+
}
|
| 40 |
+
]
|
| 41 |
+
}
|
| 42 |
+
}
|
data_without_website/Language-agnostic_BERT_Sentence_Embedding_1.json
ADDED
|
@@ -0,0 +1,54 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
{
|
| 2 |
+
"id": "2022.acl-long.62",
|
| 3 |
+
"title": "Language-agnostic BERT Sentence Embedding",
|
| 4 |
+
"track": "main",
|
| 5 |
+
"author": "Fangxiaoyu Feng; Yinfei Yang; Daniel Cer; Naveen Arivazhagan; Wei Wang",
|
| 6 |
+
"pdf": "https://aclanthology.org/2022.acl-long.62.pdf",
|
| 7 |
+
"keyword": "",
|
| 8 |
+
"abstract": "While BERT is an effective method for learning monolingual sentence embeddings for semantic similarity and embedding based transfer learning BERT based cross-lingual sentence embeddings have yet to be explored. We systematically investigate methods for learning multilingual sentence embeddings by combining the best methods for learning monolingual and cross-lingual representations including: masked language modeling (MLM), translation language modeling (TLM), dual encoder translation ranking, and additive margin softmax. We show that introducing a pre-trained multilingual language model dramatically reduces the amount of parallel training data required to achieve good performance by 80%. Composing the best of these methods produces a model that achieves 83.7% bi-text retrieval accuracy over 112 languages on Tatoeba, well above the 65.5% achieved by LASER, while still performing competitively on monolingual transfer learning benchmarks. Parallel data mined from CommonCrawl using our best model is shown to train competitive NMT models for en-zh and en-de. We publicly release our best multilingual sentence embedding model for 109+ languages at https://tfhub.dev/google/LaBSE.",
|
| 9 |
+
"conference": {
|
| 10 |
+
"name": "ACL",
|
| 11 |
+
"year": 2022
|
| 12 |
+
},
|
| 13 |
+
"template": null,
|
| 14 |
+
"category": "06. Natural Language Understanding and Semantics",
|
| 15 |
+
"is_done": true,
|
| 16 |
+
"timestamp": "2025-08-07T05:00:40.359232",
|
| 17 |
+
"rule_paper_possible_url": null,
|
| 18 |
+
"github_base": null,
|
| 19 |
+
"llm_believed_url": null,
|
| 20 |
+
"rule_base_possible_url": null,
|
| 21 |
+
"confirmed_url": null,
|
| 22 |
+
"Internet_fail": null,
|
| 23 |
+
"html_fail": null,
|
| 24 |
+
"citation_data": {
|
| 25 |
+
"original_title": "Language-agnostic BERT Sentence Embedding",
|
| 26 |
+
"matched_title": "Language-agnostic BERT Sentence Embedding",
|
| 27 |
+
"citation_count": 941,
|
| 28 |
+
"similarity": 1.0,
|
| 29 |
+
"source": "semantic_scholar",
|
| 30 |
+
"year": 2020,
|
| 31 |
+
"authors": [
|
| 32 |
+
{
|
| 33 |
+
"authorId": "3129590",
|
| 34 |
+
"name": "Fangxiaoyu Feng"
|
| 35 |
+
},
|
| 36 |
+
{
|
| 37 |
+
"authorId": "2118771180",
|
| 38 |
+
"name": "Yinfei Yang"
|
| 39 |
+
},
|
| 40 |
+
{
|
| 41 |
+
"authorId": "46724030",
|
| 42 |
+
"name": "Daniel Matthew Cer"
|
| 43 |
+
},
|
| 44 |
+
{
|
| 45 |
+
"authorId": "3365231",
|
| 46 |
+
"name": "N. Arivazhagan"
|
| 47 |
+
},
|
| 48 |
+
{
|
| 49 |
+
"authorId": "2158624629",
|
| 50 |
+
"name": "Wei Wang"
|
| 51 |
+
}
|
| 52 |
+
]
|
| 53 |
+
}
|
| 54 |
+
}
|
data_without_website/Learning_Symmetric_Locomotion_using_Cumulative_Fatigue_for_Reinforcement_Learning.json
ADDED
|
@@ -0,0 +1,32 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
{
|
| 2 |
+
"id": "3mgYqlH60Uj",
|
| 3 |
+
"title": "Learning Symmetric Locomotion using Cumulative Fatigue for Reinforcement Learning",
|
| 4 |
+
"track": "main",
|
| 5 |
+
"author": "Rui Xu;Noshaba Cheema;Erik Herrmann;Perttu Hämäläinen;Philipp Slusallek",
|
| 6 |
+
"pdf": "https://openreview.net/pdf?id=3mgYqlH60Uj",
|
| 7 |
+
"keyword": "",
|
| 8 |
+
"abstract": "Modern deep reinforcement learning (DRL) methods allow simulated characters to learn complex skills such as locomotion from scratch. However, without further exploitation of domain-specific knowledge, such as motion capture data, finite state machines or morphological specifications, physics-based locomotion generation with DRL often results in unrealistic motions. One explanation for this is that present RL models do not estimate biomechanical effort; instead, they minimize instantaneous squared joint actuation torques as a proxy for the actual subjective cost of actions. To mitigate this discrepancy in a computationally efficient manner, we propose a method for mapping actuation torques to subjective effort without simulating muscles and their energy expenditure. Our approach is based on the Three Compartment Controller model, in which the relationships of variables such as maximum voluntary joint torques, recovery, and cumulative fatigue are present. We extend this method for sustained symmetric locomotion tasks for deep reinforcement learning using a Normalized Cumulative Fatigue (NCF) model.\nIn summary, in this paper we present the first RL model to use biomechanical cumulative effort for full-body movement generation without the use of any finite state machines, morphological specification or motion capture data. Our results show that the learned policies are more symmetric, periodic and robust compared to methods found in previous literature.",
|
| 9 |
+
"conference": {
|
| 10 |
+
"name": "ICLR",
|
| 11 |
+
"year": 2022
|
| 12 |
+
},
|
| 13 |
+
"template": null,
|
| 14 |
+
"category": "02. Reinforcement Learning and Control",
|
| 15 |
+
"is_done": true,
|
| 16 |
+
"timestamp": "2025-08-04T03:33:37.769867",
|
| 17 |
+
"rule_paper_possible_url": null,
|
| 18 |
+
"github_base": null,
|
| 19 |
+
"llm_believed_url": null,
|
| 20 |
+
"rule_base_possible_url": null,
|
| 21 |
+
"confirmed_url": null,
|
| 22 |
+
"Internet_fail": null,
|
| 23 |
+
"html_fail": null,
|
| 24 |
+
"citation_data": {
|
| 25 |
+
"original_title": "Learning Symmetric Locomotion using Cumulative Fatigue for Reinforcement Learning",
|
| 26 |
+
"matched_title": "Learning Symmetric Locomotion using Cumulative Fatigue for Reinforcement Learning",
|
| 27 |
+
"citation_count": 79,
|
| 28 |
+
"similarity": 1.0,
|
| 29 |
+
"source": "kimi_llm",
|
| 30 |
+
"raw_response": "79"
|
| 31 |
+
}
|
| 32 |
+
}
|
data_without_website/Learning_to_Noise__Application-Agnostic_Data_Sharing_with_Local_Differential_Privacy.json
ADDED
|
@@ -0,0 +1,58 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
{
|
| 2 |
+
"id": "BIwkgTsSp_8",
|
| 3 |
+
"title": "Learning to Noise: Application-Agnostic Data Sharing with Local Differential Privacy",
|
| 4 |
+
"track": "main",
|
| 5 |
+
"author": "Alex Mansbridge;Gregory Barbour;Davide Piras;Christopher Frye;Ilya Feige;David Barber",
|
| 6 |
+
"pdf": "https://openreview.net/pdf?id=BIwkgTsSp_8",
|
| 7 |
+
"keyword": "",
|
| 8 |
+
"abstract": "In recent years, the collection and sharing of individuals’ private data has become commonplace in many industries. Local differential privacy (LDP) is a rigorous approach which uses a randomized algorithm to preserve privacy even from the database administrator, unlike the more standard central differential privacy. For LDP, when applying noise directly to high-dimensional data, the level of noise required all but entirely destroys data utility. In this paper we introduce a novel, application-agnostic privatization mechanism that leverages representation learning to overcome the prohibitive noise requirements of direct methods, while maintaining the strict guarantees of LDP. We further demonstrate that data privatized with this mechanism can be used to train machine learning algorithms. Applications of this model include private data collection, private novel-class classification, and the augmentation of clean datasets with additional privatized features. We achieve significant gains in performance on downstream classification tasks relative to benchmarks that noise the data directly, which are state-of-the-art in the context of application-agnostic LDP mechanisms for high-dimensional data sharing tasks.",
|
| 9 |
+
"conference": {
|
| 10 |
+
"name": "ICLR",
|
| 11 |
+
"year": 2021
|
| 12 |
+
},
|
| 13 |
+
"template": null,
|
| 14 |
+
"category": "10. Trustworthy and Ethical AI",
|
| 15 |
+
"is_done": true,
|
| 16 |
+
"timestamp": "2025-08-04T09:03:12.209244",
|
| 17 |
+
"rule_paper_possible_url": null,
|
| 18 |
+
"github_base": null,
|
| 19 |
+
"llm_believed_url": null,
|
| 20 |
+
"rule_base_possible_url": null,
|
| 21 |
+
"confirmed_url": null,
|
| 22 |
+
"Internet_fail": null,
|
| 23 |
+
"html_fail": null,
|
| 24 |
+
"citation_data": {
|
| 25 |
+
"original_title": "Learning to Noise: Application-Agnostic Data Sharing with Local Differential Privacy",
|
| 26 |
+
"matched_title": "Learning to Noise: Application-Agnostic Data Sharing with Local Differential Privacy",
|
| 27 |
+
"citation_count": 1,
|
| 28 |
+
"similarity": 1.0,
|
| 29 |
+
"source": "semantic_scholar",
|
| 30 |
+
"year": 2020,
|
| 31 |
+
"authors": [
|
| 32 |
+
{
|
| 33 |
+
"authorId": "51040721",
|
| 34 |
+
"name": "Alex Mansbridge"
|
| 35 |
+
},
|
| 36 |
+
{
|
| 37 |
+
"authorId": "1388496992",
|
| 38 |
+
"name": "G. Barbour"
|
| 39 |
+
},
|
| 40 |
+
{
|
| 41 |
+
"authorId": "102763557",
|
| 42 |
+
"name": "Davide Piras"
|
| 43 |
+
},
|
| 44 |
+
{
|
| 45 |
+
"authorId": "145763179",
|
| 46 |
+
"name": "Christopher Frye"
|
| 47 |
+
},
|
| 48 |
+
{
|
| 49 |
+
"authorId": "10734982",
|
| 50 |
+
"name": "Ilya Feige"
|
| 51 |
+
},
|
| 52 |
+
{
|
| 53 |
+
"authorId": "145617808",
|
| 54 |
+
"name": "D. Barber"
|
| 55 |
+
}
|
| 56 |
+
]
|
| 57 |
+
}
|
| 58 |
+
}
|
data_without_website/Learning_without_Forgetting_for_Vision-Language_Models.json
ADDED
|
@@ -0,0 +1,58 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
{
|
| 2 |
+
"id": "k9NYnsC4Mq",
|
| 3 |
+
"title": "Learning without Forgetting for Vision-Language Models",
|
| 4 |
+
"track": "main",
|
| 5 |
+
"author": "Da-Wei Zhou;Yuanhan Zhang;Jingyi Ning;Han-Jia Ye;De-Chuan Zhan;Ziwei Liu",
|
| 6 |
+
"pdf": "https://openreview.net/pdf?id=k9NYnsC4Mq",
|
| 7 |
+
"keyword": "",
|
| 8 |
+
"abstract": "Class-Incremental Learning (CIL) or continual learning is a desired capability in the real world, which requires a learning system to adapt to new tasks without forgetting former ones. While traditional CIL methods focus on visual information to grasp core features, recent advances in Vision-Language Models (VLM) have shown promising capabilities in learning generalizable representations with the aid of textual information. However, when continually trained with new classes, VLMs often suffer from catastrophic forgetting of former knowledge. Applying VLMs to CIL poses two major challenges: 1) how to adapt the model without forgetting; and 2) how to make full use of the multi-modal information. To this end, we propose PROjectiOn Fusion (PROOF) that enables VLMs to learn without forgetting. To handle the first challenge, we propose training task-specific projections based on the frozen image/text encoders. When facing new tasks, new projections are expanded, and former projections are fixed, alleviating the forgetting of old concepts. For the second challenge, we propose the fusion module to better utilize the cross-modality information. By jointly adjusting visual and textual features, the model can capture better semantic information. Extensive experiments on nine benchmark datasets with various continual learning scenarios and various VLMs validate PROOF achieves state-of-the-art performance.",
|
| 9 |
+
"conference": {
|
| 10 |
+
"name": "ICLR",
|
| 11 |
+
"year": 2024
|
| 12 |
+
},
|
| 13 |
+
"template": null,
|
| 14 |
+
"category": "09. Multimodal Learning",
|
| 15 |
+
"is_done": true,
|
| 16 |
+
"timestamp": "2025-08-04T06:04:20.595615",
|
| 17 |
+
"rule_paper_possible_url": null,
|
| 18 |
+
"github_base": null,
|
| 19 |
+
"llm_believed_url": null,
|
| 20 |
+
"rule_base_possible_url": null,
|
| 21 |
+
"confirmed_url": null,
|
| 22 |
+
"Internet_fail": null,
|
| 23 |
+
"html_fail": null,
|
| 24 |
+
"citation_data": {
|
| 25 |
+
"original_title": "Learning without Forgetting for Vision-Language Models",
|
| 26 |
+
"matched_title": "Learning Without Forgetting for Vision-Language Models",
|
| 27 |
+
"citation_count": 48,
|
| 28 |
+
"similarity": 1.0,
|
| 29 |
+
"source": "semantic_scholar",
|
| 30 |
+
"year": 2023,
|
| 31 |
+
"authors": [
|
| 32 |
+
{
|
| 33 |
+
"authorId": "2108484534",
|
| 34 |
+
"name": "Da-Wei Zhou"
|
| 35 |
+
},
|
| 36 |
+
{
|
| 37 |
+
"authorId": "2145784327",
|
| 38 |
+
"name": "Yuanhan Zhang"
|
| 39 |
+
},
|
| 40 |
+
{
|
| 41 |
+
"authorId": "2218797021",
|
| 42 |
+
"name": "Jingyi Ning"
|
| 43 |
+
},
|
| 44 |
+
{
|
| 45 |
+
"authorId": "2151459740",
|
| 46 |
+
"name": "Han-Jia Ye"
|
| 47 |
+
},
|
| 48 |
+
{
|
| 49 |
+
"authorId": "1721819",
|
| 50 |
+
"name": "De-chuan Zhan"
|
| 51 |
+
},
|
| 52 |
+
{
|
| 53 |
+
"authorId": "2145254462",
|
| 54 |
+
"name": "Ziwei Liu"
|
| 55 |
+
}
|
| 56 |
+
]
|
| 57 |
+
}
|
| 58 |
+
}
|
data_without_website/LoCA__Location-Aware_Cosine_Adaptation_for_Parameter-Efficient_Fine-Tuning.json
ADDED
|
@@ -0,0 +1,66 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
{
|
| 2 |
+
"id": "4NRjdISWby",
|
| 3 |
+
"title": "LoCA: Location-Aware Cosine Adaptation for Parameter-Efficient Fine-Tuning",
|
| 4 |
+
"track": "main",
|
| 5 |
+
"author": "Zhekai Du;Yinjie Min;Jingjing Li;Ke Lu;Changliang Zou;Liuhua Peng;Tingjin Chu;Mingming Gong",
|
| 6 |
+
"pdf": "https://openreview.net/pdf?id=4NRjdISWby",
|
| 7 |
+
"keyword": "",
|
| 8 |
+
"abstract": "Low-rank adaptation (LoRA) has become a prevalent method for adapting pre-trained large language models to downstream tasks. However, the simple low-rank decomposition form may constrain the optimization flexibility. To address this limitation, we introduce Location-aware Cosine Adaptation (LoCA), a novel frequency-domain parameter-efficient fine-tuning method based on inverse Discrete Cosine Transform (iDCT) with selective locations of learnable components. We begin with a comprehensive theoretical comparison between frequency-domain and low-rank decompositions for fine-tuning pre-trained large models. Our analysis reveals that frequency-domain decomposition with carefully selected frequency components can surpass the expressivity of traditional low-rank-based methods. Furthermore, we demonstrate that iDCT offers a more efficient implementation compared to inverse Discrete Fourier Transform (iDFT), allowing for better selection and tuning of frequency components while maintaining equivalent expressivity to the optimal iDFT-based adaptation. By employing finite-difference approximation to estimate gradients for discrete locations of learnable coefficients on the DCT spectrum, LoCA dynamically selects the most informative frequency components during training. Experiments on diverse language and vision fine-tuning tasks demonstrate that LoCA offers enhanced parameter efficiency while maintains computational feasibility comparable to low-rank-based methods.",
|
| 9 |
+
"conference": {
|
| 10 |
+
"name": "ICLR",
|
| 11 |
+
"year": 2025
|
| 12 |
+
},
|
| 13 |
+
"template": null,
|
| 14 |
+
"category": "01. Deep Learning Architectures and Methods",
|
| 15 |
+
"is_done": true,
|
| 16 |
+
"timestamp": "2025-08-05T04:04:03.837680",
|
| 17 |
+
"rule_paper_possible_url": null,
|
| 18 |
+
"github_base": null,
|
| 19 |
+
"llm_believed_url": null,
|
| 20 |
+
"rule_base_possible_url": null,
|
| 21 |
+
"confirmed_url": null,
|
| 22 |
+
"Internet_fail": null,
|
| 23 |
+
"html_fail": null,
|
| 24 |
+
"citation_data": {
|
| 25 |
+
"original_title": "LoCA: Location-Aware Cosine Adaptation for Parameter-Efficient Fine-Tuning",
|
| 26 |
+
"matched_title": "LoCA: Location-Aware Cosine Adaptation for Parameter-Efficient Fine-Tuning",
|
| 27 |
+
"citation_count": 2,
|
| 28 |
+
"similarity": 1.0,
|
| 29 |
+
"source": "semantic_scholar",
|
| 30 |
+
"year": 2025,
|
| 31 |
+
"authors": [
|
| 32 |
+
{
|
| 33 |
+
"authorId": "82980465",
|
| 34 |
+
"name": "Zhekai Du"
|
| 35 |
+
},
|
| 36 |
+
{
|
| 37 |
+
"authorId": "2344835984",
|
| 38 |
+
"name": "Yinjie Min"
|
| 39 |
+
},
|
| 40 |
+
{
|
| 41 |
+
"authorId": "2237950986",
|
| 42 |
+
"name": "Jingjing Li"
|
| 43 |
+
},
|
| 44 |
+
{
|
| 45 |
+
"authorId": "2345807929",
|
| 46 |
+
"name": "Ke Lu"
|
| 47 |
+
},
|
| 48 |
+
{
|
| 49 |
+
"authorId": "2340311269",
|
| 50 |
+
"name": "Changliang Zou"
|
| 51 |
+
},
|
| 52 |
+
{
|
| 53 |
+
"authorId": "2340416477",
|
| 54 |
+
"name": "Liuhua Peng"
|
| 55 |
+
},
|
| 56 |
+
{
|
| 57 |
+
"authorId": "2257286145",
|
| 58 |
+
"name": "Tingjin Chu"
|
| 59 |
+
},
|
| 60 |
+
{
|
| 61 |
+
"authorId": "2257241811",
|
| 62 |
+
"name": "Mingming Gong"
|
| 63 |
+
}
|
| 64 |
+
]
|
| 65 |
+
}
|
| 66 |
+
}
|
data_without_website/Long_Context_Transfer_from_Language_to_Vision.json
ADDED
|
@@ -0,0 +1,74 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
{
|
| 2 |
+
"id": "QETk0lBdVf",
|
| 3 |
+
"title": "Long Context Transfer from Language to Vision",
|
| 4 |
+
"track": "main",
|
| 5 |
+
"author": "Peiyuan Zhang;Kaichen Zhang;Bo Li;Guangtao Zeng;Jingkang Yang;Yuanhan Zhang;Ziyue Wang;Haoran Tan;Chunyuan Li;Ziwei Liu",
|
| 6 |
+
"pdf": "https://openreview.net/pdf?id=QETk0lBdVf",
|
| 7 |
+
"keyword": "",
|
| 8 |
+
"abstract": "Video sequences offer valuable temporal information, but existing large multimodal models (LMMs) fall short in understanding extremely long videos. Many works address this by reducing the number of visual tokens using visual resamplers. Alternatively, in this paper, we approach this problem from the perspective of the language model. By simply extrapolating the context length of the language backbone, we enable LMMs to comprehend orders of magnitude more visual tokens without any video training. We call this phenomenon long context transfer and carefully ablate its properties. To effectively measure LMMs' ability to generalize to long contexts in the vision modality, we develop V-NIAH (Visual Needle-In-A-Haystack), a purely synthetic long vision benchmark inspired by the language model's NIAH test. Our proposed Long Video Assistant (LongVA) can process 2000 frames or over 200K visual tokens without additional complexities. With its extended context length, LongVA achieves state-of-the-art performance on Video-MME among 7B-scale models by densely sampling more input frames.",
|
| 9 |
+
"conference": {
|
| 10 |
+
"name": "ICLR",
|
| 11 |
+
"year": 2025
|
| 12 |
+
},
|
| 13 |
+
"template": null,
|
| 14 |
+
"category": "09. Multimodal Learning",
|
| 15 |
+
"is_done": true,
|
| 16 |
+
"timestamp": "2025-08-05T03:22:34.457081",
|
| 17 |
+
"rule_paper_possible_url": null,
|
| 18 |
+
"github_base": null,
|
| 19 |
+
"llm_believed_url": null,
|
| 20 |
+
"rule_base_possible_url": null,
|
| 21 |
+
"confirmed_url": null,
|
| 22 |
+
"Internet_fail": null,
|
| 23 |
+
"html_fail": null,
|
| 24 |
+
"citation_data": {
|
| 25 |
+
"original_title": "Long Context Transfer from Language to Vision",
|
| 26 |
+
"matched_title": "Long Context Transfer from Language to Vision",
|
| 27 |
+
"citation_count": 214,
|
| 28 |
+
"similarity": 1.0,
|
| 29 |
+
"source": "semantic_scholar",
|
| 30 |
+
"year": 2024,
|
| 31 |
+
"authors": [
|
| 32 |
+
{
|
| 33 |
+
"authorId": "2265621323",
|
| 34 |
+
"name": "Peiyuan Zhang"
|
| 35 |
+
},
|
| 36 |
+
{
|
| 37 |
+
"authorId": "2300086932",
|
| 38 |
+
"name": "Kaichen Zhang"
|
| 39 |
+
},
|
| 40 |
+
{
|
| 41 |
+
"authorId": "2165247100",
|
| 42 |
+
"name": "Bo Li"
|
| 43 |
+
},
|
| 44 |
+
{
|
| 45 |
+
"authorId": "2308040513",
|
| 46 |
+
"name": "Guangtao Zeng"
|
| 47 |
+
},
|
| 48 |
+
{
|
| 49 |
+
"authorId": "2295601",
|
| 50 |
+
"name": "Jingkang Yang"
|
| 51 |
+
},
|
| 52 |
+
{
|
| 53 |
+
"authorId": "2145784327",
|
| 54 |
+
"name": "Yuanhan Zhang"
|
| 55 |
+
},
|
| 56 |
+
{
|
| 57 |
+
"authorId": "2257550096",
|
| 58 |
+
"name": "Ziyue Wang"
|
| 59 |
+
},
|
| 60 |
+
{
|
| 61 |
+
"authorId": "2258308833",
|
| 62 |
+
"name": "Haoran Tan"
|
| 63 |
+
},
|
| 64 |
+
{
|
| 65 |
+
"authorId": "2264692022",
|
| 66 |
+
"name": "Chunyuan Li"
|
| 67 |
+
},
|
| 68 |
+
{
|
| 69 |
+
"authorId": "2279869111",
|
| 70 |
+
"name": "Ziwei Liu"
|
| 71 |
+
}
|
| 72 |
+
]
|
| 73 |
+
}
|
| 74 |
+
}
|
data_without_website/Matching_Pairs__Attributing_Fine-Tuned_Models_to_their_Pre-Trained_Large_Language_Models.json
ADDED
|
@@ -0,0 +1,58 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
{
|
| 2 |
+
"id": "2023.acl-long.410",
|
| 3 |
+
"title": "Matching Pairs: Attributing Fine-Tuned Models to their Pre-Trained Large Language Models",
|
| 4 |
+
"track": "main",
|
| 5 |
+
"author": "Myles Foley; Ambrish Rawat; Taesung Lee; Yufang Hou; Gabriele Picco; Giulio Zizzo",
|
| 6 |
+
"pdf": "https://aclanthology.org/2023.acl-long.410.pdf",
|
| 7 |
+
"keyword": "",
|
| 8 |
+
"abstract": "The wide applicability and adaptability of generative large language models (LLMs) has enabled their rapid adoption. While the pre-trained models can perform many tasks, such models are often fine-tuned to improve their performance on various downstream applications. However, this leads to issues over violation of model licenses, model theft, and copyright infringement. Moreover, recent advances show that generative technology is capable of producing harmful content which exacerbates the problems of accountability within model supply chains. Thus, we need a method to investigate how a model was trained or a piece of text was generated and what their pre-trained base model was. In this paper we take the first step to address this open problem by tracing back the origin of a given fine-tuned LLM to its corresponding pre-trained base model. We consider different knowledge levels and attribution strategies, and find that we can correctly trace back 8 out of the 10 fine tuned models with our best method.",
|
| 9 |
+
"conference": {
|
| 10 |
+
"name": "ACL",
|
| 11 |
+
"year": 2023
|
| 12 |
+
},
|
| 13 |
+
"template": null,
|
| 14 |
+
"category": "10. Trustworthy and Ethical AI",
|
| 15 |
+
"is_done": true,
|
| 16 |
+
"timestamp": "2025-08-05T05:28:19.224085",
|
| 17 |
+
"rule_paper_possible_url": null,
|
| 18 |
+
"github_base": null,
|
| 19 |
+
"llm_believed_url": null,
|
| 20 |
+
"rule_base_possible_url": null,
|
| 21 |
+
"confirmed_url": null,
|
| 22 |
+
"Internet_fail": null,
|
| 23 |
+
"html_fail": null,
|
| 24 |
+
"citation_data": {
|
| 25 |
+
"original_title": "Matching Pairs: Attributing Fine-Tuned Models to their Pre-Trained Large Language Models",
|
| 26 |
+
"matched_title": "Matching Pairs: Attributing Fine-Tuned Models to their Pre-Trained Large Language Models",
|
| 27 |
+
"citation_count": 6,
|
| 28 |
+
"similarity": 1.0,
|
| 29 |
+
"source": "semantic_scholar",
|
| 30 |
+
"year": 2023,
|
| 31 |
+
"authors": [
|
| 32 |
+
{
|
| 33 |
+
"authorId": "2166091148",
|
| 34 |
+
"name": "Myles Foley"
|
| 35 |
+
},
|
| 36 |
+
{
|
| 37 |
+
"authorId": "22261698",
|
| 38 |
+
"name": "Ambrish Rawat"
|
| 39 |
+
},
|
| 40 |
+
{
|
| 41 |
+
"authorId": "2801206",
|
| 42 |
+
"name": "Taesung Lee"
|
| 43 |
+
},
|
| 44 |
+
{
|
| 45 |
+
"authorId": "39517968",
|
| 46 |
+
"name": "Yufang Hou"
|
| 47 |
+
},
|
| 48 |
+
{
|
| 49 |
+
"authorId": "46310814",
|
| 50 |
+
"name": "Gabriele Picco"
|
| 51 |
+
},
|
| 52 |
+
{
|
| 53 |
+
"authorId": "152109289",
|
| 54 |
+
"name": "Giulio Zizzo"
|
| 55 |
+
}
|
| 56 |
+
]
|
| 57 |
+
}
|
| 58 |
+
}
|
data_without_website/Meta_Navigator__Search_for_a_Good_Adaptation_Policy_for_Few-Shot_Learning.json
ADDED
|
@@ -0,0 +1,58 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
{
|
| 2 |
+
"id": "",
|
| 3 |
+
"title": "Meta Navigator: Search for a Good Adaptation Policy for Few-Shot Learning",
|
| 4 |
+
"track": "main",
|
| 5 |
+
"author": "Chi Zhang; Henghui Ding; Guosheng Lin; Ruibo Li; Changhu Wang; Chunhua Shen",
|
| 6 |
+
"pdf": "https://openaccess.thecvf.com/content/ICCV2021/papers/Zhang_Meta_Navigator_Search_for_a_Good_Adaptation_Policy_for_Few-Shot_ICCV_2021_paper.pdf",
|
| 7 |
+
"keyword": "",
|
| 8 |
+
"abstract": "Few-shot learning aims to adapt knowledge learned from previous tasks to novel tasks with only a limited amount of labeled data. Research literature on few-shot learning exhibits great diversity, while different algorithms often excel at different few-shot learning scenarios. It is therefore tricky to decide which learning strategies to use under different task conditions. Inspired by the recent success in Automated Machine Learning literature (AutoML), in this paper, we present Meta Navigator, a framework that attempts to solve the aforementioned limitation in few-shot learning by seeking a higher-level strategy and proffer to automate the selection from various few-shot learning designs. The goal of our work is to search for good parameter adaptation policies that are applied to different stages in the network for few-shot classification. We present a search space that covers many popular few-shot learning algorithms in the literature and develop a differentiable searching and decoding algorithm based on meta-learning that supports gradient-based optimization. We demonstrate the effectiveness of our searching-based method on multiple benchmark datasets. Extensive experiments show that our approach significantly outperforms baselines and demonstrates performance advantages over many state-of-the-art methods. Code and models will be made publicly available.",
|
| 9 |
+
"conference": {
|
| 10 |
+
"name": "ICCV",
|
| 11 |
+
"year": 2021
|
| 12 |
+
},
|
| 13 |
+
"template": null,
|
| 14 |
+
"category": "01. Deep Learning Architectures and Methods",
|
| 15 |
+
"is_done": true,
|
| 16 |
+
"timestamp": "2025-08-05T17:58:47.236052",
|
| 17 |
+
"rule_paper_possible_url": null,
|
| 18 |
+
"github_base": null,
|
| 19 |
+
"llm_believed_url": null,
|
| 20 |
+
"rule_base_possible_url": null,
|
| 21 |
+
"confirmed_url": null,
|
| 22 |
+
"Internet_fail": null,
|
| 23 |
+
"html_fail": null,
|
| 24 |
+
"citation_data": {
|
| 25 |
+
"original_title": "Meta Navigator: Search for a Good Adaptation Policy for Few-Shot Learning",
|
| 26 |
+
"matched_title": "Meta Navigator: Search for a Good Adaptation Policy for Few-shot Learning",
|
| 27 |
+
"citation_count": 41,
|
| 28 |
+
"similarity": 1.0,
|
| 29 |
+
"source": "semantic_scholar",
|
| 30 |
+
"year": 2021,
|
| 31 |
+
"authors": [
|
| 32 |
+
{
|
| 33 |
+
"authorId": "2115811693",
|
| 34 |
+
"name": "Chi Zhang"
|
| 35 |
+
},
|
| 36 |
+
{
|
| 37 |
+
"authorId": "49441320",
|
| 38 |
+
"name": "Henghui Ding"
|
| 39 |
+
},
|
| 40 |
+
{
|
| 41 |
+
"authorId": "2604251",
|
| 42 |
+
"name": "Guosheng Lin"
|
| 43 |
+
},
|
| 44 |
+
{
|
| 45 |
+
"authorId": "2150924478",
|
| 46 |
+
"name": "Ruibo Li"
|
| 47 |
+
},
|
| 48 |
+
{
|
| 49 |
+
"authorId": "1906061249",
|
| 50 |
+
"name": "Changhu Wang"
|
| 51 |
+
},
|
| 52 |
+
{
|
| 53 |
+
"authorId": "12459603",
|
| 54 |
+
"name": "Chunhua Shen"
|
| 55 |
+
}
|
| 56 |
+
]
|
| 57 |
+
}
|
| 58 |
+
}
|
data_without_website/Mixed-modality_Representation_Learning_and_Pre-training_for_Joint_Table-and-Text_Retrieval_in_OpenQA.json
ADDED
|
@@ -0,0 +1,52 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
{
|
| 2 |
+
"id": "2022.findings-emnlp.303",
|
| 3 |
+
"title": "Mixed-modality Representation Learning and Pre-training for Joint Table-and-Text Retrieval in OpenQA",
|
| 4 |
+
"track": "main",
|
| 5 |
+
"author": "Junjie Huang; Wanjun Zhong; Qian Liu; Ming Gong; Daxin Jiang; Nan Duan",
|
| 6 |
+
"pdf": "https://aclanthology.org/2022.findings-emnlp.303.pdf",
|
| 7 |
+
"keyword": "",
|
| 8 |
+
"abstract": "Retrieving evidences from tabular and textual resources is essential for open-domain question answering (OpenQA), which provides more comprehensive information. However, training an effective dense table-text retriever is difficult due to the challenges of table-text discrepancy and data sparsity problem. To address the above challenges, we introduce an optimized OpenQA Table-Text Retriever (OTTeR) to jointly retrieve tabular and textual evidences. Firstly, we propose to enhance mixed-modality representation learning via two mechanisms: modality-enhanced representation and mixed-modality negative sampling strategy. Secondly, to alleviate data sparsity problem and enhance the general retrieval ability, we conduct retrieval-centric mixed-modality synthetic pre-training. Experimental results demonstrate that OTTeR substantially improves the performance of table-and-text retrieval on the OTT-QA dataset. Comprehensive analyses examine the effectiveness of all the proposed mechanisms. Besides, equipped with OTTeR, our OpenQA system achieves the state-of-the-art result on the downstream QA task, with 10.1% absolute improvement in terms of the exact match over the previous best system.",
|
| 9 |
+
"conference": {
|
| 10 |
+
"name": "EMNLP",
|
| 11 |
+
"year": 2022
|
| 12 |
+
},
|
| 13 |
+
"llm_believed_url": "https://github.com/Jun-jie-Huang/OTTeR",
|
| 14 |
+
"template": null,
|
| 15 |
+
"category": "13. Information Retrieval and Recommender Systems",
|
| 16 |
+
"is_done": true,
|
| 17 |
+
"timestamp": "2025-08-04T03:15:46.561759",
|
| 18 |
+
"citation_data": {
|
| 19 |
+
"original_title": "Mixed-modality Representation Learning and Pre-training for Joint Table-and-Text Retrieval in OpenQA",
|
| 20 |
+
"matched_title": "Mixed-modality Representation Learning and Pre-training for Joint Table-and-Text Retrieval in OpenQA",
|
| 21 |
+
"citation_count": 14,
|
| 22 |
+
"similarity": 1.0,
|
| 23 |
+
"source": "semantic_scholar",
|
| 24 |
+
"year": 2022,
|
| 25 |
+
"authors": [
|
| 26 |
+
{
|
| 27 |
+
"authorId": "145505727",
|
| 28 |
+
"name": "Junjie Huang"
|
| 29 |
+
},
|
| 30 |
+
{
|
| 31 |
+
"authorId": "81970097",
|
| 32 |
+
"name": "Wanjun Zhong"
|
| 33 |
+
},
|
| 34 |
+
{
|
| 35 |
+
"authorId": "2145484051",
|
| 36 |
+
"name": "Qianchu Liu"
|
| 37 |
+
},
|
| 38 |
+
{
|
| 39 |
+
"authorId": "50175330",
|
| 40 |
+
"name": "Ming Gong"
|
| 41 |
+
},
|
| 42 |
+
{
|
| 43 |
+
"authorId": "71790825",
|
| 44 |
+
"name": "Daxin Jiang"
|
| 45 |
+
},
|
| 46 |
+
{
|
| 47 |
+
"authorId": "46429989",
|
| 48 |
+
"name": "Nan Duan"
|
| 49 |
+
}
|
| 50 |
+
]
|
| 51 |
+
}
|
| 52 |
+
}
|
data_without_website/Multi-Granularity_Hand_Action_Detection.json
ADDED
|
@@ -0,0 +1,66 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
{
|
| 2 |
+
"id": "Lk9hIPV5uu",
|
| 3 |
+
"title": "Multi-Granularity Hand Action Detection",
|
| 4 |
+
"track": "main",
|
| 5 |
+
"author": "Ting Zhe;Jing Zhang;Yongqian Li;Yong Luo;Han Hu;Dacheng Tao",
|
| 6 |
+
"pdf": "https://openreview.net/pdf?id=Lk9hIPV5uu",
|
| 7 |
+
"keyword": "",
|
| 8 |
+
"abstract": "Detecting hand actions in videos is crucial for understanding video content and has diverse real-world applications. Existing approaches often focus on whole-body actions or coarse-grained action categories, lacking fine-grained hand-action localization information. To fill this gap, we introduce the FHA-Kitchens (Fine-Grained Hand Actions in Kitchen Scenes) dataset, providing both coarse- and fine-grained hand action categories along with localization annotations",
|
| 9 |
+
"conference": {
|
| 10 |
+
"name": "ACMMM",
|
| 11 |
+
"year": 2024
|
| 12 |
+
},
|
| 13 |
+
"template": null,
|
| 14 |
+
"category": "05. 3D Vision and Computational Graphics",
|
| 15 |
+
"is_done": true,
|
| 16 |
+
"timestamp": "2025-08-05T06:12:57.019080",
|
| 17 |
+
"rule_paper_possible_url": null,
|
| 18 |
+
"github_base": null,
|
| 19 |
+
"llm_believed_url": null,
|
| 20 |
+
"rule_base_possible_url": null,
|
| 21 |
+
"confirmed_url": null,
|
| 22 |
+
"Internet_fail": null,
|
| 23 |
+
"html_fail": null,
|
| 24 |
+
"citation_data": {
|
| 25 |
+
"original_title": "Multi-Granularity Hand Action Detection",
|
| 26 |
+
"matched_title": "Multi-Granularity Hand Action Detection",
|
| 27 |
+
"citation_count": 0,
|
| 28 |
+
"similarity": 1.0,
|
| 29 |
+
"source": "semantic_scholar",
|
| 30 |
+
"year": 2023,
|
| 31 |
+
"authors": [
|
| 32 |
+
{
|
| 33 |
+
"authorId": "103058344",
|
| 34 |
+
"name": "Ting Zhe"
|
| 35 |
+
},
|
| 36 |
+
{
|
| 37 |
+
"authorId": "2220318873",
|
| 38 |
+
"name": "Yongqian Li"
|
| 39 |
+
},
|
| 40 |
+
{
|
| 41 |
+
"authorId": "2155699863",
|
| 42 |
+
"name": "Jing Zhang"
|
| 43 |
+
},
|
| 44 |
+
{
|
| 45 |
+
"authorId": "2150649639",
|
| 46 |
+
"name": "Yong Luo"
|
| 47 |
+
},
|
| 48 |
+
{
|
| 49 |
+
"authorId": "1823518756",
|
| 50 |
+
"name": "Han Hu"
|
| 51 |
+
},
|
| 52 |
+
{
|
| 53 |
+
"authorId": "2064619959",
|
| 54 |
+
"name": "Bo Du"
|
| 55 |
+
},
|
| 56 |
+
{
|
| 57 |
+
"authorId": "2114783695",
|
| 58 |
+
"name": "Yonggang Wen"
|
| 59 |
+
},
|
| 60 |
+
{
|
| 61 |
+
"authorId": "2075330732",
|
| 62 |
+
"name": "Dacheng Tao"
|
| 63 |
+
}
|
| 64 |
+
]
|
| 65 |
+
}
|
| 66 |
+
}
|
data_without_website/Nearly_Optimal_Approximation_of_Matrix_Functions_by_the_Lanczos_Method.json
ADDED
|
@@ -0,0 +1,54 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
{
|
| 2 |
+
"id": "3s8V8QP9XV",
|
| 3 |
+
"title": "Nearly Optimal Approximation of Matrix Functions by the Lanczos Method",
|
| 4 |
+
"track": "main",
|
| 5 |
+
"author": "Noah Amsel;Tyler Chen;Anne Greenbaum;Cameron N Musco;Christopher Musco",
|
| 6 |
+
"pdf": "https://openreview.net/pdf?id=3s8V8QP9XV",
|
| 7 |
+
"keyword": "",
|
| 8 |
+
"abstract": "Approximating the action of a matrix function $f(\\vec{A})$ on a vector $\\vec{b}$ is an increasingly important primitive in machine learning, data science, and statistics, with applications such as sampling high dimensional Gaussians, Gaussian process regression and Bayesian inference, principle component analysis, and approximating Hessian spectral densities.\nOver the past decade, a number of algorithms enjoying strong theoretical guarantees have been proposed for this task.\nMany of the most successful belong to a family of algorithms called Krylov subspace methods.\nRemarkably, a classic Krylov subspace method, called the Lanczos method for matrix functions (Lanczos-FA), frequently outperforms newer methods in practice. Our main result is a theoretical justification for this finding: we show that, for a natural class of rational functions, Lanczos-FA matches the error of the best possible Krylov subspace method up to a multiplicative approximation factor. \nThe approximation factor depends on the degree of $f(x)$'s denominator and the condition number of $\\vec{A}$, but not on the number of iterations $k$. Our result provides a strong justification for the excellent performance of Lanczos-FA, especially on functions that are well approximated by rationals, such as the matrix square root.",
|
| 9 |
+
"conference": {
|
| 10 |
+
"name": "NIPS",
|
| 11 |
+
"year": 2024
|
| 12 |
+
},
|
| 13 |
+
"template": null,
|
| 14 |
+
"category": "03. ML Theory and Optimization",
|
| 15 |
+
"is_done": true,
|
| 16 |
+
"timestamp": "2025-08-05T18:41:48.103056",
|
| 17 |
+
"rule_paper_possible_url": null,
|
| 18 |
+
"github_base": null,
|
| 19 |
+
"llm_believed_url": null,
|
| 20 |
+
"rule_base_possible_url": null,
|
| 21 |
+
"confirmed_url": null,
|
| 22 |
+
"Internet_fail": null,
|
| 23 |
+
"html_fail": null,
|
| 24 |
+
"citation_data": {
|
| 25 |
+
"original_title": "Nearly Optimal Approximation of Matrix Functions by the Lanczos Method",
|
| 26 |
+
"matched_title": "Nearly Optimal Approximation of Matrix Functions by the Lanczos Method",
|
| 27 |
+
"citation_count": 8,
|
| 28 |
+
"similarity": 1.0,
|
| 29 |
+
"source": "semantic_scholar",
|
| 30 |
+
"year": 2023,
|
| 31 |
+
"authors": [
|
| 32 |
+
{
|
| 33 |
+
"authorId": "81087552",
|
| 34 |
+
"name": "Noah Amsel"
|
| 35 |
+
},
|
| 36 |
+
{
|
| 37 |
+
"authorId": "31181303",
|
| 38 |
+
"name": "Tyler Chen"
|
| 39 |
+
},
|
| 40 |
+
{
|
| 41 |
+
"authorId": "36367121",
|
| 42 |
+
"name": "A. Greenbaum"
|
| 43 |
+
},
|
| 44 |
+
{
|
| 45 |
+
"authorId": "2032038",
|
| 46 |
+
"name": "Cameron Musco"
|
| 47 |
+
},
|
| 48 |
+
{
|
| 49 |
+
"authorId": "2767340",
|
| 50 |
+
"name": "Christopher Musco"
|
| 51 |
+
}
|
| 52 |
+
]
|
| 53 |
+
}
|
| 54 |
+
}
|
data_without_website/Neural_Collapse_meets_Differential_Privacy__Curious_behaviors_of_NoisyGD_with_Near-Perfect_Representation_Learning.json
ADDED
|
@@ -0,0 +1,50 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
{
|
| 2 |
+
"id": "7rrN6E4KU0",
|
| 3 |
+
"title": "Neural Collapse meets Differential Privacy: Curious behaviors of NoisyGD with Near-Perfect Representation Learning",
|
| 4 |
+
"track": "main",
|
| 5 |
+
"author": "Chendi Wang;Yuqing Zhu;Weijie J Su;Yu-Xiang Wang",
|
| 6 |
+
"pdf": "https://openreview.net/pdf?id=7rrN6E4KU0",
|
| 7 |
+
"keyword": "",
|
| 8 |
+
"abstract": "A recent study by De et al. (2022) shows that large-scale representation learning through pre-training on a public dataset significantly enhances differentially private (DP) learning in downstream tasks. To explain this, we consider a layer-peeled model in representation learning, resulting in Neural Collapse (NC) phenomena. Within NC, we establish that the misclassification error is independent of dimension when the distance between actual and ideal features is below a threshold. We empirically evaluate feature quality in the last layer under different pre-trained models, showing that a more powerful pre-trained model improves feature representation. Moreover, we show that DP fine-tuning is less robust compared to non-DP fine-tuning, especially with perturbations. Supported by theoretical analyses and experiments, we suggest strategies like feature normalization and dimension reduction methods such as PCA to enhance DP fine-tuning robustness. Conducting PCA on last-layer features significantly improves testing accuracy.",
|
| 9 |
+
"conference": {
|
| 10 |
+
"name": "ICML",
|
| 11 |
+
"year": 2024
|
| 12 |
+
},
|
| 13 |
+
"template": null,
|
| 14 |
+
"category": "03. ML Theory and Optimization",
|
| 15 |
+
"is_done": true,
|
| 16 |
+
"timestamp": "2025-08-05T06:42:07.132274",
|
| 17 |
+
"rule_paper_possible_url": null,
|
| 18 |
+
"github_base": null,
|
| 19 |
+
"llm_believed_url": null,
|
| 20 |
+
"rule_base_possible_url": null,
|
| 21 |
+
"confirmed_url": null,
|
| 22 |
+
"Internet_fail": null,
|
| 23 |
+
"html_fail": null,
|
| 24 |
+
"citation_data": {
|
| 25 |
+
"original_title": "Neural Collapse meets Differential Privacy: Curious behaviors of NoisyGD with Near-Perfect Representation Learning",
|
| 26 |
+
"matched_title": "Neural Collapse Meets Differential Privacy: Curious Behaviors of NoisyGD with Near-perfect Representation Learning",
|
| 27 |
+
"citation_count": 5,
|
| 28 |
+
"similarity": 1.0,
|
| 29 |
+
"source": "semantic_scholar",
|
| 30 |
+
"year": 2024,
|
| 31 |
+
"authors": [
|
| 32 |
+
{
|
| 33 |
+
"authorId": "2263944757",
|
| 34 |
+
"name": "Chendi Wang"
|
| 35 |
+
},
|
| 36 |
+
{
|
| 37 |
+
"authorId": "2144322624",
|
| 38 |
+
"name": "Yuqing Zhu"
|
| 39 |
+
},
|
| 40 |
+
{
|
| 41 |
+
"authorId": "2282875732",
|
| 42 |
+
"name": "Weijie J. Su"
|
| 43 |
+
},
|
| 44 |
+
{
|
| 45 |
+
"authorId": "2288364999",
|
| 46 |
+
"name": "Yu-Xiang Wang"
|
| 47 |
+
}
|
| 48 |
+
]
|
| 49 |
+
}
|
| 50 |
+
}
|
data_without_website/No-Regret_and_Incentive-Compatible_Combinatorial_Online_Prediction.json
ADDED
|
@@ -0,0 +1,27 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
{
|
| 2 |
+
"id": "liSixK3eY4",
|
| 3 |
+
"title": "No-Regret and Incentive-Compatible Combinatorial Online Prediction",
|
| 4 |
+
"track": "main",
|
| 5 |
+
"author": "Jing Dong;Yaoliang Yu;Baoxiang Wang",
|
| 6 |
+
"pdf": "https://openreview.net/pdf?id=liSixK3eY4",
|
| 7 |
+
"keyword": "",
|
| 8 |
+
"abstract": "We study the combinatorial online learning prediction problem with bandit feedback in a strategic setting, where the experts can strategically influence the learning algorithm’s predictions by manipulating their beliefs about a sequence of binary events. There are two learning objectives for the algorithm. The first is maximizing its cumulative utility over a fixed time horizon, equivalent to minimizing regret. The second objective is to ensure incentive compatibility, guaranteeing that each expert's optimal strategy is to report their true beliefs about the outcomes of each event. In real applications, the learning algorithm only receives the utility corresponding to their chosen experts, which is referred to as the full-bandit setting. In this work, we present an algorithm based on mirror descent, which achieves a regret of $O(T^{3/4})$ under both the full-bandit or semi-bandit feedback model, while ensuring incentive compatibility. To our best knowledge, this is the first algorithm that can simultaneously achieve sublinear regret and incentive compatibility. To demonstrate the effectiveness of our algorithm, we conduct extensive empirical evaluation with the algorithm on a synthetic dataset.",
|
| 9 |
+
"conference": {
|
| 10 |
+
"name": "ICLR",
|
| 11 |
+
"year": 2025
|
| 12 |
+
},
|
| 13 |
+
"template": null,
|
| 14 |
+
"category": "02. Reinforcement Learning and Control",
|
| 15 |
+
"is_done": true,
|
| 16 |
+
"timestamp": "2025-08-05T04:03:31.775181",
|
| 17 |
+
"rule_paper_possible_url": null,
|
| 18 |
+
"github_base": null,
|
| 19 |
+
"llm_believed_url": null,
|
| 20 |
+
"rule_base_possible_url": null,
|
| 21 |
+
"confirmed_url": null,
|
| 22 |
+
"Internet_fail": null,
|
| 23 |
+
"html_fail": null,
|
| 24 |
+
"citation_data": {
|
| 25 |
+
"citation_count": 0
|
| 26 |
+
}
|
| 27 |
+
}
|
data_without_website/Non-Smooth_Weakly-Convex_Finite-sum_Coupled_Compositional_Optimization.json
ADDED
|
@@ -0,0 +1,46 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
{
|
| 2 |
+
"id": "pLOWV1UGF6",
|
| 3 |
+
"title": "Non-Smooth Weakly-Convex Finite-sum Coupled Compositional Optimization",
|
| 4 |
+
"track": "main",
|
| 5 |
+
"author": "Quanqi Hu;Dixian Zhu;Tianbao Yang",
|
| 6 |
+
"pdf": "https://openreview.net/pdf?id=pLOWV1UGF6",
|
| 7 |
+
"keyword": "",
|
| 8 |
+
"abstract": "This paper investigates new families of compositional optimization problems, called non-smooth weakly-convex finite-sum coupled compositional optimization (NSWC FCCO). There has been a growing interest in FCCO due to its wide-ranging applications in machine learning and AI, as well as its ability to address the shortcomings of stochastic algorithms based on empirical risk minimization. However, current research on FCCO presumes that both the inner and outer functions are smooth, limiting their potential to tackle a more diverse set of problems. Our research expands on this area by examining non-smooth weakly-convex FCCO, where the outer function is weakly convex and non-decreasing, and the inner function is weakly-convex. We analyze a single-loop algorithm and establish its complexity for finding an $\\epsilon$-stationary point of the Moreau envelop of the objective function. Additionally, we also extend the algorithm for solving novel non-smooth weakly-convex tri-level finite-sum coupled compositional optimization problems, which feature a nested arrangement of three functions. Lastly, we explore the applications of our algorithms in deep learning for two-way partial AUC maximization and multi-instance two-way partial AUC maximization, using empirical studies to showcase the effectiveness of the proposed algorithms.",
|
| 9 |
+
"conference": {
|
| 10 |
+
"name": "NIPS",
|
| 11 |
+
"year": 2023
|
| 12 |
+
},
|
| 13 |
+
"template": null,
|
| 14 |
+
"category": "03. ML Theory and Optimization",
|
| 15 |
+
"is_done": true,
|
| 16 |
+
"timestamp": "2025-08-04T09:24:48.927394",
|
| 17 |
+
"rule_paper_possible_url": null,
|
| 18 |
+
"github_base": null,
|
| 19 |
+
"llm_believed_url": null,
|
| 20 |
+
"rule_base_possible_url": null,
|
| 21 |
+
"confirmed_url": null,
|
| 22 |
+
"Internet_fail": null,
|
| 23 |
+
"html_fail": null,
|
| 24 |
+
"citation_data": {
|
| 25 |
+
"original_title": "Non-Smooth Weakly-Convex Finite-sum Coupled Compositional Optimization",
|
| 26 |
+
"matched_title": "Non-Smooth Weakly-Convex Finite-sum Coupled Compositional Optimization",
|
| 27 |
+
"citation_count": 11,
|
| 28 |
+
"similarity": 1.0,
|
| 29 |
+
"source": "semantic_scholar",
|
| 30 |
+
"year": 2023,
|
| 31 |
+
"authors": [
|
| 32 |
+
{
|
| 33 |
+
"authorId": "2156351438",
|
| 34 |
+
"name": "Quanqi Hu"
|
| 35 |
+
},
|
| 36 |
+
{
|
| 37 |
+
"authorId": "3417087",
|
| 38 |
+
"name": "Dixian Zhu"
|
| 39 |
+
},
|
| 40 |
+
{
|
| 41 |
+
"authorId": "2255476035",
|
| 42 |
+
"name": "Tianbao Yang"
|
| 43 |
+
}
|
| 44 |
+
]
|
| 45 |
+
}
|
| 46 |
+
}
|
data_without_website/Online_false_discovery_rate_control_for_anomaly_detection_in_time_series.json
ADDED
|
@@ -0,0 +1,50 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
{
|
| 2 |
+
"id": "NvN_B_ZEY5c",
|
| 3 |
+
"title": "Online false discovery rate control for anomaly detection in time series",
|
| 4 |
+
"track": "main",
|
| 5 |
+
"author": "Quentin Rebjock;Baris Kurt;Tim Januschowski;Laurent Callot",
|
| 6 |
+
"pdf": "https://openreview.net/pdf?id=NvN_B_ZEY5c",
|
| 7 |
+
"keyword": "",
|
| 8 |
+
"abstract": "This article proposes novel rules for false discovery rate control (FDRC) geared towards online anomaly detection in time series. Online FDRC rules allow to control the properties of a sequence of statistical tests. In the context of anomaly detection, the null hypothesis is that an observation is normal and the alternative is that it is anomalous. FDRC rules allow users to target a lower bound on precision in unsupervised settings. The methods proposed in this article overcome short-comings of previous FDRC rules in the context of anomaly detection, in particular ensuring that power remains high even when the alternative is exceedingly rare (typical in anomaly detection) and the test statistics are serially dependent (typical in time series). We show the soundness of these rules in both theory and experiments.",
|
| 9 |
+
"conference": {
|
| 10 |
+
"name": "NIPS",
|
| 11 |
+
"year": 2021
|
| 12 |
+
},
|
| 13 |
+
"template": null,
|
| 14 |
+
"category": "03. ML Theory and Optimization",
|
| 15 |
+
"is_done": true,
|
| 16 |
+
"timestamp": "2025-08-04T04:25:34.722816",
|
| 17 |
+
"rule_paper_possible_url": null,
|
| 18 |
+
"github_base": null,
|
| 19 |
+
"llm_believed_url": null,
|
| 20 |
+
"rule_base_possible_url": null,
|
| 21 |
+
"confirmed_url": null,
|
| 22 |
+
"Internet_fail": null,
|
| 23 |
+
"html_fail": null,
|
| 24 |
+
"citation_data": {
|
| 25 |
+
"original_title": "Online false discovery rate control for anomaly detection in time series",
|
| 26 |
+
"matched_title": "Online false discovery rate control for anomaly detection in time series",
|
| 27 |
+
"citation_count": 14,
|
| 28 |
+
"similarity": 1.0,
|
| 29 |
+
"source": "semantic_scholar",
|
| 30 |
+
"year": 2021,
|
| 31 |
+
"authors": [
|
| 32 |
+
{
|
| 33 |
+
"authorId": "66792975",
|
| 34 |
+
"name": "Quentin Rebjock"
|
| 35 |
+
},
|
| 36 |
+
{
|
| 37 |
+
"authorId": "2143214980",
|
| 38 |
+
"name": "Baris Kurt"
|
| 39 |
+
},
|
| 40 |
+
{
|
| 41 |
+
"authorId": "2166235",
|
| 42 |
+
"name": "Tim Januschowski"
|
| 43 |
+
},
|
| 44 |
+
{
|
| 45 |
+
"authorId": "11788397",
|
| 46 |
+
"name": "Laurent Callot"
|
| 47 |
+
}
|
| 48 |
+
]
|
| 49 |
+
}
|
| 50 |
+
}
|
data_without_website/OpenCoS__Contrastive_Semi-supervised_Learning_for_Handling_Open-set_Unlabeled_Data.json
ADDED
|
@@ -0,0 +1,50 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
{
|
| 2 |
+
"id": "lJgbDxGhJ4r",
|
| 3 |
+
"title": "OpenCoS: Contrastive Semi-supervised Learning for Handling Open-set Unlabeled Data",
|
| 4 |
+
"track": "main",
|
| 5 |
+
"author": "Jongjin Park;Sukmin Yun;Jongheon Jeong;Jinwoo Shin",
|
| 6 |
+
"pdf": "https://openreview.net/pdf?id=lJgbDxGhJ4r",
|
| 7 |
+
"keyword": "",
|
| 8 |
+
"abstract": "Modern semi-supervised learning methods conventionally assume both labeled and unlabeled data have the same class distribution. However, unlabeled data may include out-of-class samples in practice; those that cannot have one-hot encoded labels from a closed-set of classes in label data, i.e., unlabeled data is an open-set. In this paper, we introduce OpenCoS, a method for handling this realistic semi-supervised learning scenario based on a recent framework of contrastive learning. One of our key findings is that out-of-class samples in the unlabeled dataset can be identified effectively via (unsupervised) contrastive learning. OpenCoS utilizes this information to overcome the failure modes in the existing state-of-the-art semi-supervised methods, e.g., ReMixMatch or FixMatch. In particular, we propose to assign soft-labels for out-of-class samples using the representation learned from contrastive learning. Our extensive experimental results show the effectiveness of OpenCoS, fixing the state-of-the-art semi-supervised methods to be suitable for diverse scenarios involving open-set unlabeled data.",
|
| 9 |
+
"conference": {
|
| 10 |
+
"name": "ICLR",
|
| 11 |
+
"year": 2021
|
| 12 |
+
},
|
| 13 |
+
"template": null,
|
| 14 |
+
"category": "01. Deep Learning Architectures and Methods",
|
| 15 |
+
"is_done": true,
|
| 16 |
+
"timestamp": "2025-08-04T09:32:09.750610",
|
| 17 |
+
"rule_paper_possible_url": null,
|
| 18 |
+
"github_base": null,
|
| 19 |
+
"llm_believed_url": null,
|
| 20 |
+
"rule_base_possible_url": null,
|
| 21 |
+
"confirmed_url": null,
|
| 22 |
+
"Internet_fail": null,
|
| 23 |
+
"html_fail": null,
|
| 24 |
+
"citation_data": {
|
| 25 |
+
"original_title": "OpenCoS: Contrastive Semi-supervised Learning for Handling Open-set Unlabeled Data",
|
| 26 |
+
"matched_title": "OpenCoS: Contrastive Semi-supervised Learning for Handling Open-set Unlabeled Data",
|
| 27 |
+
"citation_count": 30,
|
| 28 |
+
"similarity": 1.0,
|
| 29 |
+
"source": "semantic_scholar",
|
| 30 |
+
"year": 2021,
|
| 31 |
+
"authors": [
|
| 32 |
+
{
|
| 33 |
+
"authorId": "2109073979",
|
| 34 |
+
"name": "Jongjin Park"
|
| 35 |
+
},
|
| 36 |
+
{
|
| 37 |
+
"authorId": "66863443",
|
| 38 |
+
"name": "Sukmin Yun"
|
| 39 |
+
},
|
| 40 |
+
{
|
| 41 |
+
"authorId": "83125078",
|
| 42 |
+
"name": "Jongheon Jeong"
|
| 43 |
+
},
|
| 44 |
+
{
|
| 45 |
+
"authorId": "143720148",
|
| 46 |
+
"name": "Jinwoo Shin"
|
| 47 |
+
}
|
| 48 |
+
]
|
| 49 |
+
}
|
| 50 |
+
}
|
data_without_website/Optimal_Rates_for_Random_Order_Online_Optimization.json
ADDED
|
@@ -0,0 +1,46 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
{
|
| 2 |
+
"id": "dfyjet3BMKA",
|
| 3 |
+
"title": "Optimal Rates for Random Order Online Optimization",
|
| 4 |
+
"track": "main",
|
| 5 |
+
"author": "Uri Sherman;Tomer Koren;Yishay Mansour",
|
| 6 |
+
"pdf": "https://openreview.net/pdf?id=dfyjet3BMKA",
|
| 7 |
+
"keyword": "",
|
| 8 |
+
"abstract": "We study online convex optimization in the random order model, recently proposed by Garber et al. (2020), where the loss functions may be chosen by an adversary, but are then presented to the online algorithm in a uniformly random order. Focusing on the scenario where the cumulative loss function is (strongly) convex, yet individual loss functions are smooth but might be non-convex, we give algorithms that achieve the optimal bounds and significantly outperform the results of Garber et al. (2020), completely removing the dimension dependence and improve their scaling with respect to the strong convexity parameter. Our analysis relies on novel connections between algorithmic stability and generalization for sampling without-replacement analogous to those studied in the with-replacement i.i.d. setting, as well as on a refined average stability analysis of stochastic gradient descent.",
|
| 9 |
+
"conference": {
|
| 10 |
+
"name": "NIPS",
|
| 11 |
+
"year": 2021
|
| 12 |
+
},
|
| 13 |
+
"template": null,
|
| 14 |
+
"category": "03. ML Theory and Optimization",
|
| 15 |
+
"is_done": true,
|
| 16 |
+
"timestamp": "2025-08-04T04:03:10.143481",
|
| 17 |
+
"rule_paper_possible_url": null,
|
| 18 |
+
"github_base": null,
|
| 19 |
+
"llm_believed_url": null,
|
| 20 |
+
"rule_base_possible_url": null,
|
| 21 |
+
"confirmed_url": null,
|
| 22 |
+
"Internet_fail": null,
|
| 23 |
+
"html_fail": null,
|
| 24 |
+
"citation_data": {
|
| 25 |
+
"original_title": "Optimal Rates for Random Order Online Optimization",
|
| 26 |
+
"matched_title": "Optimal Rates for Random Order Online Optimization",
|
| 27 |
+
"citation_count": 8,
|
| 28 |
+
"similarity": 1.0,
|
| 29 |
+
"source": "semantic_scholar",
|
| 30 |
+
"year": 2021,
|
| 31 |
+
"authors": [
|
| 32 |
+
{
|
| 33 |
+
"authorId": "2092846929",
|
| 34 |
+
"name": "Uri Sherman"
|
| 35 |
+
},
|
| 36 |
+
{
|
| 37 |
+
"authorId": "1711492",
|
| 38 |
+
"name": "Tomer Koren"
|
| 39 |
+
},
|
| 40 |
+
{
|
| 41 |
+
"authorId": "144830983",
|
| 42 |
+
"name": "Y. Mansour"
|
| 43 |
+
}
|
| 44 |
+
]
|
| 45 |
+
}
|
| 46 |
+
}
|
data_without_website/Optimal_transport-based_conformal_prediction.json
ADDED
|
@@ -0,0 +1,46 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
{
|
| 2 |
+
"id": "kEAyffH3tn",
|
| 3 |
+
"title": "Optimal transport-based conformal prediction",
|
| 4 |
+
"track": "main",
|
| 5 |
+
"author": "Gauthier Thurin;Kimia Nadjahi;Claire Boyer",
|
| 6 |
+
"pdf": "https://openreview.net/pdf?id=kEAyffH3tn",
|
| 7 |
+
"keyword": "",
|
| 8 |
+
"abstract": "Conformal Prediction (CP) is a principled framework for quantifying uncertainty in black-box learning models, by constructing prediction sets with finite-sample coverage guarantees. Traditional approaches rely on scalar nonconformity scores, which fail to fully exploit the geometric structure of multivariate outputs, such as in multi-output regression or multiclass classification. Recent methods addressing this limitation impose predefined convex shapes for the prediction sets, potentially misaligning with the intrinsic data geometry. We introduce a novel CP procedure handling multivariate score functions through the lens of optimal transport. Specifically, we leverage Monge-Kantorovich vector ranks and quantiles to construct prediction region with flexible, potentially non-convex shapes, better suited to the complex uncertainty patterns encountered in multivariate learning tasks. We prove that our approach ensures finite-sample, distribution-free coverage properties, similar to typical CP methods. We then adapt our method for multi-output regression and multiclass classification, and also propose simple adjustments to generate adaptive prediction regions with asymptotic conditional coverage guarantees. Finally, we evaluate our method on practical regression and classification problems, illustrating its advantages in terms of (conditional) coverage and efficiency.",
|
| 9 |
+
"conference": {
|
| 10 |
+
"name": "ICML",
|
| 11 |
+
"year": 2025
|
| 12 |
+
},
|
| 13 |
+
"template": null,
|
| 14 |
+
"category": "04. Probabilistic Methods and Causal Inference",
|
| 15 |
+
"is_done": true,
|
| 16 |
+
"timestamp": "2025-08-05T11:54:18.711792",
|
| 17 |
+
"rule_paper_possible_url": null,
|
| 18 |
+
"github_base": null,
|
| 19 |
+
"llm_believed_url": null,
|
| 20 |
+
"rule_base_possible_url": null,
|
| 21 |
+
"confirmed_url": null,
|
| 22 |
+
"Internet_fail": null,
|
| 23 |
+
"html_fail": null,
|
| 24 |
+
"citation_data": {
|
| 25 |
+
"original_title": "Optimal transport-based conformal prediction",
|
| 26 |
+
"matched_title": "Optimal Transport-based Conformal Prediction",
|
| 27 |
+
"citation_count": 5,
|
| 28 |
+
"similarity": 1.0,
|
| 29 |
+
"source": "semantic_scholar",
|
| 30 |
+
"year": 2025,
|
| 31 |
+
"authors": [
|
| 32 |
+
{
|
| 33 |
+
"authorId": "2203912656",
|
| 34 |
+
"name": "Gauthier Thurin"
|
| 35 |
+
},
|
| 36 |
+
{
|
| 37 |
+
"authorId": "66764332",
|
| 38 |
+
"name": "Kimia Nadjahi"
|
| 39 |
+
},
|
| 40 |
+
{
|
| 41 |
+
"authorId": "2343505598",
|
| 42 |
+
"name": "Claire Boyer"
|
| 43 |
+
}
|
| 44 |
+
]
|
| 45 |
+
}
|
| 46 |
+
}
|