FrancisChen1 committed on
Commit 3a1f3a0 · verified · 1 Parent(s): 0c26a65

Add files using upload-large-folder tool

Files changed (50)
  1. data_without_website/A_Diffusion_Model_for_Event_Skeleton_Generation.json +59 -0
  2. data_without_website/A_Score-Based_Model_for_Learning_Neural_Wavefunctions.json +46 -0
  3. data_without_website/Advances_in_Black-Box_VI__Normalizing_Flows,_Importance_Weighting,_and_Optimization.json +46 -0
  4. data_without_website/An_Inner_Table_Retriever_for_Robust_Table_Question_Answering.json +50 -0
  5. data_without_website/Asynchronous_Modeling__A_Dual-phase_Perspective_for_Long-Tailed_Recognition.json +27 -0
  6. data_without_website/Bayesian_Nonparametric_Learning_for_Point_Processes_with_Spatial_Homogeneity__A_Spatial_Analysis_of_NBA_Shot_Locations.json +50 -0
  7. data_without_website/Better_Safe_Than_Sorry__Preventing_Delusive_Adversaries_with_Adversarial_Training.json +54 -0
  8. data_without_website/Can_Large_Language_Models_Infer_Causation_from_Correlation_.json +71 -0
  9. data_without_website/Categorical_Distributional_Reinforcement_Learning_with_Kullback-Leibler_Divergence__Convergence_and_Asymptotics.json +27 -0
  10. data_without_website/CharDiff__Improving_Sampling_Convergence_via_Characteristic_Function_Consistency_in_Diffusion_Models.json +42 -0
  11. data_without_website/Contrastive_and_View-Interaction_Structure_Learning_for_Multi-view_Clustering.json +42 -0
  12. data_without_website/Coresets_for_Multiple__ell_p_Regression.json +42 -0
  13. data_without_website/Density-Preserving_Deep_Point_Cloud_Compression.json +58 -0
  14. data_without_website/Detecting_Camouflaged_Object_in_Frequency_Domain.json +58 -0
  15. data_without_website/Distribution-Free_Model-Agnostic_Regression_Calibration_via_Nonparametric_Methods.json +46 -0
  16. data_without_website/Dual-Phase_Accelerated_Prompt_Optimization.json +66 -0
  17. data_without_website/Empowering_Unsupervised_Domain_Adaptation_With_Large-Scale_Pre-Trained_Vision-Language_Models.json +66 -0
  18. data_without_website/Finite_Smoothing_Algorithm_for_High-Dimensional_Support_Vector_Machines_and_Quantile_Regression.json +46 -0
  19. data_without_website/Good_Examples_Make_A_Faster_Learner__Simple_Demonstration-based_Learning_for_Low-resource_NER.json +53 -0
  20. data_without_website/Hallucination_Detox__Sensitive_Neuron_Dropout_(SeND)_for_Large_Language_Model_Training.json +54 -0
  21. data_without_website/Learning_Augmented_Binary_Search_Trees.json +46 -0
  22. data_without_website/Learning_Image_Harmonization_in_the_Linear_Color_Space.json +46 -0
  23. data_without_website/Learning_a_Neural_Solver_for_Multiple_Object_Tracking.json +42 -0
  24. data_without_website/Learning_by_Causality_to_Improve_Channel_Dependency_Modeling_in_Multivariate_Time_Series_Forecasting.json +27 -0
  25. data_without_website/LoRA-One__One-Step_Full_Gradient_Could_Suffice_for_Fine-Tuning_Large_Language_Models,_Provably_and_Efficiently.json +46 -0
  26. data_without_website/MEDIC__Model_Backdoor_Removal_by_Importance_Driven_Cloning.json +62 -0
  27. data_without_website/MFTraj__Map-Free,_Behavior-Driven_Trajectory_Prediction_for_Autonomous_Driving.json +66 -0
  28. data_without_website/Marginalised_Gaussian_Processes_with_Nested_Sampling.json +46 -0
  29. data_without_website/Mechanism_Design_for_Facility_Location_Problems__A_Survey.json +54 -0
  30. data_without_website/Model-Driven_Labeled_Data_Free_Fine-tuning.json +70 -0
  31. data_without_website/Nearly-tight_Bounds_for_Deep_Kernel_Learning.json +42 -0
  32. data_without_website/Neural_Collage_Transfer__Artistic_Reconstruction_via_Material_Manipulation.json +53 -0
  33. data_without_website/Neural_Stochastic_Dual_Dynamic_Programming.json +54 -0
  34. data_without_website/On_the_Convergence_to_a_Global_Solution_of_Shuffling-Type_Gradient_Algorithms.json +42 -0
  35. data_without_website/One_Step_at_a_Time__Long-Horizon_Vision-and-Language_Navigation_With_Milestones.json +58 -0
  36. data_without_website/Optimizing_Connectivity_through_Network_Gradients_for_the_Restricted_Machine.json +42 -0
  37. data_without_website/PC-PG__Policy_Cover_Directed_Exploration_for_Provable_Policy_Gradient_Learning.json +50 -0
  38. data_without_website/RVSL__Robust_Vehicle_Similarity_Learning_in_Real_Hazy_Scenes_Based_on_Semi-Supervised_Learning.json +58 -0
  39. data_without_website/Randomized_Dimensionality_Reduction_for_Facility_Location_and_Single-Linkage_Clustering.json +50 -0
  40. data_without_website/Reliable_learning_in_challenging_environments.json +50 -0
  41. data_without_website/Rethinking_Masked_Data_Reconstruction_Pretraining_for_Strong_3D_Action_Representation_Learning.json +50 -0
  42. data_without_website/Revisiting_the_Stack-Based_Inverse_Tone_Mapping.json +50 -0
  43. data_without_website/Sentence-Incremental_Neural_Coreference_Resolution.json +45 -0
  44. data_without_website/Shielded_Diffusion__Generating_Novel_and_Diverse_Images_using_Sparse_Repellency.json +58 -0
  45. data_without_website/Swapping_Autoencoder_for_Deep_Image_Manipulation.json +62 -0
  46. data_without_website/Towards_Efficient_and_Accurate_Identification_of_Memorization_in_Deep_Models.json +27 -0
  47. data_without_website/Transformer_Interpretability_Beyond_Attention_Visualization.json +47 -0
  48. data_without_website/Unified_Interpretation_of_Smoothing_Methods_for_Negative_Sampling_Loss_Functions_in_Knowledge_Graph_Embedding.json +50 -0
  49. data_without_website/Unsupervised-to-Online_Reinforcement_Learning.json +50 -0
  50. data_without_website/Using_Stochastic_Gradient_Descent_to_Smooth_Nonconvex_Functions__Analysis_of_Implicit_Graduated_Optimization.json +42 -0
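
All 50 records share the schema visible in the diffs below: bibliographic fields (id, title, track, author, pdf, abstract, conference), URL-resolution fields (github_base, llm_believed_url, confirmed_url, and variants), and a citation_data block sourced from Semantic Scholar. As a minimal sketch (not part of the commit itself), the records can be read with Python's standard library; the directory path assumes a local checkout of this repo, and the field names are taken from the diffs below:

import json
from pathlib import Path

# Hypothetical local checkout of this repo.
data_dir = Path("data_without_website")

for record_path in sorted(data_dir.glob("*.json")):
    record = json.loads(record_path.read_text(encoding="utf-8"))
    # Field names below appear in every record added by this commit.
    conference = record.get("conference", {})
    citations = record.get("citation_data", {}).get("citation_count", 0)
    print(f'{record["title"]} ({conference.get("name")} {conference.get("year")}): {citations} citations')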
data_without_website/A_Diffusion_Model_for_Event_Skeleton_Generation.json ADDED
@@ -0,0 +1,59 @@
+ {
+ "id": "2023.findings-acl.800",
+ "title": "A Diffusion Model for Event Skeleton Generation",
+ "track": "main",
+ "author": "Fangqi Zhu; Lin Zhang; Jun Gao; Bing Qin; Ruifeng Xu; Haiqin Yang",
+ "pdf": "https://aclanthology.org/2023.findings-acl.800.pdf",
+ "keyword": "",
+ "abstract": "Event skeleton generation, aiming to induce an event schema skeleton graph with abstracted event nodes and their temporal relations from a set of event instance graphs, is a critical step in the temporal complex event schema induction task. Existing methods effectively address this task from a graph generation perspective but suffer from noise-sensitive and error accumulation, e.g., the inability to correct errors while generating schema. We, therefore, propose a novel Diffusion Event Graph Model (DEGM) to address these issues. Our DEGM is the first workable diffusion model for event skeleton generation, where the embedding and rounding techniques with a custom edge-based loss are introduced to transform a discrete event graph into learnable latent representations. Furthermore, we propose a denoising training process to maintain the model’s robustness. Consequently, DEGM derives the final schema, where error correction is guaranteed by iteratively refining the latent representations during the schema generation process. Experimental results on three IED bombing datasets demonstrate that our DEGM achieves better results than other state-of-the-art baselines. Our code and data are available at https://github.com/zhufq00/EventSkeletonGeneration.",
+ "conference": {
+ "name": "ACL",
+ "year": 2023
+ },
+ "Internet_problem": "https://github.com/zhufq00/",
+ "template": null,
+ "category": "07. Generative Model",
+ "is_done": true,
+ "timestamp": "2025-08-05T05:31:16.757817",
+ "rule_paper_possible_url": null,
+ "github_base": null,
+ "llm_believed_url": null,
+ "rule_base_possible_url": null,
+ "confirmed_url": null,
+ "Internet_fail": null,
+ "html_fail": null,
+ "citation_data": {
+ "original_title": "A Diffusion Model for Event Skeleton Generation",
+ "matched_title": "A Diffusion Model for Event Skeleton Generation",
+ "citation_count": 2,
+ "similarity": 1.0,
+ "source": "semantic_scholar",
+ "year": 2023,
+ "authors": [
+ {
+ "authorId": "2075369995",
+ "name": "Fangqi Zhu"
+ },
+ {
+ "authorId": "2143837504",
+ "name": "Lin Zhang"
+ },
+ {
+ "authorId": "2111016840",
+ "name": "Junfeng Gao"
+ },
+ {
+ "authorId": "152277111",
+ "name": "Bing Qin"
+ },
+ {
+ "authorId": "2115804042",
+ "name": "Ruifeng Xu"
+ },
+ {
+ "authorId": "2118697460",
+ "name": "Haiqing Yang"
+ }
+ ]
+ }
+ }
data_without_website/A_Score-Based_Model_for_Learning_Neural_Wavefunctions.json ADDED
@@ -0,0 +1,46 @@
+ {
+ "id": "rMQ1Wme3S0c",
+ "title": "A Score-Based Model for Learning Neural Wavefunctions",
+ "track": "main",
+ "author": "Xuan Zhang;Shenglong Xu;Shuiwang Ji",
+ "pdf": "https://openreview.net/pdf?id=rMQ1Wme3S0c",
+ "keyword": "",
+ "abstract": "Quantum Monte Carlo coupled with neural network wavefunctions has shown success in finding the ground state of quantum many-body systems. The existing optimization approaches compute the energy by sampling local energy from an explicit probability distribution given by the wavefunction. In this work, we provide a new optimization framework for obtaining properties of quantum many-body ground state using score-based neural networks. This new framework does not require explicit probability distribution and performs the sampling via Langevin dynamics. Our method is based on the key observation that the local energy is directly related to the score, defined as the gradient of the logarithmic wavefunction. Inspired by the score matching and the diffusion Monte Carlo methods, we derive a weighted score matching objective, which guides our score-based models to correctly converge to the ground state. We first validate our approach with experiments on quantum harmonic traps, and further results show that it can accurately learn the ground states of atomic systems. By implicitly modeling the high-dimensional data distribution, our work paves the way toward a more efficient representation of quantum systems.",
+ "conference": {
+ "name": "ICLR",
+ "year": 2023
+ },
+ "template": null,
+ "category": "11. AI for Science",
+ "is_done": true,
+ "timestamp": "2025-08-04T04:39:42.769007",
+ "rule_paper_possible_url": null,
+ "github_base": null,
+ "llm_believed_url": null,
+ "rule_base_possible_url": null,
+ "confirmed_url": null,
+ "Internet_fail": null,
+ "html_fail": null,
+ "citation_data": {
+ "original_title": "A Score-Based Model for Learning Neural Wavefunctions",
+ "matched_title": "A Score-Based Model for Learning Neural Wavefunctions",
+ "citation_count": 1,
+ "similarity": 1.0,
+ "source": "semantic_scholar",
+ "year": 2023,
+ "authors": [
+ {
+ "authorId": "2108231795",
+ "name": "Xuan Zhang"
+ },
+ {
+ "authorId": "50433504",
+ "name": "Shenglong Xu"
+ },
+ {
+ "authorId": "1743600",
+ "name": "Shuiwang Ji"
+ }
+ ]
+ }
+ }
data_without_website/Advances_in_Black-Box_VI__Normalizing_Flows,_Importance_Weighting,_and_Optimization.json ADDED
@@ -0,0 +1,46 @@
+ {
+ "id": "17349",
+ "title": "Advances in Black-Box VI: Normalizing Flows, Importance Weighting, and Optimization",
+ "track": "main",
+ "author": "Abhinav Agrawal; Daniel R. Sheldon; Justin Domke",
+ "pdf": "https://papers.nips.cc/paper_files/paper/2020/file/c91e3483cf4f90057d02aa492d2b25b1-Paper.pdf",
+ "keyword": "",
+ "abstract": "Recent research has seen several advances relevant to black-box VI, but the current state of automatic posterior inference is unclear. One such advance is the use of normalizing flows to define flexible posterior densities for deep latent variable models. Another direction is the integration of Monte-Carlo methods to serve two purposes; first, to obtain tighter variational objectives for optimization, and second, to define enriched variational families through sampling. However, both flows and variational Monte-Carlo methods remain relatively unexplored for black-box VI. Moreover, on a pragmatic front, there are several optimization considerations like step-size scheme, parameter initialization, and choice of gradient estimators, for which there are no clear guidance in the existing literature. In this paper, we postulate that black-box VI is best addressed through a careful combination of numerous algorithmic components. We evaluate components relating to optimization, flows, and Monte-Carlo methods on a benchmark of 30 models from the Stan model library. The combination of these algorithmic components significantly advances the state-of-the-art \"out of the box\" variational inference.",
+ "conference": {
+ "name": "NIPS",
+ "year": 2020
+ },
+ "template": null,
+ "category": "04. Probabilistic Methods and Causal Inference",
+ "is_done": true,
+ "timestamp": "2025-08-05T14:14:44.498893",
+ "rule_paper_possible_url": null,
+ "github_base": null,
+ "llm_believed_url": null,
+ "rule_base_possible_url": null,
+ "confirmed_url": null,
+ "Internet_fail": null,
+ "html_fail": null,
+ "citation_data": {
+ "original_title": "Advances in Black-Box VI: Normalizing Flows, Importance Weighting, and Optimization",
+ "matched_title": "Advances in Black-Box VI: Normalizing Flows, Importance Weighting, and Optimization",
+ "citation_count": 41,
+ "similarity": 1.0,
+ "source": "semantic_scholar",
+ "year": 2020,
+ "authors": [
+ {
+ "authorId": "2075308276",
+ "name": "Abhinav Agrawal"
+ },
+ {
+ "authorId": "144799908",
+ "name": "D. Sheldon"
+ },
+ {
+ "authorId": "1722101",
+ "name": "Justin Domke"
+ }
+ ]
+ }
+ }
data_without_website/An_Inner_Table_Retriever_for_Robust_Table_Question_Answering.json ADDED
@@ -0,0 +1,50 @@
+ {
+ "id": "2023.acl-long.551",
+ "title": "An Inner Table Retriever for Robust Table Question Answering",
+ "track": "main",
+ "author": "Weizhe Lin; Rexhina Blloshmi; Bill Byrne; Adria de Gispert; Gonzalo Iglesias",
+ "pdf": "https://aclanthology.org/2023.acl-long.551.pdf",
+ "keyword": "",
+ "abstract": "Recent years have witnessed the thriving of pretrained Transformer-based language models for understanding semi-structured tables, with several applications, such as Table Question Answering (TableQA).These models are typically trained on joint tables and surrounding natural language text, by linearizing table content into sequences comprising special tokens and cell information. This yields very long sequences which increase system inefficiency, and moreover, simply truncating long sequences results in information loss for downstream tasks. We propose Inner Table Retriever (ITR), a general-purpose approach for handling long tables in TableQA that extracts sub-tables to preserve the most relevant information for a question. We show that ITR can be easily integrated into existing systems to improve their accuracy with up to 1.3-4.8% and achieve state-of-the-art results in two benchmarks, i.e., 63.4% in WikiTableQuestions and 92.1% in WikiSQL. Additionally, we show that ITR makes TableQA systems more robust to reduced model capacity and to different ordering of columns and rows. We make our code available at: https://github.com/amazon-science/robust-tableqa.",
+ "conference": {
+ "name": "ACL",
+ "year": 2023
+ },
+ "template": "no template",
+ "github_base": "no github url in pdf and no project_url",
+ "llm_believed_url": "https://github.com/amazon-science/robust-tableqa",
+ "llm_final_url": "https://github.com/amazon-science/robust-tableqa",
+ "rule_final_url": "https://github.com/amazon-science/robust-tableqa",
+ "category": "13. Information Retrieval and Recommender Systems",
+ "is_done": true,
+ "citation_data": {
+ "original_title": "An Inner Table Retriever for Robust Table Question Answering",
+ "matched_title": "An Inner Table Retriever for Robust Table Question Answering",
+ "citation_count": 14,
+ "similarity": 1.0,
+ "source": "semantic_scholar",
+ "year": 2023,
+ "authors": [
+ {
+ "authorId": "1454363000",
+ "name": "Weizhe Lin"
+ },
+ {
+ "authorId": "2221287630",
+ "name": "Rexhina Blloshmi"
+ },
+ {
+ "authorId": "36126076",
+ "name": "B. Byrne"
+ },
+ {
+ "authorId": "1738657356",
+ "name": "A. de Gispert"
+ },
+ {
+ "authorId": "145833974",
+ "name": "Gonzalo Iglesias"
+ }
+ ]
+ }
+ }
data_without_website/Asynchronous_Modeling__A_Dual-phase_Perspective_for_Long-Tailed_Recognition.json ADDED
@@ -0,0 +1,27 @@
+ {
+ "id": "u846Bqhry_",
+ "title": "Asynchronous Modeling: A Dual-phase Perspective for Long-Tailed Recognition",
+ "track": "main",
+ "author": "Hu Zhang;Linchao Zhu;Yi Yang",
+ "pdf": "https://openreview.net/pdf?id=u846Bqhry_",
+ "keyword": "",
+ "abstract": "This work explores deep learning based classification model on real-world datasets with a long-tailed distribution. Most of previous works deal with the long-tailed classification problem by re-balancing the overall distribution within the whole dataset or directly transferring knowledge from data-rich classes to data-poor ones. In this work, we consider the gradient distortion in long-tailed classification when the gradient on data-rich classes and data-poor ones are incorporated simultaneously, i.e., shifted gradient direction towards data-rich classes as well as the enlarged variance by the gradient fluctuation on data-poor classes. Motivated by such phenomenon, we propose to disentangle the distinctive effects of data-rich and data-poor gradient and asynchronously train a model via a dual-phase learning process. The first phase only concerns the data-rich classes. In the second phase, besides the standard classification upon data-poor classes, we propose an exemplar memory bank to reserve representative examples and a memory-retentive loss via graph matching to retain the relation between two phases. The extensive experimental results on four commonly used long-tailed benchmarks including CIFAR100-LT, Places-LT, ImageNet-LT and iNaturalist 2018 highlight the excellent performance of our proposed method.",
+ "conference": {
+ "name": "ICLR",
+ "year": 2021
+ },
+ "template": null,
+ "category": "01. Deep Learning Architectures and Methods",
+ "is_done": true,
+ "timestamp": "2025-08-04T09:07:47.328026",
+ "rule_paper_possible_url": null,
+ "github_base": null,
+ "llm_believed_url": null,
+ "rule_base_possible_url": null,
+ "confirmed_url": null,
+ "Internet_fail": null,
+ "html_fail": null,
+ "citation_data": {
+ "citation_count": 0
+ }
+ }
data_without_website/Bayesian_Nonparametric_Learning_for_Point_Processes_with_Spatial_Homogeneity__A_Spatial_Analysis_of_NBA_Shot_Locations.json ADDED
@@ -0,0 +1,50 @@
+ {
+ "id": "15957",
+ "title": "Bayesian Nonparametric Learning for Point Processes with Spatial Homogeneity: A Spatial Analysis of NBA Shot Locations",
+ "track": "main",
+ "author": "Fan Yin; Jieying Jiao; Jun Yan; Guanyu Hu",
+ "pdf": "https://proceedings.mlr.press/v162/yin22a/yin22a.pdf",
+ "keyword": "",
+ "abstract": "Basketball shot location data provide valuable summary information regarding players to coaches, sports analysts, fans, statisticians, as well as players themselves. Represented by spatial points, such data are naturally analyzed with spatial point process models. We present a novel nonparametric Bayesian method for learning the underlying intensity surface built upon a combination of Dirichlet process and Markov random field. Our method has the advantage of effectively encouraging local spatial homogeneity when estimating a globally heterogeneous intensity surface. Posterior inferences are performed with an efficient Markov chain Monte Carlo (MCMC) algorithm. Simulation studies show that the inferences are accurate and the method is superior compared to a wide range of competing methods. Application to the shot location data of $20$ representative NBA players in the 2017-2018 regular season offers interesting insights about the shooting patterns of these players. A comparison against the competing method shows that the proposed method can effectively incorporate spatial contiguity into the estimation of intensity surfaces.",
+ "conference": {
+ "name": "ICML",
+ "year": 2022
+ },
+ "template": null,
+ "category": "04. Probabilistic Methods and Causal Inference",
+ "is_done": true,
+ "timestamp": "2025-08-05T12:26:58.402164",
+ "rule_paper_possible_url": null,
+ "github_base": null,
+ "llm_believed_url": null,
+ "rule_base_possible_url": null,
+ "confirmed_url": null,
+ "Internet_fail": null,
+ "html_fail": null,
+ "citation_data": {
+ "original_title": "Bayesian Nonparametric Learning for Point Processes with Spatial Homogeneity: A Spatial Analysis of NBA Shot Locations",
+ "matched_title": "Bayesian Nonparametric Learning for Point Processes with Spatial Homogeneity: A Spatial Analysis of NBA Shot Locations",
+ "citation_count": 9,
+ "similarity": 1.0,
+ "source": "semantic_scholar",
+ "year": 2022,
+ "authors": [
+ {
+ "authorId": "1423647970",
+ "name": "Fan Yin"
+ },
+ {
+ "authorId": "123414248",
+ "name": "Jieying Jiao"
+ },
+ {
+ "authorId": "2112592833",
+ "name": "Jun Yan"
+ },
+ {
+ "authorId": "143697962",
+ "name": "Guanyu Hu"
+ }
+ ]
+ }
+ }
data_without_website/Better_Safe_Than_Sorry__Preventing_Delusive_Adversaries_with_Adversarial_Training.json ADDED
@@ -0,0 +1,54 @@
+ {
+ "id": "I39u89067j",
+ "title": "Better Safe Than Sorry: Preventing Delusive Adversaries with Adversarial Training",
+ "track": "main",
+ "author": "Lue Tao;Lei Feng;Jinfeng Yi;Sheng-Jun Huang;Songcan Chen",
+ "pdf": "https://openreview.net/pdf?id=I39u89067j",
+ "keyword": "",
+ "abstract": "Delusive attacks aim to substantially deteriorate the test accuracy of the learning model by slightly perturbing the features of correctly labeled training examples. By formalizing this malicious attack as finding the worst-case training data within a specific $\\infty$-Wasserstein ball, we show that minimizing adversarial risk on the perturbed data is equivalent to optimizing an upper bound of natural risk on the original data. This implies that adversarial training can serve as a principled defense against delusive attacks. Thus, the test accuracy decreased by delusive attacks can be largely recovered by adversarial training. To further understand the internal mechanism of the defense, we disclose that adversarial training can resist the delusive perturbations by preventing the learner from overly relying on non-robust features in a natural setting. Finally, we complement our theoretical findings with a set of experiments on popular benchmark datasets, which show that the defense withstands six different practical attacks. Both theoretical and empirical results vote for adversarial training when confronted with delusive adversaries.",
+ "conference": {
+ "name": "NIPS",
+ "year": 2021
+ },
+ "template": null,
+ "category": "03. ML Theory and Optimization",
+ "is_done": true,
+ "timestamp": "2025-08-04T04:07:47.553523",
+ "rule_paper_possible_url": null,
+ "github_base": null,
+ "llm_believed_url": null,
+ "rule_base_possible_url": null,
+ "confirmed_url": null,
+ "Internet_fail": null,
+ "html_fail": null,
+ "citation_data": {
+ "original_title": "Better Safe Than Sorry: Preventing Delusive Adversaries with Adversarial Training",
+ "matched_title": "Better Safe Than Sorry: Preventing Delusive Adversaries with Adversarial Training",
+ "citation_count": 73,
+ "similarity": 1.0,
+ "source": "semantic_scholar",
+ "year": 2021,
+ "authors": [
+ {
+ "authorId": "3023914",
+ "name": "Lue Tao"
+ },
+ {
+ "authorId": "2269837261",
+ "name": "Lei Feng"
+ },
+ {
+ "authorId": "2273542224",
+ "name": "Jinfeng Yi"
+ },
+ {
+ "authorId": "2271420190",
+ "name": "Sheng-Jun Huang"
+ },
+ {
+ "authorId": "2210829231",
+ "name": "Songcan Chen"
+ }
+ ]
+ }
+ }
data_without_website/Can_Large_Language_Models_Infer_Causation_from_Correlation_.json ADDED
@@ -0,0 +1,71 @@
+ {
+ "id": "vqIH0ObdqL",
+ "title": "Can Large Language Models Infer Causation from Correlation?",
+ "track": "main",
+ "author": "Zhijing Jin;Jiarui Liu;Zhiheng LYU;Spencer Poff;Mrinmaya Sachan;Rada Mihalcea;Mona T. Diab;Bernhard Schölkopf",
+ "pdf": "https://openreview.net/pdf?id=vqIH0ObdqL",
+ "keyword": "",
+ "abstract": "Causal inference is one of the hallmarks of human intelligence. While the field of CausalNLP has attracted much interest in the recent years, existing causal inference datasets in NLP primarily rely on discovering causality from empirical knowledge (e.g., commonsense knowledge). In this work, we propose the first benchmark dataset to test the pure causal inference skills of large language models (LLMs). Specifically, we formulate a novel task Corr2Cause, which takes a set of correlational statements and determines the causal relationship between the variables. We curate a large-scale dataset of more than 200K samples, on which we evaluate seventeen existing LLMs. Through our experiments, we identify a key shortcoming of LLMs in terms of their causal inference skills, and show that these models achieve almost close to random performance on the task. This shortcoming is somewhat mitigated when we try to re-purpose LLMs for this skill via finetuning, but we find that these models still fail to generalize – they can only perform causal inference in in-distribution settings when variable names and textual expressions used in the queries are similar to those in the training set, but fail in out-of-distribution settings generated by perturbing these queries. Corr2Cause is a challenging task for LLMs, and can be helpful in guiding future research on improving LLMs’ pure reasoning skills and generalizability. Our data is at https://huggingface.co/datasets/causalnlp/corr2cause. Our code is at https://github.com/causalNLP/corr2cause.",
+ "conference": {
+ "name": "ICLR",
+ "year": 2024
+ },
+ "project_url": "https://cogito233.github.io/",
+ "project_url1": "https://jiarui-liu.github.io/",
+ "project_url2": "INTERNET_PROBLEM",
+ "github_base": "https://github.com/causalNLP/corr2cause",
+ "llm_believed_url": "https://huggingface.co/datasets/causalnlp/corr2cause",
+ "llm_believed_url1": "https://github.com/causalNLP/corr2cause",
+ "rule_base_possible_url": "https://cogito233.github.io/",
+ "rule_base_possible_url1": "https://jiarui-liu.github.io/",
+ "rule_base_possible_url2": "https://cogito233.github.io",
+ "rule_base_possible_url3": "https://jiarui-liu.github.io",
+ "llm_believed_url2": "https://huggingface.co/datasets/causalnlp/corr2cause",
+ "llm_believed_url3": "https://edmond.mpdl.mpg.de/dataset.xhtml?persistentId=doi:10.17617/3.VYGWHY",
+ "template": null,
+ "category": "04. Probabilistic Methods and Causal Inference",
+ "is_done": true,
+ "timestamp": "2025-08-06T03:43:30.295695",
+ "citation_data": {
+ "original_title": "Can Large Language Models Infer Causation from Correlation?",
+ "matched_title": "Can Large Language Models Infer Causation from Correlation?",
+ "citation_count": 133,
+ "similarity": 1.0,
+ "source": "semantic_scholar",
+ "year": 2023,
+ "authors": [
+ {
+ "authorId": "2111472502",
+ "name": "Zhijing Jin"
+ },
+ {
+ "authorId": "146961917",
+ "name": "Jiarui Liu"
+ },
+ {
+ "authorId": "2114227440",
+ "name": "Zhiheng Lyu"
+ },
+ {
+ "authorId": "1753626755",
+ "name": "Spencer Poff"
+ },
+ {
+ "authorId": "2790926",
+ "name": "Mrinmaya Sachan"
+ },
+ {
+ "authorId": "2105984203",
+ "name": "Rada Mihalcea"
+ },
+ {
+ "authorId": "2138579860",
+ "name": "Mona T. Diab"
+ },
+ {
+ "authorId": "1707625",
+ "name": "B. Scholkopf"
+ }
+ ]
+ }
+ }
data_without_website/Categorical_Distributional_Reinforcement_Learning_with_Kullback-Leibler_Divergence__Convergence_and_Asymptotics.json ADDED
@@ -0,0 +1,27 @@
+ {
+ "id": "f4qxkR6GQK",
+ "title": "Categorical Distributional Reinforcement Learning with Kullback-Leibler Divergence: Convergence and Asymptotics",
+ "track": "main",
+ "author": "Tyler Kastner;Mark Rowland;Yunhao Tang;Murat A Erdogdu;Amir-massoud Farahmand",
+ "pdf": "https://openreview.net/pdf?id=f4qxkR6GQK",
+ "keyword": "",
+ "abstract": "We study the problem of distributional reinforcement learning using categorical parametrisations and a KL divergence loss. Previous work analyzing categorical distributional RL has done so using a Cramér distance-based loss, simplifying the analysis but creating a theory-practice gap. We introduce a preconditioned version of the algorithm, and prove that it is guaranteed to converge. We further derive the asymptotic variance of the categorical estimates under different learning rate regimes, and compare to that of classical reinforcement learning. We finally empirically validate our theoretical results and perform an empirical investigation into the relative strengths of using KL losses, and derive a number of actionable insights for practitioners.",
+ "conference": {
+ "name": "ICML",
+ "year": 2025
+ },
+ "template": null,
+ "category": "02. Reinforcement Learning and Control",
+ "is_done": true,
+ "timestamp": "2025-08-05T11:54:08.918406",
+ "rule_paper_possible_url": null,
+ "github_base": null,
+ "llm_believed_url": null,
+ "rule_base_possible_url": null,
+ "confirmed_url": null,
+ "Internet_fail": null,
+ "html_fail": null,
+ "citation_data": {
+ "citation_count": 0
+ }
+ }
data_without_website/CharDiff__Improving_Sampling_Convergence_via_Characteristic_Function_Consistency_in_Diffusion_Models.json ADDED
@@ -0,0 +1,42 @@
+ {
+ "id": "f72680706e",
+ "title": "CharDiff: Improving Sampling Convergence via Characteristic Function Consistency in Diffusion Models",
+ "track": "main",
+ "author": "Abhishek Kumar Sinha; S. Manthira Moorthi",
+ "pdf": "https://openaccess.thecvf.com/content/WACV2025/papers/Sinha_CharDiff_Improving_Sampling_Convergence_via_Characteristic_Function_Consistency_in_Diffusion_WACV_2025_paper.pdf",
+ "keyword": "",
+ "abstract": "Diffusion models have demonstrated extensive capabilities for generative modelling in both conditional and conditional image synthesis tasks. The reverse sampling has been the center of interest to improve the overall image quality without retraining the model from scratch. In this work we propose a plug-and-play module by utilizing the characteristic function of the distributions to minimize sampling drift. We experiment with existing diffusion solvers with our module in-place during denoising step to provide additional performance gain in image synthesis linear inverse problem tasks and text-conditioned image synthesis. Moreover We theoretically establish the effectiveness of the method in terms of improved Frechet Inception Distance (FID) and second order Tweedie moment for reduced trajectory deviation.",
+ "conference": {
+ "name": "WACV",
+ "year": 2025
+ },
+ "template": null,
+ "category": "07. Generative Model",
+ "is_done": true,
+ "timestamp": "2025-08-04T07:40:35.019256",
+ "rule_paper_possible_url": null,
+ "github_base": null,
+ "llm_believed_url": null,
+ "rule_base_possible_url": null,
+ "confirmed_url": null,
+ "Internet_fail": null,
+ "html_fail": null,
+ "citation_data": {
+ "original_title": "CharDiff: Improving Sampling Convergence via Characteristic Function Consistency in Diffusion Models",
+ "matched_title": "CharDiff: Improving Sampling Convergence via Characteristic Function Consistency in Diffusion Models",
+ "citation_count": 0,
+ "similarity": 1.0,
+ "source": "semantic_scholar",
+ "year": 2025,
+ "authors": [
+ {
+ "authorId": "2351342321",
+ "name": "Abhishek Kumar"
+ },
+ {
+ "authorId": "2351078969",
+ "name": "Sinha S. Manthira Moorthi"
+ }
+ ]
+ }
+ }
data_without_website/Contrastive_and_View-Interaction_Structure_Learning_for_Multi-view_Clustering.json ADDED
@@ -0,0 +1,42 @@
+ {
+ "id": "paper559",
+ "title": "Contrastive and View-Interaction Structure Learning for Multi-view Clustering",
+ "track": "main",
+ "author": "Jing Wang; Songhe Feng",
+ "pdf": "https://www.ijcai.org/proceedings/2024/0559.pdf",
+ "keyword": "",
+ "abstract": "Existing Deep Multi-view Clustering (DMVC) approaches typically concentrate on capturing consensus semantics from multiple views, where contrastive learning is widely used to align view-specific representations of each view. Unfortunately, view-specific representations are extracted from the content information of the corresponding instance, neglecting the relationships among different instances. Furthermore, existing contrastive loss imports numerous false negative pairs that conflict with the clustering objectives. In response to these challenges, we propose a contraStive and viEw-interaction stRucture learning framework for multI-viEw cluStering (SERIES). Our method takes into account the structural relations among instances and boosts the contrastive loss to improve intra-class compactness. Meanwhile, a cross-view dual relation generation mechanism is introduced to achieve the consensus structural graph across multiple views for clustering. Specifically, we initially acquire view-specific representations using multiple graph autoencoders to exploit both content information and structural information. Furthermore, to pull together the same cluster instances, a soft negative pair aware contrastive loss is employed to distinguish the dissimilar instances while attracting similar instances. Thereafter, the view-specific representations are fed into cross-view dual relation generation layers to generate the affinity matrices of each other, aiming to reveal a consistent structural graph across various views. Extensive experiments conducted on six benchmarks illustrate the superiority of our method compared to other state-of-the-art approaches.",
+ "conference": {
+ "name": "IJCAI",
+ "year": 2024
+ },
+ "template": null,
+ "category": "01. Deep Learning Architectures and Methods",
+ "is_done": true,
+ "timestamp": "2025-08-05T04:21:23.197531",
+ "rule_paper_possible_url": null,
+ "github_base": null,
+ "llm_believed_url": null,
+ "rule_base_possible_url": null,
+ "confirmed_url": null,
+ "Internet_fail": null,
+ "html_fail": null,
+ "citation_data": {
+ "original_title": "Contrastive and View-Interaction Structure Learning for Multi-view Clustering",
+ "matched_title": "Contrastive and View-Interaction Structure Learning for Multi-view Clustering",
+ "citation_count": 1,
+ "similarity": 1.0,
+ "source": "semantic_scholar",
+ "year": 2024,
+ "authors": [
+ {
+ "authorId": "2261863499",
+ "name": "Jing Wang"
+ },
+ {
+ "authorId": "2313723036",
+ "name": "Songhe Feng"
+ }
+ ]
+ }
+ }
data_without_website/Coresets_for_Multiple__ell_p_Regression.json ADDED
@@ -0,0 +1,42 @@
+ {
+ "id": "4UWjqrMmFp",
+ "title": "Coresets for Multiple $\\ell_p$ Regression",
+ "track": "main",
+ "author": "David Woodruff;Taisuke Yasuda",
+ "pdf": "https://openreview.net/pdf?id=4UWjqrMmFp",
+ "keyword": "",
+ "abstract": "A *coreset* of a dataset with $n$ examples and $d$ features is a weighted subset of examples that is sufficient for solving downstream data analytic tasks. Nearly optimal constructions of coresets for least squares and $\\ell_p$ linear regression with a single response are known in prior work. However, for multiple $\\ell_p$ regression where there can be $m$ responses, there are no known constructions with size sublinear in $m$. In this work, we construct coresets of size $\\tilde O(\\varepsilon^{-2}d)$ for $p<2$ and $\\tilde O(\\varepsilon^{-p}d^{p/2})$ for $p>2$ independently of $m$ (i.e., dimension-free) that approximate the multiple $\\ell_p$ regression objective at every point in the domain up to $(1\\pm\\varepsilon)$ relative error. If we only need to preserve the minimizer subject to a subspace constraint, we improve these bounds by an $\\varepsilon$ factor for all $p>1$. All of our bounds are nearly tight. We give two application of our results. First, we settle the number of uniform samples needed to approximate $\\ell_p$ Euclidean power means up to a $(1+\\varepsilon)$ factor, showing that $\\tilde\\Theta(\\varepsilon^{-2})$ samples for $p = 1$, $\\tilde\\Theta(\\varepsilon^{-1})$ samples for $1 < p < 2$, and $\\tilde\\Theta(\\varepsilon^{1-p})$ samples for $p>2$ is tight, answering a question of Cohen-Addad, Saulpic, and Schwiegelshohn. Second, we show that for $1<p<2$, every matrix has a subset of $\\tilde O(\\varepsilon^{-1}k)$ rows which spans a $(1+\\varepsilon)$-approximately optimal $k$-dimensional subspace for $\\ell_p$ subspace approximation, which is also nearly optimal.",
+ "conference": {
+ "name": "ICML",
+ "year": 2024
+ },
+ "template": null,
+ "category": "03. ML Theory and Optimization",
+ "is_done": true,
+ "timestamp": "2025-08-05T06:41:46.878442",
+ "rule_paper_possible_url": null,
+ "github_base": null,
+ "llm_believed_url": null,
+ "rule_base_possible_url": null,
+ "confirmed_url": null,
+ "Internet_fail": null,
+ "html_fail": null,
+ "citation_data": {
+ "original_title": "Coresets for Multiple $\\ell_p$ Regression",
+ "matched_title": "Coresets for Multiple $\\ell_p$ Regression",
+ "citation_count": 0,
+ "similarity": 1.0,
+ "source": "semantic_scholar",
+ "year": 2024,
+ "authors": [
+ {
+ "authorId": "2237641814",
+ "name": "David P. Woodruff"
+ },
+ {
+ "authorId": "144738628",
+ "name": "T. Yasuda"
+ }
+ ]
+ }
+ }
data_without_website/Density-Preserving_Deep_Point_Cloud_Compression.json ADDED
@@ -0,0 +1,58 @@
+ {
+ "id": "",
+ "title": "Density-Preserving Deep Point Cloud Compression",
+ "track": "main",
+ "author": "Yun He; Xinlin Ren; Danhang Tang; Yinda Zhang; Xiangyang Xue; Yanwei Fu",
+ "pdf": "https://openaccess.thecvf.com/content/CVPR2022/papers/He_Density-Preserving_Deep_Point_Cloud_Compression_CVPR_2022_paper.pdf",
+ "keyword": "",
+ "abstract": "Local density of point clouds is crucial for representing local details, but has been overlooked by existing point cloud compression methods. To address this, we propose a novel deep point cloud compression method that preserves local density information. Our method works in an auto-encoder fashion: the encoder downsamples the points and learns point-wise features, while the decoder upsamples the points using these features. Specifically, we propose to encode local geometry and density with three embeddings: density embedding, local position embedding and ancestor embedding. During the decoding, we explicitly predict the upsampling factor for each point, and the directions and scales of the upsampled points. To mitigate the clustered points issue in existing methods, we design a novel sub-point convolution layer, and an upsampling block with adaptive scale. Furthermore, our method can also compress point-wise attributes, such as normal. Extensive qualitative and quantitative results on SemanticKITTI and ShapeNet demonstrate that our method achieves the state-of-the-art rate-distortion trade-off.",
+ "conference": {
+ "name": "CVPR",
+ "year": 2022
+ },
+ "template": null,
+ "category": "05. 3D Vision and Computational Graphics",
+ "is_done": true,
+ "timestamp": "2025-08-05T11:37:32.776361",
+ "rule_paper_possible_url": null,
+ "github_base": null,
+ "llm_believed_url": null,
+ "rule_base_possible_url": null,
+ "confirmed_url": null,
+ "Internet_fail": null,
+ "html_fail": null,
+ "citation_data": {
+ "original_title": "Density-Preserving Deep Point Cloud Compression",
+ "matched_title": "Density-preserving Deep Point Cloud Compression",
+ "citation_count": 57,
+ "similarity": 1.0,
+ "source": "semantic_scholar",
+ "year": 2022,
+ "authors": [
+ {
+ "authorId": "2145044110",
+ "name": "Yun He"
+ },
+ {
+ "authorId": "2163652354",
+ "name": "Xinlin Ren"
+ },
+ {
+ "authorId": "40245930",
+ "name": "Danhang Tang"
+ },
+ {
+ "authorId": "1591143181",
+ "name": "Yinda Zhang"
+ },
+ {
+ "authorId": "145905953",
+ "name": "X. Xue"
+ },
+ {
+ "authorId": "35782003",
+ "name": "Yanwei Fu"
+ }
+ ]
+ }
+ }
data_without_website/Detecting_Camouflaged_Object_in_Frequency_Domain.json ADDED
@@ -0,0 +1,58 @@
+ {
+ "id": "",
+ "title": "Detecting Camouflaged Object in Frequency Domain",
+ "track": "main",
+ "author": "Yijie Zhong; Bo Li; Lv Tang; Senyun Kuang; Shuang Wu; Shouhong Ding",
+ "pdf": "https://openaccess.thecvf.com/content/CVPR2022/papers/Zhong_Detecting_Camouflaged_Object_in_Frequency_Domain_CVPR_2022_paper.pdf",
+ "keyword": "",
+ "abstract": "Camouflaged object detection (COD) aims to identify objects that are perfectly embedded in their environment, which has various downstream applications in fields such as medicine, art, and agriculture. However, it is an extremely challenging task to spot camouflaged objects with the perception ability of human eyes. Hence, we claim that the goal of COD task is not just to mimic the human visual ability in a single RGB domain, but to go beyond the human biological vision. We then introduce the frequency domain as an additional clue to better detect camouflaged objects from backgrounds. To well involve the frequency clues into the CNN models, we present a powerful network with two special components. We first design a novel frequency enhancement module (FEM) to dig clues of camouflaged objects in the frequency domain. It contains the offline discrete cosine transform followed by the learnable enhancement. Then we use a feature alignment to fuse the features from RGB domain and frequency domain. Moreover, to further make full use of the frequency information, we propose the high-order relation module (HOR) to handle the rich fusion feature. Comprehensive experiments on three widely-used COD datasets show the proposed method significantly outperforms other state-of-the-art methods by a large margin. The code and results are released in https://github.com/luckybird1994/FDCOD.",
+ "conference": {
+ "name": "CVPR",
+ "year": 2022
+ },
+ "template": null,
+ "category": "05. 3D Vision and Computational Graphics",
+ "is_done": true,
+ "timestamp": "2025-08-05T11:43:17.422591",
+ "rule_paper_possible_url": null,
+ "github_base": null,
+ "llm_believed_url": null,
+ "rule_base_possible_url": null,
+ "confirmed_url": null,
+ "Internet_fail": null,
+ "html_fail": null,
+ "citation_data": {
+ "original_title": "Detecting Camouflaged Object in Frequency Domain",
+ "matched_title": "Detecting Camouflaged Object in Frequency Domain",
+ "citation_count": 167,
+ "similarity": 1.0,
+ "source": "semantic_scholar",
+ "year": 2022,
+ "authors": [
+ {
+ "authorId": "134899359",
+ "name": "Yijie Zhong"
+ },
+ {
+ "authorId": "2155882895",
+ "name": "Bo Li"
+ },
+ {
+ "authorId": "2110141073",
+ "name": "Lv Tang"
+ },
+ {
+ "authorId": "2154131566",
+ "name": "Senyun Kuang"
+ },
+ {
+ "authorId": "2117212193",
+ "name": "Shuang Wu"
+ },
+ {
+ "authorId": "7406856",
+ "name": "Shouhong Ding"
+ }
+ ]
+ }
+ }
data_without_website/Distribution-Free_Model-Agnostic_Regression_Calibration_via_Nonparametric_Methods.json ADDED
@@ -0,0 +1,46 @@
+ {
+ "id": "tGPx7HdBr4",
+ "title": "Distribution-Free Model-Agnostic Regression Calibration via Nonparametric Methods",
+ "track": "main",
+ "author": "Shang Liu;Zhongze Cai;Xiaocheng Li",
+ "pdf": "https://openreview.net/pdf?id=tGPx7HdBr4",
+ "keyword": "",
+ "abstract": "In this paper, we consider the uncertainty quantification problem for regression models. Specifically, we consider an individual calibration objective for characterizing the quantiles of the prediction model. While such an objective is well-motivated from downstream tasks such as newsvendor cost, the existing methods have been largely heuristic and lack of statistical guarantee in terms of individual calibration. We show via simple examples that the existing methods focusing on population-level calibration guarantees such as average calibration or sharpness can lead to harmful and unexpected results. We propose simple nonparametric calibration methods that are agnostic of the underlying prediction model and enjoy both computational efficiency and statistical consistency. Our approach enables a better understanding of the possibility of individual calibration, and we establish matching upper and lower bounds for the calibration error of our proposed methods. Technically, our analysis combines the nonparametric analysis with a covering number argument for parametric analysis, which advances the existing theoretical analyses in the literature of nonparametric density estimation and quantile bandit problems. Importantly, the nonparametric perspective sheds new theoretical insights into regression calibration in terms of the curse of dimensionality and reconciles the existing results on the impossibility of individual calibration. To our knowledge, we make the first effort to reach both individual calibration and finite-sample guarantee with minimal assumptions in terms of conformal prediction. Numerical experiments show the advantage of such a simple approach under various metrics, and also under covariates shift. We hope our work provides a simple benchmark and a starting point of theoretical ground for future research on regression calibration.",
+ "conference": {
+ "name": "NIPS",
+ "year": 2023
+ },
+ "template": null,
+ "category": "03. ML Theory and Optimization",
+ "is_done": true,
+ "timestamp": "2025-08-04T09:24:07.675255",
+ "rule_paper_possible_url": null,
+ "github_base": null,
+ "llm_believed_url": null,
+ "rule_base_possible_url": null,
+ "confirmed_url": null,
+ "Internet_fail": null,
+ "html_fail": null,
+ "citation_data": {
+ "original_title": "Distribution-Free Model-Agnostic Regression Calibration via Nonparametric Methods",
+ "matched_title": "Distribution-Free Model-Agnostic Regression Calibration via Nonparametric Methods",
+ "citation_count": 4,
+ "similarity": 1.0,
+ "source": "semantic_scholar",
+ "year": 2023,
+ "authors": [
+ {
+ "authorId": "2156038765",
+ "name": "Shang Liu"
+ },
+ {
+ "authorId": "51440709",
+ "name": "Zhongze Cai"
+ },
+ {
+ "authorId": "2145438161",
+ "name": "Xiaocheng Li"
+ }
+ ]
+ }
+ }
data_without_website/Dual-Phase_Accelerated_Prompt_Optimization.json ADDED
@@ -0,0 +1,66 @@
+ {
+ "id": "2024.findings-emnlp.709",
+ "title": "Dual-Phase Accelerated Prompt Optimization",
+ "track": "main",
+ "author": "Muchen Yang; Moxin Li; Yongle Li; Zijun Chen; Chongming Gao; Junqi Zhang; Yangyang Li; Fuli Feng",
+ "pdf": "https://aclanthology.org/2024.findings-emnlp.709.pdf",
+ "keyword": "",
+ "abstract": "Gradient-free prompt optimization methods have made significant strides in enhancing the performance of closed-source Large Language Model (LLMs) across a wide range of tasks. However, existing approaches make light of the importance of high-quality prompt initialization and the identification of effective optimization directions, thus resulting in substantial optimization steps to obtain satisfactory performance. In this light, we aim to accelerate prompt optimization process to tackle the challenge of low convergence rate. We propose a dual-phase approach which starts with generating high-quality initial prompts by adopting a well-designed meta-instruction to delve into task-specific information, and iteratively optimize the prompts at the sentence level, leveraging previous tuning experience to expand prompt candidates and accept effective ones. Extensive experiments on eight datasets demonstrate the effectiveness of our proposed method, achieving a consistent accuracy gain over baselines with less than five optimization steps.",
+ "conference": {
+ "name": "EMNLP",
+ "year": 2024
+ },
+ "template": null,
+ "category": "06. Natural Language Understanding and Semantics",
+ "is_done": true,
+ "timestamp": "2025-08-05T13:01:53.153463",
+ "rule_paper_possible_url": null,
+ "github_base": null,
+ "llm_believed_url": null,
+ "rule_base_possible_url": null,
+ "confirmed_url": null,
+ "Internet_fail": null,
+ "html_fail": null,
+ "citation_data": {
+ "original_title": "Dual-Phase Accelerated Prompt Optimization",
+ "matched_title": "Dual-Phase Accelerated Prompt Optimization",
+ "citation_count": 1,
+ "similarity": 1.0,
+ "source": "semantic_scholar",
+ "year": 2024,
+ "authors": [
+ {
+ "authorId": "2307559026",
+ "name": "Muchen Yang"
+ },
+ {
+ "authorId": "2118769749",
+ "name": "Moxin Li"
+ },
+ {
+ "authorId": "2307555695",
+ "name": "Yongle Li"
+ },
+ {
+ "authorId": "2307513797",
+ "name": "Zijun Chen"
+ },
+ {
+ "authorId": "2265123543",
+ "name": "Chongming Gao"
+ },
+ {
+ "authorId": "2307557096",
+ "name": "Junqi Zhang"
+ },
+ {
+ "authorId": "2307555570",
+ "name": "Yangyang Li"
+ },
+ {
+ "authorId": "2280911299",
+ "name": "Fuli Feng"
+ }
+ ]
+ }
+ }
data_without_website/Empowering_Unsupervised_Domain_Adaptation_With_Large-Scale_Pre-Trained_Vision-Language_Models.json ADDED
@@ -0,0 +1,66 @@
+ {
+ "id": "954de34d09",
+ "title": "Empowering Unsupervised Domain Adaptation With Large-Scale Pre-Trained Vision-Language Models",
+ "track": "main",
+ "author": "Zhengfeng Lai; Haoping Bai; Haotian Zhang; Xianzhi Du; Jiulong Shan; Yinfei Yang; Chen-Nee Chuah; Meng Cao",
+ "pdf": "https://openaccess.thecvf.com/content/WACV2024/papers/Lai_Empowering_Unsupervised_Domain_Adaptation_With_Large-Scale_Pre-Trained_Vision-Language_Models_WACV_2024_paper.pdf",
+ "keyword": "",
+ "abstract": "Unsupervised Domain Adaptation (UDA) aims to leverage the labeled source domain to solve the tasks on the unlabeled target domain. Traditional UDA methods face the challenge of the tradeoff between domain alignment and semantic class discriminability, especially when a large domain gap exists between the source and target domain. The efforts of applying large-scale pre-training to bridge the domain gaps remain limited. In this work, we propose that Vision-Language Models (VLMs) can empower UDA tasks due to their training pattern with language alignment and their large-scale pre-trained datasets. For example, CLIP and GLIP have shown promising zero-shot generalization in classification and detection tasks. However, directly fine-tuning these VLMs into downstream tasks may be computationally expensive and not scalable if we have multiple domains that need to be adapted. Therefore, in this work, we first study an efficient adaption of VLMs to preserve the original knowledge while maximizing its flexibility for learning new knowledge. Then, we design a domain-aware pseudo-labeling scheme tailored to VLMs for domain disentanglement. We show the superiority of the proposed methods in four UDA-classification and two UDA-detection benchmarks, with a significant improvement (+9.9%) on DomainNet.",
+ "conference": {
+ "name": "WACV",
+ "year": 2024
+ },
+ "template": null,
+ "category": "01. Deep Learning Architectures and Methods",
+ "is_done": true,
+ "timestamp": "2025-08-04T05:45:37.043567",
+ "rule_paper_possible_url": null,
+ "github_base": null,
+ "llm_believed_url": null,
+ "rule_base_possible_url": null,
+ "confirmed_url": null,
+ "Internet_fail": null,
+ "html_fail": null,
+ "citation_data": {
+ "original_title": "Empowering Unsupervised Domain Adaptation With Large-Scale Pre-Trained Vision-Language Models",
+ "matched_title": "Empowering Unsupervised Domain Adaptation with Large-scale Pre-trained Vision-Language Models",
+ "citation_count": 21,
+ "similarity": 1.0,
+ "source": "semantic_scholar",
+ "year": 2024,
+ "authors": [
+ {
+ "authorId": "1878929788",
+ "name": "Zhengfeng Lai"
+ },
+ {
+ "authorId": "9383929",
+ "name": "Haoping Bai"
+ },
+ {
+ "authorId": "2257340591",
+ "name": "Haotian Zhang"
+ },
+ {
+ "authorId": "2239065938",
+ "name": "Xianzhi Du"
+ },
+ {
+ "authorId": "2091600962",
+ "name": "Jiulong Shan"
+ },
+ {
+ "authorId": "2249897805",
+ "name": "Yinfei Yang"
+ },
+ {
+ "authorId": "2283813429",
+ "name": "Chen-Nee Chuah"
+ },
+ {
+ "authorId": "2257045916",
+ "name": "Meng Cao"
+ }
+ ]
+ }
+ }
data_without_website/Finite_Smoothing_Algorithm_for_High-Dimensional_Support_Vector_Machines_and_Quantile_Regression.json ADDED
@@ -0,0 +1,46 @@
+ {
+ "id": "RvwMTDYTOb",
+ "title": "Finite Smoothing Algorithm for High-Dimensional Support Vector Machines and Quantile Regression",
+ "track": "main",
+ "author": "Qian Tang;Yikai Zhang;Boxiang Wang",
+ "pdf": "https://openreview.net/pdf?id=RvwMTDYTOb",
+ "keyword": "",
+ "abstract": "This paper introduces a finite smoothing algorithm (FSA), a novel approach to tackle computational challenges in applying support vector machines (SVM) and quantile regression to high-dimensional data. The critical issue with these methods is the non-smooth nature of their loss functions, which traditionally limits the use of highly efficient coordinate descent techniques in high-dimensional settings. FSA innovatively addresses this issue by transforming these loss functions into their smooth counterparts, thereby facilitating more efficient computation. A distinctive feature of FSA is its theoretical foundation: FSA can yield exact solutions, not just approximations, despite the smoothing approach. Our simulation and benchmark tests demonstrate that FSA significantly outpaces its competitors in speed, often by orders of magnitude, while improving or at least maintaining precision. We have implemented FSA in two open-source R packages: hdsvm for high-dimensional SVM and hdqr for high-dimensional quantile regression.",
+ "conference": {
+ "name": "ICML",
+ "year": 2024
+ },
+ "template": null,
+ "category": "03. ML Theory and Optimization",
+ "is_done": true,
+ "timestamp": "2025-08-05T06:37:31.628016",
+ "rule_paper_possible_url": null,
+ "github_base": null,
+ "llm_believed_url": null,
+ "rule_base_possible_url": null,
+ "confirmed_url": null,
+ "Internet_fail": null,
+ "html_fail": null,
+ "citation_data": {
+ "original_title": "Finite Smoothing Algorithm for High-Dimensional Support Vector Machines and Quantile Regression",
+ "matched_title": "Finite Smoothing Algorithm for High-Dimensional Support Vector Machines and Quantile Regression",
+ "citation_count": 0,
+ "similarity": 1.0,
+ "source": "semantic_scholar",
+ "year": 2024,
+ "authors": [
+ {
+ "authorId": "2315921823",
+ "name": "Qian Tang"
+ },
+ {
+ "authorId": "2319177802",
+ "name": "Yikai Zhang"
+ },
+ {
+ "authorId": "2315951641",
+ "name": "Boxiang Wang"
+ }
+ ]
+ }
+ }
data_without_website/Good_Examples_Make_A_Faster_Learner__Simple_Demonstration-based_Learning_for_Low-resource_NER.json ADDED
@@ -0,0 +1,53 @@
+ {
+ "id": "2022.acl-long.192",
+ "title": "Good Examples Make A Faster Learner: Simple Demonstration-based Learning for Low-resource NER",
+ "track": "main",
+ "author": "Dong-Ho Lee; Akshen Kadakia; Kangmin Tan; Mahak Agarwal; Xinyu Feng; Takashi Shibuya; Ryosuke Mitani; Toshiyuki Sekiya; Jay Pujara; Xiang Ren",
+ "pdf": "https://aclanthology.org/2022.acl-long.192.pdf",
+ "keyword": "",
+ "abstract": "Recent advances in prompt-based learning have shown strong results on few-shot text classification by using cloze-style templates. Similar attempts have been made on named entity recognition (NER) which manually design templates to predict entity types for every text span in a sentence. However, such methods may suffer from error propagation induced by entity span detection, high cost due to enumeration of all possible text spans, and omission of inter-dependencies among token labels in a sentence. Here we present a simple demonstration-based learning method for NER, which lets the input be prefaced by task demonstrations for in-context learning. We perform a systematic study on demonstration strategy regarding what to include (entity examples, with or without surrounding context), how to select the examples, and what templates to use. Results on in-domain learning and domain adaptation show that the model’s performance in low-resource settings can be largely improved with a suitable demonstration strategy (e.g., a 4-17% improvement on 25 train instances). We also find that good demonstration can save many labeled examples and consistency in demonstration contributes to better performance.",
+ "conference": {
+ "name": "ACL",
+ "year": 2022
+ },
+ "github_base": "https://github.com/INK-USC/fewNER",
+ "llm_believed_url": "https://github.com/INK-USC/fewNER",
+ "template": null,
+ "category": "06. Natural Language Understanding and Semantics",
+ "is_done": true,
+ "timestamp": "2025-08-05T15:03:24.307407",
+ "citation_data": {
+ "original_title": "Good Examples Make A Faster Learner: Simple Demonstration-based Learning for Low-resource NER",
+ "matched_title": "Good Examples Make A Faster Learner: Simple Demonstration-based Learning for Low-resource NER",
+ "citation_count": 89,
+ "similarity": 1.0,
+ "source": "semantic_scholar",
+ "year": 2021,
+ "authors": [
+ {
+ "authorId": "2115475530",
+ "name": "Dong-Ho Lee"
+ },
+ {
+ "authorId": "2054506633",
+ "name": "Mahak Agarwal"
+ },
+ {
+ "authorId": "114565986",
+ "name": "Akshen Kadakia"
+ },
+ {
+ "authorId": "47720660",
+ "name": "Takashi Shibuya"
+ },
+ {
+ "authorId": "2634786",
+ "name": "J. Pujara"
+ },
+ {
+ "authorId": "1384550891",
+ "name": "Xiang Ren"
+ }
+ ]
+ }
+ }
data_without_website/Hallucination_Detox__Sensitive_Neuron_Dropout_(SeND)_for_Large_Language_Model_Training.json ADDED
@@ -0,0 +1,54 @@
+ {
+ "id": "6L8OdH5PBu",
+ "title": "Hallucination Detox: Sensitive Neuron Dropout (SeND) for Large Language Model Training",
+ "track": "main",
+ "author": "Shahrad Mohammadzadeh;Juan David Guerra;Marco Bonizzato;Reihaneh Rabbany;Golnoosh Farnadi",
+ "pdf": "https://openreview.net/pdf?id=6L8OdH5PBu",
+ "keyword": "",
+ "abstract": "As large language models (LLMs) are increasingly deployed across various industries, concerns regarding their reliability, particularly due to hallucinations—outputs that are factually inaccurate or irrelevant to user input—have grown. Our research investigates the relationship between the training process and the emergence of hallucinations to address a key gap in existing research that focuses primarily on post hoc detection and mitigation strategies. Using models from the Pythia suite (70M–12B parameters) and several hallucination detection metrics, we analyze hallucination trends throughout training and explore LLM internal dynamics. We introduce Sensitivity Dropout (SenD), a novel training protocol designed to mitigate hallucinations by reducing variance during training. SenD achieves this by deterministically dropping embedding indices with significant variability, referred to as Sensitive Embedding Indices. In addition, we develop an unsupervised hallucination detection metric, Efficient EigenScore (EES), which approximates the traditional EigenScore at 2x speed. This efficient metric is integrated into our protocol, allowing SenD to be both computationally scalable and effective at reducing hallucinations. Our empirical evaluation demonstrates that our approach improves LLM reliability at test time by up to 40\\% compared to normal training while also providing an efficient method to improve factual accuracy when adapting LLMs to Wikipedia, Medical, and LegalBench domains.",
+ "conference": {
+ "name": "ICLR",
+ "year": 2025
+ },
+ "template": null,
+ "category": "01. Deep Learning Architectures and Methods",
+ "is_done": true,
+ "timestamp": "2025-08-05T04:01:45.492731",
+ "rule_paper_possible_url": null,
+ "github_base": null,
+ "llm_believed_url": null,
+ "rule_base_possible_url": null,
+ "confirmed_url": null,
+ "Internet_fail": null,
+ "html_fail": null,
+ "citation_data": {
+ "original_title": "Hallucination Detox: Sensitive Neuron Dropout (SeND) for Large Language Model Training",
+ "matched_title": "Hallucination Detox: Sensitive Neuron Dropout (SeND) for Large Language Model Training",
+ "citation_count": 0,
+ "similarity": 1.0,
+ "source": "semantic_scholar",
+ "year": 2024,
+ "authors": [
+ {
+ "authorId": "2281945133",
+ "name": "Shahrad Mohammadzadeh"
+ },
+ {
+ "authorId": "2326989698",
+ "name": "Juan David Guerra"
+ },
+ {
+ "authorId": "2326993291",
+ "name": "Marco Bonizzato"
+ },
+ {
+ "authorId": "2490772",
+ "name": "Reihaneh Rabbany"
+ },
+ {
+ "authorId": "2086602",
+ "name": "G. Farnadi"
+ }
+ ]
+ }
+ }
data_without_website/Learning_Augmented_Binary_Search_Trees.json ADDED
@@ -0,0 +1,46 @@
+ {
+ "id": "16743",
+ "title": "Learning Augmented Binary Search Trees",
+ "track": "main",
+ "author": "Honghao Lin; Tian Luo; David Woodruff",
+ "pdf": "https://proceedings.mlr.press/v162/lin22f/lin22f.pdf",
+ "keyword": "",
+ "abstract": "A treap is a classic randomized binary search tree data structure that is easy to implement and supports O(log n) expected time access. However, classic treaps do not take advantage of the input distribution or patterns in the input. Given recent advances in algorithms with predictions, we propose pairing treaps with machine advice to form a learning-augmented treap. We are the first to propose a learning-augmented data structure that supports binary search tree operations such as range-query and successor functionalities. With the assumption that we have access to advice from a frequency estimation oracle, we assign learned priorities to the nodes to better improve the treap’s structure. We theoretically analyze the learning-augmented treap’s performance under various input distributions and show that under those circumstances, our learning-augmented treap has stronger guarantees than classic treaps and other classic tree-based data structures. Further, we experimentally evaluate our learned treap on synthetic datasets and demonstrate a performance advantage over other search tree data structures. We also present experiments on real world datasets with known frequency estimation oracles and show improvements as well.",
+ "conference": {
+ "name": "ICML",
+ "year": 2022
+ },
+ "template": null,
+ "category": "03. ML Theory and Optimization",
+ "is_done": true,
+ "timestamp": "2025-08-05T12:44:27.565538",
+ "rule_paper_possible_url": null,
+ "github_base": null,
+ "llm_believed_url": null,
+ "rule_base_possible_url": null,
+ "confirmed_url": null,
+ "Internet_fail": null,
+ "html_fail": null,
+ "citation_data": {
+ "original_title": "Learning Augmented Binary Search Trees",
+ "matched_title": "Learning Augmented Binary Search Trees",
+ "citation_count": 32,
+ "similarity": 1.0,
+ "source": "semantic_scholar",
+ "year": 2022,
+ "authors": [
+ {
+ "authorId": "48444613",
+ "name": "Honghao Lin"
+ },
+ {
+ "authorId": "1387674334",
+ "name": "Tiancheng Luo"
+ },
+ {
+ "authorId": "143982862",
+ "name": "David P. Woodruff"
+ }
+ ]
+ }
+ }
data_without_website/Learning_Image_Harmonization_in_the_Linear_Color_Space.json ADDED
@@ -0,0 +1,46 @@
+ {
+ "id": "",
+ "title": "Learning Image Harmonization in the Linear Color Space",
+ "track": "main",
+ "author": "Ke Xu; Gerhard Petrus Hancke; Rynson W.H. Lau",
+ "pdf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Xu_Learning_Image_Harmonization_in_the_Linear_Color_Space_ICCV_2023_paper.pdf",
+ "keyword": "",
+ "abstract": "Harmonizing cut-and-paste images into perceptually realistic ones is challenging, as it requires a full understanding of the discrepancies between the background of the target image and the inserted object. Existing methods mainly adjust the appearances of the inserted object via pixel-level manipulations. They are not effective in correcting color discrepancy caused by different scene illuminations and the image formation processes. We note that image colors are essentially camera ISP projection of the scene radiance. If we can trace the image colors back to the radiance field, we may be able to model the scene illumination and harmonize the discrepancy better. In this paper, we propose a novel neural approach to harmonize the image colors in a camera-independent color space, in which color values are proportional to the scene radiance. To this end, we propose a novel image unprocessing module to estimate an intermediate high dynamic range version of the object to be inserted. We then propose a novel color harmonization module that harmonizes the colors of the inserted object by querying the estimated scene radiance and re-rendering the harmonized object in the output color space. Extensive experiments demonstrate that our method outperforms the state-of-the-art approaches.",
+ "conference": {
+ "name": "ICCV",
+ "year": 2023
+ },
+ "template": null,
+ "category": "05. 3D Vision and Computational Graphics",
+ "is_done": true,
+ "timestamp": "2025-08-05T15:45:17.202699",
+ "rule_paper_possible_url": null,
+ "github_base": null,
+ "llm_believed_url": null,
+ "rule_base_possible_url": null,
+ "confirmed_url": null,
+ "Internet_fail": null,
+ "html_fail": null,
+ "citation_data": {
+ "original_title": "Learning Image Harmonization in the Linear Color Space",
+ "matched_title": "Learning Image Harmonization in the Linear Color Space",
+ "citation_count": 4,
+ "similarity": 1.0,
+ "source": "semantic_scholar",
+ "year": 2023,
+ "authors": [
+ {
+ "authorId": "2265723720",
+ "name": "Ke Xu"
+ },
+ {
+ "authorId": "2229808348",
+ "name": "G. Hancke"
+ },
+ {
+ "authorId": "2265657606",
+ "name": "Rynson W. H. Lau"
+ }
+ ]
+ }
+ }
data_without_website/Learning_a_Neural_Solver_for_Multiple_Object_Tracking.json ADDED
@@ -0,0 +1,42 @@
+ {
+ "id": "",
+ "title": "Learning a Neural Solver for Multiple Object Tracking",
+ "track": "main",
+ "author": "Guillem Braso; Laura Leal-Taixe",
+ "pdf": "https://openaccess.thecvf.com/content_CVPR_2020/papers/Braso_Learning_a_Neural_Solver_for_Multiple_Object_Tracking_CVPR_2020_paper.pdf",
+ "keyword": "",
+ "abstract": "Graphs offer a natural way to formulate Multiple Object Tracking (MOT) within the tracking-by-detection paradigm. However, they also introduce a major challenge for learning methods, as defining a model that can operate on such structured domain is not trivial. As a consequence, most learning-based work has been devoted to learning better features for MOT and then using these with well-established optimization frameworks. In this work, we exploit the classical network flow formulation of MOT to define a fully differentiable framework based on Message Passing Networks (MPNs). By operating directly on the graph domain, our method can reason globally over an entire set of detections and predict final solutions. Hence, we show that learning in MOT does not need to be restricted to feature extraction, but it can also be applied to the data association step. We show a significant improvement in both MOTA and IDF1 on three publicly available benchmarks. Our code is available at https://bit.ly/motsolv.",
+ "conference": {
+ "name": "CVPR",
+ "year": 2020
+ },
+ "template": null,
+ "category": "01. Deep Learning Architectures and Methods",
+ "is_done": true,
+ "timestamp": "2025-08-04T08:55:00.620184",
+ "rule_paper_possible_url": null,
+ "github_base": null,
+ "llm_believed_url": null,
+ "rule_base_possible_url": null,
+ "confirmed_url": null,
+ "Internet_fail": null,
+ "html_fail": null,
+ "citation_data": {
+ "original_title": "Learning a Neural Solver for Multiple Object Tracking",
+ "matched_title": "Learning a Neural Solver for Multiple Object Tracking",
+ "citation_count": 406,
+ "similarity": 1.0,
+ "source": "semantic_scholar",
+ "year": 2019,
+ "authors": [
+ {
+ "authorId": "2064987101",
+ "name": "Guillem Brasó"
+ },
+ {
+ "authorId": "2065226982",
+ "name": "L. Leal-Taixé"
+ }
+ ]
+ }
+ }
data_without_website/Learning_by_Causality_to_Improve_Channel_Dependency_Modeling_in_Multivariate_Time_Series_Forecasting.json ADDED
@@ -0,0 +1,27 @@
+ {
+ "id": "VojvkUEq8q",
+ "title": "Learning by Causality to Improve Channel Dependency Modeling in Multivariate Time Series Forecasting",
+ "track": "main",
+ "author": "Hyunwook Lee;Hyotaek Jeon;Juwon Kim;Minsik Lee;Ko Keun Kim;Sungahn Ko",
+ "pdf": "https://openreview.net/pdf?id=VojvkUEq8q",
+ "keyword": "",
+ "abstract": "Beyond the conventional long-term temporal dependency modeling, multivariate time series (MTS) forecasting has rapidly shifted toward channel dependency (CD) modeling. This shift significantly improves modeling quality by fully leveraging both multivariate relationships and temporal dependencies. Recent methods primarily model channel dependency through correlation learning (e.g., cross-attention) or non-trainable statistical techniques (e.g., cross-correlation). However, these approaches struggle to fully capture the intrinsic relationships within MTS, particularly those stemming from directed cause-effect (i.e., causality) and nonstationary variates originating from diverse sources. In addition, causality may arise from signals with different temporal behaviors, such as varying periodicity or discrete event sequences, which has not been sufficiently discussed before. In this paper, we propose CALAS (Causality-enhanced Attention with Learnable and Adaptive Spacing), the first end-to-end learning method for MTS forecasting that uncovers causality among variates without relying on statistical measures or prior knowledge. To model underlying causality, which consists of causal strength and propagation delay, we newly design a hypernetworks-based 1D convolutions mechanism. Inspired by dilated convolution with learnable spacings (DCLS) and spiking neural networks (SNNs), we extend discrete time delay into a continuous Gaussian kernel. Combining the hypernetworks-generated Gaussian kernel and convolutional weights (i.e., attention or causal strength), we achieve the end-to-end dynamic causality modeling mechanism. This mechanism enhances the model’s ability to capture time-varying causality across multi-source variates, ultimately improving the prediction accuracy, quality, and interpretability. For evaluation, we conduct extensive experiments with six real-world datasets and qualitative analysis to demonstrate CALAS’s superiority in capturing varying causality in a data-agnostic manner. The experiment results indicate that CALAS has significantly improved MTS forecasting accuracy compared to state-of-the-art methods by dynamically modeling causality among variates.",
+ "conference": {
+ "name": "ICLR",
+ "year": 2025
+ },
+ "template": null,
+ "category": "04. Probabilistic Methods and Causal Inference",
+ "is_done": true,
+ "timestamp": "2025-08-05T03:47:42.142457",
+ "rule_paper_possible_url": null,
+ "github_base": null,
+ "llm_believed_url": null,
+ "rule_base_possible_url": null,
+ "confirmed_url": null,
+ "Internet_fail": null,
+ "html_fail": null,
+ "citation_data": {
+ "citation_count": 0
+ }
+ }
data_without_website/LoRA-One__One-Step_Full_Gradient_Could_Suffice_for_Fine-Tuning_Large_Language_Models,_Provably_and_Efficiently.json ADDED
@@ -0,0 +1,46 @@
+ {
+ "id": "KwIlvmLDLm",
+ "title": "LoRA-One: One-Step Full Gradient Could Suffice for Fine-Tuning Large Language Models, Provably and Efficiently",
+ "track": "main",
+ "author": "Yuanhe Zhang;Fanghui Liu;Yudong Chen",
+ "pdf": "https://openreview.net/pdf?id=KwIlvmLDLm",
+ "keyword": "",
+ "abstract": "This paper explores how theory can guide and enhance practical algorithms, using Low-Rank Adaptation (LoRA) (Hu et al., 2022) in large language models as a case study. We rigorously prove that, under gradient descent, LoRA adapters align with specific singular subspaces of the one-step full fine-tuning gradient. This result suggests that, by properly initializing the adapters using the one-step full gradient, subspace alignment can be achieved immediately—applicable to both linear and nonlinear models. Building on our theory, we propose a theory-driven algorithm, LoRA-One, where the linear convergence (as well as generalization) is built and incorporating preconditioners theoretically helps mitigate the effects of ill-conditioning. Besides, our theory reveals connections between LoRA-One and other gradient-alignment-based methods, helping to clarify misconceptions in the design of such algorithms. LoRA-One achieves significant empirical improvements over LoRA and its variants across benchmarks in natural language understanding, mathematical reasoning, and code generation. Code is available at: https://github.com/YuanheZ/LoRA-One.",
+ "conference": {
+ "name": "ICML",
+ "year": 2025
+ },
+ "template": null,
+ "category": "03. ML Theory and Optimization",
+ "is_done": true,
+ "timestamp": "2025-08-05T12:00:27.208271",
+ "rule_paper_possible_url": null,
+ "github_base": null,
+ "llm_believed_url": null,
+ "rule_base_possible_url": null,
+ "confirmed_url": null,
+ "Internet_fail": null,
+ "html_fail": null,
+ "citation_data": {
+ "original_title": "LoRA-One: One-Step Full Gradient Could Suffice for Fine-Tuning Large Language Models, Provably and Efficiently",
+ "matched_title": "LoRA-One: One-Step Full Gradient Could Suffice for Fine-Tuning Large Language Models, Provably and Efficiently",
+ "citation_count": 1,
+ "similarity": 1.0,
+ "source": "semantic_scholar",
+ "year": 2025,
+ "authors": [
+ {
+ "authorId": "2343726229",
+ "name": "Yuanhe Zhang"
+ },
+ {
+ "authorId": "2343803957",
+ "name": "Fanghui Liu"
+ },
+ {
+ "authorId": "2343691720",
+ "name": "Yudong Chen"
+ }
+ ]
+ }
+ }
data_without_website/MEDIC__Model_Backdoor_Removal_by_Importance_Driven_Cloning.json ADDED
@@ -0,0 +1,62 @@
+ {
+ "id": "qHcR93949op",
+ "title": "MEDIC: Model Backdoor Removal by Importance Driven Cloning",
+ "track": "main",
+ "author": "Qiuling Xu;Guanhong Tao;Jean Honorio;Yingqi Liu;Shengwei An;Guangyu Shen;Siyuan Cheng;Xiangyu Zhang",
+ "pdf": "https://openreview.net/pdf?id=qHcR93949op",
+ "keyword": "",
+ "abstract": "We develop a novel method to remove injected backdoors in Deep Learning models. It works by cloning the benign behaviors of a trojaned model to a new model of the same structure. It trains the clone model from scratch on a very small subset of samples and aims to minimize a cloning loss that denotes the differences between the activations of important neurons across the two models. The set of important neurons varies for each input, depending on their magnitude of activations and their impact on the classification result.\nOur experiments show that our technique can effectively remove nine different types of backdoors with minor benign accuracy degradation, outperforming the state-of-the-art backdoor removal techniques that are based on fine-tuning, knowledge distillation, and neuron pruning.",
+ "conference": {
+ "name": "ICLR",
+ "year": 2023
+ },
+ "template": null,
+ "category": "10. Trustworthy and Ethical AI",
+ "is_done": true,
+ "timestamp": "2025-08-04T04:31:04.110179",
+ "rule_paper_possible_url": null,
+ "github_base": null,
+ "llm_believed_url": null,
+ "rule_base_possible_url": null,
+ "confirmed_url": null,
+ "Internet_fail": null,
+ "html_fail": null,
+ "citation_data": {
+ "original_title": "MEDIC: Model Backdoor Removal by Importance Driven Cloning",
+ "matched_title": "MEDIC: Remove Model Backdoors via Importance Driven Cloning",
+ "citation_count": 7,
+ "similarity": 0.8521739130434782,
+ "source": "semantic_scholar",
+ "year": 2023,
+ "authors": [
+ {
+ "authorId": "50536989",
+ "name": "Qiuling Xu"
+ },
+ {
+ "authorId": "48927894",
+ "name": "Guanhong Tao"
+ },
+ {
+ "authorId": "1744068",
+ "name": "J. Honorio"
+ },
+ {
+ "authorId": "2007064152",
+ "name": "Yingqi Liu"
+ },
+ {
+ "authorId": "2052467415",
+ "name": "Guangyu Shen"
+ },
+ {
+ "authorId": "46378881",
+ "name": "Siyuan Cheng"
+ },
+ {
+ "authorId": "2156004395",
+ "name": "Xiangyu Zhang"
+ }
+ ]
+ }
+ }
data_without_website/MFTraj__Map-Free,_Behavior-Driven_Trajectory_Prediction_for_Autonomous_Driving.json ADDED
@@ -0,0 +1,66 @@
+ {
+ "id": "paper657",
+ "title": "MFTraj: Map-Free, Behavior-Driven Trajectory Prediction for Autonomous Driving",
+ "track": "main",
+ "author": "Haicheng Liao; Zhenning Li; Chengyue Wang; Huanming Shen; Dongping Liao; Bonan Wang; Guofa Li; Chengzhong Xu",
+ "pdf": "https://www.ijcai.org/proceedings/2024/0657.pdf",
+ "keyword": "",
+ "abstract": "This paper introduces a trajectory prediction model tailored for autonomous driving, focusing on capturing complex interactions in dynamic traffic scenarios without reliance on high-definition maps. The model, termed MFTraj, harnesses historical trajectory data combined with a novel dynamic geometric graph-based behavior-aware module. At its core, an adaptive structure-aware interactive graph convolutional network captures both positional and behavioral features of road users, preserving spatial-temporal intricacies. Enhanced by a linear attention mechanism, the model achieves computational efficiency and reduced parameter overhead. Evaluations on the Argoverse, NGSIM, HighD, and MoCAD datasets underscore MFTraj's robustness and adaptability, outperforming numerous benchmarks even in data-challenged scenarios without the need for additional information such as HD maps or vectorized maps. Importantly, it maintains competitive performance even in scenarios with substantial missing data (12.5%-50%), outperforming most existing state-of-the-art models. The results and methodology suggest a significant advancement in autonomous driving trajectory prediction, paving the way for safer and more efficient autonomous systems.",
+ "conference": {
+ "name": "IJCAI",
+ "year": 2024
+ },
+ "template": null,
+ "category": "01. Deep Learning Architectures and Methods",
+ "is_done": true,
+ "timestamp": "2025-08-05T04:29:33.902757",
+ "rule_paper_possible_url": null,
+ "github_base": null,
+ "llm_believed_url": null,
+ "rule_base_possible_url": null,
+ "confirmed_url": null,
+ "Internet_fail": null,
+ "html_fail": null,
+ "citation_data": {
+ "original_title": "MFTraj: Map-Free, Behavior-Driven Trajectory Prediction for Autonomous Driving",
+ "matched_title": "MFTraj: Map-Free, Behavior-Driven Trajectory Prediction for Autonomous Driving",
+ "citation_count": 15,
+ "similarity": 1.0,
+ "source": "semantic_scholar",
+ "year": 2024,
+ "authors": [
+ {
+ "authorId": "2269958997",
+ "name": "Haicheng Liao"
+ },
+ {
+ "authorId": "2273365115",
+ "name": "Zhenning Li"
+ },
+ {
+ "authorId": "2272610917",
+ "name": "Chengyue Wang"
+ },
+ {
+ "authorId": "2271098858",
+ "name": "Huanming Shen"
+ },
+ {
+ "authorId": "2298915388",
+ "name": "Bonan Wang"
+ },
+ {
+ "authorId": "2274931368",
+ "name": "Dongping Liao"
+ },
+ {
+ "authorId": "2272344125",
+ "name": "Guofa Li"
+ },
+ {
+ "authorId": "2272208211",
+ "name": "Chengzhong Xu"
+ }
+ ]
+ }
+ }
data_without_website/Marginalised_Gaussian_Processes_with_Nested_Sampling.json ADDED
@@ -0,0 +1,46 @@
+ {
+ "id": "zHj5fx11jQC",
+ "title": "Marginalised Gaussian Processes with Nested Sampling",
+ "track": "main",
+ "author": "Fergus Simpson;Vidhi Lalchand;Carl Edward Rasmussen",
+ "pdf": "https://openreview.net/pdf?id=zHj5fx11jQC",
+ "keyword": "",
+ "abstract": "Gaussian Process models are a rich distribution over functions with inductive biases controlled by a kernel function. Learning occurs through optimisation of the kernel hyperparameters using the marginal likelihood as the objective. This work proposes nested sampling as a means of marginalising kernel hyperparameters, because it is a technique that is well-suited to exploring complex, multi-modal distributions. We benchmark against Hamiltonian Monte Carlo on time-series and two-dimensional regression tasks, finding that a principled approach to quantifying hyperparameter uncertainty substantially improves the quality of prediction intervals.\n",
+ "conference": {
+ "name": "NIPS",
+ "year": 2021
+ },
+ "template": null,
+ "category": "04. Probabilistic Methods and Causal Inference",
+ "is_done": true,
+ "timestamp": "2025-08-04T04:27:04.267763",
+ "rule_paper_possible_url": null,
+ "github_base": null,
+ "llm_believed_url": null,
+ "rule_base_possible_url": null,
+ "confirmed_url": null,
+ "Internet_fail": null,
+ "html_fail": null,
+ "citation_data": {
+ "original_title": "Marginalised Gaussian Processes with Nested Sampling",
+ "matched_title": "Marginalised Gaussian Processes with Nested Sampling",
+ "citation_count": 10,
+ "similarity": 1.0,
+ "source": "semantic_scholar",
+ "year": 2020,
+ "authors": [
+ {
+ "authorId": "143941510",
+ "name": "F. Simpson"
+ },
+ {
+ "authorId": "2078423",
+ "name": "Vidhi Lalchand"
+ },
+ {
+ "authorId": "3472959",
+ "name": "C. Rasmussen"
+ }
+ ]
+ }
+ }
data_without_website/Mechanism_Design_for_Facility_Location_Problems__A_Survey.json ADDED
@@ -0,0 +1,54 @@
+ {
+ "id": "paper596",
+ "title": "Mechanism Design for Facility Location Problems: A Survey",
+ "track": "Survey Track",
+ "author": "Hau Chan; Aris Filos-Ratsikas; Bo Li; Minming Li; Chenhao Wang",
+ "pdf": "https://www.ijcai.org/proceedings/2021/0596.pdf",
+ "keyword": "",
+ "abstract": "The study of approximate mechanism design for facility location has been in the center of research at the intersection of artificial intelligence and economics for the last decade, largely due to its practical importance in various domains, such as social planning and clustering. At a high level, the goal is to select a number of locations on which to build a set of facilities, aiming to optimize some social objective based on the preferences of strategic agents, who might have incentives to misreport their private information. This paper presents a comprehensive survey of the significant progress that has been made since the introduction of the problem, highlighting all the different variants and methodologies, as well as the most interesting directions for future research.",
+ "conference": {
+ "name": "IJCAI",
+ "year": 2021
+ },
+ "template": null,
+ "category": "02. Reinforcement Learning and Control",
+ "is_done": true,
+ "timestamp": "2025-08-05T07:14:38.731146",
+ "rule_paper_possible_url": null,
+ "github_base": null,
+ "llm_believed_url": null,
+ "rule_base_possible_url": null,
+ "confirmed_url": null,
+ "Internet_fail": null,
+ "html_fail": null,
+ "citation_data": {
+ "original_title": "Mechanism Design for Facility Location Problems: A Survey",
+ "matched_title": "Mechanism Design for Facility Location Problems: A Survey",
+ "citation_count": 86,
+ "similarity": 1.0,
+ "source": "semantic_scholar",
+ "year": 2021,
+ "authors": [
+ {
+ "authorId": "143695668",
+ "name": "Hau Chan"
+ },
+ {
+ "authorId": "1403863711",
+ "name": "Aris Filos-Ratsikas"
+ },
+ {
+ "authorId": "2151287710",
+ "name": "Bo Li"
+ },
+ {
+ "authorId": "33099410",
+ "name": "Minming Li"
+ },
+ {
+ "authorId": "2299750244",
+ "name": "Chenhao Wang"
+ }
+ ]
+ }
+ }
data_without_website/Model-Driven_Labeled_Data_Free_Fine-tuning.json ADDED
@@ -0,0 +1,70 @@
+ {
+ "id": "nA9SCxGy2M",
+ "title": "Model-Driven Labeled Data Free Fine-tuning",
+ "track": "main",
+ "author": "Yuebin XU;Xuemei Peng;Zeyi Wen",
+ "pdf": "https://openreview.net/pdf?id=nA9SCxGy2M",
+ "keyword": "",
+ "abstract": "Supervised fine-tuning is a prevalent technique for boosting model performance. However, it heavily depends on extensive training over labeled data. This paper introduces a novel model-driven fine-tuning method that operates independently of supervised training and labeled data. By harnessing the collective intelligence of a diverse model pool, our method enhances individual model performance through a two-phase process. Initially, we consolidate the expertise of the models within the pool to create a general meta-model. This meta-model then serves as a guide for iteratively fine-tuning the original models in a few shots, promoting a synergistic improvement in performance. Our experimental results show that this model-driven approach not only surpasses the performance of full-parameter fine-tuning models but also does so without the need for supervised training. This breakthrough offers a cost-effective and scalable alternative to traditional supervised fine-tuning, addressing the challenge of data scarcity and paving the way for future research in unsupervised model enhancement. Our work represents a significant step towards making fine-tuning techniques more accessible and practical in environments where labeled data is limited or even unavailable.",
+ "conference": {
+ "name": "ICLR",
+ "year": 2025
+ },
+ "template": null,
+ "category": "03. ML Theory and Optimization",
+ "is_done": true,
+ "timestamp": "2025-08-05T03:59:12.493718",
+ "rule_paper_possible_url": null,
+ "github_base": null,
+ "llm_believed_url": null,
+ "rule_base_possible_url": null,
+ "confirmed_url": null,
+ "Internet_fail": null,
+ "html_fail": null,
+ "citation_data": {
+ "original_title": "Model-Driven Labeled Data Free Fine-tuning",
+ "matched_title": "Dynamic Sparse No Training: Training-Free Fine-tuning for Sparse LLMs",
+ "citation_count": 47,
+ "similarity": 0.41509433962264153,
+ "source": "semantic_scholar",
+ "year": 2023,
+ "authors": [
+ {
+ "authorId": "2108078624",
+ "name": "Yu-xin Zhang"
+ },
+ {
+ "authorId": "2258678648",
+ "name": "Lirui Zhao"
+ },
+ {
+ "authorId": "49352079",
+ "name": "Mingbao Lin"
+ },
+ {
+ "authorId": "2258670567",
+ "name": "Yunyun Sun"
+ },
+ {
+ "authorId": "2258671504",
+ "name": "Yiwu Yao"
+ },
+ {
+ "authorId": "2258598205",
+ "name": "Xingjia Han"
+ },
+ {
+ "authorId": "2258549938",
+ "name": "Jared Tanner"
+ },
+ {
+ "authorId": "2258718674",
+ "name": "Shiwei Liu"
+ },
+ {
+ "authorId": "2258551942",
+ "name": "Rongrong Ji"
+ }
+ ]
+ }
+ }
data_without_website/Nearly-tight_Bounds_for_Deep_Kernel_Learning.json ADDED
@@ -0,0 +1,42 @@
+ {
+ "id": "F5bcSnILOZ",
+ "title": "Nearly-tight Bounds for Deep Kernel Learning",
+ "track": "main",
+ "author": "Yifan Zhang;Min-Ling Zhang",
+ "pdf": "https://openreview.net/pdf?id=F5bcSnILOZ",
+ "keyword": "",
+ "abstract": "The generalization analysis of deep kernel learning (DKL) is a crucial and open problem of kernel methods for deep learning. The implicit nonlinear mapping in DKL makes existing methods of capacity-based generalization analysis for deep learning invalid. In an attempt to overcome this challenge and make up for the gap in the generalization theory of DKL, we develop an analysis method based on the composite relationship of function classes and derive capacity-based bounds with mild dependence on the depth, which generalizes learning theory bounds to deep kernels and serves as theoretical guarantees for the generalization of DKL. In this paper, we prove novel and nearly-tight generalization bounds based on the uniform covering number and the Rademacher chaos complexity for deep (multiple) kernel machines. In addition, for some common classes, we estimate their uniform covering numbers and Rademacher chaos complexities by bounding their pseudo-dimensions and kernel pseudo-dimensions, respectively. The mild bounds without strong assumptions partially explain the good generalization ability of deep learning combined with kernel methods.",
+ "conference": {
+ "name": "ICML",
+ "year": 2023
+ },
+ "template": null,
+ "category": "03. ML Theory and Optimization",
+ "is_done": true,
+ "timestamp": "2025-08-05T07:54:18.569479",
+ "rule_paper_possible_url": null,
+ "github_base": null,
+ "llm_believed_url": null,
+ "rule_base_possible_url": null,
+ "confirmed_url": null,
+ "Internet_fail": null,
+ "html_fail": null,
+ "citation_data": {
+ "original_title": "Nearly-tight Bounds for Deep Kernel Learning",
+ "matched_title": "Nearly-tight Bounds for Deep Kernel Learning",
+ "citation_count": 2,
+ "similarity": 1.0,
+ "source": "semantic_scholar",
+ "year": 2023,
+ "authors": [
+ {
+ "authorId": "48380079",
+ "name": "YiFan Zhang"
+ },
+ {
+ "authorId": "3039887",
+ "name": "Min-Ling Zhang"
+ }
+ ]
+ }
+ }
data_without_website/Neural_Collage_Transfer__Artistic_Reconstruction_via_Material_Manipulation.json ADDED
@@ -0,0 +1,53 @@
+ {
+ "id": "",
+ "title": "Neural Collage Transfer: Artistic Reconstruction via Material Manipulation",
+ "track": "main",
+ "author": "Ganghun Lee; Minji Kim; Yunsu Lee; Minsu Lee; Byoung-Tak Zhang",
+ "pdf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Lee_Neural_Collage_Transfer_Artistic_Reconstruction_via_Material_Manipulation_ICCV_2023_paper.pdf",
+ "keyword": "",
+ "abstract": "Collage is a creative art form that uses diverse material scraps as a base unit to compose a single image.\n Although pixel-wise generation techniques can reproduce a target image in collage style, it is not a suitable method due to the solid stroke-by-stroke nature of the collage form.\n While some previous works for stroke-based rendering produced decent sketches and paintings, collages have received much less attention in research despite their popularity as a style.\n In this paper, we propose a method for learning to make collages via reinforcement learning without the need for demonstrations or collage artwork data.\n We design the collage Markov Decision Process (MDP), which allows the agent to handle various materials and propose a model-based soft actor-critic to mitigate the agent's training burden derived from the sophisticated dynamics of collage.\n Moreover, we devise additional techniques such as active material selection and complexity-based multi-scale collage to handle target images at any size and enhance the results' aesthetics by placing relatively more scraps in areas of high complexity.\n Experimental results show that the trained agent appropriately selected and pasted materials to regenerate the target image into a collage and obtained a higher evaluation score on content and style than pixel-wise generation methods. Code is available at https://github.com/northadventure/CollageRL.",
+ "conference": {
+ "name": "ICCV",
+ "year": 2023
+ },
+ "github_base": "https://github.com/northadventure/CollageRL",
+ "template": null,
+ "category": "02. Reinforcement Learning and Control",
+ "is_done": true,
+ "timestamp": "2025-08-07T10:24:30.350332",
+ "log": {
+ "timestamp": "2025-08-07T10:24:30.350332",
+ "stage": "special situation",
+ "note": "The paper has no project homepage, but related GitHub information was found"
+ },
+ "citation_data": {
+ "original_title": "Neural Collage Transfer: Artistic Reconstruction via Material Manipulation",
+ "matched_title": "Neural Collage Transfer: Artistic Reconstruction via Material Manipulation",
+ "citation_count": 1,
+ "similarity": 1.0,
+ "source": "semantic_scholar",
+ "year": 2023,
+ "authors": [
+ {
+ "authorId": "2110880154",
+ "name": "Ganghun Lee"
+ },
+ {
+ "authorId": "2117956064",
+ "name": "Minji Kim"
+ },
+ {
+ "authorId": "2265575348",
+ "name": "Yunsu Lee"
+ },
+ {
+ "authorId": "2152165744",
+ "name": "M. Lee"
+ },
+ {
+ "authorId": "2260470204",
+ "name": "Byoung-Tak Zhang"
+ }
+ ]
+ }
+ }
data_without_website/Neural_Stochastic_Dual_Dynamic_Programming.json ADDED
@@ -0,0 +1,54 @@
+ {
+ "id": "aisKPsMM3fg",
+ "title": "Neural Stochastic Dual Dynamic Programming",
+ "track": "main",
+ "author": "Hanjun Dai;Yuan Xue;Zia Syed;Dale Schuurmans;Bo Dai",
+ "pdf": "https://openreview.net/pdf?id=aisKPsMM3fg",
+ "keyword": "",
+ "abstract": "Stochastic dual dynamic programming (SDDP) is a state-of-the-art method for solving multi-stage stochastic optimization, widely used for modeling real-world process optimization tasks. Unfortunately, SDDP has a worst-case complexity that scales exponentially in the number of decision variables, which severely limits applicability to only low dimensional problems. To overcome this limitation, we extend SDDP by introducing a trainable neural model that learns to map problem instances to a piece-wise linear value function within intrinsic low-dimension space, which is architected specifically to interact with a base SDDP solver, so that can accelerate optimization performance on new instances. The proposed Neural Stochastic Dual Dynamic Programming ($$\\nu$$-SDDP) continually self-improves by solving successive problems. An empirical investigation demonstrates that $$\\nu$$-SDDP can significantly reduce problem solving cost without sacrificing solution quality over competitors such as SDDP and reinforcement learning algorithms, across a range of synthetic and real-world process optimization problems.",
+ "conference": {
+ "name": "ICLR",
+ "year": 2022
+ },
+ "template": null,
+ "category": "02. Reinforcement Learning and Control",
+ "is_done": true,
+ "timestamp": "2025-08-04T03:49:30.761096",
+ "rule_paper_possible_url": null,
+ "github_base": null,
+ "llm_believed_url": null,
+ "rule_base_possible_url": null,
+ "confirmed_url": null,
+ "Internet_fail": null,
+ "html_fail": null,
+ "citation_data": {
+ "original_title": "Neural Stochastic Dual Dynamic Programming",
+ "matched_title": "Neural Stochastic Dual Dynamic Programming",
+ "citation_count": 14,
+ "similarity": 1.0,
+ "source": "semantic_scholar",
+ "year": 2021,
+ "authors": [
+ {
+ "authorId": "2791430",
+ "name": "H. Dai"
+ },
+ {
+ "authorId": "1556311931",
+ "name": "Yuan Xue"
+ },
+ {
+ "authorId": "2142842910",
+ "name": "Zia Syed"
+ },
+ {
+ "authorId": "50319359",
+ "name": "D. Schuurmans"
+ },
+ {
+ "authorId": "144445933",
+ "name": "Bo Dai"
+ }
+ ]
+ }
+ }
data_without_website/On_the_Convergence_to_a_Global_Solution_of_Shuffling-Type_Gradient_Algorithms.json ADDED
@@ -0,0 +1,42 @@
+ {
+ "id": "Nr1XSeDzpn",
+ "title": "On the Convergence to a Global Solution of Shuffling-Type Gradient Algorithms",
+ "track": "main",
+ "author": "Lam M. Nguyen;Trang H. Tran",
+ "pdf": "https://openreview.net/pdf?id=Nr1XSeDzpn",
+ "keyword": "",
+ "abstract": "Stochastic gradient descent (SGD) algorithm is the method of choice in many machine learning tasks thanks to its scalability and efficiency in dealing with large-scale problems. In this paper, we focus on the shuffling version of SGD which matches the mainstream practical heuristics. We show the convergence to a global solution of shuffling SGD for a class of non-convex functions under over-parameterized settings. Our analysis employs more relaxed non-convex assumptions than previous literature. Nevertheless, we maintain the desired computational complexity as shuffling SGD has achieved in the general convex setting.",
+ "conference": {
+ "name": "NIPS",
+ "year": 2023
+ },
+ "template": null,
+ "category": "03. ML Theory and Optimization",
+ "is_done": true,
+ "timestamp": "2025-08-04T08:54:17.307929",
+ "rule_paper_possible_url": null,
+ "github_base": null,
+ "llm_believed_url": null,
+ "rule_base_possible_url": null,
+ "confirmed_url": null,
+ "Internet_fail": null,
+ "html_fail": null,
+ "citation_data": {
+ "original_title": "On the Convergence to a Global Solution of Shuffling-Type Gradient Algorithms",
+ "matched_title": "On the Convergence to a Global Solution of Shuffling-Type Gradient Algorithms",
+ "citation_count": 2,
+ "similarity": 1.0,
+ "source": "semantic_scholar",
+ "year": 2022,
+ "authors": [
+ {
+ "authorId": "144274166",
+ "name": "Lam M. Nguyen"
+ },
+ {
+ "authorId": "2072581996",
+ "name": "Trang H. Tran"
+ }
+ ]
+ }
+ }
data_without_website/One_Step_at_a_Time__Long-Horizon_Vision-and-Language_Navigation_With_Milestones.json ADDED
@@ -0,0 +1,58 @@
+ {
+ "id": "",
+ "title": "One Step at a Time: Long-Horizon Vision-and-Language Navigation With Milestones",
+ "track": "main",
+ "author": "Chan Hee Song; Jihyung Kil; Tai-Yu Pan; Brian M. Sadler; Wei-Lun Chao; Yu Su",
+ "pdf": "https://openaccess.thecvf.com/content/CVPR2022/papers/Song_One_Step_at_a_Time_Long-Horizon_Vision-and-Language_Navigation_With_Milestones_CVPR_2022_paper.pdf",
+ "keyword": "",
+ "abstract": "We study the problem of developing autonomous agents that can follow human instructions to infer and perform a sequence of actions to complete the underlying task. Significant progress has been made in recent years, especially for tasks with short horizons. However, when it comes to long-horizon tasks with extended sequences of actions, an agent can easily ignore some instructions or get stuck in the middle of the long instructions and eventually fail the task. To address this challenge, we propose a model-agnostic milestone-based task tracker (M-Track) to guide the agent and monitor its progress. Specifically, we propose a milestone builder that tags the instructions with navigation and interaction milestones which the agent needs to complete step by step, and a milestone checker that systemically checks the agent's progress in its current milestone and determines when to proceed to the next. On the challenging ALFRED dataset, our M-Track leads to a notable 33% and 52% relative improvement in unseen success rate over two competitive base models.",
+ "conference": {
+ "name": "CVPR",
+ "year": 2022
+ },
+ "template": null,
+ "category": "02. Reinforcement Learning and Control",
+ "is_done": true,
+ "timestamp": "2025-08-05T11:46:00.223677",
+ "rule_paper_possible_url": null,
+ "github_base": null,
+ "llm_believed_url": null,
+ "rule_base_possible_url": null,
+ "confirmed_url": null,
+ "Internet_fail": null,
+ "html_fail": null,
+ "citation_data": {
+ "original_title": "One Step at a Time: Long-Horizon Vision-and-Language Navigation With Milestones",
+ "matched_title": "One Step at a Time: Long-Horizon Vision-and-Language Navigation with Milestones",
+ "citation_count": 35,
+ "similarity": 1.0,
+ "source": "semantic_scholar",
+ "year": 2022,
+ "authors": [
+ {
+ "authorId": "153409139",
+ "name": "Chan Hee Song"
+ },
+ {
+ "authorId": "2080027567",
+ "name": "Jihyung Kil"
+ },
+ {
+ "authorId": "2047046139",
+ "name": "Tai-Yu Pan"
+ },
+ {
+ "authorId": "1709722",
+ "name": "Brian M. Sadler"
+ },
+ {
+ "authorId": "2113951006",
+ "name": "Wei-Lun Chao"
+ },
+ {
+ "authorId": "1758652",
+ "name": "Yu Su"
+ }
+ ]
+ }
+ }
data_without_website/Optimizing_Connectivity_through_Network_Gradients_for_the_Restricted_Machine.json ADDED
@@ -0,0 +1,42 @@
+ {
+ "id": "7tmlbL5JQyt",
+ "title": "Optimizing Connectivity through Network Gradients for the Restricted Machine",
+ "track": "main",
+ "author": "Amanda C N de Oliveira;Daniel R. Figueiredo",
+ "pdf": "https://openreview.net/pdf?id=7tmlbL5JQyt",
+ "keyword": "",
+ "abstract": "Leveraging sparse networks to connect successive layers in deep neural networks has recently been shown to provide benefits to large scale state-of-the-art models. However, network connectivity also plays a significant role on the learning performance of shallow networks, such as the classic Restricted Boltzmann Machines (RBM). Efficiently finding sparse connectivity patterns that improve the learning performance of shallow networks is a fundamental problem. While recent principled approaches explicitly include network connections as model parameters that must be optimized, they often rely on explicit penalization or have network sparsity as a hyperparameter. This work presents a method to find optimal connectivity patterns for RBMs based on the idea of network gradients (NCG): computing the gradient of every possible connection, given a specific connection pattern, and using the gradient to drive a continuous connection strength parameter that in turn is used to determine the connection pattern. Thus, learning RBM parameters and learning network connections is truly jointly performed, albeit with different learning rates, and without changes to the objective function. The method is applied to the MNIST and other datasets showing that better RBM models are found for the benchmark tasks of sample generation and input classification. Results also show that NCG is robust to network initialization, both adding and removing network connections while learning. ",
+ "conference": {
+ "name": "ICLR",
+ "year": 2023
+ },
+ "template": null,
+ "category": "01. Deep Learning Architectures and Methods",
+ "is_done": true,
+ "timestamp": "2025-08-04T04:05:30.090685",
+ "rule_paper_possible_url": null,
+ "github_base": null,
+ "llm_believed_url": null,
+ "rule_base_possible_url": null,
+ "confirmed_url": null,
+ "Internet_fail": null,
+ "html_fail": null,
+ "citation_data": {
+ "original_title": "Optimizing Connectivity through Network Gradients for the Restricted Machine",
+ "matched_title": "Optimizing Connectivity through Network Gradients for the Restricted Boltzmann Machine",
+ "citation_count": 1,
+ "similarity": 0.9382716049382716,
+ "source": "semantic_scholar",
+ "year": 2022,
+ "authors": [
+ {
+ "authorId": "2364698796",
+ "name": "Amanda C. N. de Oliveira"
+ },
+ {
+ "authorId": "2364687133",
+ "name": "Daniel R. Figueiredo"
+ }
+ ]
+ }
+ }
data_without_website/PC-PG__Policy_Cover_Directed_Exploration_for_Provable_Policy_Gradient_Learning.json ADDED
@@ -0,0 +1,50 @@
+ {
+ "id": "18445",
+ "title": "PC-PG: Policy Cover Directed Exploration for Provable Policy Gradient Learning",
+ "track": "main",
+ "author": "Alekh Agarwal; Mikael Henaff; Sham Kakade; Wen Sun",
+ "pdf": "https://papers.nips.cc/paper_files/paper/2020/file/9b3a9fb4db30fc6594ec3990cbc09932-Paper.pdf",
+ "keyword": "",
+ "abstract": "Direct policy gradient methods for reinforcement learning are a successful approach for a variety of reasons: they are model free, they directly optimize the performance metric of interest, and they allow for richly parameterized policies. Their primary drawback is that, by being local in nature, they fail to adequately explore the environment. In contrast, while model-based approaches and Q-learning can, at least in theory, directly handle exploration through the use of optimism, their ability to handle model misspecification and function approximation is far less evident. This work introduces the POLICY COVER GUIDED POLICY GRADIENT (PC-PG) algorithm, which provably balances the exploration vs. exploitation tradeoff using an ensemble of learned policies (the policy cover). PC-PG enjoys polynomial sample complexity and run time for both tabular MDPs and, more generally, linear MDPs in an infinite dimensional RKHS. Furthermore, PC-PG also has strong guarantees under model misspecification that go beyond the standard worst case L infinity assumptions; these include approximation guarantees for state aggregation under an average case error assumption, along with guarantees under a more general assumption where the approximation error under distribution shift is controlled. We complement the theory with empirical evaluation across a variety of domains in both reward-free and reward-driven settings.",
+ "conference": {
+ "name": "NIPS",
+ "year": 2020
+ },
+ "template": null,
+ "category": "02. Reinforcement Learning and Control",
+ "is_done": true,
+ "timestamp": "2025-08-04T03:10:46.891785",
+ "rule_paper_possible_url": null,
+ "github_base": null,
+ "llm_believed_url": null,
+ "rule_base_possible_url": null,
+ "confirmed_url": null,
+ "Internet_fail": null,
+ "html_fail": null,
+ "citation_data": {
+ "original_title": "PC-PG: Policy Cover Directed Exploration for Provable Policy Gradient Learning",
+ "matched_title": "PC-PG: Policy Cover Directed Exploration for Provable Policy Gradient Learning",
+ "citation_count": 110,
+ "similarity": 1.0,
+ "source": "semantic_scholar",
+ "year": 2020,
+ "authors": [
+ {
+ "authorId": "40333747",
+ "name": "Alekh Agarwal"
+ },
+ {
+ "authorId": "39713408",
+ "name": "Mikael Henaff"
+ },
+ {
+ "authorId": "144695232",
+ "name": "S. Kakade"
+ },
+ {
+ "authorId": "144426657",
+ "name": "Wen Sun"
+ }
+ ]
+ }
+ }
data_without_website/RVSL__Robust_Vehicle_Similarity_Learning_in_Real_Hazy_Scenes_Based_on_Semi-Supervised_Learning.json ADDED
@@ -0,0 +1,58 @@
+ {
+ "id": "cdfaeff253",
+ "title": "RVSL: Robust Vehicle Similarity Learning in Real Hazy Scenes Based on Semi-Supervised Learning",
+ "track": "main",
+ "author": "Wei-Ting Chen; I-Hsiang Chen; Chih-Yuan Yeh; Hao-Hsiang Yang; Hua-En Chang; Jian-Jiun Ding; Sy-Yen Kuo",
+ "pdf": "https://www.ecva.net/papers/eccv_2022/papers_ECCV/papers/136740415.pdf",
+ "keyword": "",
+ "abstract": "\"Recently, vehicle similarity learning, also called re-identification (ReID), has attracted significant attention in computer vision. Several algorithms have been developed and obtained considerable success. However, most existing methods have unpleasant performance in the hazy scenario due to poor visibility. Though some strategies are possible to resolve this problem, they still have room to be improved due to the limited performance in real-world scenarios and the lack of real-world clear ground truth. Thus, to resolve this problem, inspired by CycleGAN, we construct a training paradigm called \\textbf{RVSL} which integrates ReID and domain transformation techniques. The network is trained on semi-supervised fashion and does not require to employ the ID labels and the corresponding clear ground truths to learn hazy vehicle ReID mission in the real-world haze scenes. To further constrain the unsupervised learning process effectively, several losses are developed. Experimental results on synthetic and real-world datasets indicate that the proposed method can achieve state-of-the-art performance on hazy vehicle ReID problems. It is worth mentioning that although the proposed method is trained without real-world label information, it can achieve competitive performance compared to existing supervised methods trained on complete label information.\"",
+ "conference": {
+ "name": "ECCV",
+ "year": 2022
+ },
+ "rule_final_url": "https://github.com/Cihsaing/rvsl-robust-vehicle-similarity-learning--ECCV22",
+ "Internet_problem": "https://github.com/Cihsaing/",
+ "llm_believed_url": "https://github.com/Cihsaing/rvsl-robust-vehicle-similarity-learning--ECCV22",
+ "template": null,
+ "category": "05. 3D Vision and Computational Graphics",
+ "is_done": true,
+ "timestamp": "2025-08-05T13:47:00.458820",
+ "citation_data": {
+ "original_title": "RVSL: Robust Vehicle Similarity Learning in Real Hazy Scenes Based on Semi-Supervised Learning",
+ "matched_title": "RVSL: Robust Vehicle Similarity Learning in Real Hazy Scenes Based on Semi-supervised Learning",
+ "citation_count": 9,
+ "similarity": 1.0,
+ "source": "semantic_scholar",
+ "year": 2022,
+ "authors": [
+ {
+ "authorId": "2144304732",
+ "name": "Wei-Ting Chen"
+ },
+ {
+ "authorId": "2149826263",
+ "name": "I-Hsiang Chen"
+ },
+ {
+ "authorId": "1801891",
+ "name": "C. Yeh"
+ },
+ {
+ "authorId": "50841357",
+ "name": "Han Yang"
+ },
+ {
+ "authorId": "30854764",
+ "name": "Hua-En Chang"
+ },
+ {
+ "authorId": "3327380",
+ "name": "Jianwei Ding"
+ },
+ {
+ "authorId": "2056250830",
+ "name": "Sy-Yen Kuo"
+ }
+ ]
+ }
+ }
data_without_website/Randomized_Dimensionality_Reduction_for_Facility_Location_and_Single-Linkage_Clustering.json ADDED
@@ -0,0 +1,50 @@
+ {
+ "id": "9975",
+ "title": "Randomized Dimensionality Reduction for Facility Location and Single-Linkage Clustering",
+ "track": "main",
+ "author": "Shyam Narayanan; Sandeep Silwal; Piotr Indyk; Or Zamir",
+ "pdf": "http://proceedings.mlr.press/v139/narayanan21b/narayanan21b.pdf",
+ "keyword": "",
+ "abstract": "Random dimensionality reduction is a versatile tool for speeding up algorithms for high-dimensional problems. We study its application to two clustering problems: the facility location problem, and the single-linkage hierarchical clustering problem, which is equivalent to computing the minimum spanning tree. We show that if we project the input pointset $X$ onto a random $d = O(d_X)$-dimensional subspace (where $d_X$ is the doubling dimension of $X$), then the optimum facility location cost in the projected space approximates the original cost up to a constant factor. We show an analogous statement for minimum spanning tree, but with the dimension $d$ having an extra $\\log \\log n$ term and the approximation factor being arbitrarily close to $1$. Furthermore, we extend these results to approximating {\\em solutions} instead of just their {\\em costs}. Lastly, we provide experimental results to validate the quality of solutions and the speedup due to the dimensionality reduction. Unlike several previous papers studying this approach in the context of $k$-means and $k$-medians, our dimension bound does not depend on the number of clusters but only on the intrinsic dimensionality of $X$.",
+ "conference": {
+ "name": "ICML",
+ "year": 2021
+ },
+ "template": null,
+ "category": "03. ML Theory and Optimization",
+ "is_done": true,
+ "timestamp": "2025-08-05T07:47:44.586588",
+ "rule_paper_possible_url": null,
+ "github_base": null,
+ "llm_believed_url": null,
+ "rule_base_possible_url": null,
+ "confirmed_url": null,
+ "Internet_fail": null,
+ "html_fail": null,
+ "citation_data": {
+ "original_title": "Randomized Dimensionality Reduction for Facility Location and Single-Linkage Clustering",
+ "matched_title": "Randomized Dimensionality Reduction for Facility Location and Single-Linkage Clustering",
+ "citation_count": 12,
+ "similarity": 1.0,
+ "source": "semantic_scholar",
+ "year": 2021,
+ "authors": [
+ {
+ "authorId": "50812086",
+ "name": "Shyam Narayanan"
+ },
+ {
+ "authorId": "51221589",
+ "name": "Sandeep Silwal"
+ },
+ {
+ "authorId": "1688317",
+ "name": "P. Indyk"
+ },
+ {
+ "authorId": "3147193",
+ "name": "Or Zamir"
+ }
+ ]
+ }
+ }
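
A quick numerical illustration of the main claim, as a sketch of our own rather than the paper's experiments: project a high-dimensional point set with a random Gaussian matrix and compare the single-linkage / minimum-spanning-tree cost before and after; the dimensions and data below are arbitrary.

import numpy as np
from scipy.sparse.csgraph import minimum_spanning_tree
from scipy.spatial.distance import pdist, squareform

rng = np.random.default_rng(0)
n, D, d = 200, 1000, 20                       # points, ambient dim, target dim
X = rng.normal(size=(n, D))
G = rng.normal(size=(D, d)) / np.sqrt(d)      # Johnson-Lindenstrauss style projection
Y = X @ G

def mst_cost(P):                              # single-linkage clustering cost
    return minimum_spanning_tree(squareform(pdist(P))).sum()

print("MST cost, original:  %.1f" % mst_cost(X))
print("MST cost, projected: %.1f" % mst_cost(Y))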
data_without_website/Reliable_learning_in_challenging_environments.json ADDED
@@ -0,0 +1,50 @@
+ {
+ "id": "1h7Uh9zUXc",
+ "title": "Reliable learning in challenging environments",
+ "track": "main",
+ "author": "Nina Balcan;Steve Hanneke;Rattana Pukdee;Dravyansh Sharma",
+ "pdf": "https://openreview.net/pdf?id=1h7Uh9zUXc",
+ "keyword": "",
+ "abstract": "The problem of designing learners that provide guarantees that their predictions are provably correct is of increasing importance in machine learning. However, learning theoretic guarantees have only been considered in very specific settings. In this work, we consider the design and analysis of reliable learners in challenging test-time environments as encountered in modern machine learning problems: namely adversarial test-time attacks (in several variations) and natural distribution shifts. We provide a reliable learner with provably optimal guarantees in such settings. We discuss computationally feasible implementations of the learner and further show that our algorithm achieves strong positive performance guarantees on several natural examples: for example, linear separators under log-concave distributions or smooth boundary classifiers under smooth probability distributions.",
+ "conference": {
+ "name": "NIPS",
+ "year": 2023
+ },
+ "template": null,
+ "category": "03. ML Theory and Optimization",
+ "is_done": true,
+ "timestamp": "2025-08-04T08:51:58.428106",
+ "rule_paper_possible_url": null,
+ "github_base": null,
+ "llm_believed_url": null,
+ "rule_base_possible_url": null,
+ "confirmed_url": null,
+ "Internet_fail": null,
+ "html_fail": null,
+ "citation_data": {
+ "original_title": "Reliable learning in challenging environments",
+ "matched_title": "Reliable learning in challenging environments",
+ "citation_count": 7,
+ "similarity": 1.0,
+ "source": "semantic_scholar",
+ "year": 2023,
+ "authors": [
+ {
+ "authorId": "1745410",
+ "name": "Maria-Florina Balcan"
+ },
+ {
+ "authorId": "1762565",
+ "name": "Steve Hanneke"
+ },
+ {
+ "authorId": "1999340664",
+ "name": "Rattana Pukdee"
+ },
+ {
+ "authorId": "2993069",
+ "name": "Dravyansh Sharma"
+ }
+ ]
+ }
+ }
data_without_website/Rethinking_Masked_Data_Reconstruction_Pretraining_for_Strong_3D_Action_Representation_Learning.json ADDED
@@ -0,0 +1,50 @@
+ {
+ "id": "article-32324",
+ "title": "Rethinking Masked Data Reconstruction Pretraining for Strong 3D Action Representation Learning",
+ "track": "main",
+ "author": "Tao Gong; Qi Chu; Bin Liu; Nenghai Yu",
+ "pdf": "https://ojs.aaai.org/index.php/AAAI/article/view/32324/34479",
+ "keyword": "",
+ "abstract": "In 3D human action recognition, limited supervised data makes it challenging to fully tap into the modeling potential of powerful networks such as transformers. As a result, researchers have been actively investigating effective self-supervised pre-training strategies. For example, MAMP shows that instead of following the prevalent masked joint reconstruction, explicit masked motion reconstruction is key to the success of learning effective feature representation for 3D action recognition. However, we find that if we make a simple and effective change to the reconstruction target of masked joint reconstruction, masked joint reconstruction can achieve the same results as masked motion reconstruction. The devil is in the special characteristics of 3D skeleton data and the normalization process of training targets. We need to dig out all the effective information in the targets during normalization. Besides, considering that masked data reconstruction focuses more on learning local relations in input data for fulfilling the reconstruction task, instead of modeling the relation among samples, we further employ contrastive learning to learn more discriminative 3D action representations. We show that contrastive learning can consistently boost the performance of the model pre-trained by masked joint prediction under various settings, especially in the semi-supervised setting that has a very limited number of labeled samples. Extensive experiments on NTU-60, NTU-120, and PKU-MMD datasets show that the proposed pre-training strategy achieves state-of-the-art results without bells and whistles.",
+ "conference": {
+ "name": "AAAI",
+ "year": 2025
+ },
+ "template": null,
+ "category": "05. 3D Vision and Computational Graphics",
+ "is_done": true,
+ "timestamp": "2025-08-05T02:43:51.887872",
+ "rule_paper_possible_url": null,
+ "github_base": null,
+ "llm_believed_url": null,
+ "rule_base_possible_url": null,
+ "confirmed_url": null,
+ "Internet_fail": null,
+ "html_fail": null,
+ "citation_data": {
+ "original_title": "Rethinking Masked Data Reconstruction Pretraining for Strong 3D Action Representation Learning",
+ "matched_title": "Rethinking Masked Data Reconstruction Pretraining for Strong 3D Action Representation Learning",
+ "citation_count": 0,
+ "similarity": 1.0,
+ "source": "semantic_scholar",
+ "year": 2025,
+ "authors": [
+ {
+ "authorId": "2269736602",
+ "name": "Tao Gong"
+ },
+ {
+ "authorId": "2047192315",
+ "name": "Qi Chu"
+ },
+ {
+ "authorId": "2266223672",
+ "name": "Bin Liu"
+ },
+ {
+ "authorId": "2244620568",
+ "name": "Nenghai Yu"
+ }
+ ]
+ }
+ }
data_without_website/Revisiting_the_Stack-Based_Inverse_Tone_Mapping.json ADDED
@@ -0,0 +1,50 @@
+ {
+ "id": "22098",
+ "title": "Revisiting the Stack-Based Inverse Tone Mapping",
+ "track": "main",
+ "author": "",
+ "pdf": "https://openaccess.thecvf.com/content/CVPR2023/papers/Zhang_Revisiting_the_Stack-Based_Inverse_Tone_Mapping_CVPR_2023_paper.pdf",
+ "keyword": "",
+ "abstract": "Revisiting the Stack-Based Inverse Tone Mapping Ning Zhang1,3, Yuyao Ye1,3, Yang Zhao2,3, and Ronggang Wang1,3 1School of Electronics and Computer Engineering, Peking University 2School of Computer and Information, Hefei University of Technology 3Peng Cheng Laboratory zhangn77@pku.edu.cn yeyuyao@pku",
+ "conference": {
+ "name": "CVPR",
+ "year": 2023
+ },
+ "template": null,
+ "category": "05. 3D Vision and Computational Graphics",
+ "is_done": true,
+ "timestamp": "2025-08-05T05:12:58.406663",
+ "rule_paper_possible_url": null,
+ "github_base": null,
+ "llm_believed_url": null,
+ "rule_base_possible_url": null,
+ "confirmed_url": null,
+ "Internet_fail": null,
+ "html_fail": null,
+ "citation_data": {
+ "original_title": "Revisiting the Stack-Based Inverse Tone Mapping",
+ "matched_title": "Revisiting the Stack-Based Inverse Tone Mapping",
+ "citation_count": 5,
+ "similarity": 1.0,
+ "source": "semantic_scholar",
+ "year": 2023,
+ "authors": [
+ {
+ "authorId": "2153008995",
+ "name": "Ning Zhang"
+ },
+ {
+ "authorId": "2087707502",
+ "name": "Yuyao Ye"
+ },
+ {
+ "authorId": "9083546",
+ "name": "Yangshen Zhao"
+ },
+ {
+ "authorId": "2155887982",
+ "name": "Ronggang Wang"
+ }
+ ]
+ }
+ }
data_without_website/Sentence-Incremental_Neural_Coreference_Resolution.json ADDED
@@ -0,0 +1,45 @@
+ {
+ "id": "2022.emnlp-main.28",
+ "title": "Sentence-Incremental Neural Coreference Resolution",
+ "track": "main",
+ "author": "Matt Grenander; Shay B. Cohen; Mark Steedman",
+ "pdf": "https://aclanthology.org/2022.emnlp-main.28.pdf",
+ "keyword": "",
+ "abstract": "We propose a sentence-incremental neural coreference resolution system which incrementally builds clusters after marking mention boundaries in a shift-reduce method. The system is aimed at bridging two recent approaches at coreference resolution: (1) state-of-the-art non-incremental models that incur quadratic complexity in document length with high computational cost, and (2) memory network-based models which operate incrementally but do not generalize beyond pronouns. For comparison, we simulate an incremental setting by constraining non-incremental systems to form partial coreference chains before observing new sentences. In this setting, our system outperforms comparable state-of-the-art methods by 2 F1 on OntoNotes and 6.8 F1 on the CODI-CRAC 2021 corpus. In a conventional coreference setup, our system achieves 76.3 F1 on OntoNotes and 45.5 F1 on CODI-CRAC 2021, which is comparable to state-of-the-art baselines. We also analyze variations of our system and show that the degree of incrementality in the encoder has a surprisingly large effect on the resulting performance.",
+ "conference": {
+ "name": "EMNLP",
+ "year": 2022
+ },
+ "github_base": "https://github.com/mgrenander/sentence-incremental-coref",
+ "template": null,
+ "category": "06. Natural Language Understanding and Semantics",
+ "is_done": true,
+ "timestamp": "2025-08-07T07:11:06.192576",
+ "log": {
+ "timestamp": "2025-08-07T07:11:06.192576",
+ "stage": "special situation",
+ "note": "The paper has no project homepage, but related GitHub information was found"
+ },
+ "citation_data": {
+ "original_title": "Sentence-Incremental Neural Coreference Resolution",
+ "matched_title": "Sentence-Incremental Neural Coreference Resolution",
+ "citation_count": 5,
+ "similarity": 1.0,
+ "source": "semantic_scholar",
+ "year": 2023,
+ "authors": [
+ {
+ "authorId": "2097949114",
+ "name": "Matt Grenander"
+ },
+ {
+ "authorId": "40146204",
+ "name": "Shay B. Cohen"
+ },
+ {
+ "authorId": "145332819",
+ "name": "Mark Steedman"
+ }
+ ]
+ }
+ }
data_without_website/Shielded_Diffusion__Generating_Novel_and_Diverse_Images_using_Sparse_Repellency.json ADDED
@@ -0,0 +1,58 @@
+ {
+ "id": "XAckVo0iNj",
+ "title": "Shielded Diffusion: Generating Novel and Diverse Images using Sparse Repellency",
+ "track": "main",
+ "author": "Michael Kirchhof;James Thornton;Louis Béthune;Pierre Ablin;Eugene Ndiaye;marco cuturi",
+ "pdf": "https://openreview.net/pdf?id=XAckVo0iNj",
+ "keyword": "",
+ "abstract": "The adoption of text-to-image diffusion models raises concerns over reliability, drawing scrutiny under the lens of various metrics like calibration, fairness, or compute efficiency. We focus in this work on two issues that arise when deploying these models: a lack of diversity when prompting images, and a tendency to recreate images from the training set. To solve both problems, we propose a method that coaxes the sampled trajectories of pretrained diffusion models to land on images that fall outside of a reference set. We achieve this by adding repellency terms to the diffusion SDE throughout the generation trajectory, which are triggered whenever the path is expected to land too closely to an image in the shielded reference set. Our method is sparse in the sense that these repellency terms are zero and inactive most of the time, and even more so towards the end of the generation trajectory. Our method, named SPELL for sparse repellency, can be used either with a static reference set that contains protected images, or dynamically, by updating the set at each timestep with the expected images concurrently generated within a batch, and with the images of previously generated batches. We show that adding SPELL to popular diffusion models improves their diversity while impacting their FID only marginally, and performs comparatively better than other recent training-free diversity methods. We also demonstrate how SPELL can ensure a shielded generation away from a very large set of protected images by considering all 1.2M images from ImageNet as the protected set.",
+ "conference": {
+ "name": "ICML",
+ "year": 2025
+ },
+ "template": null,
+ "category": "07. Generative Model",
+ "is_done": true,
+ "timestamp": "2025-08-05T12:05:10.265648",
+ "rule_paper_possible_url": null,
+ "github_base": null,
+ "llm_believed_url": null,
+ "rule_base_possible_url": null,
+ "confirmed_url": null,
+ "Internet_fail": null,
+ "html_fail": null,
+ "citation_data": {
+ "original_title": "Shielded Diffusion: Generating Novel and Diverse Images using Sparse Repellency",
+ "matched_title": "Shielded Diffusion: Generating Novel and Diverse Images using Sparse Repellency",
+ "citation_count": 1,
+ "similarity": 1.0,
+ "source": "semantic_scholar",
+ "year": 2024,
+ "authors": [
+ {
+ "authorId": "2257346405",
+ "name": "Michael Kirchhof"
+ },
+ {
+ "authorId": "2282532729",
+ "name": "James Thornton"
+ },
+ {
+ "authorId": "1763708",
+ "name": "Pierre Ablin"
+ },
+ {
+ "authorId": "2065593336",
+ "name": "Louis Béthune"
+ },
+ {
+ "authorId": "2282531350",
+ "name": "Eugene Ndiaye"
+ },
+ {
+ "authorId": "2258552804",
+ "name": "Marco Cuturi"
+ }
+ ]
+ }
+ }
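
A toy sketch of the sparse repellency step, in our own notation rather than the authors' implementation: at each sampling step, the predicted clean image is compared against the shielded set, and a repellency term activates only when the prediction falls inside a protection radius, leaving the trajectory untouched otherwise.

import numpy as np

def repellency(x0_pred, protected, r):
    delta = np.zeros_like(x0_pred)
    for p in protected:                       # sparse: most terms are zero
        diff = x0_pred - p
        dist = np.linalg.norm(diff)
        if dist < r:                          # active only inside the protection radius
            delta += (r - dist) * diff / (dist + 1e-8)
    return delta

rng = np.random.default_rng(1)
protected = [rng.normal(size=16) for _ in range(3)]
x0 = protected[0] + 0.05 * rng.normal(size=16)    # prediction too close to a shielded image
x0 = x0 + repellency(x0, protected, r=1.0)        # pushed back out to roughly radius r
print("distance after repellency:", np.linalg.norm(x0 - protected[0]))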
data_without_website/Swapping_Autoencoder_for_Deep_Image_Manipulation.json ADDED
@@ -0,0 +1,62 @@
+ {
+ "id": "17001",
+ "title": "Swapping Autoencoder for Deep Image Manipulation",
+ "track": "main",
+ "author": "Taesung Park; Jun-Yan Zhu; Oliver Wang; Jingwan Lu; Eli Shechtman; Alexei Efros; Richard Zhang",
+ "pdf": "https://papers.nips.cc/paper_files/paper/2020/file/50905d7b2216bfeccb5b41016357176b-Paper.pdf",
+ "keyword": "",
+ "abstract": "Deep generative models have become increasingly effective at producing realistic images from randomly sampled seeds, but using such models for controllable manipulation of existing images remains challenging. We propose the Swapping Autoencoder, a deep model designed specifically for image manipulation, rather than random sampling. The key idea is to encode an image into two independent components and enforce that any swapped combination maps to a realistic image. In particular, we encourage the components to represent structure and texture, by enforcing one component to encode co-occurrent patch statistics across different parts of the image. As our method is trained with an encoder, finding the latent codes for a new input image becomes trivial, rather than cumbersome. As a result, our method enables us to manipulate real input images in various ways, including texture swapping, local and global editing, and latent code vector arithmetic. Experiments on multiple datasets show that our model produces better results and is substantially more efficient compared to recent generative models.",
+ "conference": {
+ "name": "NIPS",
+ "year": 2020
+ },
+ "template": null,
+ "category": "07. Generative Model",
+ "is_done": true,
+ "timestamp": "2025-08-05T14:35:29.806663",
+ "rule_paper_possible_url": null,
+ "github_base": null,
+ "llm_believed_url": null,
+ "rule_base_possible_url": null,
+ "confirmed_url": null,
+ "Internet_fail": null,
+ "html_fail": null,
+ "citation_data": {
+ "original_title": "Swapping Autoencoder for Deep Image Manipulation",
+ "matched_title": "Swapping Autoencoder for Deep Image Manipulation",
+ "citation_count": 337,
+ "similarity": 1.0,
+ "source": "semantic_scholar",
+ "year": 2020,
+ "authors": [
+ {
+ "authorId": "2071929129",
+ "name": "Taesung Park"
+ },
+ {
+ "authorId": "2436356",
+ "name": "Jun-Yan Zhu"
+ },
+ {
+ "authorId": "39231399",
+ "name": "Oliver Wang"
+ },
+ {
+ "authorId": "2054975",
+ "name": "Jingwan Lu"
+ },
+ {
+ "authorId": "2177801",
+ "name": "Eli Shechtman"
+ },
+ {
+ "authorId": "1763086",
+ "name": "Alexei A. Efros"
+ },
+ {
+ "authorId": "2844849",
+ "name": "Richard Zhang"
+ }
+ ]
+ }
+ }
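
A shape-level sketch of the swapping idea, using stand-in linear maps of our own rather than the paper's architecture: each image is encoded into a structure code and a texture code, and the decoder is fed swapped combinations; in training, a discriminator would be asked to accept every such hybrid as realistic.

import numpy as np

rng = np.random.default_rng(0)
W_s = rng.normal(size=(64, 8))                 # stand-in "structure" encoder
W_t = rng.normal(size=(64, 4))                 # stand-in "texture" encoder
W_d = rng.normal(size=(12, 64))                # stand-in decoder

def encode(x):
    return x @ W_s, x @ W_t                    # (structure code, texture code)

def decode(z_s, z_t):
    return np.concatenate([z_s, z_t]) @ W_d

x1, x2 = rng.normal(size=64), rng.normal(size=64)
(z1s, z1t), (z2s, z2t) = encode(x1), encode(x2)
hybrid = decode(z1s, z2t)                      # structure of x1, texture of x2
print(hybrid.shape)                            # same shape as an input image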
data_without_website/Towards_Efficient_and_Accurate_Identification_of_Memorization_in_Deep_Models.json ADDED
@@ -0,0 +1,27 @@
+ {
+ "id": "bCi3Jz0q02",
+ "title": "Towards Efficient and Accurate Identification of Memorization in Deep Models",
+ "track": "main",
+ "author": "Hadi Abdullah",
+ "pdf": "https://openreview.net/pdf?id=bCi3Jz0q02",
+ "keyword": "",
+ "abstract": "\\textit{Memorization} is the ability of deep models to learn verbatim arbitrary inputs from the training data. One of the most popular means of calculating memorization scores (i.e., the probability that a point is memorized) is via the pseudo Leave-One-Out (pLOO) method proposed by~\\citet{feldman2020longtail}. However, this technique suffers from two shortcomings: it is computationally prohibitive (as it requires training thousands of models) and it produces inaccurate scores. The goal of this work is to overcome both these limitations simultaneously. To do so, we take the following approach: \\textbf{First}, we demonstrate that the major source of pLOO's computation bottleneck is its execution on the entire dataset, not just the memorized points. We find running pLOO on all the points is unnecessary since most of them are not even memorized. \\textbf{Second}, we develop a simple proxy to identify the memorized points without having to run pLOO in the first place. To do so, we study the model training cycle and find that memorized points are learned towards the last iterations. We build a simple proxy based on this observation and find that our proxy: \\textit{a)} is strongly correlated with the actual memorization scores (Pearson score $<-0.95$) across all our models and datasets and \\textit{b)} requires only a single model (instead of the thousands needed by pLOO). However, our proxy does not provide the exact memorization scores. \\textbf{Third}, to calculate these, we incorporate our proxy into the pLOO method, resulting in pLOO\\textsubscript{\\textit{improved}}. In doing so, we show that our pLOO\\textsubscript{\\textit{improved}} reduces both computational overhead (by over 90\\%) and the error in the approximated memorization scores (by over 65\\%). Therefore, our work makes it possible to study memorization in large datasets and real-world models while requiring only a fraction of the computational resources.",
+ "conference": {
+ "name": "ICLR",
+ "year": 2025
+ },
+ "template": null,
+ "category": "03. ML Theory and Optimization",
+ "is_done": true,
+ "timestamp": "2025-08-05T03:37:58.507843",
+ "rule_paper_possible_url": null,
+ "github_base": null,
+ "llm_believed_url": null,
+ "rule_base_possible_url": null,
+ "confirmed_url": null,
+ "Internet_fail": null,
+ "html_fail": null,
+ "citation_data": {
+ "citation_count": 0
+ }
+ }
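
A small sketch of the late-learning proxy as we read it, on synthetic data with invented names: record, per training example, the epoch at which its final streak of correct predictions begins; the latest-learned examples become the candidate memorized points on which exact pLOO scores would then be computed.

import numpy as np

def first_stable_epoch(correct):               # correct: epochs x examples booleans
    E, N = correct.shape
    learned = np.empty(N, dtype=int)
    for i in range(N):
        e = E
        while e > 0 and correct[e - 1, i]:     # walk back through the final correct streak
            e -= 1
        learned[i] = e                         # E means "never stably learned"
    return learned

rng = np.random.default_rng(0)
# synthetic per-epoch correctness: accuracy ramps up over 50 epochs, 10 examples
correct = rng.random((50, 10)) < np.linspace(0.2, 1.0, 50)[:, None]
scores = first_stable_epoch(correct)
print("candidate memorized examples:", np.argsort(scores)[-3:])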
data_without_website/Transformer_Interpretability_Beyond_Attention_Visualization.json ADDED
@@ -0,0 +1,47 @@
+ {
+ "id": "",
+ "title": "Transformer Interpretability Beyond Attention Visualization",
+ "track": "main",
+ "author": "Hila Chefer; Shir Gur; Lior Wolf",
+ "pdf": "https://openaccess.thecvf.com/content/CVPR2021/papers/Chefer_Transformer_Interpretability_Beyond_Attention_Visualization_CVPR_2021_paper.pdf",
+ "keyword": "",
+ "abstract": "Self-attention techniques, and specifically Transformers, are dominating the field of text processing and are becoming increasingly popular in computer vision classification tasks. In order to visualize the parts of the image that led to a certain classification, existing methods either rely on the obtained attention maps or employ heuristic propagation along the attention graph. In this work, we propose a novel way to compute relevancy for Transformer networks. The method assigns local relevance based on the Deep Taylor Decomposition principle and then propagates these relevancy scores through the layers. This propagation involves attention layers and skip connections, which challenge existing methods. Our solution is based on a specific formulation that is shown to maintain the total relevancy across layers. We benchmark our method on very recent visual Transformer networks, as well as on a text classification problem, and demonstrate a clear advantage over the existing explainability methods.",
+ "conference": {
+ "name": "CVPR",
+ "year": 2021
+ },
+ "Internet_problem": "https://github.com/hila-",
+ "template": null,
+ "category": "01. Deep Learning Architectures and Methods",
+ "is_done": true,
+ "timestamp": "2025-08-05T09:31:32.551730",
+ "rule_paper_possible_url": null,
+ "github_base": null,
+ "llm_believed_url": null,
+ "rule_base_possible_url": null,
+ "confirmed_url": null,
+ "Internet_fail": null,
+ "html_fail": null,
+ "citation_data": {
+ "original_title": "Transformer Interpretability Beyond Attention Visualization",
+ "matched_title": "Transformer Interpretability Beyond Attention Visualization",
+ "citation_count": 705,
+ "similarity": 1.0,
+ "source": "semantic_scholar",
+ "year": 2020,
+ "authors": [
+ {
+ "authorId": "2038268012",
+ "name": "Hila Chefer"
+ },
+ {
+ "authorId": "47509360",
+ "name": "Shir Gur"
+ },
+ {
+ "authorId": "48519520",
+ "name": "Lior Wolf"
+ }
+ ]
+ }
+ }
data_without_website/Unified_Interpretation_of_Smoothing_Methods_for_Negative_Sampling_Loss_Functions_in_Knowledge_Graph_Embedding.json ADDED
@@ -0,0 +1,50 @@
+ {
+ "id": "Oz6ABL8o8C",
+ "title": "Unified Interpretation of Smoothing Methods for Negative Sampling Loss Functions in Knowledge Graph Embedding",
+ "track": "main",
+ "author": "Xincan Feng;Hidetaka Kamigaito;Katsuhiko Hayashi;Taro Watanabe",
+ "pdf": "https://openreview.net/pdf?id=Oz6ABL8o8C",
+ "keyword": "",
+ "abstract": "Knowledge Graphs (KGs) are fundamental resources in knowledge-intensive tasks in NLP. Due to the limitation of manually creating KGs, KG Completion (KGC) has an important role in automatically completing KGs by scoring their links with KG Embedding (KGE). To handle many entities in training, KGE relies on Negative Sampling (NS) loss that can reduce the computational cost by sampling. Since the appearance frequencies for each link are at most one in KGs, sparsity is an essential and inevitable problem. The NS loss is no exception. As a solution, the NS loss in KGE relies on smoothing methods like Self-Adversarial Negative Sampling (SANS) and subsampling. However, it is uncertain what kind of smoothing method is suitable for this purpose due to the lack of theoretical understanding. This paper provides theoretical interpretations of the smoothing methods for the NS loss in KGE and induces a new NS loss, Triplet-based SANS (T-SANS), that can cover the characteristics of the conventional smoothing methods. Experimental results on FB15k-237, WN18RR, and YAGO3-10 datasets showed the soundness of our interpretation and performance improvement by our T-SANS.",
+ "conference": {
+ "name": "ICLR",
+ "year": 2024
+ },
+ "template": null,
+ "category": "06. Natural Language Understanding and Semantics",
+ "is_done": true,
+ "timestamp": "2025-08-04T06:06:01.725171",
+ "rule_paper_possible_url": null,
+ "github_base": null,
+ "llm_believed_url": null,
+ "rule_base_possible_url": null,
+ "confirmed_url": null,
+ "Internet_fail": null,
+ "html_fail": null,
+ "citation_data": {
+ "original_title": "Unified Interpretation of Smoothing Methods for Negative Sampling Loss Functions in Knowledge Graph Embedding",
+ "matched_title": "Unified Interpretation of Smoothing Methods for Negative Sampling Loss Functions in Knowledge Graph Embedding",
+ "citation_count": 1,
+ "similarity": 1.0,
+ "source": "semantic_scholar",
+ "year": 2024,
+ "authors": [
+ {
+ "authorId": "2187667896",
+ "name": "Xincan Feng"
+ },
+ {
+ "authorId": "2300756",
+ "name": "Hidetaka Kamigaito"
+ },
+ {
+ "authorId": "2087025575",
+ "name": "Katsuhiko Hayashi"
+ },
+ {
+ "authorId": "2266807418",
+ "name": "Taro Watanabe"
+ }
+ ]
+ }
+ }
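
For orientation, a minimal numpy version of the vanilla self-adversarial negative sampling (SANS) loss that the paper generalizes; margins and the paper's triplet-level T-SANS weighting are omitted, and the function below is our own sketch.

import numpy as np

def sigmoid(x):
    return 1.0 / (1.0 + np.exp(-x))

def sans_loss(pos_score, neg_scores, alpha=1.0):
    w = np.exp(alpha * neg_scores)
    w /= w.sum()                     # self-adversarial weights: harder negatives count more
    pos = -np.log(sigmoid(pos_score))
    neg = -(w * np.log(sigmoid(-neg_scores))).sum()
    return pos + neg

print(sans_loss(2.0, np.array([-1.0, 0.5, 3.0])))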
data_without_website/Unsupervised-to-Online_Reinforcement_Learning.json ADDED
@@ -0,0 +1,50 @@
+ {
+ "id": "YGhV8wQv3C",
+ "title": "Unsupervised-to-Online Reinforcement Learning",
+ "track": "main",
+ "author": "Junsu Kim;Seohong Park;Sergey Levine",
+ "pdf": "https://openreview.net/pdf?id=YGhV8wQv3C",
+ "keyword": "",
+ "abstract": "Offline-to-online reinforcement learning (RL), a framework that trains a policy with offline RL and then further fine-tunes it with online RL,\nhas been considered a promising recipe for data-driven decision-making. While sensible, this framework has drawbacks: it requires domain-specific offline RL pre-training for each task, and is often brittle in practice. In this work, we propose unsupervised-to-online RL (U2O RL),\nwhich replaces domain-specific supervised offline RL with unsupervised offline RL,\nas a better alternative to offline-to-online RL.\nU2O RL not only enables reusing a single pre-trained model for multiple downstream tasks,\nbut also learns better representations, which often result in even better performance and stability\nthan supervised offline-to-online RL.\nTo instantiate U2O RL in practice, we propose a general recipe for U2O RL\nto bridge task-agnostic unsupervised offline skill-based policy pre-training and supervised online fine-tuning.\nThroughout our experiments in nine state-based and pixel-based environments,\nwe empirically demonstrate that U2O RL achieves strong performance\nthat matches or even outperforms previous offline-to-online RL approaches,\nwhile being able to reuse a single pre-trained model for a number of different downstream tasks.",
+ "conference": {
+ "name": "ICLR",
+ "year": 2025
+ },
+ "template": null,
+ "category": "02. Reinforcement Learning and Control",
+ "is_done": true,
+ "timestamp": "2025-08-05T04:10:02.645266",
+ "rule_paper_possible_url": null,
+ "github_base": null,
+ "llm_believed_url": null,
+ "rule_base_possible_url": null,
+ "confirmed_url": null,
+ "Internet_fail": null,
+ "html_fail": null,
+ "citation_data": {
+ "original_title": "Unsupervised Meta-Learning for Reinforcement Learning",
+ "matched_title": "Unsupervised Meta-Learning for Reinforcement Learning",
+ "citation_count": 108,
+ "similarity": 1.0,
+ "source": "semantic_scholar",
+ "year": 2018,
+ "authors": [
+ {
+ "authorId": "2129458064",
+ "name": "Abhishek Gupta"
+ },
+ {
+ "authorId": "8140754",
+ "name": "Benjamin Eysenbach"
+ },
+ {
+ "authorId": "46881670",
+ "name": "Chelsea Finn"
+ },
+ {
+ "authorId": "1736651",
+ "name": "S. Levine"
+ }
+ ]
+ }
+ }
data_without_website/Using_Stochastic_Gradient_Descent_to_Smooth_Nonconvex_Functions__Analysis_of_Implicit_Graduated_Optimization.json ADDED
@@ -0,0 +1,42 @@
+ {
+ "id": "vTRWu9zaWo",
+ "title": "Using Stochastic Gradient Descent to Smooth Nonconvex Functions: Analysis of Implicit Graduated Optimization",
+ "track": "main",
+ "author": "Naoki Sato;Hideaki Iiduka",
+ "pdf": "https://openreview.net/pdf?id=vTRWu9zaWo",
+ "keyword": "",
+ "abstract": "The graduated optimization approach is a heuristic method for finding global optimal solutions for nonconvex functions by using a function smoothing operation with stochastic noise. We show that stochastic noise in stochastic gradient descent (SGD) has the effect of smoothing the objective function, the degree of which is determined by the learning rate, batch size, and variance of the stochastic gradient. Using this finding, we propose and analyze a new graduated optimization algorithm that varies the degree of smoothing by varying the learning rate and batch size, and provide experimental results on image classification tasks with ResNets that support our theoretical findings. We further show that there is an interesting correlation between the degree of smoothing by SGD's stochastic noise, the well-studied ``sharpness'' indicator, and the generalization performance of the model.",
+ "conference": {
+ "name": "ICLR",
+ "year": 2025
+ },
+ "template": null,
+ "category": "03. ML Theory and Optimization",
+ "is_done": true,
+ "timestamp": "2025-08-05T03:23:06.138044",
+ "rule_paper_possible_url": null,
+ "github_base": null,
+ "llm_believed_url": null,
+ "rule_base_possible_url": null,
+ "confirmed_url": null,
+ "Internet_fail": null,
+ "html_fail": null,
+ "citation_data": {
+ "original_title": "Using Stochastic Gradient Descent to Smooth Nonconvex Functions: Analysis of Implicit Graduated Optimization",
+ "matched_title": "Using Stochastic Gradient Descent to Smooth Nonconvex Functions: Analysis of Implicit Graduated Optimization with Optimal Noise Scheduling",
+ "citation_count": 4,
+ "similarity": 0.8770491803278688,
+ "source": "semantic_scholar",
+ "year": 2023,
+ "authors": [
+ {
+ "authorId": "2266924850",
+ "name": "Naoki Sato"
+ },
+ {
+ "authorId": "2018304",
+ "name": "H. Iiduka"
+ }
+ ]
+ }
+ }
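
To make the schedule concrete, a toy run of the implicit graduated optimization recipe as we understand it (the objective and constants are our own): since the smoothing degree scales roughly like the learning rate over the square root of the batch size, decaying the one while growing the other anneals a heavily smoothed objective toward the raw nonconvex one.

import numpy as np

def noisy_grad(x, rng, batch):                 # f(x) = x^2 + 0.5*sin(5x), minibatch noise
    return 2 * x + 2.5 * np.cos(5 * x) + rng.normal(scale=1.0 / np.sqrt(batch))

rng = np.random.default_rng(0)
x = 3.0
for lr, batch in [(0.5, 8), (0.1, 32), (0.02, 128)]:   # lr/sqrt(batch) shrinks each stage
    for _ in range(500):
        x -= lr * noisy_grad(x, rng, batch)
print("final x: %.3f" % x)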