Add files using upload-large-folder tool
Browse filesThis view is limited to 50 files because it contains too many changes. See raw diff
- 2002.09437/record.json +32 -0
- 2003.13951/record.json +32 -0
- 2004.05773/record.json +32 -0
- 2005.06606/record.json +32 -0
- 2006.14804/record.json +32 -0
- 2007.05060/main_diagram/main_diagram.drawio +1 -0
- 2007.05060/main_diagram/main_diagram.pdf +0 -0
- 2007.05060/paper_text/intro_method.md +147 -0
- 2008.12855/record.json +32 -0
- 2010.01402/record.json +32 -0
- 2010.01693/record.json +32 -0
- 2010.12810/record.json +32 -0
- 2012.03500/record.json +32 -0
- 2012.15788/main_diagram/main_diagram.drawio +1 -0
- 2012.15788/main_diagram/main_diagram.pdf +0 -0
- 2012.15788/paper_text/intro_method.md +9 -0
- 2102.09701/record.json +32 -0
- 2103.00673/record.json +32 -0
- 2103.04503/record.json +32 -0
- 2103.10379/record.json +32 -0
- 2103.15573/record.json +32 -0
- 2104.05832/record.json +32 -0
- 2106.02940/record.json +32 -0
- 2106.06795/record.json +32 -0
- 2106.13265/record.json +32 -0
- 2107.12309/record.json +32 -0
- 2108.09079/record.json +32 -0
- 2108.12318/main_diagram/main_diagram.drawio +1 -0
- 2108.12318/main_diagram/main_diagram.pdf +0 -0
- 2108.12318/paper_text/intro_method.md +75 -0
- 2108.13655/record.json +32 -0
- 2108.13702/record.json +32 -0
- 2109.02639/record.json +32 -0
- 2109.06253/record.json +32 -0
- 2109.08303/main_diagram/main_diagram.drawio +1 -0
- 2109.08303/main_diagram/main_diagram.pdf +0 -0
- 2109.08303/paper_text/intro_method.md +80 -0
- 2109.09166/record.json +32 -0
- 2109.11171/main_diagram/main_diagram.drawio +1 -0
- 2109.11171/main_diagram/main_diagram.pdf +0 -0
- 2109.11171/paper_text/intro_method.md +102 -0
- 2110.02027/record.json +32 -0
- 2110.06539/record.json +32 -0
- 2110.13059/record.json +32 -0
- 2112.01853/record.json +32 -0
- 2112.10149/record.json +32 -0
- 2112.11909/record.json +32 -0
- 2201.01666/record.json +32 -0
- 2201.02233/record.json +32 -0
- 2201.02263/record.json +32 -0
2002.09437/record.json
ADDED
|
@@ -0,0 +1,32 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
{
|
| 2 |
+
"arxiv_id": "2002.09437",
|
| 3 |
+
"month": "2020_02",
|
| 4 |
+
"year": 2020,
|
| 5 |
+
"conference": "NEURIPS",
|
| 6 |
+
"title": "Calibrating Deep Neural Networks using Focal Loss",
|
| 7 |
+
"arxiv_url": "https://arxiv.org/abs/2002.09437",
|
| 8 |
+
"source": {
|
| 9 |
+
"paper_dir": "/home/zling/lzl/ICML2026/Build_Dataset/data/2020_02/main_diagram_database/2002.09437",
|
| 10 |
+
"tex_dir": "/home/zling/lzl/ICML2026/Build_Dataset/data/2020_02/tex_files_extracted/2002.09437",
|
| 11 |
+
"paper_md": "/home/zling/lzl/ICML2026/Build_Dataset/data/2020_02/main_diagram_database/2002.09437/paper_text/paper.md",
|
| 12 |
+
"metadata_json": "/home/zling/lzl/ICML2026/Build_Dataset/data/2020_02/main_diagram_database/2002.09437/metadata.json",
|
| 13 |
+
"intro_method_from": "/home/zling/lzl/ICML2026/Build_Dataset/data/2020_02/main_diagram_database/2002.09437/paper_text/paper.md",
|
| 14 |
+
"intro_method_from_kind": "markdown"
|
| 15 |
+
},
|
| 16 |
+
"files": {
|
| 17 |
+
"main_drawio": "/home/zling/lzl/ICML2026/Build_Dataset/dataset/2002.09437/main_diagram/main_diagram.drawio",
|
| 18 |
+
"main_png": "/home/zling/lzl/ICML2026/Build_Dataset/dataset/2002.09437/main_diagram/main_diagram.png",
|
| 19 |
+
"main_pdf": "/home/zling/lzl/ICML2026/Build_Dataset/dataset/2002.09437/main_diagram/main_diagram.pdf",
|
| 20 |
+
"intro_method_md": "/home/zling/lzl/ICML2026/Build_Dataset/dataset/2002.09437/paper_text/intro_method.md",
|
| 21 |
+
"paper_pdf": "/home/zling/lzl/ICML2026/Build_Dataset/dataset/2002.09437/paper.pdf",
|
| 22 |
+
"latex": "/home/zling/lzl/ICML2026/Build_Dataset/dataset/2002.09437/latex_source"
|
| 23 |
+
},
|
| 24 |
+
"status": {
|
| 25 |
+
"copy_drawio": "exists",
|
| 26 |
+
"copy_png": "exists",
|
| 27 |
+
"diagram_pdf": "pdf_exists",
|
| 28 |
+
"intro_method": "exists",
|
| 29 |
+
"paper_pdf": "exists",
|
| 30 |
+
"latex": "exists"
|
| 31 |
+
}
|
| 32 |
+
}
|
2003.13951/record.json
ADDED
|
@@ -0,0 +1,32 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
{
|
| 2 |
+
"arxiv_id": "2003.13951",
|
| 3 |
+
"month": "2020_03",
|
| 4 |
+
"year": 2020,
|
| 5 |
+
"conference": "CVPR",
|
| 6 |
+
"title": "Self-Supervised Monocular Trained Depth Estimation Using Self-Attention and Discrete Disparity Volume",
|
| 7 |
+
"arxiv_url": "https://arxiv.org/abs/2003.13951",
|
| 8 |
+
"source": {
|
| 9 |
+
"paper_dir": "/home/zling/lzl/ICML2026/Build_Dataset/data/2020_03/main_diagram_database/2003.13951",
|
| 10 |
+
"tex_dir": "/home/zling/lzl/ICML2026/Build_Dataset/data/2020_03/tex_files_extracted/2003.13951",
|
| 11 |
+
"paper_md": "/home/zling/lzl/ICML2026/Build_Dataset/data/2020_03/main_diagram_database/2003.13951/paper_text/paper.md",
|
| 12 |
+
"metadata_json": "/home/zling/lzl/ICML2026/Build_Dataset/data/2020_03/main_diagram_database/2003.13951/metadata.json",
|
| 13 |
+
"intro_method_from": "/home/zling/lzl/ICML2026/Build_Dataset/data/2020_03/main_diagram_database/2003.13951/paper_text/paper.md",
|
| 14 |
+
"intro_method_from_kind": "markdown"
|
| 15 |
+
},
|
| 16 |
+
"files": {
|
| 17 |
+
"main_drawio": "/home/zling/lzl/ICML2026/Build_Dataset/dataset/2003.13951/main_diagram/main_diagram.drawio",
|
| 18 |
+
"main_png": "/home/zling/lzl/ICML2026/Build_Dataset/dataset/2003.13951/main_diagram/main_diagram.png",
|
| 19 |
+
"main_pdf": "/home/zling/lzl/ICML2026/Build_Dataset/dataset/2003.13951/main_diagram/main_diagram.pdf",
|
| 20 |
+
"intro_method_md": "/home/zling/lzl/ICML2026/Build_Dataset/dataset/2003.13951/paper_text/intro_method.md",
|
| 21 |
+
"paper_pdf": "/home/zling/lzl/ICML2026/Build_Dataset/dataset/2003.13951/paper.pdf",
|
| 22 |
+
"latex": "/home/zling/lzl/ICML2026/Build_Dataset/dataset/2003.13951/latex_source"
|
| 23 |
+
},
|
| 24 |
+
"status": {
|
| 25 |
+
"copy_drawio": "exists",
|
| 26 |
+
"copy_png": "exists",
|
| 27 |
+
"diagram_pdf": "pdf_exists",
|
| 28 |
+
"intro_method": "exists",
|
| 29 |
+
"paper_pdf": "exists",
|
| 30 |
+
"latex": "exists"
|
| 31 |
+
}
|
| 32 |
+
}
|
2004.05773/record.json
ADDED
|
@@ -0,0 +1,32 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
{
|
| 2 |
+
"arxiv_id": "2004.05773",
|
| 3 |
+
"month": "2020_04",
|
| 4 |
+
"year": 2020,
|
| 5 |
+
"conference": "ACL",
|
| 6 |
+
"title": "Generating Fact Checking Explanations",
|
| 7 |
+
"arxiv_url": "https://arxiv.org/abs/2004.05773",
|
| 8 |
+
"source": {
|
| 9 |
+
"paper_dir": "/home/zling/lzl/ICML2026/Build_Dataset/data/2020_04/main_diagram_database/2004.05773",
|
| 10 |
+
"tex_dir": "/home/zling/lzl/ICML2026/Build_Dataset/data/2020_04/tex_files_extracted/2004.05773",
|
| 11 |
+
"paper_md": "/home/zling/lzl/ICML2026/Build_Dataset/data/2020_04/main_diagram_database/2004.05773/paper_text/paper.md",
|
| 12 |
+
"metadata_json": "/home/zling/lzl/ICML2026/Build_Dataset/data/2020_04/main_diagram_database/2004.05773/metadata.json",
|
| 13 |
+
"intro_method_from": "/home/zling/lzl/ICML2026/Build_Dataset/data/2020_04/main_diagram_database/2004.05773/paper_text/paper.md",
|
| 14 |
+
"intro_method_from_kind": "markdown"
|
| 15 |
+
},
|
| 16 |
+
"files": {
|
| 17 |
+
"main_drawio": "/home/zling/lzl/ICML2026/Build_Dataset/dataset/2004.05773/main_diagram/main_diagram.drawio",
|
| 18 |
+
"main_png": "/home/zling/lzl/ICML2026/Build_Dataset/dataset/2004.05773/main_diagram/main_diagram.png",
|
| 19 |
+
"main_pdf": "/home/zling/lzl/ICML2026/Build_Dataset/dataset/2004.05773/main_diagram/main_diagram.pdf",
|
| 20 |
+
"intro_method_md": "/home/zling/lzl/ICML2026/Build_Dataset/dataset/2004.05773/paper_text/intro_method.md",
|
| 21 |
+
"paper_pdf": "/home/zling/lzl/ICML2026/Build_Dataset/dataset/2004.05773/paper.pdf",
|
| 22 |
+
"latex": "/home/zling/lzl/ICML2026/Build_Dataset/dataset/2004.05773/latex_source"
|
| 23 |
+
},
|
| 24 |
+
"status": {
|
| 25 |
+
"copy_drawio": "exists",
|
| 26 |
+
"copy_png": "exists",
|
| 27 |
+
"diagram_pdf": "pdf_exists",
|
| 28 |
+
"intro_method": "exists",
|
| 29 |
+
"paper_pdf": "exists",
|
| 30 |
+
"latex": "exists"
|
| 31 |
+
}
|
| 32 |
+
}
|
2005.06606/record.json
ADDED
|
@@ -0,0 +1,32 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
{
|
| 2 |
+
"arxiv_id": "2005.06606",
|
| 3 |
+
"month": "2020_05",
|
| 4 |
+
"year": 2020,
|
| 5 |
+
"conference": "ACL",
|
| 6 |
+
"title": "Dynamic Programming Encoding for Subword Segmentation in Neural Machine Translation",
|
| 7 |
+
"arxiv_url": "https://arxiv.org/abs/2005.06606",
|
| 8 |
+
"source": {
|
| 9 |
+
"paper_dir": "/home/zling/lzl/ICML2026/Build_Dataset/data/2020_05/main_diagram_database/2005.06606",
|
| 10 |
+
"tex_dir": "/home/zling/lzl/ICML2026/Build_Dataset/data/2020_05/tex_files_extracted/2005.06606",
|
| 11 |
+
"paper_md": "/home/zling/lzl/ICML2026/Build_Dataset/data/2020_05/main_diagram_database/2005.06606/paper_text/paper.md",
|
| 12 |
+
"metadata_json": "/home/zling/lzl/ICML2026/Build_Dataset/data/2020_05/main_diagram_database/2005.06606/metadata.json",
|
| 13 |
+
"intro_method_from": "/home/zling/lzl/ICML2026/Build_Dataset/data/2020_05/main_diagram_database/2005.06606/paper_text/paper.md",
|
| 14 |
+
"intro_method_from_kind": "markdown"
|
| 15 |
+
},
|
| 16 |
+
"files": {
|
| 17 |
+
"main_drawio": "/home/zling/lzl/ICML2026/Build_Dataset/dataset/2005.06606/main_diagram/main_diagram.drawio",
|
| 18 |
+
"main_png": "/home/zling/lzl/ICML2026/Build_Dataset/dataset/2005.06606/main_diagram/main_diagram.png",
|
| 19 |
+
"main_pdf": "/home/zling/lzl/ICML2026/Build_Dataset/dataset/2005.06606/main_diagram/main_diagram.pdf",
|
| 20 |
+
"intro_method_md": "/home/zling/lzl/ICML2026/Build_Dataset/dataset/2005.06606/paper_text/intro_method.md",
|
| 21 |
+
"paper_pdf": "/home/zling/lzl/ICML2026/Build_Dataset/dataset/2005.06606/paper.pdf",
|
| 22 |
+
"latex": "/home/zling/lzl/ICML2026/Build_Dataset/dataset/2005.06606/latex_source"
|
| 23 |
+
},
|
| 24 |
+
"status": {
|
| 25 |
+
"copy_drawio": "exists",
|
| 26 |
+
"copy_png": "exists",
|
| 27 |
+
"diagram_pdf": "pdf_exists",
|
| 28 |
+
"intro_method": "exists",
|
| 29 |
+
"paper_pdf": "exists",
|
| 30 |
+
"latex": "exists"
|
| 31 |
+
}
|
| 32 |
+
}
|
2006.14804/record.json
ADDED
|
@@ -0,0 +1,32 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
{
|
| 2 |
+
"arxiv_id": "2006.14804",
|
| 3 |
+
"month": "2020_06",
|
| 4 |
+
"year": 2021,
|
| 5 |
+
"conference": "NEURIPS",
|
| 6 |
+
"title": "Widening the Pipeline in Human-Guided Reinforcement Learning with Explanation and Context-Aware Data Augmentation",
|
| 7 |
+
"arxiv_url": "https://arxiv.org/abs/2006.14804",
|
| 8 |
+
"source": {
|
| 9 |
+
"paper_dir": "/home/zling/lzl/ICML2026/Build_Dataset/data/2020_06/main_diagram_database/2006.14804",
|
| 10 |
+
"tex_dir": "/home/zling/lzl/ICML2026/Build_Dataset/data/2020_06/tex_files_extracted/2006.14804",
|
| 11 |
+
"paper_md": "/home/zling/lzl/ICML2026/Build_Dataset/data/2020_06/main_diagram_database/2006.14804/paper_text/paper.md",
|
| 12 |
+
"metadata_json": "/home/zling/lzl/ICML2026/Build_Dataset/data/2020_06/main_diagram_database/2006.14804/metadata.json",
|
| 13 |
+
"intro_method_from": "/home/zling/lzl/ICML2026/Build_Dataset/data/2020_06/main_diagram_database/2006.14804/paper_text/paper.md",
|
| 14 |
+
"intro_method_from_kind": "markdown"
|
| 15 |
+
},
|
| 16 |
+
"files": {
|
| 17 |
+
"main_drawio": "/home/zling/lzl/ICML2026/Build_Dataset/dataset/2006.14804/main_diagram/main_diagram.drawio",
|
| 18 |
+
"main_png": "/home/zling/lzl/ICML2026/Build_Dataset/dataset/2006.14804/main_diagram/main_diagram.png",
|
| 19 |
+
"main_pdf": "/home/zling/lzl/ICML2026/Build_Dataset/dataset/2006.14804/main_diagram/main_diagram.pdf",
|
| 20 |
+
"intro_method_md": "/home/zling/lzl/ICML2026/Build_Dataset/dataset/2006.14804/paper_text/intro_method.md",
|
| 21 |
+
"paper_pdf": "/home/zling/lzl/ICML2026/Build_Dataset/dataset/2006.14804/paper.pdf",
|
| 22 |
+
"latex": "/home/zling/lzl/ICML2026/Build_Dataset/dataset/2006.14804/latex_source"
|
| 23 |
+
},
|
| 24 |
+
"status": {
|
| 25 |
+
"copy_drawio": "exists",
|
| 26 |
+
"copy_png": "exists",
|
| 27 |
+
"diagram_pdf": "pdf_exists",
|
| 28 |
+
"intro_method": "exists",
|
| 29 |
+
"paper_pdf": "exists",
|
| 30 |
+
"latex": "exists"
|
| 31 |
+
}
|
| 32 |
+
}
|
2007.05060/main_diagram/main_diagram.drawio
ADDED
|
@@ -0,0 +1 @@
|
|
|
|
|
|
|
| 1 |
+
<mxfile host="app.diagrams.net" modified="2020-06-03T23:05:34.052Z" agent="5.0 (Macintosh; Intel Mac OS X 10_15_2) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/81.0.4044.138 Safari/537.36" etag="z9-0tgBhUb9bsGWhxJP2" version="13.1.14" type="google"><diagram id="k2CafVSddcFSxPi8xNKm" name="Page-1">7Vptk5o6FP41ztx+WIcQQPy42m17O+1MO9tua79lJQK3SNgQV+yvb5CEt0RlEPdl5vpF8iQcwnOenJMTHcH5OntPURJ8Jh6ORqbhZSP4dmSawJza/CtHdgViS8CnoScGVcBt+AcL0BDoJvRw2hjICIlYmDTBJYljvGQNDFFKts1hKxI1n5ogHyvA7RJFKvoj9FhQoK5tVPgHHPqBfDIwRM8aycECSAPkkW0NgjcjOKeEsOJqnc1xlJMneSnue3egt5wYxTHrcsOX77/I9msyp/EsY3fgOnlY3V2ZhZVHFG3EC49MJ+L2ZivCzfJZs52gwnnYENlxle4ddc0HACfhzp5V/fzKz783KabS2D09y9Q/aYLRb0zfDGRPmuFkFa8pYbNh1uQEcp3xxmwbhAzfJmiZ92y51DkWsHXEW4BfpoyS36VALDEVoWbgyLawrHGdgB4xZTirQcKV7zFZY0Z3fIjsdYSsxLoype62NZUKKKgLVI5DYmH4pelKO/xCyEcvpcXdxz/33+KHDwFmrv+v9SuMF1e2Rkp9+UTc4fvFvAoz7OX0hVE0JxGhe0Nwtf8oxMPSiQqRGroPcztxxqbdoBe45thWCLYmEqxTXKHncKxdrkBHsoPWOX3xfZp/4Yw3Ix4yW7jqDI8HOtEklAXEJzGKbip0Rskm9nL69z6pxnwiJBHgf5ixndA52jDSdCOOves8BvPmMkJpGi4L8F0YdVo5ZuVQzWLJX+CoOymOEAsfm5Fc5xNx6xcS7mNeJlOV0dJAa+kwRH3MxF31yNsyVK5NaWjSMpSSDV1ixRDnDu1qw5J8QHp4wspzLOPovJQXbI7nF8UMKr2W5PaXMOwQJirZxCTGbU3x5/+sNxa8YfAlJ5pv87czytZOtFShHRRW4dfTWfOkAGuhwtbEYomdqVNz0koFdjd5KYZgSw8QdBP8UNKwXr40XojLlexvTccT0M/rAJ62dWHHd9k6HHV8FrKf0rX8elHDK5/njR4uL3h8NdFAkca0ZzQAE7MZDcynjQbOQKIA/4siTxFjw7BcA1jQdl132vSskfdCDjsWnDqO208wvOB4VsHIbHVMMc0t7VlV3eSYProXcjZoZ29dpWFqtGENUMnpiQSvkUjHfnlE6mJYsyxLKPEpWg9QrT1XfdZUwkFHXrpa48XP2IG9ApCuMFJswcvUbLpH2cfLNt2b2k9QubmKmNNdzAKcH7jRoU76ojBlON4f9V0u4lziQM5ub6PhRBN+gC7+tI8Dhos/Vo84UvFs9IwqcjdeFF6dtl5l6Var1hb1vgOlm7oDrMKXfkvXOfOc3OIBkRVPHhe4Q0fA80Rhns7uAx18D7VFcptLC0y7ZvZ2AhjssLvDDqn/YXeT1Cqrnneo7SpZA7i6EPWkPA6pxafhUZN9gSyxn4vFLmeqQ/30Yuw/JbuyR0TfIYSq7oj0Cx5oKG6fWA5GcZezyVdDMadJpfhiBPNm9St3sRmt/isAb/4C</diagram></mxfile>
|
2007.05060/main_diagram/main_diagram.pdf
ADDED
|
Binary file (20.3 kB). View file
|
|
|
2007.05060/paper_text/intro_method.md
ADDED
|
@@ -0,0 +1,147 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Introduction
|
| 2 |
+
|
| 3 |
+
Programming is a frustrating process: as the computer executes your code literally, any error in communicating how the computer should run would result in a bug. Program synthesis [\[1\]](#page-9-0) aims to address this problem by allowing the user to specify what the program should do; provided this specification, a program synthesizer infers a program that satisfies it. One of the most well-known program synthesizers is FlashFill [\[2\]](#page-9-1), which synthesizes string transformations from input/output examples. For instance, given the example "Gordon Freeman" → "G", the FlashFill system infers the program: "first\_letter(first\_word(input))". FlashFill works inside Microsoft Excel, and this program can then run on the rest of the spreadsheet, saving time for end-users. However, most specifications, especially those provided by a naive end-user, leave the synthesis problem ill-posed as there may be many programs that satisfy the specification. Here we introduce a new paradigm for resolving this ambiguity. We think of program synthesis as a kind of communication between the user and the synthesizer. Framed as communication we can deploy ideas from computational linguistics, namely *pragmatics*, the study of how informative speakers select their utterances, and how astute listeners infer intent from these "pragmatic" utterances [\[3\]](#page-9-2). Intuitively, a pragmatic program synthesizer goes beyond the literal meaning of the specification, and asks why an informative user would select that specification.
|
| 4 |
+
|
| 5 |
+
Resolving the ambiguity inherent in program synthesis has received much attention. Broadly, prior work imposes some form of inductive bias over the space of programs. In a program synthesizer without any built-in inductive bias [\[1\]](#page-9-0), given a specification D, the synthesizer might return any program consistent with D. Interacting with such a synthesizer runs the risk of getting an unintuitive program that is only "technically correct". For instance, given an example "Richard Feynman" →
|
| 6 |
+
|
| 7 |
+
<sup>∗</sup> equal contributions
|
| 8 |
+
|
| 9 |
+
"Mr Feynman", the synthesizer might output a program that prints "Mr Feynman" verbatim on all inputs. Systems such as [4] introduce a notion of syntactic naturalness in the form of a prior over the set of programs: $P(prog|D) \propto \mathbb{1}\left[prog \vdash D\right] P_{\theta}(prog)$ , where $prog \vdash D$ means prog is consistent with spec D, and $P_{\theta}(prog)$ is a prior with parameters $\theta$ . For instance $P_{\theta}$ might disprefer constant strings. However, purely syntactic priors can be insufficient: the FlashFill-like system in [5] penalizes constant strings, making its synthesizer explain the "r" in "Mr Feynman" with the "r" from "Richard"; when the program synthesized from "Richard Feynman" $\rightarrow$ "Mr Feynman" executes on "Stephen Wolfram", it outputs "Ms Wolfram." This failure in part motivated the work in [6], which addresses failures such as these via handcrafted features. In this work we take a step back and ask: what are the general principles of communication from which these patterns of inductive reasoning could emerge?
|
| 10 |
+
|
| 11 |
+
We will present a qualitatively different inductive bias, drawing insights from probabilistic recursive reasoning models of pragmatics [7]. Confronted with a set of programs all satisfying the specification, the synthesizer asks the question, "why would a pragmatic speaker use this particular specification to communicate that program?" Mathematically our model works as follows. First, we model a synthesizer without any inductive bias as a literal listener $L_0$ : $P_{L_0}(prog|D) \propto \mathbb{1}[prog \vdash D]$ . Second, we model a pragmatic speaker, which is a conditional distribution over specifications, $S_1$ : $P_{S_1}(D|prog) \propto P_{L_0}(prog|D)$ . This "speaker" generates a specification D in proportion to the probability $L_0$ would recover the program prog given D. Last, we obtain the pragmatic listener, $L_1$ : $P_{L_1}(prog|D) \propto P_{S_1}(D|prog)$ , which is the synthesizer with the desirable inductive bias. It is worth noting that the inductive biases present in $L_1$ are derived from first principles of communication and the synthesis task, rather than trained on actual data of end-user interactions.
|
| 12 |
+
|
| 13 |
+
Algorithmically, computing these probabilities is challenging because they are given as unnormalized proportionalities. Specifically, $P_{L_0}$ requires summing over the set of consistent programs given D, and $P_{S_1}$ requires summing over the set of all possible specifications given prog. To this end, rather than tackling the difficult problem of searching for a correct program given a specification, a challenging research field in its own right [8–16], we work over a small enough domain such that the search problem can be efficiently solved with a simple version space algebra [17]. We develop an efficient inference algorithm to compute these probabilities exactly, and then build a working program synthesizer with these inference algorithms. In conducting a user study on Amazon Mechanical Turk, we find that naive end-users communicate more efficiently with a pragmatic program synthesizer compared to its literal variant. Concretely, this work makes the following contributions:
|
| 14 |
+
|
| 15 |
+
- 1. a systematic formulation of recursive pragmatics within program synthesis
|
| 16 |
+
- 2. an efficient implementation of an incremental pragmatic model via version space algebra
|
| 17 |
+
- 3. a user study demonstrating that end-users communicate their intended program more efficiently with pragmatic synthesizers
|
| 18 |
+
|
| 19 |
+
We now formally connect program synthesis with pragmatic communication. We describe *reference game*, a class of cooperative 2-player games from the linguistic literature. We then cast program synthesis as an instance of a reference game played between a human speaker and a machine listener.
|
| 20 |
+
|
| 21 |
+
In program synthesis, one would like to obtain a program without explicitly coding for it. Instead, the user describes desirable properties of the program as a specification, which often takes the form of a set of examples. Given these examples, the synthesizer would search for a program that satisfies these examples. In an interactive setting [18], rather than giving these examples all at once, the user gives the examples in rounds, based on the synthesizer's feedback each round.
|
| 22 |
+
|
| 23 |
+
In a reference game, a speaker-listener pair (S,L) cooperatively communicate a **concept** $h \in H$ using some atomic **utterances** $u \in U$ . Given a concept h, the speaker S chooses a **set of utterances** $D = \{u^1, \ldots, u^k | u^i \in U\}$ to describe the concept. The communication is successful if the original concept is recovered by the listener, i.e. h = L(S(h)). The communication is efficient if |D| is small.
|
| 24 |
+
|
| 25 |
+
Therefore, it should be unsurprising that, given a reference game, a human speaker-listener pair would act *pragmatically* [3]: The speaker is choosing didactic utterances that are most descriptive yet parsimonious to describe the concept, and the listener is aware that the speaker is being didactic while recovering the intended concept.
|
| 26 |
+
|
| 27 |
+
It is easy to see why program synthesis is an instance of a reference game: The user would like to obtain a "concept" in the form of a "program", the user does so by using "utterances" in the form of "examples". See Figure 1. This formulation can explain in part the frustration of using a traditional synthesizer, or machine in general. Because while the user naturally assumes pragmatic communication, and selects the examples didactically, the machine/synthesizer is not pragmatic, letting the carefully selected examples fall on deaf ears.
|
| 28 |
+
|
| 29 |
+
<span id="page-2-0"></span>
|
| 30 |
+
|
| 31 |
+
Figure 1: program synthesis as a reference game
|
| 32 |
+
|
| 33 |
+
Two strangers who speak different languages would not perform as well in a reference game as two close friends. Clearly, there needs to be a convention shared between the speaker and the listener for effective communication to occur. Approaches such as [19,20] use a corpus of human annotated data so that the machine can imitate the conventions of human communication directly. Works such as [21,22] leverage both annotated data and pragmatic inference to achieve successful human-machine communication over natural language. This work shows that, in the context of program synthesis by examples, by building the concept of pragmatic communication into the synthesizer, the user can quickly adopt the convention of the synthesizer effectively via human learning <sup>2</sup>. This is advantageous because annotated user data is expensive to obtain. In this regard, our work is most similar to SHRDLURN [23], where a pragmatic semantic parser was able to translate natural language utterances into a desirable program without being trained first on human annotated data.
|
| 34 |
+
|
| 35 |
+
We now describe how to operationalize pragmatics using a small, program-like reference game, where by-hand calculation is feasible. This exposition adapts formalism from [18] for efficient implementation within program synthesizers.
|
| 36 |
+
|
| 37 |
+
**The Game.** Consider the following game. There are ten different **concepts** $H = \{h_0 \dots h_9\}$ and eight **atomic examples** $\{u_0 \dots u_7\}$ . Each concept is a contiguous line segment on a horizontal grid of 4 cells, and each atomic example indicates whether a particular cell is occupied by the segment. One can view this example as an instance of predicate synthesis, where the program takes in the form of a predicate function h, and the atomic examples as input-output pairs obtained by applying the predicate function on some input: i.e. $u_0 = (cell_0, h(cell_0) = True)$ . We can visualise the game with a **meaning matrix** (Figure 2), where each entry (i,j) denotes whether $h_j \vdash u_i$ $(h_j$ is consistent with $u_i$ ). Given a set of examples D, we say $h \vdash D$ if $\forall u \in D, h \vdash u$ .
|
| 38 |
+
|
| 39 |
+
If a human speaker uses the set of examples $D = \{u_2, u_4\}$ , what is the most likely concept being communicated? We should expect it is $h_5$ , as $u_2$ and $u_4$ marks the end-points of the segment, despite
|
| 40 |
+
|
| 41 |
+
<span id="page-2-1"></span><sup>&</sup>lt;sup>2</sup>which is far more powerful than machine learning
|
| 42 |
+
|
| 43 |
+
| | | h0 | h1 | h2 | h3 | h4 | h5 | h6 | h7 | h8 | h9 |
|
| 44 |
+
|----|----------|----|----|----|----|----|----|----|----|----|----|
|
| 45 |
+
| | | | | | | | | | | | |
|
| 46 |
+
| u0 | <b>v</b> | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 |
|
| 47 |
+
| u1 | X | 0 | 0 | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 1 |
|
| 48 |
+
| u2 | | 0 | 1 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 |
|
| 49 |
+
| u3 | X | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 1 | 1 |
|
| 50 |
+
| u4 | | 0 | 0 | 1 | 1 | 0 | 1 | 1 | 1 | 1 | 0 |
|
| 51 |
+
| u5 | X | 1 | 1 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 1 |
|
| 52 |
+
| u6 | | 0 | 0 | 0 | 1 | 0 | 0 | 1 | 0 | 1 | 1 |
|
| 53 |
+
| u7 | X | 1 | 1 | 1 | 0 | 1 | 1 | 0 | 1 | 0 | 0 |
|
| 54 |
+
|
| 55 |
+
<span id="page-3-0"></span>Figure 2: the meaning matrix: entry (i, j) denotes if example $u_i$ is true given concept $h_j$ .
|
| 56 |
+
|
| 57 |
+
the concepts $h_2$ , $h_3$ , $h_6$ are also consistent with D. We now demonstrate an incremental pragmatic model that can capture this behaviour with recursive Bayesian inference.
|
| 58 |
+
|
| 59 |
+
The recursive pragmatic model derives a probabilistic speaker $S_1$ and listener $L_1$ pair given a meaning matrix, and the resulting convention of the communicating pair $S_1$ - $L_1$ is shown to be both efficient and human usable [24]. Clearly, there are other ways to derive a speaker-listener pair that are highly efficient, for instance, training a pair of cooperative agents in an RL setting [25]. However, agents trained this way tend to deviate from how a human would communicate, essentially coming up with highly efficient yet obfuscated communication codes that are not human understandable.
|
| 60 |
+
|
| 61 |
+
**Literal Listener** $L_0$ . We start by building the literal listener $L_0$ from the meaning matrix. Upon receiving a set of examples D, $L_0$ samples uniformly from the set of consistent concepts:
|
| 62 |
+
|
| 63 |
+
<span id="page-3-1"></span>
|
| 64 |
+
$$P_{L_0}(h|D) \propto \mathbb{1}(h \vdash D), \quad P_{L_0}(h|D) = \frac{\mathbb{1}(h \vdash D)}{\sum_{h' \in H} \mathbb{1}(h' \vdash D)}$$
|
| 65 |
+
(1)
|
| 66 |
+
|
| 67 |
+
Applying to our example in Figure 2, we see that $P_{L_0}(h_5|u_2,u_4)=\frac{1}{4}$ .
|
| 68 |
+
|
| 69 |
+
**Incrementally Pragmatic Speaker** $S_1$ . We now build a pragmatic speaker $S_1$ recursively from $L_0$ . Here, rather than treating D as an unordered set, we view it as an ordered sequence of examples, and model the speaker's generation of D incrementally, similar to autoregressive sequence generation in language modeling [26]. Let $D = u^1 \dots u^k$ , then:
|
| 70 |
+
|
| 71 |
+
$$P_{S_1}(D|h) = P_{S_1}(u_1, \dots, u_k|h) = P_S(u_1|h)P_S(u_2|h, u_1)\dots P(u_k|h, u_1 \dots u_{k-1})$$
|
| 72 |
+
(2)
|
| 73 |
+
|
| 74 |
+
where the incremental probability $P_S(u_i|h,u_1,\ldots,u_{i-1})$ is defined recursively with $L_0$ :
|
| 75 |
+
|
| 76 |
+
$$P_S(u_i|h, u_{1...i-1}) \propto P_{L_0}(h|u_{1...i}), \quad P_S(u_i|h, u_{1...i-1}) = \frac{P_{L_0}(h|u_1, \dots, u_i)}{\sum_{u_i'} P_{L_0}(h|u_1, \dots, u_i')}$$
|
| 77 |
+
(3)
|
| 78 |
+
|
| 79 |
+
Applying this reasoning to our example in Figure 2, we see that $P_{S_1}(u_2, u_4|h_5)$ is:
|
| 80 |
+
|
| 81 |
+
$$P_S(u_2|h_5)P_S(u_4|h_5, u_2) = \frac{P_{L_0}(h_5|u_2)}{\sum_{u'} P_{L_0}(h_5|u')} \frac{P_{L_0}(h_5|u_2, u_4)}{\sum_{u''} P_{L_0}(h_5|u_2, u'')} = 0.25 * 0.3 = 0.075$$
|
| 82 |
+
(4)
|
| 83 |
+
|
| 84 |
+
**Informative Listener** $L_1$ . Finally, we construct an informative listener $L_1$ which recursively reasons about the informative speaker $S_1$ :
|
| 85 |
+
|
| 86 |
+
<span id="page-3-3"></span><span id="page-3-2"></span>
|
| 87 |
+
$$P_{L_1}(h|D) \propto P_{S_1}(D|h), \quad P_{L_1}(h|D) = \frac{P_{S_1}(D|h)}{\sum_{h'} P_{S_1}(D|h')}$$
|
| 88 |
+
(5)
|
| 89 |
+
|
| 90 |
+
In our example, $P_{L_1}(h_5|u_{2,4})\approx 0.31$ , $P_{L_1}(h_2|u_{2,4})\approx 0.28$ , $P_{L_1}(h_3|u_{2,4})\approx 0.19$ , $P_{L_1}(h_6|u_{2,4})\approx 0.21$ . As we can see, the intended concept $h_5$ is ranked first, in contrast to the uninformative listener $L_0$ .
|
| 91 |
+
|
| 92 |
+
Computing the pragmatic listener $L_1$ naively would incur a cost of $O(|H|^2|U||D|^2)$ , which can be prohibitively expensive even in instances where H and U can be enumerated. Here, we give an efficient implementation of $S_1$ and $L_1$ that is drastically faster than the naive implementation. While our algorithm cannot yet scale to the regime of state-of-the-art program synthesizers – where H and U cannot be enumerated – we believe computational principles elucidated here could pave the way for pragmatic synthesizers over combinatorially large program spaces, particularly when this combinatorial space is manipulated with version space algebras (VSA), as in [2, 5, 17]. To this end, we employ VSA with aggressive precomputation to memoize the cost of pragmatic inference.
|
| 93 |
+
|
| 94 |
+
# Method
|
| 95 |
+
|
| 96 |
+
We start by redefining some terms of pragmatics into the language of program synthesis. Let h be a **program** and H be the **set of programs**. Let X be the **domain** of the program and Y be the **range** of the program: $H: X \to Y$ . An **example** u is a pair $u = (x, y) \in X \times Y = U$ . A program is **consistent** with an example, $h \vdash u$ , if u = (x, y), h(x) = y.
|
| 97 |
+
|
| 98 |
+
We use a simple form of version space algebra [17] to precompute and cache two kinds of mappings. First, we iterate over the rows of the meaning matrix and store, for each atomic example u, the set of programs that are consistent with it: $M_L[u] = \{h|h\vdash u\}$ . Here $M_L$ is a map or a dictionary data structure, which can be thought of as an atomic listener, that returns a set of consistent programs for every atomic example. Second, we iterate over the columns of meaning matrix, and store, for each program h, the set of atomic examples that are consistent with it $M_S[h] = \{u|h\vdash u\}$ . $M_S$ can be thought of as an atomic speaker, that returns a set of usable atomic examples for every program. Abusing notation slightly, let's define: $|M_L| = max_u|M_L[u]|$ and $|M_S| = max_h|M_S[h]|$ . Note that these quantities can be significantly smaller than H and U if the meaning matrix is sparse.
|
| 99 |
+
|
| 100 |
+
To compute $P_{L_0}(h|D)$ , we first compute the set intersection $D[H] = \cap_{u \in D} M_L[u]$ , which corresponds to the set of programs consistent under D. Note $D[H] = \{\} \iff h \not\vdash D$ . Therefore, from Eq. 1 we derive $P_{L_0}(h|D) = 0$ if $D[H] = \{\}$ , and $\frac{1}{|D[H]|}$ otherwise. Each call is time $O(|M_L||D|)$ .
|
| 101 |
+
|
| 102 |
+
Computing $P_{S_1}$ amounts to computing a sequence of the incremental probability $P_S$ defined in Eq. 3. The brunt of computing $P_S$ lies in the normalisation constant, $\sum_{u_i'} P_{L_0}(h|u_1,\ldots,u_i')$ . We speed up this computation in two ways: First, we note that if $h \not\vdash u_i'$ , the probability $P_{L_0}(h|u_1,\ldots,u_i')$ would be 0. Thus, we can simplify this summation using the atomic speaker $M_S[h]$ like so: $\sum_{u_i'} P_{L_0}(h|u_1,\ldots,u_i') = \sum_{u_i'\in M_S[h]} P_{L_0}(h|u_1,\ldots,u_i')$ , which reduces the number of terms within the summation from O(|U|) to $O(|M_S|)$ . Second, recall that computing $P_{L_0}(h|D)$ amounts to computing the consistent set D[H]. We note that the only varying example inside the summation is $u_i'$ , while all the previous examples $D_{prev} = \{u_1 \ldots u_{i-1}\}$ remain constant. This allows caching the intermediate results of the set intersection $D_{prev}[H] = \cap_{u \in D_{prev}} M_L[u]$ to be re-used in computing $(D_{prev} \cup \{u_i'\})[H] = M_L[u_i'] \cap D_{prev}[H]$ , up to |D| times. Thus, $P_{S_1}$ is $O(|M_L||M_S||D|^2)$ .
|
| 103 |
+
|
| 104 |
+
Again, the brunt of the computation lies in the normalisation constant $\sum_{h'} P_{S_1}(D|h')$ of Eq 5. However, note that in case $h' \nvdash D$ , $P_{S_1}(D|h') = 0$ . This would allow us to leverage the consistent set D[H] to sum over at most $|M_L|$ elements: $\sum_{h'} P_{S_1}(D|h') = \sum_{h' \in D[H]} P_{S_1}(D|h')$ . Overall, $P_{L_1}$ is $O(|M_L|^2|M_S||D|^2)$ time, significantly faster than the original $O(|H|^2|U||D|^2)$ .
|
| 105 |
+
|
| 106 |
+
```
|
| 107 |
+
P -> if (x , y ) in box (B ,B ,B , B )
|
| 108 |
+
then symbol (S , C )
|
| 109 |
+
else pebble
|
| 110 |
+
B -> 0 | 1 | 2 | 3 | 4 | 5 | 6
|
| 111 |
+
S -> ring (O ,I ,R ,x , y )
|
| 112 |
+
O -> chicken | pig
|
| 113 |
+
I -> chicken | pig | pebble
|
| 114 |
+
R -> 1 | 2 | 3
|
| 115 |
+
C -> [ red , green , blue ][ A2 ( A1 ) ]
|
| 116 |
+
A1 -> x | y | x + y
|
| 117 |
+
A2 -> lambda z :0 | lambda z :1 |
|
| 118 |
+
lambda z :2 | lambda z : z %2 |
|
| 119 |
+
lambda z : z %2+1 |
|
| 120 |
+
lambda z :2*( z %2)
|
| 121 |
+
```
|
| 122 |
+
|
| 123 |
+
<span id="page-5-1"></span>Figure 3: DSL of pattern laying programs / rendering of 4 different programs on 7 × 7 grids
|
| 124 |
+
|
| 125 |
+
To describe our program synthesis system with pragmatics, we only need to specify the space of programs, the space of atomic examples, and the meaning matrix; the rest will follow.[3](#page-5-0)
|
| 126 |
+
|
| 127 |
+
Programs. We consider a simple domain of programs that can layout grid-like patterns like those studied in [\[27,](#page-10-0) [28\]](#page-10-1). Specifically, each program is a function that takes in a coordinate (x, y) of a 7 × 7 grid, and places a particular symbol at that location. Symbols can be one of three shapes: *chicken*, *pig*, *pebble*, and be one of three colors: *red*, *green*, *blue*, with the exception that *pebble* is always colorless. A DSL and some of the program renderings are shown in Figure [3.](#page-5-1) Here, box is the bounding box where the main pattern should be placed. ring is a function that takes two shapes and makes the outside shape O wrap around the inside shape I with a thickness of R. symbol is a function that takes in a shape and a color and outputs an appropriate symbol. We consider two programs h<sup>1</sup> and h<sup>2</sup> equivalent if they render to the same pattern over a 7 × 7 grid. After such de-duplication, there are a total of 17976 programs in our space of programs.
|
| 128 |
+
|
| 129 |
+
Atomic Examples. The space of atomic examples consists of tuples of form ((x, y), s), where (x, y) is a grid coordinate, and s is a symbol. As there are a total of 7 distinct symbols and the grid is 7 × 7, there are a total of 343 atomic examples in our domain.
|
| 130 |
+
|
| 131 |
+
Meaning Matrix. An entry of the meaning matrix denotes whether a program, once rendered onto the grid, would be consistent with an atomic example. For instance, let the upper-left pattern in Figure [3](#page-5-1) be rendered from program h1337, then, it will be consistent with the atomic examples ((0, 0), pebble) and ((3, 3), pig\_red), while be inconsistent with ((6, 6), pig\_blue).
|
| 132 |
+
|
| 133 |
+
We conduct a user study to evaluate how well a naive end-user interacts with a pragmatic program synthesizer (L1) versus a non-pragmatic one (L0). We hypothesized that to the extent that the pragmatic models capture computational principles of communication, humans should be able to communicate with them efficiently and intuitively, even if the form of communication is new to them.
|
| 134 |
+
|
| 135 |
+
Subjects. Subjects (N = 55) were recruited on Amazon Mechanical Turk and paid \$2.75 for 20 minutes. Subjects gave informed consent. Seven responses were omitted for failing to answer an instruction quiz. The remaining subjects (N=48) (26 M, 22 F), (Age = 40.9 +/- 12.1 (mean/SD)) were included. The study was approved by our institution's Institutional Review Board.
|
| 136 |
+
|
| 137 |
+
<span id="page-5-0"></span><sup>3</sup> code : [https://github.com/evanthebouncy/program\\_synthesis\\_pragmatics](https://github.com/evanthebouncy/program_synthesis_pragmatics)
|
| 138 |
+
|
| 139 |
+
Stimuli. Stimuli were 10 representative renderings of programs sampled from the DSL, capturing different concepts such as stripes vs checkered colour patterns and solid vs hollow ring shapes.
|
| 140 |
+
|
| 141 |
+
The communication task. The subjects were told they are communicating with two robots, either white (L0) or blue (L1). The subjects were given a stimulus (a rendering), and were asked to make a robot recreate this pattern by providing the robots with a few strategically placed symbols on a scratch grid (set of examples). Each time the subject places a symbol, the robot guesses the most likely program given the examples, and displays its guess as a rendering as feedback to the subject. The subject may proceed to the next task if the pattern is successfully recreated. See Figure [6.1](#page-6-0) [4](#page-6-1) .
|
| 142 |
+
|
| 143 |
+
Procedure. First, the subjects read the instructions followed by a quiz. Subjects who failed the quiz twice proceeded with the experiment, but their responses were omitted. Next, the subjects practice with selecting and placing symbols. Subjects proceed with the communication task presented in two blocks, one with white robot L<sup>0</sup> and one with blue robot L1, in random order between subjects. Each block contains 10 trials of the 10 stimuli, also in random order. At the end of the experiment, subjects fill out a survey: which robot was easier, and free-form feedback about their communication strategies.
|
| 144 |
+
|
| 145 |
+

|
| 146 |
+
|
| 147 |
+
<span id="page-6-0"></span>Figure 4: user interface for the communication task
|
2008.12855/record.json
ADDED
|
@@ -0,0 +1,32 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
{
|
| 2 |
+
"arxiv_id": "2008.12855",
|
| 3 |
+
"month": "2020_08",
|
| 4 |
+
"year": 2020,
|
| 5 |
+
"conference": "ACMMM",
|
| 6 |
+
"title": "Personal Food Model",
|
| 7 |
+
"arxiv_url": "https://arxiv.org/abs/2008.12855",
|
| 8 |
+
"source": {
|
| 9 |
+
"paper_dir": "/home/zling/lzl/ICML2026/Build_Dataset/data/2020_08/main_diagram_database/2008.12855",
|
| 10 |
+
"tex_dir": "/home/zling/lzl/ICML2026/Build_Dataset/data/2020_08/tex_files_extracted/2008.12855",
|
| 11 |
+
"paper_md": "/home/zling/lzl/ICML2026/Build_Dataset/data/2020_08/main_diagram_database/2008.12855/paper_text/paper.md",
|
| 12 |
+
"metadata_json": "/home/zling/lzl/ICML2026/Build_Dataset/data/2020_08/main_diagram_database/2008.12855/metadata.json",
|
| 13 |
+
"intro_method_from": "/home/zling/lzl/ICML2026/Build_Dataset/data/2020_08/main_diagram_database/2008.12855/paper_text/paper.md",
|
| 14 |
+
"intro_method_from_kind": "markdown"
|
| 15 |
+
},
|
| 16 |
+
"files": {
|
| 17 |
+
"main_drawio": "/home/zling/lzl/ICML2026/Build_Dataset/dataset/2008.12855/main_diagram/main_diagram.drawio",
|
| 18 |
+
"main_png": "/home/zling/lzl/ICML2026/Build_Dataset/dataset/2008.12855/main_diagram/main_diagram.png",
|
| 19 |
+
"main_pdf": "/home/zling/lzl/ICML2026/Build_Dataset/dataset/2008.12855/main_diagram/main_diagram.pdf",
|
| 20 |
+
"intro_method_md": "/home/zling/lzl/ICML2026/Build_Dataset/dataset/2008.12855/paper_text/intro_method.md",
|
| 21 |
+
"paper_pdf": "/home/zling/lzl/ICML2026/Build_Dataset/dataset/2008.12855/paper.pdf",
|
| 22 |
+
"latex": "/home/zling/lzl/ICML2026/Build_Dataset/dataset/2008.12855/latex_source"
|
| 23 |
+
},
|
| 24 |
+
"status": {
|
| 25 |
+
"copy_drawio": "exists",
|
| 26 |
+
"copy_png": "exists",
|
| 27 |
+
"diagram_pdf": "pdf_exists",
|
| 28 |
+
"intro_method": "exists",
|
| 29 |
+
"paper_pdf": "exists",
|
| 30 |
+
"latex": "exists"
|
| 31 |
+
}
|
| 32 |
+
}
|
2010.01402/record.json
ADDED
|
@@ -0,0 +1,32 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
{
|
| 2 |
+
"arxiv_id": "2010.01402",
|
| 3 |
+
"month": "2020_10",
|
| 4 |
+
"year": 2020,
|
| 5 |
+
"conference": "ECCV",
|
| 6 |
+
"title": "Unsupervised Monocular Depth Estimation for Night-time Images using Adversarial Domain Feature Adaptation",
|
| 7 |
+
"arxiv_url": "https://arxiv.org/abs/2010.01402",
|
| 8 |
+
"source": {
|
| 9 |
+
"paper_dir": "/home/zling/lzl/ICML2026/Build_Dataset/data/2020_10/main_diagram_database/2010.01402",
|
| 10 |
+
"tex_dir": "/home/zling/lzl/ICML2026/Build_Dataset/data/2020_10/tex_files_extracted/2010.01402",
|
| 11 |
+
"paper_md": "/home/zling/lzl/ICML2026/Build_Dataset/data/2020_10/main_diagram_database/2010.01402/paper_text/paper.md",
|
| 12 |
+
"metadata_json": "/home/zling/lzl/ICML2026/Build_Dataset/data/2020_10/main_diagram_database/2010.01402/metadata.json",
|
| 13 |
+
"intro_method_from": "/home/zling/lzl/ICML2026/Build_Dataset/data/2020_10/main_diagram_database/2010.01402/paper_text/paper.md",
|
| 14 |
+
"intro_method_from_kind": "markdown"
|
| 15 |
+
},
|
| 16 |
+
"files": {
|
| 17 |
+
"main_drawio": "/home/zling/lzl/ICML2026/Build_Dataset/dataset/2010.01402/main_diagram/main_diagram.drawio",
|
| 18 |
+
"main_png": "/home/zling/lzl/ICML2026/Build_Dataset/dataset/2010.01402/main_diagram/main_diagram.png",
|
| 19 |
+
"main_pdf": "/home/zling/lzl/ICML2026/Build_Dataset/dataset/2010.01402/main_diagram/main_diagram.pdf",
|
| 20 |
+
"intro_method_md": "/home/zling/lzl/ICML2026/Build_Dataset/dataset/2010.01402/paper_text/intro_method.md",
|
| 21 |
+
"paper_pdf": "/home/zling/lzl/ICML2026/Build_Dataset/dataset/2010.01402/paper.pdf",
|
| 22 |
+
"latex": "/home/zling/lzl/ICML2026/Build_Dataset/dataset/2010.01402/latex_source"
|
| 23 |
+
},
|
| 24 |
+
"status": {
|
| 25 |
+
"copy_drawio": "exists",
|
| 26 |
+
"copy_png": "exists",
|
| 27 |
+
"diagram_pdf": "pdf_exists",
|
| 28 |
+
"intro_method": "exists",
|
| 29 |
+
"paper_pdf": "exists",
|
| 30 |
+
"latex": "exists"
|
| 31 |
+
}
|
| 32 |
+
}
|
2010.01693/record.json
ADDED
|
@@ -0,0 +1,32 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
{
|
| 2 |
+
"arxiv_id": "2010.01693",
|
| 3 |
+
"month": "2020_10",
|
| 4 |
+
"year": 2021,
|
| 5 |
+
"conference": "EMNLP",
|
| 6 |
+
"title": "Intention Reasoning Network for Multi-Domain End-to-end Task-Oriented Dialogue",
|
| 7 |
+
"arxiv_url": "https://arxiv.org/abs/2010.01693",
|
| 8 |
+
"source": {
|
| 9 |
+
"paper_dir": "/home/zling/lzl/ICML2026/Build_Dataset/data/2020_10/main_diagram_database/2010.01693",
|
| 10 |
+
"tex_dir": "/home/zling/lzl/ICML2026/Build_Dataset/data/2020_10/tex_files_extracted/2010.01693",
|
| 11 |
+
"paper_md": "/home/zling/lzl/ICML2026/Build_Dataset/data/2020_10/main_diagram_database/2010.01693/paper_text/paper.md",
|
| 12 |
+
"metadata_json": "/home/zling/lzl/ICML2026/Build_Dataset/data/2020_10/main_diagram_database/2010.01693/metadata.json",
|
| 13 |
+
"intro_method_from": "/home/zling/lzl/ICML2026/Build_Dataset/data/2020_10/main_diagram_database/2010.01693/paper_text/paper.md",
|
| 14 |
+
"intro_method_from_kind": "markdown"
|
| 15 |
+
},
|
| 16 |
+
"files": {
|
| 17 |
+
"main_drawio": "/home/zling/lzl/ICML2026/Build_Dataset/dataset/2010.01693/main_diagram/main_diagram.drawio",
|
| 18 |
+
"main_png": "/home/zling/lzl/ICML2026/Build_Dataset/dataset/2010.01693/main_diagram/main_diagram.png",
|
| 19 |
+
"main_pdf": "/home/zling/lzl/ICML2026/Build_Dataset/dataset/2010.01693/main_diagram/main_diagram.pdf",
|
| 20 |
+
"intro_method_md": "/home/zling/lzl/ICML2026/Build_Dataset/dataset/2010.01693/paper_text/intro_method.md",
|
| 21 |
+
"paper_pdf": "/home/zling/lzl/ICML2026/Build_Dataset/dataset/2010.01693/paper.pdf",
|
| 22 |
+
"latex": "/home/zling/lzl/ICML2026/Build_Dataset/dataset/2010.01693/latex_source"
|
| 23 |
+
},
|
| 24 |
+
"status": {
|
| 25 |
+
"copy_drawio": "exists",
|
| 26 |
+
"copy_png": "exists",
|
| 27 |
+
"diagram_pdf": "pdf_exists",
|
| 28 |
+
"intro_method": "exists",
|
| 29 |
+
"paper_pdf": "exists",
|
| 30 |
+
"latex": "exists"
|
| 31 |
+
}
|
| 32 |
+
}
|
2010.12810/record.json
ADDED
|
@@ -0,0 +1,32 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
{
|
| 2 |
+
"arxiv_id": "2010.12810",
|
| 3 |
+
"month": "2020_10",
|
| 4 |
+
"year": 2020,
|
| 5 |
+
"conference": "NEURIPS",
|
| 6 |
+
"title": "Autoregressive Score Matching",
|
| 7 |
+
"arxiv_url": "https://arxiv.org/abs/2010.12810",
|
| 8 |
+
"source": {
|
| 9 |
+
"paper_dir": "/home/zling/lzl/ICML2026/Build_Dataset/data/2020_10/main_diagram_database/2010.12810",
|
| 10 |
+
"tex_dir": "/home/zling/lzl/ICML2026/Build_Dataset/data/2020_10/tex_files_extracted/2010.12810",
|
| 11 |
+
"paper_md": "/home/zling/lzl/ICML2026/Build_Dataset/data/2020_10/main_diagram_database/2010.12810/paper_text/paper.md",
|
| 12 |
+
"metadata_json": "/home/zling/lzl/ICML2026/Build_Dataset/data/2020_10/main_diagram_database/2010.12810/metadata.json",
|
| 13 |
+
"intro_method_from": "/home/zling/lzl/ICML2026/Build_Dataset/data/2020_10/main_diagram_database/2010.12810/paper_text/paper.md",
|
| 14 |
+
"intro_method_from_kind": "markdown"
|
| 15 |
+
},
|
| 16 |
+
"files": {
|
| 17 |
+
"main_drawio": "/home/zling/lzl/ICML2026/Build_Dataset/dataset/2010.12810/main_diagram/main_diagram.drawio",
|
| 18 |
+
"main_png": "/home/zling/lzl/ICML2026/Build_Dataset/dataset/2010.12810/main_diagram/main_diagram.png",
|
| 19 |
+
"main_pdf": "/home/zling/lzl/ICML2026/Build_Dataset/dataset/2010.12810/main_diagram/main_diagram.pdf",
|
| 20 |
+
"intro_method_md": "/home/zling/lzl/ICML2026/Build_Dataset/dataset/2010.12810/paper_text/intro_method.md",
|
| 21 |
+
"paper_pdf": "/home/zling/lzl/ICML2026/Build_Dataset/dataset/2010.12810/paper.pdf",
|
| 22 |
+
"latex": "/home/zling/lzl/ICML2026/Build_Dataset/dataset/2010.12810/latex_source"
|
| 23 |
+
},
|
| 24 |
+
"status": {
|
| 25 |
+
"copy_drawio": "exists",
|
| 26 |
+
"copy_png": "exists",
|
| 27 |
+
"diagram_pdf": "pdf_exists",
|
| 28 |
+
"intro_method": "exists",
|
| 29 |
+
"paper_pdf": "exists",
|
| 30 |
+
"latex": "exists"
|
| 31 |
+
}
|
| 32 |
+
}
|
2012.03500/record.json
ADDED
|
@@ -0,0 +1,32 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
{
|
| 2 |
+
"arxiv_id": "2012.03500",
|
| 3 |
+
"month": "2020_12",
|
| 4 |
+
"year": 2021,
|
| 5 |
+
"conference": "ICML",
|
| 6 |
+
"title": "EfficientTTS: An Efficient and High-Quality Text-to-Speech Architecture",
|
| 7 |
+
"arxiv_url": "https://arxiv.org/abs/2012.03500",
|
| 8 |
+
"source": {
|
| 9 |
+
"paper_dir": "/home/zling/lzl/ICML2026/Build_Dataset/data/2020_12/main_diagram_database/2012.03500",
|
| 10 |
+
"tex_dir": "/home/zling/lzl/ICML2026/Build_Dataset/data/2020_12/tex_files_extracted/2012.03500",
|
| 11 |
+
"paper_md": "/home/zling/lzl/ICML2026/Build_Dataset/data/2020_12/main_diagram_database/2012.03500/paper_text/paper.md",
|
| 12 |
+
"metadata_json": "/home/zling/lzl/ICML2026/Build_Dataset/data/2020_12/main_diagram_database/2012.03500/metadata.json",
|
| 13 |
+
"intro_method_from": "/home/zling/lzl/ICML2026/Build_Dataset/data/2020_12/main_diagram_database/2012.03500/paper_text/paper.md",
|
| 14 |
+
"intro_method_from_kind": "markdown"
|
| 15 |
+
},
|
| 16 |
+
"files": {
|
| 17 |
+
"main_drawio": "/home/zling/lzl/ICML2026/Build_Dataset/dataset/2012.03500/main_diagram/main_diagram.drawio",
|
| 18 |
+
"main_png": "/home/zling/lzl/ICML2026/Build_Dataset/dataset/2012.03500/main_diagram/main_diagram.png",
|
| 19 |
+
"main_pdf": "/home/zling/lzl/ICML2026/Build_Dataset/dataset/2012.03500/main_diagram/main_diagram.pdf",
|
| 20 |
+
"intro_method_md": "/home/zling/lzl/ICML2026/Build_Dataset/dataset/2012.03500/paper_text/intro_method.md",
|
| 21 |
+
"paper_pdf": "/home/zling/lzl/ICML2026/Build_Dataset/dataset/2012.03500/paper.pdf",
|
| 22 |
+
"latex": "/home/zling/lzl/ICML2026/Build_Dataset/dataset/2012.03500/latex_source"
|
| 23 |
+
},
|
| 24 |
+
"status": {
|
| 25 |
+
"copy_drawio": "exists",
|
| 26 |
+
"copy_png": "exists",
|
| 27 |
+
"diagram_pdf": "pdf_exists",
|
| 28 |
+
"intro_method": "exists",
|
| 29 |
+
"paper_pdf": "exists",
|
| 30 |
+
"latex": "exists"
|
| 31 |
+
}
|
| 32 |
+
}
|
2012.15788/main_diagram/main_diagram.drawio
ADDED
|
@@ -0,0 +1 @@
|
|
|
|
|
|
|
| 1 |
+
<mxfile host="app.diagrams.net" modified="2021-05-07T11:53:41.040Z" agent="5.0 (Macintosh; Intel Mac OS X 11_2_3) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/89.0.4389.114 Safari/537.36" version="14.6.8" etag="qaHfTuVpUJsLTFWY-Tlm" type="google"><diagram id="bnx8YY2pq1jZOtCTF-bv">7Vxtb5s6FP41lbYPQ4CBwMe26zZdbbqTOmnbRxecxLeAcw1pk/vrrw12eLGTsgRCqNpJCz62wX7Oi885nOQK3CabzxSult9IhOIr24w2V+DjlW1bvuOzD07ZCooFBGVBcSRoFeEe/4cE0RTUNY5Q1hiYExLneNUkhiRNUZg3aJBS8twcNidx86kruBBPNCvCfQhjpAz7iaN8WVJ9e1bRvyC8WMonW15Q9iRQDhY3zpYwIs81Eri7AreUkLy8Sja3KOboSVzKeZ/29O4WRlGad5lglxOeYLwWexPryrdys5Ss0wjx8eYVuHle4hzdr2DIe58ZfxltmScxa1nsUtwO0Rxt9i7J2m2UiQgiCcrplg0REzwBjRAOG4j2c4X0zBS0ZR3lmSBCwd3F7tYVAOxCYKDHAyjbRxFjvWgSmi/JgqQwvquoN02AqjFfCVkJWP5Beb4VcgzXOWmChjY4/8WnG65o/a71fNyIOxeNrWykbGe1Sbz5u95XTStajXnfEcUMGkQFsdw032mDZxlZ01CQPKFmkC6QBNztzFqKYpjjp+btT+GTMyafrBqXKp7p+XQctJXdOT+07ssm4QxYTw83T8HtL7JMGeUzIVEC+RUz9nzBS8T+j1HRoCTmLcy7fxQdN/ABGadZ4TmO41sSE1rMBXOX/2P0LKfkEdV6vOKPzyBpXqOXf/3Yc79lz33Vnluuxp47PZjz2VTMRGXOG8a8su17zHkHNfE1amKPpyb+yWpyxU9p/v9r0hIARlST4M10aZniBiMyRQY7Na7cxhAnJ+LL0JL2zxrAunsqQjqA7D4AshSAvsHsEXHZvECkFA3XQGXpAptesNIFel7MnnAT4Sd2ueCX9+sVok84w6TQ6fLEEMMeqBwlKeyZtbljnrTNwGl3uPYaOEUwWxZr7eidWkA9d63u7qkQGtMwTVPcvfNRXNztmlK4rQ1YEZzmWe1h3zmhFnY7LfF03LqAKeMt0z00nl2UK2jNlssh83nGhaslwjsEukk1UE0koRSFeSHBl6T9ylFyVu2fTLjMG0dkJ2QqoqFsfmce9e7kWmoQ/RM/YoUL2RKu+GW4jTEDm4KXBfOhZMvXhx0Bho+Lgll/r3N2GyToWckXtx9XyLKb6g5cVX69oTwhb9zDxT79dLFOj+MsXSA3QL7jqNOj7QdK8dCeBsdY+lEjedOYXa4IDBDLHyUCLtBbiN5EQM0caFxYScpWRdhayYv375q/VLmJ0Zx9XLM+y3cMYM5WG55N4DPNnMsF77Id3/BnTq2L+wMfSpNazLVZv+u2B8xhguNtOSSDafYhY2fZvLo/ZbQ5oYkYwd8obd6ZRhAEjuO9rxbZ3s2DJHznb6a41u9c8F1XM2Q3aqN2Q27ZGtEm33ODL5hx1yQsxqd8PzhOuFghyhcM0xBlzXklwn+MOnBchrp1GahbFtCgjtMwXkd1nLXBjw6Adkx0inxOAKlaJohLRysV1Iu8uLZtmL5/GSiAmatB4Z0VBPZ7birTqB8dsQIjsALtnj1g+IGypSH37Ok4X3L5U8zPhJykBfslDn+kIa8mWbiL7g94yC4YykUOphLhdXBzpFdbd3NGDObkctp5Rro3nFtRwo7LrEMwtyd2q2cf5NNPkk23lX3w3U7RG+hBNHccvnjRHPAlnPTS6yIdjCjSajpYJM7IhKTaNV+W6tlQUq2mHu+ecISYn3zimdZz4lE5ljT1VDrV7
yPvKMt0NCHbLur4QSFOcbrQRSQKkkX00oDrwFG/xzmAMV6kjBYyAHmO8YbDi5nDcy06EhxFhRHScarJywHdiXYZnKWyzdJ5E73wrUMN0Mhlgc5ZywLtkTOQl1sXKE+x+sEGxnTWJlPycyS4DhgRXF0K7vxoTxG5DiU2MMx57YIm4wQTbk/Th2xVZKfY2Hu2MfbxE9LsRHfjkkLo1ksER/eOdKhyG3meXL7hGDBMAfLbFHW1GbHMW67nkNqoGiMUqVUt+Ir0pF1KdF5FUWPHNk8OMqSeH341HGmXd5yXI5oymMurf2sbd0dFaKhKQaDGoVOqFNRBNVitEFBjv6nUVJ0Xp1FDwoa/sNdbsKQj0Q7shnQhNDUKTnfW9u9CzBR5HoNxxyI3QCh9XIFPK9fqzg6Xh74w/uRqEKCGoq+sws7tmKj1+jBmo74+vJQKO0cX/QyQNOilwk6KR18K5Ywa/15KhZ1WBAYIgHupsOtdBNRo+63Cbm+FXTsv91Ztd2K1nR7OV733l+rnJlhJJV+vlnzgfApxuvhatD56/bhK7XfaOldJW2rVx5t/Z5T4YaA3TI7mW2pgxGjNUVNZU6+1crzz1Vo50/mi13AZCEfzZTCZtB5Fpvdm1KZcbKUT66GKrRz1t1WmUWzlaqp2hiq2cnSJrnaxFcryt1qrF2uLzllr5bx9relg0NWodDgp4mrcySzCrwLM64TtJiwu0QqHfAeFHLLojMVjkH0mKML8c842Gi5xhgzDeHPOu9pA3U+C9OScs2b1W4llxqP6yUlw9z8=</diagram></mxfile>
|
2012.15788/main_diagram/main_diagram.pdf
ADDED
|
Binary file (33.3 kB). View file
|
|
|
2012.15788/paper_text/intro_method.md
ADDED
|
@@ -0,0 +1,9 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Introduction
|
| 2 |
+
|
| 3 |
+
Fact verification is the task of predicting whether claims are true or false using evidence. With the availability of a number of resources [@Wang2017a; @Karadzhov2017FullySources; @Thorne2018a; @Augenstein2019; @Wadden2020FactClaims], the task has attracted significant attention and spawned the development of new models, architectures and approaches. With potentially sensitive applications, recent works have focused on building explainable variants of fact checking [@Atanasova2020GeneratingExplanations; @Stammbach2020; @Kotonya2020]. Exposing the evidence source and decision making process may help the reader uncover subtle issues that cause automated systems to fail. Additionally, using such evidence to continuously update news articles as facts change forms part of the vision outlined by @Cohen2011 for automated newsrooms.
|
| 4 |
+
|
| 5 |
+
In this paper, we propose *Factual Error Correction*, as an explainable alternative for fact verification. Rather than merely assigning a truth label, possibly accompanied by evidence, our goal is to rewrite claims so that they are better supported by the retrieved evidence. For example, in Figure [\[fig:intro_example\]](#fig:intro_example){reference-type="ref" reference="fig:intro_example"}, a claim that would be [Refuted]{.smallcaps} by the evidence using a fact verification system is rewritten so that it becomes supported by evidence retrieved from Wikipedia. This work extends fact guided sentence modification [@Shah2019], which uses short factoid claims to introduce changes to Wikipedia passages. However, they assume that the claim and Wikipedia text are always incongruous and require a meaning-altering change, our proposal makes no assumptions over the veracity, and is applicable to claims both supported and refuted by evidence. Additionally, we incorporate a retrieval component to select evidence for a given claim from a corpus (in our case, Wikipedia) rather than requiring gold standard evidence to be explicitly provided.
|
| 6 |
+
|
| 7 |
+
A challenge for factual error correction is the lack of datasets consisting of claims paired with their corrections. However, with recent developments in fact checking, there is an abundance of new datasets consisting of claims paired with evidence. To address this data scarcity, we make use of distant supervision to incorporate retrieved evidence into generating the corrections.
|
| 8 |
+
|
| 9 |
+
We release a dataset of 65,000 claims, containing the intermediate annotations from FEVER [@Thorne2018a]. These consist of factoid sentences that were used to construct the supported and refuted claims in the dataset, and use these as reference targets for automated evaluation. We further verify the findings through a final round of annotation using human raters. Our evaluation finds high correlation between manual scores and the SARI metric [@xu-etal-2016-optimizing] and our best performing distantly-supervised system generated corrected claims for 24% of instances when using retrieved evidence, with a SARI Final score of .419. A fully-supervised system with gold evidence generated corrections for 69% of instances, indicating plenty of opportunities for future work to extend our contributions.
|
2102.09701/record.json
ADDED
|
@@ -0,0 +1,32 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
{
|
| 2 |
+
"arxiv_id": "2102.09701",
|
| 3 |
+
"month": "2021_02",
|
| 4 |
+
"year": 2021,
|
| 5 |
+
"conference": "NEURIPS",
|
| 6 |
+
"title": "Center Smoothing: Certified Robustness for Networks with Structured Outputs",
|
| 7 |
+
"arxiv_url": "https://arxiv.org/abs/2102.09701",
|
| 8 |
+
"source": {
|
| 9 |
+
"paper_dir": "/home/zling/lzl/ICML2026/Build_Dataset/data/2021_02/main_diagram_database/2102.09701",
|
| 10 |
+
"tex_dir": "/home/zling/lzl/ICML2026/Build_Dataset/data/2021_02/tex_files_extracted/2102.09701",
|
| 11 |
+
"paper_md": "/home/zling/lzl/ICML2026/Build_Dataset/data/2021_02/main_diagram_database/2102.09701/paper_text/paper.md",
|
| 12 |
+
"metadata_json": "/home/zling/lzl/ICML2026/Build_Dataset/data/2021_02/main_diagram_database/2102.09701/metadata.json",
|
| 13 |
+
"intro_method_from": "/home/zling/lzl/ICML2026/Build_Dataset/data/2021_02/main_diagram_database/2102.09701/paper_text/paper.md",
|
| 14 |
+
"intro_method_from_kind": "markdown"
|
| 15 |
+
},
|
| 16 |
+
"files": {
|
| 17 |
+
"main_drawio": "/home/zling/lzl/ICML2026/Build_Dataset/dataset/2102.09701/main_diagram/main_diagram.drawio",
|
| 18 |
+
"main_png": "/home/zling/lzl/ICML2026/Build_Dataset/dataset/2102.09701/main_diagram/main_diagram.png",
|
| 19 |
+
"main_pdf": "/home/zling/lzl/ICML2026/Build_Dataset/dataset/2102.09701/main_diagram/main_diagram.pdf",
|
| 20 |
+
"intro_method_md": "/home/zling/lzl/ICML2026/Build_Dataset/dataset/2102.09701/paper_text/intro_method.md",
|
| 21 |
+
"paper_pdf": "/home/zling/lzl/ICML2026/Build_Dataset/dataset/2102.09701/paper.pdf",
|
| 22 |
+
"latex": "/home/zling/lzl/ICML2026/Build_Dataset/dataset/2102.09701/latex_source"
|
| 23 |
+
},
|
| 24 |
+
"status": {
|
| 25 |
+
"copy_drawio": "exists",
|
| 26 |
+
"copy_png": "exists",
|
| 27 |
+
"diagram_pdf": "pdf_exists",
|
| 28 |
+
"intro_method": "exists",
|
| 29 |
+
"paper_pdf": "exists",
|
| 30 |
+
"latex": "exists"
|
| 31 |
+
}
|
| 32 |
+
}
|
2103.00673/record.json
ADDED
|
@@ -0,0 +1,32 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
{
|
| 2 |
+
"arxiv_id": "2103.00673",
|
| 3 |
+
"month": "2021_03",
|
| 4 |
+
"year": 2021,
|
| 5 |
+
"conference": "NEURIPS",
|
| 6 |
+
"title": "Convolutional Normalization: Improving Deep Convolutional Network Robustness and Training",
|
| 7 |
+
"arxiv_url": "https://arxiv.org/abs/2103.00673",
|
| 8 |
+
"source": {
|
| 9 |
+
"paper_dir": "/home/zling/lzl/ICML2026/Build_Dataset/data/2021_03/main_diagram_database/2103.00673",
|
| 10 |
+
"tex_dir": "/home/zling/lzl/ICML2026/Build_Dataset/data/2021_03/tex_files_extracted/2103.00673",
|
| 11 |
+
"paper_md": "/home/zling/lzl/ICML2026/Build_Dataset/data/2021_03/main_diagram_database/2103.00673/paper_text/paper.md",
|
| 12 |
+
"metadata_json": "/home/zling/lzl/ICML2026/Build_Dataset/data/2021_03/main_diagram_database/2103.00673/metadata.json",
|
| 13 |
+
"intro_method_from": "/home/zling/lzl/ICML2026/Build_Dataset/data/2021_03/main_diagram_database/2103.00673/paper_text/paper.md",
|
| 14 |
+
"intro_method_from_kind": "markdown"
|
| 15 |
+
},
|
| 16 |
+
"files": {
|
| 17 |
+
"main_drawio": "/home/zling/lzl/ICML2026/Build_Dataset/dataset/2103.00673/main_diagram/main_diagram.drawio",
|
| 18 |
+
"main_png": "/home/zling/lzl/ICML2026/Build_Dataset/dataset/2103.00673/main_diagram/main_diagram.png",
|
| 19 |
+
"main_pdf": "/home/zling/lzl/ICML2026/Build_Dataset/dataset/2103.00673/main_diagram/main_diagram.pdf",
|
| 20 |
+
"intro_method_md": "/home/zling/lzl/ICML2026/Build_Dataset/dataset/2103.00673/paper_text/intro_method.md",
|
| 21 |
+
"paper_pdf": "/home/zling/lzl/ICML2026/Build_Dataset/dataset/2103.00673/paper.pdf",
|
| 22 |
+
"latex": "/home/zling/lzl/ICML2026/Build_Dataset/dataset/2103.00673/latex_source"
|
| 23 |
+
},
|
| 24 |
+
"status": {
|
| 25 |
+
"copy_drawio": "exists",
|
| 26 |
+
"copy_png": "exists",
|
| 27 |
+
"diagram_pdf": "pdf_exists",
|
| 28 |
+
"intro_method": "exists",
|
| 29 |
+
"paper_pdf": "exists",
|
| 30 |
+
"latex": "exists"
|
| 31 |
+
}
|
| 32 |
+
}
|
2103.04503/record.json
ADDED
|
@@ -0,0 +1,32 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
{
|
| 2 |
+
"arxiv_id": "2103.04503",
|
| 3 |
+
"month": "2021_03",
|
| 4 |
+
"year": 2021,
|
| 5 |
+
"conference": "CVPR",
|
| 6 |
+
"title": "End-to-End Human Object Interaction Detection With HOI Transformer",
|
| 7 |
+
"arxiv_url": "https://arxiv.org/abs/2103.04503",
|
| 8 |
+
"source": {
|
| 9 |
+
"paper_dir": "/home/zling/lzl/ICML2026/Build_Dataset/data/2021_03/main_diagram_database/2103.04503",
|
| 10 |
+
"tex_dir": "/home/zling/lzl/ICML2026/Build_Dataset/data/2021_03/tex_files_extracted/2103.04503",
|
| 11 |
+
"paper_md": "/home/zling/lzl/ICML2026/Build_Dataset/data/2021_03/main_diagram_database/2103.04503/paper_text/paper.md",
|
| 12 |
+
"metadata_json": "/home/zling/lzl/ICML2026/Build_Dataset/data/2021_03/main_diagram_database/2103.04503/metadata.json",
|
| 13 |
+
"intro_method_from": "/home/zling/lzl/ICML2026/Build_Dataset/data/2021_03/main_diagram_database/2103.04503/paper_text/paper.md",
|
| 14 |
+
"intro_method_from_kind": "markdown"
|
| 15 |
+
},
|
| 16 |
+
"files": {
|
| 17 |
+
"main_drawio": "/home/zling/lzl/ICML2026/Build_Dataset/dataset/2103.04503/main_diagram/main_diagram.drawio",
|
| 18 |
+
"main_png": "/home/zling/lzl/ICML2026/Build_Dataset/dataset/2103.04503/main_diagram/main_diagram.png",
|
| 19 |
+
"main_pdf": "/home/zling/lzl/ICML2026/Build_Dataset/dataset/2103.04503/main_diagram/main_diagram.pdf",
|
| 20 |
+
"intro_method_md": "/home/zling/lzl/ICML2026/Build_Dataset/dataset/2103.04503/paper_text/intro_method.md",
|
| 21 |
+
"paper_pdf": "/home/zling/lzl/ICML2026/Build_Dataset/dataset/2103.04503/paper.pdf",
|
| 22 |
+
"latex": "/home/zling/lzl/ICML2026/Build_Dataset/dataset/2103.04503/latex_source"
|
| 23 |
+
},
|
| 24 |
+
"status": {
|
| 25 |
+
"copy_drawio": "exists",
|
| 26 |
+
"copy_png": "exists",
|
| 27 |
+
"diagram_pdf": "pdf_exists",
|
| 28 |
+
"intro_method": "exists",
|
| 29 |
+
"paper_pdf": "exists",
|
| 30 |
+
"latex": "exists"
|
| 31 |
+
}
|
| 32 |
+
}
|
2103.10379/record.json
ADDED
|
@@ -0,0 +1,32 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
{
|
| 2 |
+
"arxiv_id": "2103.10379",
|
| 3 |
+
"month": "2021_03",
|
| 4 |
+
"year": 2021,
|
| 5 |
+
"conference": "AAAI",
|
| 6 |
+
"title": "ChronoR: Rotation Based Temporal Knowledge Graph Embedding",
|
| 7 |
+
"arxiv_url": "https://arxiv.org/abs/2103.10379",
|
| 8 |
+
"source": {
|
| 9 |
+
"paper_dir": "/home/zling/lzl/ICML2026/Build_Dataset/data/2021_03/main_diagram_database/2103.10379",
|
| 10 |
+
"tex_dir": "/home/zling/lzl/ICML2026/Build_Dataset/data/2021_03/tex_files_extracted/2103.10379",
|
| 11 |
+
"paper_md": "/home/zling/lzl/ICML2026/Build_Dataset/data/2021_03/main_diagram_database/2103.10379/paper_text/paper.md",
|
| 12 |
+
"metadata_json": "/home/zling/lzl/ICML2026/Build_Dataset/data/2021_03/main_diagram_database/2103.10379/metadata.json",
|
| 13 |
+
"intro_method_from": "/home/zling/lzl/ICML2026/Build_Dataset/data/2021_03/main_diagram_database/2103.10379/paper_text/paper.md",
|
| 14 |
+
"intro_method_from_kind": "markdown"
|
| 15 |
+
},
|
| 16 |
+
"files": {
|
| 17 |
+
"main_drawio": "/home/zling/lzl/ICML2026/Build_Dataset/dataset/2103.10379/main_diagram/main_diagram.drawio",
|
| 18 |
+
"main_png": "/home/zling/lzl/ICML2026/Build_Dataset/dataset/2103.10379/main_diagram/main_diagram.png",
|
| 19 |
+
"main_pdf": "/home/zling/lzl/ICML2026/Build_Dataset/dataset/2103.10379/main_diagram/main_diagram.pdf",
|
| 20 |
+
"intro_method_md": "/home/zling/lzl/ICML2026/Build_Dataset/dataset/2103.10379/paper_text/intro_method.md",
|
| 21 |
+
"paper_pdf": "/home/zling/lzl/ICML2026/Build_Dataset/dataset/2103.10379/paper.pdf",
|
| 22 |
+
"latex": "/home/zling/lzl/ICML2026/Build_Dataset/dataset/2103.10379/latex_source"
|
| 23 |
+
},
|
| 24 |
+
"status": {
|
| 25 |
+
"copy_drawio": "exists",
|
| 26 |
+
"copy_png": "exists",
|
| 27 |
+
"diagram_pdf": "pdf_exists",
|
| 28 |
+
"intro_method": "exists",
|
| 29 |
+
"paper_pdf": "exists",
|
| 30 |
+
"latex": "exists"
|
| 31 |
+
}
|
| 32 |
+
}
|
2103.15573/record.json
ADDED
|
@@ -0,0 +1,32 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
{
|
| 2 |
+
"arxiv_id": "2103.15573",
|
| 3 |
+
"month": "2021_03",
|
| 4 |
+
"year": 2021,
|
| 5 |
+
"conference": "CVPR",
|
| 6 |
+
"title": "HumanGPS: Geodesic PreServing Feature for Dense Human Correspondences",
|
| 7 |
+
"arxiv_url": "https://arxiv.org/abs/2103.15573",
|
| 8 |
+
"source": {
|
| 9 |
+
"paper_dir": "/home/zling/lzl/ICML2026/Build_Dataset/data/2021_03/main_diagram_database/2103.15573",
|
| 10 |
+
"tex_dir": "/home/zling/lzl/ICML2026/Build_Dataset/data/2021_03/tex_files_extracted/2103.15573",
|
| 11 |
+
"paper_md": "/home/zling/lzl/ICML2026/Build_Dataset/data/2021_03/main_diagram_database/2103.15573/paper_text/paper.md",
|
| 12 |
+
"metadata_json": "/home/zling/lzl/ICML2026/Build_Dataset/data/2021_03/main_diagram_database/2103.15573/metadata.json",
|
| 13 |
+
"intro_method_from": "/home/zling/lzl/ICML2026/Build_Dataset/data/2021_03/main_diagram_database/2103.15573/paper_text/paper.md",
|
| 14 |
+
"intro_method_from_kind": "markdown"
|
| 15 |
+
},
|
| 16 |
+
"files": {
|
| 17 |
+
"main_drawio": "/home/zling/lzl/ICML2026/Build_Dataset/dataset/2103.15573/main_diagram/main_diagram.drawio",
|
| 18 |
+
"main_png": "/home/zling/lzl/ICML2026/Build_Dataset/dataset/2103.15573/main_diagram/main_diagram.png",
|
| 19 |
+
"main_pdf": "/home/zling/lzl/ICML2026/Build_Dataset/dataset/2103.15573/main_diagram/main_diagram.pdf",
|
| 20 |
+
"intro_method_md": "/home/zling/lzl/ICML2026/Build_Dataset/dataset/2103.15573/paper_text/intro_method.md",
|
| 21 |
+
"paper_pdf": "/home/zling/lzl/ICML2026/Build_Dataset/dataset/2103.15573/paper.pdf",
|
| 22 |
+
"latex": "/home/zling/lzl/ICML2026/Build_Dataset/dataset/2103.15573/latex_source"
|
| 23 |
+
},
|
| 24 |
+
"status": {
|
| 25 |
+
"copy_drawio": "exists",
|
| 26 |
+
"copy_png": "exists",
|
| 27 |
+
"diagram_pdf": "pdf_exists",
|
| 28 |
+
"intro_method": "exists",
|
| 29 |
+
"paper_pdf": "exists",
|
| 30 |
+
"latex": "exists"
|
| 31 |
+
}
|
| 32 |
+
}
|
2104.05832/record.json
ADDED
|
@@ -0,0 +1,32 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
{
|
| 2 |
+
"arxiv_id": "2104.05832",
|
| 3 |
+
"month": "2021_04",
|
| 4 |
+
"year": 2021,
|
| 5 |
+
"conference": "NAACL",
|
| 6 |
+
"title": "SPARTQA: A Textual Question Answering Benchmark for Spatial Reasoning",
|
| 7 |
+
"arxiv_url": "https://arxiv.org/abs/2104.05832",
|
| 8 |
+
"source": {
|
| 9 |
+
"paper_dir": "/home/zling/lzl/ICML2026/Build_Dataset/data/2021_04/main_diagram_database/2104.05832",
|
| 10 |
+
"tex_dir": "/home/zling/lzl/ICML2026/Build_Dataset/data/2021_04/tex_files_extracted/2104.05832",
|
| 11 |
+
"paper_md": "/home/zling/lzl/ICML2026/Build_Dataset/data/2021_04/main_diagram_database/2104.05832/paper_text/paper.md",
|
| 12 |
+
"metadata_json": "/home/zling/lzl/ICML2026/Build_Dataset/data/2021_04/main_diagram_database/2104.05832/metadata.json",
|
| 13 |
+
"intro_method_from": "/home/zling/lzl/ICML2026/Build_Dataset/data/2021_04/main_diagram_database/2104.05832/paper_text/paper.md",
|
| 14 |
+
"intro_method_from_kind": "markdown"
|
| 15 |
+
},
|
| 16 |
+
"files": {
|
| 17 |
+
"main_drawio": "/home/zling/lzl/ICML2026/Build_Dataset/dataset/2104.05832/main_diagram/main_diagram.drawio",
|
| 18 |
+
"main_png": "/home/zling/lzl/ICML2026/Build_Dataset/dataset/2104.05832/main_diagram/main_diagram.png",
|
| 19 |
+
"main_pdf": "/home/zling/lzl/ICML2026/Build_Dataset/dataset/2104.05832/main_diagram/main_diagram.pdf",
|
| 20 |
+
"intro_method_md": "/home/zling/lzl/ICML2026/Build_Dataset/dataset/2104.05832/paper_text/intro_method.md",
|
| 21 |
+
"paper_pdf": "/home/zling/lzl/ICML2026/Build_Dataset/dataset/2104.05832/paper.pdf",
|
| 22 |
+
"latex": "/home/zling/lzl/ICML2026/Build_Dataset/dataset/2104.05832/latex_source"
|
| 23 |
+
},
|
| 24 |
+
"status": {
|
| 25 |
+
"copy_drawio": "exists",
|
| 26 |
+
"copy_png": "exists",
|
| 27 |
+
"diagram_pdf": "pdf_exists",
|
| 28 |
+
"intro_method": "exists",
|
| 29 |
+
"paper_pdf": "exists",
|
| 30 |
+
"latex": "exists"
|
| 31 |
+
}
|
| 32 |
+
}
|
2106.02940/record.json
ADDED
|
@@ -0,0 +1,32 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
{
|
| 2 |
+
"arxiv_id": "2106.02940",
|
| 3 |
+
"month": "2021_06",
|
| 4 |
+
"year": 2022,
|
| 5 |
+
"conference": "AAAI",
|
| 6 |
+
"title": "Same State, Different Task: Continual Reinforcement Learning without Interference",
|
| 7 |
+
"arxiv_url": "https://arxiv.org/abs/2106.02940",
|
| 8 |
+
"source": {
|
| 9 |
+
"paper_dir": "/home/zling/lzl/ICML2026/Build_Dataset/data/2021_06/main_diagram_database/2106.02940",
|
| 10 |
+
"tex_dir": "/home/zling/lzl/ICML2026/Build_Dataset/data/2021_06/tex_files_extracted/2106.02940",
|
| 11 |
+
"paper_md": "/home/zling/lzl/ICML2026/Build_Dataset/data/2021_06/main_diagram_database/2106.02940/paper_text/paper.md",
|
| 12 |
+
"metadata_json": "/home/zling/lzl/ICML2026/Build_Dataset/data/2021_06/main_diagram_database/2106.02940/metadata.json",
|
| 13 |
+
"intro_method_from": "/home/zling/lzl/ICML2026/Build_Dataset/data/2021_06/main_diagram_database/2106.02940/paper_text/paper.md",
|
| 14 |
+
"intro_method_from_kind": "markdown"
|
| 15 |
+
},
|
| 16 |
+
"files": {
|
| 17 |
+
"main_drawio": "/home/zling/lzl/ICML2026/Build_Dataset/dataset/2106.02940/main_diagram/main_diagram.drawio",
|
| 18 |
+
"main_png": "/home/zling/lzl/ICML2026/Build_Dataset/dataset/2106.02940/main_diagram/main_diagram.png",
|
| 19 |
+
"main_pdf": "/home/zling/lzl/ICML2026/Build_Dataset/dataset/2106.02940/main_diagram/main_diagram.pdf",
|
| 20 |
+
"intro_method_md": "/home/zling/lzl/ICML2026/Build_Dataset/dataset/2106.02940/paper_text/intro_method.md",
|
| 21 |
+
"paper_pdf": "/home/zling/lzl/ICML2026/Build_Dataset/dataset/2106.02940/paper.pdf",
|
| 22 |
+
"latex": "/home/zling/lzl/ICML2026/Build_Dataset/dataset/2106.02940/latex_source"
|
| 23 |
+
},
|
| 24 |
+
"status": {
|
| 25 |
+
"copy_drawio": "ok",
|
| 26 |
+
"copy_png": "ok",
|
| 27 |
+
"diagram_pdf": "ok",
|
| 28 |
+
"intro_method": "ok",
|
| 29 |
+
"paper_pdf": "ok",
|
| 30 |
+
"latex": "ok"
|
| 31 |
+
}
|
| 32 |
+
}
|
2106.06795/record.json
ADDED
|
@@ -0,0 +1,32 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
{
|
| 2 |
+
"arxiv_id": "2106.06795",
|
| 3 |
+
"month": "2021_06",
|
| 4 |
+
"year": 2021,
|
| 5 |
+
"conference": "IJCAI",
|
| 6 |
+
"title": "Knowledge Consolidation based Class Incremental Online Learning with Limited Data",
|
| 7 |
+
"arxiv_url": "https://arxiv.org/abs/2106.06795",
|
| 8 |
+
"source": {
|
| 9 |
+
"paper_dir": "/home/zling/lzl/ICML2026/Build_Dataset/data/2021_06/main_diagram_database/2106.06795",
|
| 10 |
+
"tex_dir": "/home/zling/lzl/ICML2026/Build_Dataset/data/2021_06/tex_files_extracted/2106.06795",
|
| 11 |
+
"paper_md": "/home/zling/lzl/ICML2026/Build_Dataset/data/2021_06/main_diagram_database/2106.06795/paper_text/paper.md",
|
| 12 |
+
"metadata_json": "/home/zling/lzl/ICML2026/Build_Dataset/data/2021_06/main_diagram_database/2106.06795/metadata.json",
|
| 13 |
+
"intro_method_from": "/home/zling/lzl/ICML2026/Build_Dataset/data/2021_06/main_diagram_database/2106.06795/paper_text/paper.md",
|
| 14 |
+
"intro_method_from_kind": "markdown"
|
| 15 |
+
},
|
| 16 |
+
"files": {
|
| 17 |
+
"main_drawio": "/home/zling/lzl/ICML2026/Build_Dataset/dataset/2106.06795/main_diagram/main_diagram.drawio",
|
| 18 |
+
"main_png": "/home/zling/lzl/ICML2026/Build_Dataset/dataset/2106.06795/main_diagram/main_diagram.png",
|
| 19 |
+
"main_pdf": "/home/zling/lzl/ICML2026/Build_Dataset/dataset/2106.06795/main_diagram/main_diagram.pdf",
|
| 20 |
+
"intro_method_md": "/home/zling/lzl/ICML2026/Build_Dataset/dataset/2106.06795/paper_text/intro_method.md",
|
| 21 |
+
"paper_pdf": "/home/zling/lzl/ICML2026/Build_Dataset/dataset/2106.06795/paper.pdf",
|
| 22 |
+
"latex": "/home/zling/lzl/ICML2026/Build_Dataset/dataset/2106.06795/latex_source"
|
| 23 |
+
},
|
| 24 |
+
"status": {
|
| 25 |
+
"copy_drawio": "exists",
|
| 26 |
+
"copy_png": "exists",
|
| 27 |
+
"diagram_pdf": "pdf_exists",
|
| 28 |
+
"intro_method": "exists",
|
| 29 |
+
"paper_pdf": "exists",
|
| 30 |
+
"latex": "exists"
|
| 31 |
+
}
|
| 32 |
+
}
|
2106.13265/record.json
ADDED
|
@@ -0,0 +1,32 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
{
|
| 2 |
+
"arxiv_id": "2106.13265",
|
| 3 |
+
"month": "2021_06",
|
| 4 |
+
"year": 2023,
|
| 5 |
+
"conference": "IJCAI",
|
| 6 |
+
"title": "Hierarchical Apprenticeship Learning for Disease Progression Modeling",
|
| 7 |
+
"arxiv_url": "https://arxiv.org/abs/2106.13265",
|
| 8 |
+
"source": {
|
| 9 |
+
"paper_dir": "/home/zling/lzl/ICML2026/Build_Dataset/data/2021_06/main_diagram_database/2106.13265",
|
| 10 |
+
"tex_dir": "/home/zling/lzl/ICML2026/Build_Dataset/data/2021_06/tex_files_extracted/2106.13265",
|
| 11 |
+
"paper_md": "/home/zling/lzl/ICML2026/Build_Dataset/data/2021_06/main_diagram_database/2106.13265/paper_text/paper.md",
|
| 12 |
+
"metadata_json": "/home/zling/lzl/ICML2026/Build_Dataset/data/2021_06/main_diagram_database/2106.13265/metadata.json",
|
| 13 |
+
"intro_method_from": "/home/zling/lzl/ICML2026/Build_Dataset/data/2021_06/main_diagram_database/2106.13265/paper_text/paper.md",
|
| 14 |
+
"intro_method_from_kind": "markdown"
|
| 15 |
+
},
|
| 16 |
+
"files": {
|
| 17 |
+
"main_drawio": "/home/zling/lzl/ICML2026/Build_Dataset/dataset/2106.13265/main_diagram/main_diagram.drawio",
|
| 18 |
+
"main_png": "/home/zling/lzl/ICML2026/Build_Dataset/dataset/2106.13265/main_diagram/main_diagram.png",
|
| 19 |
+
"main_pdf": "/home/zling/lzl/ICML2026/Build_Dataset/dataset/2106.13265/main_diagram/main_diagram.pdf",
|
| 20 |
+
"intro_method_md": "/home/zling/lzl/ICML2026/Build_Dataset/dataset/2106.13265/paper_text/intro_method.md",
|
| 21 |
+
"paper_pdf": "/home/zling/lzl/ICML2026/Build_Dataset/dataset/2106.13265/paper.pdf",
|
| 22 |
+
"latex": "/home/zling/lzl/ICML2026/Build_Dataset/dataset/2106.13265/latex_source"
|
| 23 |
+
},
|
| 24 |
+
"status": {
|
| 25 |
+
"copy_drawio": "exists",
|
| 26 |
+
"copy_png": "exists",
|
| 27 |
+
"diagram_pdf": "pdf_exists",
|
| 28 |
+
"intro_method": "exists",
|
| 29 |
+
"paper_pdf": "exists",
|
| 30 |
+
"latex": "exists"
|
| 31 |
+
}
|
| 32 |
+
}
|
2107.12309/record.json
ADDED
|
@@ -0,0 +1,32 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
{
|
| 2 |
+
"arxiv_id": "2107.12309",
|
| 3 |
+
"month": "2021_07",
|
| 4 |
+
"year": 2021,
|
| 5 |
+
"conference": "ICCV",
|
| 6 |
+
"title": "Spatial-Temporal Transformer for Dynamic Scene Graph Generation",
|
| 7 |
+
"arxiv_url": "https://arxiv.org/abs/2107.12309",
|
| 8 |
+
"source": {
|
| 9 |
+
"paper_dir": "/home/zling/lzl/ICML2026/Build_Dataset/data/2021_07/main_diagram_database/2107.12309",
|
| 10 |
+
"tex_dir": "/home/zling/lzl/ICML2026/Build_Dataset/data/2021_07/tex_files_extracted/2107.12309",
|
| 11 |
+
"paper_md": "/home/zling/lzl/ICML2026/Build_Dataset/data/2021_07/main_diagram_database/2107.12309/paper_text/paper.md",
|
| 12 |
+
"metadata_json": "/home/zling/lzl/ICML2026/Build_Dataset/data/2021_07/main_diagram_database/2107.12309/metadata.json",
|
| 13 |
+
"intro_method_from": "/home/zling/lzl/ICML2026/Build_Dataset/data/2021_07/main_diagram_database/2107.12309/paper_text/paper.md",
|
| 14 |
+
"intro_method_from_kind": "markdown"
|
| 15 |
+
},
|
| 16 |
+
"files": {
|
| 17 |
+
"main_drawio": "/home/zling/lzl/ICML2026/Build_Dataset/dataset/2107.12309/main_diagram/main_diagram.drawio",
|
| 18 |
+
"main_png": "/home/zling/lzl/ICML2026/Build_Dataset/dataset/2107.12309/main_diagram/main_diagram.png",
|
| 19 |
+
"main_pdf": "/home/zling/lzl/ICML2026/Build_Dataset/dataset/2107.12309/main_diagram/main_diagram.pdf",
|
| 20 |
+
"intro_method_md": "/home/zling/lzl/ICML2026/Build_Dataset/dataset/2107.12309/paper_text/intro_method.md",
|
| 21 |
+
"paper_pdf": "/home/zling/lzl/ICML2026/Build_Dataset/dataset/2107.12309/paper.pdf",
|
| 22 |
+
"latex": "/home/zling/lzl/ICML2026/Build_Dataset/dataset/2107.12309/latex_source"
|
| 23 |
+
},
|
| 24 |
+
"status": {
|
| 25 |
+
"copy_drawio": "exists",
|
| 26 |
+
"copy_png": "exists",
|
| 27 |
+
"diagram_pdf": "pdf_exists",
|
| 28 |
+
"intro_method": "exists",
|
| 29 |
+
"paper_pdf": "exists",
|
| 30 |
+
"latex": "exists"
|
| 31 |
+
}
|
| 32 |
+
}
|
2108.09079/record.json
ADDED
|
@@ -0,0 +1,32 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
{
|
| 2 |
+
"arxiv_id": "2108.09079",
|
| 3 |
+
"month": "2021_08",
|
| 4 |
+
"year": 2021,
|
| 5 |
+
"conference": "ICCV",
|
| 6 |
+
"title": "Structure-Preserving Deraining With Residue Channel Prior Guidance",
|
| 7 |
+
"arxiv_url": "https://arxiv.org/abs/2108.09079",
|
| 8 |
+
"source": {
|
| 9 |
+
"paper_dir": "/home/zling/lzl/ICML2026/Build_Dataset/data/2021_08/main_diagram_database/2108.09079",
|
| 10 |
+
"tex_dir": "/home/zling/lzl/ICML2026/Build_Dataset/data/2021_08/tex_files_extracted/2108.09079",
|
| 11 |
+
"paper_md": "/home/zling/lzl/ICML2026/Build_Dataset/data/2021_08/main_diagram_database/2108.09079/paper_text/paper.md",
|
| 12 |
+
"metadata_json": "/home/zling/lzl/ICML2026/Build_Dataset/data/2021_08/main_diagram_database/2108.09079/metadata.json",
|
| 13 |
+
"intro_method_from": "/home/zling/lzl/ICML2026/Build_Dataset/data/2021_08/main_diagram_database/2108.09079/paper_text/paper.md",
|
| 14 |
+
"intro_method_from_kind": "markdown"
|
| 15 |
+
},
|
| 16 |
+
"files": {
|
| 17 |
+
"main_drawio": "/home/zling/lzl/ICML2026/Build_Dataset/dataset/2108.09079/main_diagram/main_diagram.drawio",
|
| 18 |
+
"main_png": "/home/zling/lzl/ICML2026/Build_Dataset/dataset/2108.09079/main_diagram/main_diagram.png",
|
| 19 |
+
"main_pdf": "/home/zling/lzl/ICML2026/Build_Dataset/dataset/2108.09079/main_diagram/main_diagram.pdf",
|
| 20 |
+
"intro_method_md": "/home/zling/lzl/ICML2026/Build_Dataset/dataset/2108.09079/paper_text/intro_method.md",
|
| 21 |
+
"paper_pdf": "/home/zling/lzl/ICML2026/Build_Dataset/dataset/2108.09079/paper.pdf",
|
| 22 |
+
"latex": "/home/zling/lzl/ICML2026/Build_Dataset/dataset/2108.09079/latex_source"
|
| 23 |
+
},
|
| 24 |
+
"status": {
|
| 25 |
+
"copy_drawio": "exists",
|
| 26 |
+
"copy_png": "exists",
|
| 27 |
+
"diagram_pdf": "pdf_exists",
|
| 28 |
+
"intro_method": "exists",
|
| 29 |
+
"paper_pdf": "exists",
|
| 30 |
+
"latex": "exists"
|
| 31 |
+
}
|
| 32 |
+
}
|
2108.12318/main_diagram/main_diagram.drawio
ADDED
|
@@ -0,0 +1 @@
|
|
|
|
|
|
|
| 1 |
+
<mxfile host="app.diagrams.net" modified="2021-02-01T11:34:36.585Z" agent="5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/88.0.4324.104 Safari/537.36" etag="pfdY3QCKexxSsKsxKZTc" version="14.2.9" type="device"><diagram id="g15KqjpKPJuZy75RXw08" name="Page-1">zVlbd5swDP41edwO5pKQxy5ts3O6nXXL1q6PDijBLcHMmIbs188EczVhNNf2pZYsG1vS90luB8ZklUwZDr2v1AV/oGtuMjCuB7quI90Uv1LNJtMghIxMs2TElbpSMSN/QSo1qY2JC1HNkFPqcxLWlQ4NAnB4TYcZo+u62YL69a+GeAmKYuZgX9U+Epd7mdbWR6X+M5Cll38ZDcfZzArnxvImkYdduq6ojJuBMWGU8my0Sibgp97L/ZKtu90xWxyMQcD7LJjN0fOvafTsfP90ZwfJw/3d7fTDMNvlFfuxvLA8LN/kHgBXOESKlHGPLmmA/ZtS+4nROHAh/YwmpNLmC6WhUCKhfAbONzK6OOZUqDy+8uWsuAHb/Jbrt8JTKny0cvE6qU5eb6SkukB6JaIxc6Dj3jIJOWZL4B12MsypDyofkA6eAl2BOI8wYOBjTl7rSYNl7i0LuzI8YiAj9IZoGUq0bgHzmIFQ3iScYYdTpgSwDE/q67VHOMxCvHXPWqC2HooF8f0J9cU26VrDtcB2TaGPOKMvUJmx9bkxHHYF4RUYh6TTbXLWkhDJScKU8rpEHMph5FXQNtRO5Gg0Pj4uohfgjieF53gV5osDGsB+uCkoRWuLkLb9OQgmVk+YIPtd4cRSovczu4GuTXwcRWRBQMXJf5BRh1ETJxjshdMWhaFjw3xxHJwUVVECZXxxnKievkT92D/BR30T3HxXCT5S3H7FOXZe0qw+VYovbAec1hSf25ZpdRLN/imua5fO8ROUgjO2SJAQ/jvfQ4wrq4RULkqF87VV1rHRJJfeUxLwMpmQ3Wgs7EaSZBeSq6q9c2OjgmnzrGxulN1Y2eiKMbypmIWpQdRx4MZ30EjrPJdiX38DiEF2gjL7C+ceQPqmgoh7Rl4xT9vQB8wInvvp8J6BSxxOaBApiGEeXc3j6P/96DugpLTNrQfFUikJDVVKQuhkdVctACopBe5V+gAWkrOtCE5H8+jiyCu8nHNGyRNPVZpo5YySn0pKeqrO7eCnOjHuzzx9O1WjJ/NUYmu1VJtcdyhBGXX8jvbkpybR6aehp+J8m1rN3Xkq1GV+InKyFWQUvf8XPN/+eer0xHSe54AxHNX9O/rYxkxmCzNp1uHMdPtI7q5m7LP16+/Xb0/wcPeHJS1PrynDLoFtXvwAcbkI+4rXBReF6TBk1IGoh/fnotldbkPwLeY+SR/QBxUIxektodkZhyaKC5hUwjBuIRHzCOWhNQhHqA7tNQD1qwFo8KYetV4DaqWola2P9tTbncGXKghNhjWsPSuCoTc20s5TEvJEPxbJt4ZI5fj98/sN76JdPQ46d353MW+/DuhS+W00HixWkwP75rdp1jcyx6fJ72YPc2B+C7H8j09mXv7jzLj5Bw==</diagram></mxfile>
|
2108.12318/main_diagram/main_diagram.pdf
ADDED
|
Binary file (12.9 kB). View file
|
|
|
2108.12318/paper_text/intro_method.md
ADDED
|
@@ -0,0 +1,75 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Introduction
|
| 2 |
+
|
| 3 |
+
Deep learning has provided remarkable advances in language understanding and modelling tasks in recent years [@Vaswani2017transformer; @Devlin2019bert; @Brown2020gpt3]. However, this increased utility may harm user privacy, as neural models trained with datasets containing personal identifiable information can unintentionally leak information that users may prefer to keep private [@Carlini2019secret; @Song2017secret]. Even seemingly innocuous collections of metadata [@Xu2008deanon] such as data provided by the users (e.g. at registration time on social media) or data which has been cleansed of identifying attributes [@Sun2012deanon], can provide *latent* information for the re-identification of participants.
|
| 4 |
+
|
| 5 |
+
Using social media data can also raise ethical considerations [@Townsend:2016]. Users may have edited or deleted posts that models continue to rely on in existing datasets, and may unintentionally reveal information they would rather keep private [@Bartunov2012; @Pontes2012; @Goga2013]. Research has shown practical attacks that exploit trained models to establish whether a particular individual formed part of a model's training dataset, in an attack known as membership inference [@Leino2020inference; @Truex2018inference]. Personally identifiable attributes such as age, gender, or location can be reliably reconstructed given the output of such a model [@Fredrikson2015inference; @Zhang2020inference]. Neural representations of input data, including language embeddings, have proven to be a vulnerability for these inferences [@Song2020inference], thus privacy-preserving techniques should be applied to these text representations when they form part of a machine learning pipeline.
|
| 6 |
+
|
| 7 |
+
To minimise the risk of such attacks in uncovering sensitive information, previous work has employed an adversarial training objective [@Coavoux2018inference; @Li2018adversarial] by modifying the loss function of the model to impose a penalty when a simulated attacker task, such as predicting a private variable from the input sequence, performs well. However, this approach provides no formal privacy guarantees nor privacy loss accounting system. @Phan2019adversarial proposed an approach which implements classical differential privacy in an adversarial learning paradigm, however, this work relies on adversarial objectives to promote robustness to adversarial samples rather than privacy.
|
| 8 |
+
|
| 9 |
+
Providing a privacy guarantee leads to the notion of differential privacy (DP), as defined by @Dwork2013foundations. This definition quantifies privacy loss as the maximum possible deviation between the same aggregate function applied to two datasets which differ only in a single record, which can be expressed by the variable $\epsilon$.
|
| 10 |
+
|
| 11 |
+
::: {#def:epsilon_privacy .definition}
|
| 12 |
+
**Definition 1** ($\epsilon$-differential privacy). *The level of private information leaked by a computation $M$ can be expressed by the variable $\epsilon$ where for any two data sets $A$ and $B$, and any set of possible outputs $S \subset Range(M)$, $$\begin{equation*}
|
| 13 |
+
\resizebox{0.99\hsize}{!}{$[M(A) \in S] \leq Pr[M(B) \in S] \times exp(\epsilon \times |A \oplus B|)$}
|
| 14 |
+
\end{equation*}$$*
|
| 15 |
+
:::
|
| 16 |
+
|
| 17 |
+
This notion of $\epsilon$-differential privacy has been extended to text embeddings through the application of calibrated noise [@Fernandes2019; @Beigi2019embeds]. @Lyu2020perturb proposed a method based on local differential privacy---an extension to the schema under which noise is applied to the input data before it leaves the user's device and is encountered by the model owner---producing a private representation which can be sent to a server for classification. However, this approach uses simulated attacker performance as a test benchmark for private information leakage, rather than during training to improve privacy outcomes.
|
| 18 |
+
|
| 19 |
+
In this work, we propose an approach that combines perturbed pre-trained embeddings with a privacy-preserving adversarial training function that helps preserving the encoded semantic links in the input text while obscuring sensitive information. We demonstrate that our approach achieves comparable task performance against a competitive baseline while preserving privacy. We experiment with a dataset that contains personally identifiable information namely gender, location and birth year. To minimize harm, we experiment with a publicly available English-language dataset [@Hovy2015]. Specifically:
|
| 20 |
+
|
| 21 |
+
- We introduce CAPE, \"Context-Aware Private Embeddings\", an approach that applies both DP-compliant perturbations and an adversarial learning objective to privatize the embedding outputs of pre-trained language models.
|
| 22 |
+
|
| 23 |
+
- We establish metrics for testing the privacy result of our system against non-DP-compliant models by offering an empirical framework for determining the level of success of simulated attacks.
|
| 24 |
+
|
| 25 |
+
- We find that attacker inferences demonstrate differing levels of accuracy depending on the type of private attribute targeted.
|
| 26 |
+
|
| 27 |
+
- We establish superior privacy outcomes for our method compared to a sample adversarial learning approach [@coavoux_privacy-preserving_2018] and a perturbation-only method [@Lyu2020perturb] representing the dominant approaches currently applied to other task domains.
|
| 28 |
+
|
| 29 |
+
# Method
|
| 30 |
+
|
| 31 |
+
We consider the possibility that an attacker may have access to the intermediate feature representations extracted from text from a published language model along with a supervision signal that may allow them to train a model to recover private information about the text author, possibly garnered from access to a secondary data source as demonstrated in @Narayanan2008deanon and @Carlini:2020. To mitigate this risk, we introduce a DP-compliant layer to the feature extractor that perturbs the representations by adding calibrated noise. We train a second classifier to predict known private variables in addition to our main target task classifier, then pass the error gradient from the secondary classifier through a reversal layer to promote embedding invariance to the private features. Figure [1](#fig:model){reference-type="ref" reference="fig:model"} shows the system architecture.
|
| 32 |
+
|
| 33 |
+
<figure id="fig:model" data-latex-placement="ht">
|
| 34 |
+
<img src="model_diagram.png" />
|
| 35 |
+
<figcaption>CAPE model diagram. Solid lines indicate data flow, dotted lines indicate gradient updates.</figcaption>
|
| 36 |
+
</figure>
|
| 37 |
+
|
| 38 |
+
We experiment with multi-class sentiment analysis on the UK section of the Trustpilot dataset [@Hovy2015], which provides text reviews with an attached numerical rating from 1-5 as well as three demographic attributes: gender, location and birth year. Sentiment analysis from text reviews represents a popular task to which pre-trained language models are well suited. We use the gender as reported in the dataset, as a binary attribute, while birth years are separated into 6 equal-sized age range bins, and locations are translated from latitude/longitude pairs into Geohash strings with a precision of two characters, which results in 5 potential location classes. Details of the dataset and pre-processing steps can be found in Appendix A.
|
| 39 |
+
|
| 40 |
+
In our initial baseline experiment, we train a feature extraction module consisting of a pre-trained BERT model [@Devlin2019bert] along with two dense layers in order to extract useful features from the input text $x$. We obtain the final hidden state of the pre-trained model for each token in the input, then take a mean average over the sequence to produce an embedding for the full text, such that: $$\begin{equation}
|
| 41 |
+
x_e = f(x)
|
| 42 |
+
\end{equation}$$
|
| 43 |
+
|
| 44 |
+
Sentiment analysis is then carried out by a classifier which learns to predict the review rating label $y$ given the embedding vector. Classifier setup and hyper-parameter details are listed in Appendix B.
|
| 45 |
+
|
| 46 |
+
We simulate a task that an attacker may wish to perform on the input text by training a secondary classifier along with the target task that attempts to predict the value of private information variables $z$. Following @Coavoux2018inference, we target several features of the respondent as extracted from the dataset, namely gender, location, and birth year. These features, while in reality not being private by virtue of being public information provided by users, represent good proxies for sensitive attributes that users may not wish to be inferred from similar public datasets. In this sense, they provide a useful benchmark of the potential privacy risk, while allowing us to avoid unethical inferences concerning private attributes not shared by the user.
|
| 47 |
+
|
| 48 |
+
In order to promote invariance in the text representation with respect to our private variables, we adopt the approach pioneered by @Ganin2017domain. Initially designed to promote domain-independent learning, this system involves training a secondary objective to predict features we do not wish to be distinguishable via gradient descent, then passing the loss through a gradient reversal layer into a target task objective, represented in our experiments by the feature extractor.
|
| 49 |
+
|
| 50 |
+
For a single instance of our data $(x_e, y, z)$ the adversarial classifier optimizes: $$\begin{equation}
|
| 51 |
+
\mathcal{L}_a(x_e,y,z;\theta_a) = -log P(z|x_e;\theta_a)
|
| 52 |
+
\end{equation}$$
|
| 53 |
+
|
| 54 |
+
Hence, the combination of both target and attacker classifiers lead to the following objective function, where $\theta_r$, $\theta_p$, $\theta_a$ represent the parameters of the feature extractor, classifier and adversarial classifier respectively: $$\begin{equation}
|
| 55 |
+
\begin{aligned}
|
| 56 |
+
\mathcal{L}(x_e, y, z; \theta_r, \theta_p,\theta_a) = & -log P(y|x_e;\theta_r, \theta_p)\\
|
| 57 |
+
& -\lambda log P(\neg z|x_e;\theta_a)
|
| 58 |
+
\end{aligned}
|
| 59 |
+
\end{equation}$$
|
| 60 |
+
|
| 61 |
+
where $\neg$ indicates that the log likelihood of the private label $z$ is inverted, and $\lambda$ is the regularization parameter scaling the gradient from our adversarial classifier.
|
| 62 |
+
|
| 63 |
+
Since it is also desirable to provide a measure of general privacy alongside the specific attacker task we simulate in our adversarial training, we adopt the local DP method of @Lyu2020perturb to perturb the feature representations we produce. Converting the generated embedding into a DP-compliant representation requires us to inject calibrated Laplace noise into the hidden state vector obtained from the pre-trained language model as follows: $$\begin{equation}
|
| 64 |
+
\tilde{x}_e = x_e + n
|
| 65 |
+
\end{equation}$$
|
| 66 |
+
|
| 67 |
+
where $n$ is a vector of equal length to $x_e$ containing i.i.d. random variables sampled from the Laplace distribution centred around 0 with a scale defined by $\frac{\Delta f}{\epsilon}$, where $\epsilon$ is the privacy budget parameter and $\Delta f$ is the sensitivity of our function.
|
| 68 |
+
|
| 69 |
+
Since determining the sensitivity of an unbounded embedding function is practically infeasible, we constrain the range of our representation to \[0,1\], as recommended by @Shokri2015dp. In this way, the L1 norm and the sensitivity of our function summed across $n$ dimensions of $x_e$ are the same, i.e. $\Delta f = 1$.
|
| 70 |
+
|
| 71 |
+
::: algorithm
|
| 72 |
+
Extract features from input sequence: $x_e = f(x)$ Normalise representation: $x_e \leftarrow x_e - \min{x_e} / (\max{x_e} - \min{x_e}$) Apply perturbation: $\tilde{x}_e = x_e + Lap(\frac{\Delta f}{\epsilon})$ Train classifiers: $\mathcal{L}(\tilde{x}_e, y, z; \theta_r, \theta_p) = -log P(y|\tilde{x}_e;\theta_r, \theta_p) - \lambda log P(\neg z|\tilde{x}_e;\theta_a)$
|
| 73 |
+
:::
|
| 74 |
+
|
| 75 |
+
To preserve the general privacy benefits of DP-compliant embeddings with invariance to the specific private variable identified for adversarial training, we combine both processes in a system we call Context-Aware Private Embeddings (CAPE). Algorithm [\[alg:CAPE\]](#alg:CAPE){reference-type="ref" reference="alg:CAPE"} presents the joint adversarial training scheme with perturbed embedding sequences derived from our feature extractor.
|
2108.13655/record.json
ADDED
|
@@ -0,0 +1,32 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
{
|
| 2 |
+
"arxiv_id": "2108.13655",
|
| 3 |
+
"month": "2021_08",
|
| 4 |
+
"year": 2022,
|
| 5 |
+
"conference": "ACL",
|
| 6 |
+
"title": "MELM: Data Augmentation with Masked Entity Language Modeling for Low-Resource NER",
|
| 7 |
+
"arxiv_url": "https://arxiv.org/abs/2108.13655",
|
| 8 |
+
"source": {
|
| 9 |
+
"paper_dir": "/home/zling/lzl/ICML2026/Build_Dataset/data/2021_08/main_diagram_database/2108.13655",
|
| 10 |
+
"tex_dir": "/home/zling/lzl/ICML2026/Build_Dataset/data/2021_08/tex_files_extracted/2108.13655",
|
| 11 |
+
"paper_md": "/home/zling/lzl/ICML2026/Build_Dataset/data/2021_08/main_diagram_database/2108.13655/paper_text/paper.md",
|
| 12 |
+
"metadata_json": "/home/zling/lzl/ICML2026/Build_Dataset/data/2021_08/main_diagram_database/2108.13655/metadata.json",
|
| 13 |
+
"intro_method_from": "/home/zling/lzl/ICML2026/Build_Dataset/data/2021_08/main_diagram_database/2108.13655/paper_text/paper.md",
|
| 14 |
+
"intro_method_from_kind": "markdown"
|
| 15 |
+
},
|
| 16 |
+
"files": {
|
| 17 |
+
"main_drawio": "/home/zling/lzl/ICML2026/Build_Dataset/dataset/2108.13655/main_diagram/main_diagram.drawio",
|
| 18 |
+
"main_png": "/home/zling/lzl/ICML2026/Build_Dataset/dataset/2108.13655/main_diagram/main_diagram.png",
|
| 19 |
+
"main_pdf": "/home/zling/lzl/ICML2026/Build_Dataset/dataset/2108.13655/main_diagram/main_diagram.pdf",
|
| 20 |
+
"intro_method_md": "/home/zling/lzl/ICML2026/Build_Dataset/dataset/2108.13655/paper_text/intro_method.md",
|
| 21 |
+
"paper_pdf": "/home/zling/lzl/ICML2026/Build_Dataset/dataset/2108.13655/paper.pdf",
|
| 22 |
+
"latex": "/home/zling/lzl/ICML2026/Build_Dataset/dataset/2108.13655/latex_source"
|
| 23 |
+
},
|
| 24 |
+
"status": {
|
| 25 |
+
"copy_drawio": "exists",
|
| 26 |
+
"copy_png": "exists",
|
| 27 |
+
"diagram_pdf": "pdf_exists",
|
| 28 |
+
"intro_method": "exists",
|
| 29 |
+
"paper_pdf": "exists",
|
| 30 |
+
"latex": "exists"
|
| 31 |
+
}
|
| 32 |
+
}
|
2108.13702/record.json
ADDED
|
@@ -0,0 +1,32 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
{
|
| 2 |
+
"arxiv_id": "2108.13702",
|
| 3 |
+
"month": "2021_08",
|
| 4 |
+
"year": 2021,
|
| 5 |
+
"conference": "ICCV",
|
| 6 |
+
"title": "SemIE: Semantically-Aware Image Extrapolation",
|
| 7 |
+
"arxiv_url": "https://arxiv.org/abs/2108.13702",
|
| 8 |
+
"source": {
|
| 9 |
+
"paper_dir": "/home/zling/lzl/ICML2026/Build_Dataset/data/2021_08/main_diagram_database/2108.13702",
|
| 10 |
+
"tex_dir": "/home/zling/lzl/ICML2026/Build_Dataset/data/2021_08/tex_files_extracted/2108.13702",
|
| 11 |
+
"paper_md": "/home/zling/lzl/ICML2026/Build_Dataset/data/2021_08/main_diagram_database/2108.13702/paper_text/paper.md",
|
| 12 |
+
"metadata_json": "/home/zling/lzl/ICML2026/Build_Dataset/data/2021_08/main_diagram_database/2108.13702/metadata.json",
|
| 13 |
+
"intro_method_from": "/home/zling/lzl/ICML2026/Build_Dataset/data/2021_08/main_diagram_database/2108.13702/paper_text/paper.md",
|
| 14 |
+
"intro_method_from_kind": "markdown"
|
| 15 |
+
},
|
| 16 |
+
"files": {
|
| 17 |
+
"main_drawio": "/home/zling/lzl/ICML2026/Build_Dataset/dataset/2108.13702/main_diagram/main_diagram.drawio",
|
| 18 |
+
"main_png": "/home/zling/lzl/ICML2026/Build_Dataset/dataset/2108.13702/main_diagram/main_diagram.png",
|
| 19 |
+
"main_pdf": "/home/zling/lzl/ICML2026/Build_Dataset/dataset/2108.13702/main_diagram/main_diagram.pdf",
|
| 20 |
+
"intro_method_md": "/home/zling/lzl/ICML2026/Build_Dataset/dataset/2108.13702/paper_text/intro_method.md",
|
| 21 |
+
"paper_pdf": "/home/zling/lzl/ICML2026/Build_Dataset/dataset/2108.13702/paper.pdf",
|
| 22 |
+
"latex": "/home/zling/lzl/ICML2026/Build_Dataset/dataset/2108.13702/latex_source"
|
| 23 |
+
},
|
| 24 |
+
"status": {
|
| 25 |
+
"copy_drawio": "ok",
|
| 26 |
+
"copy_png": "ok",
|
| 27 |
+
"diagram_pdf": "ok",
|
| 28 |
+
"intro_method": "ok",
|
| 29 |
+
"paper_pdf": "ok",
|
| 30 |
+
"latex": "ok"
|
| 31 |
+
}
|
| 32 |
+
}
|
2109.02639/record.json
ADDED
|
@@ -0,0 +1,32 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
{
|
| 2 |
+
"arxiv_id": "2109.02639",
|
| 3 |
+
"month": "2021_09",
|
| 4 |
+
"year": 2021,
|
| 5 |
+
"conference": "NEURIPS",
|
| 6 |
+
"title": "On the Out-of-distribution Generalization of Probabilistic Image Modelling",
|
| 7 |
+
"arxiv_url": "https://arxiv.org/abs/2109.02639",
|
| 8 |
+
"source": {
|
| 9 |
+
"paper_dir": "/home/zling/lzl/ICML2026/Build_Dataset/data/2021_09/main_diagram_database/2109.02639",
|
| 10 |
+
"tex_dir": "/home/zling/lzl/ICML2026/Build_Dataset/data/2021_09/tex_files_extracted/2109.02639",
|
| 11 |
+
"paper_md": "/home/zling/lzl/ICML2026/Build_Dataset/data/2021_09/main_diagram_database/2109.02639/paper_text/paper.md",
|
| 12 |
+
"metadata_json": "/home/zling/lzl/ICML2026/Build_Dataset/data/2021_09/main_diagram_database/2109.02639/metadata.json",
|
| 13 |
+
"intro_method_from": "/home/zling/lzl/ICML2026/Build_Dataset/data/2021_09/main_diagram_database/2109.02639/paper_text/paper.md",
|
| 14 |
+
"intro_method_from_kind": "markdown"
|
| 15 |
+
},
|
| 16 |
+
"files": {
|
| 17 |
+
"main_drawio": "/home/zling/lzl/ICML2026/Build_Dataset/dataset/2109.02639/main_diagram/main_diagram.drawio",
|
| 18 |
+
"main_png": "/home/zling/lzl/ICML2026/Build_Dataset/dataset/2109.02639/main_diagram/main_diagram.png",
|
| 19 |
+
"main_pdf": "/home/zling/lzl/ICML2026/Build_Dataset/dataset/2109.02639/main_diagram/main_diagram.pdf",
|
| 20 |
+
"intro_method_md": "/home/zling/lzl/ICML2026/Build_Dataset/dataset/2109.02639/paper_text/intro_method.md",
|
| 21 |
+
"paper_pdf": "/home/zling/lzl/ICML2026/Build_Dataset/dataset/2109.02639/paper.pdf",
|
| 22 |
+
"latex": "/home/zling/lzl/ICML2026/Build_Dataset/dataset/2109.02639/latex_source"
|
| 23 |
+
},
|
| 24 |
+
"status": {
|
| 25 |
+
"copy_drawio": "exists",
|
| 26 |
+
"copy_png": "exists",
|
| 27 |
+
"diagram_pdf": "pdf_exists",
|
| 28 |
+
"intro_method": "exists",
|
| 29 |
+
"paper_pdf": "exists",
|
| 30 |
+
"latex": "exists"
|
| 31 |
+
}
|
| 32 |
+
}
|
2109.06253/record.json
ADDED
|
@@ -0,0 +1,32 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
{
|
| 2 |
+
"arxiv_id": "2109.06253",
|
| 3 |
+
"month": "2021_09",
|
| 4 |
+
"year": 2021,
|
| 5 |
+
"conference": "EMNLP",
|
| 6 |
+
"title": "Multi-Sentence Resampling: A Simple Approach to Alleviate Dataset Length Bias and Beam-Search Degradation",
|
| 7 |
+
"arxiv_url": "https://arxiv.org/abs/2109.06253",
|
| 8 |
+
"source": {
|
| 9 |
+
"paper_dir": "/home/zling/lzl/ICML2026/Build_Dataset/data/2021_09/main_diagram_database/2109.06253",
|
| 10 |
+
"tex_dir": "/home/zling/lzl/ICML2026/Build_Dataset/data/2021_09/tex_files_extracted/2109.06253",
|
| 11 |
+
"paper_md": "/home/zling/lzl/ICML2026/Build_Dataset/data/2021_09/main_diagram_database/2109.06253/paper_text/paper.md",
|
| 12 |
+
"metadata_json": "/home/zling/lzl/ICML2026/Build_Dataset/data/2021_09/main_diagram_database/2109.06253/metadata.json",
|
| 13 |
+
"intro_method_from": "/home/zling/lzl/ICML2026/Build_Dataset/data/2021_09/main_diagram_database/2109.06253/paper_text/paper.md",
|
| 14 |
+
"intro_method_from_kind": "markdown"
|
| 15 |
+
},
|
| 16 |
+
"files": {
|
| 17 |
+
"main_drawio": "/home/zling/lzl/ICML2026/Build_Dataset/dataset/2109.06253/main_diagram/main_diagram.drawio",
|
| 18 |
+
"main_png": "/home/zling/lzl/ICML2026/Build_Dataset/dataset/2109.06253/main_diagram/main_diagram.png",
|
| 19 |
+
"main_pdf": "/home/zling/lzl/ICML2026/Build_Dataset/dataset/2109.06253/main_diagram/main_diagram.pdf",
|
| 20 |
+
"intro_method_md": "/home/zling/lzl/ICML2026/Build_Dataset/dataset/2109.06253/paper_text/intro_method.md",
|
| 21 |
+
"paper_pdf": "/home/zling/lzl/ICML2026/Build_Dataset/dataset/2109.06253/paper.pdf",
|
| 22 |
+
"latex": "/home/zling/lzl/ICML2026/Build_Dataset/dataset/2109.06253/latex_source"
|
| 23 |
+
},
|
| 24 |
+
"status": {
|
| 25 |
+
"copy_drawio": "exists",
|
| 26 |
+
"copy_png": "exists",
|
| 27 |
+
"diagram_pdf": "pdf_exists",
|
| 28 |
+
"intro_method": "exists",
|
| 29 |
+
"paper_pdf": "exists",
|
| 30 |
+
"latex": "exists"
|
| 31 |
+
}
|
| 32 |
+
}
|
2109.08303/main_diagram/main_diagram.drawio
ADDED
|
@@ -0,0 +1 @@
|
|
|
|
|
|
|
| 1 |
+
<mxfile host="app.diagrams.net" modified="2021-08-10T10:34:56.559Z" agent="5.0 (X11)" etag="_6RA679k5hQIpkTwY2BO" version="14.6.13" type="device"><diagram id="9hD4JQZsQDOtO1FcHYT8" name="Page-1">7VlNc9owEP01nkkPYWwZMByJk6YzTWcywyFJb8JWbDWyRYUIkF/flS1/GzApCUmnJ7xPu6v1Pu1qAcN2o/W1wPPwB/cJM5Dprw370kDIsgcmfChkkyLO2E6BQFBfKxXAlL4QDWq7YEl9sqgoSs6ZpPMq6PE4Jp6sYFgIvqqqPXJW3XWOA9IAph5mTfSO+jJM0RFyCvwboUGY7WwNx+lKhDNl/SaLEPt8VYLsK8N2BecyfYrWLmEqeVleUruvW1bzwASJZReDJ3c0m5xzfHNDB0vxe/zdefHPNRkLuclemPjw/lrkQoY84DFmVwV6Ifgy9onyaoJU6NxwPgfQAvAXkXKjycRLyQEKZcT0KllTea/MewMtPZRWLtfacyJsMiGWYlMyUuJDea0wS6SK3S0RNCKSCA2mL63edGsuNbTgS+GRHQns6zOJRUDkDj2roBxqhXAIR2zAUBCGJX2uBoL1oQ1yvYJXeNDUHkCzjvIZs6Xe6TplUQAKRbsEYusHoUrzKqSSTOc4ScYKar1KqfZPhCTr3TltpkAb2Fmh6E5hDbW8KurOynTCUs31zTfK2uBfL44j1sGwax1sOQTvUwfDRh1MOYNT+0FLwD55CTifpgRAqLf5D1AWo45l0T9lVYwaVWGgIYNwL3z6DI+BesRn1pee2v0Mqc90HbYrqbRYzc7uDeQ+fAFDw3HVnFYC7Ml54vAeJDd5UnjqYybqXvfupf0VW6b73cHDzyT07r5rZ35P5cNkN1cr0TpQQ3BvhhfU6/ncW0bqwByhL5jVvtBvaQvDlrYwequ2MP7fFv6qLWTfbvb2heEp+4LVPynNJZILyl9Bs1UhueD8A9Fs2Sfl2exyAaTfGEDtVnDoc9En7Z2nb56W1SXd6U8p9qT79efAbQuuXUtffamEcglV1lC65ly++lZ87+k4ZyS7BVuYROZ7TseWfaTJqSAuH5MKIBuTEu60szplBbu5A9RwgHY6yA5LJQLUFgHaHgGqR4DaImh10IKAWfVIZ2lpM8+VUVdldIhndIjnaszokJj3KKNDPKNDPDfz3E7TljzvUG7meY9yZ8/NPHeOeY9yM8+dY86VzUbLhOYna/efFPyJuJxxNbfEPFYz1CNlrAZhRoMYRA+apRpxLlQrpR5mE70QUd9PBrC2Rlxt1UfoxePapeq0XKr9llaMDm/FIBY/kidrpb8a7Ks/</diagram></mxfile>
|
2109.08303/main_diagram/main_diagram.pdf
ADDED
|
Binary file (11 kB). View file
|
|
|
2109.08303/paper_text/intro_method.md
ADDED
|
@@ -0,0 +1,80 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Introduction
|
| 2 |
+
|
| 3 |
+
An ASP program $\pi$ is a set of rules of the form:
|
| 4 |
+
|
| 5 |
+
::: center
|
| 6 |
+
$h_1\ |\ldots|\ h_n :- b_1,\ldots,b_m.$
|
| 7 |
+
:::
|
| 8 |
+
|
| 9 |
+
where $n+m>0$, $h_1|\ldots|h_n$ is a disjunction of atoms referred to as *head*, and $b_1,\ldots,b_m$ is a conjunction of literals referred to as *body*. If $n=0$, then the rule is called *constraint*, whereas if $m=0$ the rule is called *fact*.
|
| 10 |
+
|
| 11 |
+
An atom $a$ is an expression of the form $p(t_1,\ldots,t_k)$ where $p$ is a predicate of arity $k$ and $t_1,\ldots,t_k$ are *terms*. A term is an alphanumeric string that could be either a *variable* or a *constant*. According to Prolog notation, if a term starts with a capital letter is a *variable* otherwise is *constant*. If $\forall{i \in \{1,\ldots,k\} }$, $t_i$ is a constant then the atom $a$ is said *ground*. A *literal* is an atom $a$ or its negation $\sim a$ where $\sim$ denotes the *negation as failure*. Given a literal $l$ it is said *positive* if $l=a$, *negative* if $l=\sim a$. Given a positive literal $l = a$, we define the *complement*, $\overline{l} = \sim a$, instead, for a negative literal $l = \sim a$, $\overline{l} = \overline{\sim a} = a$. ASP supports also *aggregate atoms*. An aggregate atom is of the form $f (S) \succ T$, where $f (S)$ is an aggregate function, $\succ \in \{=, <,\leq, >,\geq\}$ is a predefined comparison operator, and T is a term referred to as guard. An aggregate function is of the form $f (S)$, where $S$ is a set term and $f \in \{\#count, \#sum\}$ is an aggregate function symbol. A set term $S$ is a pair that is either a symbolic set or a ground set. A symbolic set is a pair $\{\mathit{Terms} : \mathit{Conj}\}$, where *Terms* is a list of variables and *Conj* is a conjunction of *standard* atoms, that is, *Conj* does not contain aggregate atoms. A ground set, instead, is a set of pairs of the form $(\overline{t} : \mathit{conj})$, where $\overline{t}$ is a list of constants and *conj* is a conjunction of ground atoms. Given a program $\pi$ we define the positive dependency graph $G_{\pi} = <V,E>$ as a directed graph where $V=\{p : p$ is a predicate term appearing in $\pi\}$ and $E=\{(u,v) : u \in V, v \in V$ and $\exists{r \in \pi}$ s.t. $\exists$ a positive literal $l$ and an atom $a$ s.t. 
$l$ is of the form $u(t_1,...,t_k)$ and $l$ appears in the body of $r$, and $a$ is of the form $v(t_1,...,t_n)$ and $a$ appears in the head of $r\}$. $\pi$ is said to be recursive if $G_{\pi}$ is a cyclic graph. Given a program $\pi$, we define $U_{\pi}$, the *Herbrand Universe*, as the set of all constants appearing in $\pi$ and $B_{\pi}$, the *Herbrand Base*, as the set of all possible ground atoms that can be built using predicate in $\pi$ and constants in $U_{\pi}$. $\mathcal{B}$ denotes $B_{\pi} \cup \overline{B_{\pi}}$. Given a rule $r$ and the Herbrand Universe $U_{\pi}$, we define $ground(r)$ as the set of all possible instantiations of $r$ that can be built by assigning variables in $r$ to constant in $U_{\pi}$. Given a program $\pi$, instead, $ground(\pi)=\bigcup_{r \in \pi} ground(r)$. An interpretation $I$ is a set of literals. In particular, $I$ is total if $\forall{a \in B_{\pi}} (a \in I \vee \sim a \in I) \wedge \,\, (a \in I \rightarrow \sim a \notin I)$. A literal $l$ is true w.r.t $I$ if $l \in I$, otherwise it is false. A ground conjunction *conj* of atoms is true w.r.t $I$ if all atoms in *conj* are true, otherwise, if at least one atom is false then *conj* is false w.r.t. $I$. Let $I(S)$ denote the multiset $[ t_1 | (t_1 ,\ldots , t_n) : \mathit{conj} \in S \wedge \mathit{conj}$ is true w.r.t. $I ]$. The evaluation $I(f (S))$ of an aggregate function $f (S)$ w.r.t. I is the result of the application of $f$ on $I(S)$.
|
| 12 |
+
|
| 13 |
+
::: example
|
| 14 |
+
**Example 1**. *Let $A$ be an aggregate atom $A=\#count\{(1:p(1,1)) , (2:p(2,1)),(3:p(3,1))\}>1$ and let $I=\{p(1,1),p(2,1),\sim p(3,1)\}$. $I(S)=[1,2]$, $I(f(S)) = 2$ since $f=\#count$ so the aggregate atom $A$ is true w.r.t. $I$.*
|
| 15 |
+
:::
|
| 16 |
+
|
| 17 |
+
An interpretation $I$ is a *model* for $\pi$ if $\forall{\,r\in ground(\pi) (\forall{\,l \in body(r), l \in I}) \rightarrow (\exists{a \in head(r)}: a \in I})$. The *FLP-reduct* of $\pi$, denoted by $\pi^I$, is the set of rules obtained from $\pi$ by deleting those rules whose body is false w.r.t $I$. Let $I$ be a model for $\pi$, $I$ is a *stable model* for $\pi$ if there is no $I' \subset I$ such that $I'$ is a model for $\pi^I$. Given a program $\pi$, $\pi$ is *coherent* if it admits at least one stable model otherwise is *incoherent*.
|
| 18 |
+
|
| 19 |
+
In order to solve the grounding bottleneck problem, several attempts have been made [@DBLP:conf/ijcai/GebserLMPRS18], including language extensions (such as constraint programming [@DBLP:journals/tplp/OstrowskiS12; @DBLP:journals/tplp/BalducciniL17], difference logic [@DBLP:conf/iclp/GebserKKOSW16; @DBLP:conf/iclp/SusmanL16]) and *lazy grounding* techniques [@DBLP:journals/fuin/PaluDPR09; @DBLP:conf/lpnmr/LefevreN09a; @Weinzierl2017]. Hybrid formalisms are efficiently evaluated by coupling an ASP system with a solver for the other theory, thus circumventing the grounding bottleneck. Lazy grounding implementations instantiate a rule only when its body is satisfied to prevent the grounding of rules which are unnecessary during the search of an answer set. Albeit lazy grounding techniques obtained good preliminary results, their performance is still not competitive with state-of-the-art systems [@DBLP:conf/ijcai/GebserLMPRS18]. Lazy grounding has been also extended to support aggregates [@DBLP:conf/aaai/BomansonJW19]. To the best of my knowledge, this normalization strategy is limited to monotone aggregates with a lower bound. This approach turns out to be very promising, outperforming the ground-and-solve system [Clingo]{.smallcaps} on benchmarks where the grounding is really hard.
|
| 20 |
+
|
| 21 |
+
Another existing approach is proposed by Cuteri et al. [@DBLP:journals/tplp/CuteriDRS17; @DBLP:journals/tplp/CuteriDRS19] and is based on the compilation of constraints into a dedicated procedure, called *propagator*, that supports the solver in the model computation process. There exists two different implementations of this approach, namely *lazy* and *eager*. In more details, given an ASP program $P$ and a set of constraint $C$ such that $C \subseteq P$, if the program $P \setminus C$ is unsatisfiable then the original program is also unsatisfiable. Otherwise, if $P \setminus C$ admits at least one model $I$ and constraints in $C$ are satisfied w.r.t. $I$, then $I$ is a model for the original program. Starting from this description the *lazy* implementation build a procedure that automatically understands if a candidate model $I$ satisfies constraints in $C$. If some of these constraints are not satisfied then they will be lazily instantiated in the solver and the model computations starts again, otherwise the process stops returning the model $I$. The *eager* implementation is based on a different approach. Indeed, in this case the propagator procedure is completely involved in the model computation process. Every time that the solver assign a truth value to a literal, either true or false, the propagator is notified and it simulates the instantiation of the constraints in $C$ without storing it in memory, and if it is possible it makes some inferences on the truth values of the literals that have not been assigned yet, in order to prevent constraints failure. Results obtained with this compilation-based approach are very promising but currently the eager approach supports only simple constraints without aggregates.
|
| 22 |
+
|
| 23 |
+
<figure id="Fig:Data2" data-latex-placement="!htb">
|
| 24 |
+
<div class="minipage">
|
| 25 |
+
<img src="GroundAndSolve.png" />
|
| 26 |
+
</div>
|
| 27 |
+
<div class="minipage">
|
| 28 |
+
<img src="CompilationApproach.png" />
|
| 29 |
+
</div>
|
| 30 |
+
<figcaption>Solving using compilation based approach</figcaption>
|
| 31 |
+
</figure>
|
| 32 |
+
|
| 33 |
+
Approaches based on compilation of constraints have proved to be very promising, outperforming traditional systems in many comparisons. However, a significant number of problems, especially hard combinatorial problems from ASP competitions, feature aggregates that are not yet supported by compilation-based approaches. Aggregates are among the standardized knowledge modeling constructs that make ASP effective in representing complex problems. So, the extension of the existing compilation approach is an interesting research topic in order to extend the benefits of the compilation to all those ASP programs that cannot be solved by traditional approaches. For this reason, we decided to tackle this problem starting from the compilation of constraints containing aggregates. In particular, a first attempt has been proposed in the paper [@DBLP:conf/cilc/MazzottaCDR20]. In that work, we presented an extension of the eager compilation for constraints containing *#count* aggregates and promising results have been obtained. The next natural step is to extend the compilation approach also to *#sum* aggregate. Moreover, we also consider to extend the compilation approach also to rules, whereas now it is limited to constraints.
|
| 34 |
+
|
| 35 |
+
Therefore, our research goals are as follows:
|
| 36 |
+
|
| 37 |
+
- Extend the eager compilation approach also to *#sum* aggregates.
|
| 38 |
+
|
| 39 |
+
- Support the eager compilation of rules without recursion.
|
| 40 |
+
|
| 41 |
+
- Support the eager compilation of simple programs containing recursive rules.
|
| 42 |
+
|
| 43 |
+
- Support, also, the compilation of disjunctive and choice rules in order to provide a complete compilation of ASP program, providing a novel approach to compute stable models of an ASP program.
|
| 44 |
+
|
| 45 |
+
In principle it can be also combined with the lazy normalization [@DBLP:conf/aaai/BomansonJW19]. However this would not be a trivial combination and might be subject of future research once the compilation approach will be extended to a larger class of programs.
|
| 46 |
+
|
| 47 |
+
Up to now we have extended the compilation to constraints containing *#count* aggregates. Moreover, we currently have a preliminary implementation of a technique that is able to compile constraints containing also *#sum* aggregates, whose performance is still not satisfactory. It turns out that compiling *#sum* aggregates requires several additional optimization techniques and dedicated data structures.
|
| 48 |
+
|
| 49 |
+
In order to show the working principles of our approach in presence of *#count* aggregates, we present the following example.
|
| 50 |
+
|
| 51 |
+
::: example
|
| 52 |
+
**Example 2**. *Let $:- a(X,Z), c(Z), \#count\{Y:b(X,Y)\} >= 2$ be a constraint, then there are several possible propagation steps that can be done. Note that a propagation step is done to avoid possible violations of the constraint.*
|
| 53 |
+
|
| 54 |
+
- ***Aggregate propagation.** Let $I = \{a(1,1),\ c(1)\}$ be an interpretation and assume that the solver assigns $b(1,1)$ to true. Starting from $b(1,1)$ it is possible to propagate all the undefined values of the form $b(1,\_)$ to false, since if $b(1,2)$ becomes true the count returns 2 that is greater than or equal to 2 and thus the constraint is violated.*
|
| 55 |
+
|
| 56 |
+
- ***Body literal propagation.** Let $I = \{b(1,1),\ b(1,2)\}$ be an interpretation and assume that the solver assigns $a(1,1)$ to true. Note that $I$ satisfies the aggregate, therefore starting from $a(1,1)$ it is possible to propagate $c(1)$ to false because if $c(1)$ becomes true then the constraint is violated.*
|
| 57 |
+
|
| 58 |
+
*In general, the propagator starts building all possible instantiations of the constraint and looks for undefined values that could be propagated. If there is a constraint instantiation such that each body literal is true, then we have to check if the count has reached the aggregate's guard minus one. In this case, we can propagate the aggregate body to false. Note that in this simple case we can just propagate undefined values of $b$ but if the aggregate body is more complex propagation is not so simple. Since literals in the aggregate are in conjunction, in order to propagate a conjunction as false we need only one literal false and then for each possible conjunction we can propagate the last undefined literal of that conjunction. On the other hand, if there is a constraint instantiation such that exactly one literal is undefined and the aggregate is true then the undefined literal could be propagated as false. So, the propagator procedure is a complex and custom procedure that stores partial interpretation and implements optimized join techniques in order to build constraint instantiation.*
|
| 59 |
+
:::
|
| 60 |
+
|
| 61 |
+
In Algorithm [\[alg:compileConstraintWithAggregate\]](#alg:compileConstraintWithAggregate){reference-type="ref" reference="alg:compileConstraintWithAggregate"}, we report a simplified pseudo code propagator for the constraint described in the example. In particular, here we focus only on one propagation case. The complete propagator is based on several procedures, one for every literal in the body of the constraint or in the aggregate body, whose algorithms are similar to Algorithm [\[alg:compileConstraintWithAggregate\]](#alg:compileConstraintWithAggregate){reference-type="ref" reference="alg:compileConstraintWithAggregate"}.
|
| 62 |
+
|
| 63 |
+
In order to better understand Algorithm [\[alg:compileConstraintWithAggregate\]](#alg:compileConstraintWithAggregate){reference-type="ref" reference="alg:compileConstraintWithAggregate"}, let us introduce some utility functions. Let $l$ be a ground literal of the form $a(1,2)$; we define:
|
| 64 |
+
|
| 65 |
+
- *getPredicateName()* returns the predicate name of *l*, e.g., \"a\"
|
| 66 |
+
|
| 67 |
+
- *getTermAt(integer i)* returns the i-th term of *l*, e.g., *l*.getTermAt(0) returns 1
|
| 68 |
+
|
| 69 |
+
::: algorithm
|
| 70 |
+
:::
|
| 71 |
+
|
| 72 |
+
The goal of the proposed procedure is to simulate the grounding of the constraint after a literal becomes true. In particular, assume that $a(1,2)$ becomes true; then the procedure is invoked passing $a(1,2)$. So X will be assigned to 1 and Z to 2 (lines 2 and 3, respectively). Afterward, true and undefined values of $c$ matching Z are searched. If the procedure finds a true value of $c$ matching Z then it evaluates the aggregate propagation. If the aggregate is also true then a conflict is found. Otherwise, if the aggregate's guard -1 has been reached the procedure makes a propagation to ensure that the aggregate is made false. On the other hand, if the procedure finds an undefined value of $c$, then it can be propagated as false only when the aggregate is true.
|
| 73 |
+
|
| 74 |
+
# Method
|
| 75 |
+
|
| 76 |
+
We started from the baseline system presented in [@DBLP:conf/ijcai/CuteriDRS20]. The approach has been extended to support the compilation of the propagation of aggregates. In particular, the compiler has been implemented in C++, and its output, that is the propagator procedure itself, is also C++ code compliant to the [wasp]{.smallcaps} propagator interface, and is loaded in the ASP solver as a C++ dynamic library. The source code is not available yet, since we are in a development stage and we are working on a more robust and stable version.
|
| 77 |
+
|
| 78 |
+
We carried out an experimental evaluation to empirically assess the performance gain of the proposed approach w.r.t. the base solver [wasp]{.smallcaps}. Specifically, we considered two hard benchmarks of the ASP competitions [@DBLP:journals/ai/CalimeriGMR16], namely *Combined Configuration* and *Abstract Dialectical Frameworks*, featuring some constraints containing *#count* aggregates.
|
| 79 |
+
|
| 80 |
+
In *Combined Configuration*, the problem is to configure an artifact by combining several components in order to achieve some goals; whereas in *Abstract Dialectical Frameworks* the problem is to find all statements which are necessarily accepted or rejected in a given abstract argumentation framework. In both benchmarks we compile all constraints with aggregates supported by our implementation (i.e., constraints with exactly one *#count* aggregate). The experiments were run on an Intel Xeon CPU E7-8880 v4 2.20GHz, time and memory were limited to 10 minutes and 4 GB, respectively.
|
2109.09166/record.json
ADDED
|
@@ -0,0 +1,32 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
{
|
| 2 |
+
"arxiv_id": "2109.09166",
|
| 3 |
+
"month": "2021_09",
|
| 4 |
+
"year": 2021,
|
| 5 |
+
"conference": "ICCV",
|
| 6 |
+
"title": "Unsupervised 3D Pose Estimation for Hierarchical Dance Video Recognition",
|
| 7 |
+
"arxiv_url": "https://arxiv.org/abs/2109.09166",
|
| 8 |
+
"source": {
|
| 9 |
+
"paper_dir": "/home/zling/lzl/ICML2026/Build_Dataset/data/2021_09/main_diagram_database/2109.09166",
|
| 10 |
+
"tex_dir": "/home/zling/lzl/ICML2026/Build_Dataset/data/2021_09/tex_files_extracted/2109.09166",
|
| 11 |
+
"paper_md": "/home/zling/lzl/ICML2026/Build_Dataset/data/2021_09/main_diagram_database/2109.09166/paper_text/paper.md",
|
| 12 |
+
"metadata_json": "/home/zling/lzl/ICML2026/Build_Dataset/data/2021_09/main_diagram_database/2109.09166/metadata.json",
|
| 13 |
+
"intro_method_from": "/home/zling/lzl/ICML2026/Build_Dataset/data/2021_09/main_diagram_database/2109.09166/paper_text/paper.md",
|
| 14 |
+
"intro_method_from_kind": "markdown"
|
| 15 |
+
},
|
| 16 |
+
"files": {
|
| 17 |
+
"main_drawio": "/home/zling/lzl/ICML2026/Build_Dataset/dataset/2109.09166/main_diagram/main_diagram.drawio",
|
| 18 |
+
"main_png": "/home/zling/lzl/ICML2026/Build_Dataset/dataset/2109.09166/main_diagram/main_diagram.png",
|
| 19 |
+
"main_pdf": "/home/zling/lzl/ICML2026/Build_Dataset/dataset/2109.09166/main_diagram/main_diagram.pdf",
|
| 20 |
+
"intro_method_md": "/home/zling/lzl/ICML2026/Build_Dataset/dataset/2109.09166/paper_text/intro_method.md",
|
| 21 |
+
"paper_pdf": "/home/zling/lzl/ICML2026/Build_Dataset/dataset/2109.09166/paper.pdf",
|
| 22 |
+
"latex": "/home/zling/lzl/ICML2026/Build_Dataset/dataset/2109.09166/latex_source"
|
| 23 |
+
},
|
| 24 |
+
"status": {
|
| 25 |
+
"copy_drawio": "exists",
|
| 26 |
+
"copy_png": "exists",
|
| 27 |
+
"diagram_pdf": "pdf_exists",
|
| 28 |
+
"intro_method": "exists",
|
| 29 |
+
"paper_pdf": "exists",
|
| 30 |
+
"latex": "exists"
|
| 31 |
+
}
|
| 32 |
+
}
|
2109.11171/main_diagram/main_diagram.drawio
ADDED
|
@@ -0,0 +1 @@
|
|
|
|
|
|
|
| 1 |
+
<mxfile host="app.diagrams.net" modified="2021-05-16T19:07:29.381Z" agent="5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/90.0.4430.212 Safari/537.36" etag="gdSc5wA-5S_OZCqhF5yU" version="14.6.13" type="device"><diagram id="nGpJgXpXvY08ustpBEML" name="Page-1">7V1tk6I4EP41flyL8O7HmdG5q9rbra2dvbrbj4xEpQ7FRbxx7tdfEFBIMmMGDQnabtWshJjA050n3Z0mDKyH5e63NFgvviQhjgemEe4G1nhgmr7rkL95wWtRYCOjKJinUVgUoWPBU/QfLguratsoxJtGxSxJ4ixaNwunyWqFp1mjLEjT5KVZbZbEzV7Xwbzs0TgWPE2DGDPV/orCbFHelukdy3/H0XxR9YzcUXFmGVSVyyY2iyBMXmp9WZOB9ZAmSVZ8W+4ecJxjV+FSXNDjG2cPF5biVSbyg8nkm42SP798vsPfJ3er+PP219dPyPWLdv4N4m15y+XlZq8VBmmyXYU4b8YYWPcviyjDT+tgmp99IUInZYtsGZMjRL7Oojh+SOIk3f/Wwih0sJeXJ6vsqWwzr7fJ0uQfXNVcJSty4r68FpxmePfmbaIDeETpcLLEWfpKqpQ/cNxSkKXCWdXxy1F8dlm0qEmuKgtKhZkfWj5iSr6UsH4AYte7NoRNCmFTMcLo6iC2/SbEpq8YYufaADYogA3lOnx1ENsUxLZqIjYlIlzi1gT38lyrXE89u38g0oNduUngyTS6ZIFo6waiTMKUBaKvGYgu6h+I2ln4zqj/ICq3MD2rfyBqZ6Z7PTRxGDtROYg95ETGn1ENotNDE4fhROUeSw8nFoYTVYPoGT0EUTvfuY+zMw2iat9ZYF45gVqwWReLHLNolyOtZGrhwHgoq+N4KLz8kO6hMtK0qFoZr2F+Vo1hH6dnejSrnllQH6dnOqCofAWMN7W4cVbeOzkzK+Aipb+2+Yrz/Y9oiTfkxFf8Qv5+T5bB6niSfJuX/+8b2azzszWRVBXz1j9t9qv2d6QCctc7tpVN1Qy5t6IlqvXtM11UXvXpDg1eh6jWYdFSs/X8Otg+N9t168tAvMv4IXIZTJ/nYZ1JxJp7k+2wZipTI56Mzaw5rLnLVJyxHsTRfEUOp2QsY1J+n4/0aBrEd+WJZRSGeTdcHmkyzcV5A1Hki3imFM+SksUb7Aw2HA5vRBqORUvDGzpC8nAkicNmvawbFoejXBzsWucNi2OkXBys23e74rAs5eJgPcgbEodHi0NsJj9OMZePuLOO1O3Iw3Z0Gx4O65LdkDhG+g2PW7ataMfDstDQFBsfMkUCE3pNJJ5qxnJ7GIljcoFUR+LcHuZHMkmmqmPCUnOhbwZE9wpAVJ3GIvXxqI5AVJ4f6fVxnYxOd1Y9sfg9nJ2ZdGflIPImFlgng3Uy+etkZiusb3SdjIrmKF8n83m2FPAG8IZ83rCAN1qHHdXzBi9TEXgDeEM+b3wF3mi9mqeeN3hhh9vljXa2M/AG5PN1nM/nqOYNXqQNeAN4A+IbWvEGHd9QzRuV39RT3hBt/SRJ8EcK0AGELToNWyinA3icCMwICFvozhtM2EI5b5jAG2fPgcAbELboOGwxUs0bvN1FgDeANyBsoRVv0GEL5bzBS7YG3gDegPiGVrxBxzeU8wakgQJvQHxDd95g4hvKeQPSQM/XZeANiG90/LSzp5o3IA0UeAPiG9rzBr2JiHLegDRQ4A2Ib2jPG/RuN8p5A9JAgTcgvqE7bzCbyKnmDVTtay1zkwFGIhenY2p3bsTZiYX7wgKJe5xLePXDCRzpITIwrWffyU3cplRI+ePj2B6bh3EnHfyO95fnJSVdJ/a2dtgLvDbmOrCnN93RAHve4vhVYk9
vM6MB9gIbmF0H9vQGfBpgD4s09Q5h4aD9wgFSnVCNDPDkISp1maiUemWGtxRBqKStmeFpp8ysW3m7m1cj5e9qQaiDN5RKD11ZLV9ZKu9Vmwhp4Ub65rPluqwrM7l7fJyMLuPKtAVf4lv9tHAjlWCv3I1EAttnXyn2qjfdRkjgBb/Xib3yHZIREtjx/EqxV71POuK8jlEjB0nmWjKErs5VZt22TEZVy6DMkIh5rjIr9/ZNrUNXnSszxGHPUGbVTzGhyqK7ydAVLQ49XoWHTK23wOicYCA2foZGK08jNAVihq1jscQLnDn5P55YTjqW7v5T1qyVF5+D0pwlEETno3CcepcnEF+eRLR+4v1+8v3HCYa5zaGNBHaG5K8HSFMkmW8VzIe2P8XTaauhfUh3ugTwLTNBkETk9X4YLBfTivQPz0Vryya0IyK8uChLpS29veo0WsenFPo87eUMC/RBzQWFPhq+tsnJQuhYpWWmIZBJLnSwH9qt5sfDmoqE+dESDDe78oCX7HOEAfZn7QwTd+rj55kc4HmLtd06e5bWrsXAuX+afBs4Y/AvBCwCwZV/icqkdfI+KNNHZmP1yiSQRnKFUwIP+I6nBIEckrOCBLOZ2TJIELrPriPJCOLF+boFvhpf2tLnwx9PQJ9C9MlTpm7p0760K6PnE4wmHa1XH+qzL+3L6JmARyNvGf7QE1uKledH2gI515tFsM6/zmK8u0vT5IXggVdh+XU8jYPNJpo2RVDdkD1Ebv3jFb992nPn2Bq6hm/VPjZHWG+thr292FXcAQ7n+KOiqsnB4cihKktxHGTRv83mecIpe/iWRPvpqFIdOgWZlu8m2aZTXP7qKGK2IXrLWLqhLEjnOGMaIqILXmvV1nmFzTsX/EY/R9UrWjwq4gHTM3TTMU/r5ik9JAJPX//OaWPoVIc/SxbZH4x3jaPX6mgXZbWfkaOftTPHH+UH1W84U+RpHSR3sxe2yNRUSPO9moZSvWbmFjp3WFivqYaYaf9Cem3SD42axrvXRecenKpv09tkmJ2MG62Ta2BLAI2tcmYEK8/ec7SOt8JDAj1SZssUfWxXojoLJJSI2zQNi+Zo4HzIpkGKbBpb1KYpHslVZtOMjKFR+6AmP7a13G36kXKrGwvHGp2wcMzz6pvWB+t3YxEJhMb18nIPIZ/+ermm7b4zcphn+EVHjul9qNlLjSPP5fYjWW95T0hqrbeHgNm16m0t6+aymstpWJLuHnuSq72G3imcXZvuZkemu84mOf3OCYkbG5HDNMkFcNRpcoeLL0mI8xr/Aw==</diagram></mxfile>
|
2109.11171/main_diagram/main_diagram.pdf
ADDED
|
Binary file (13.9 kB). View file
|
|
|
2109.11171/paper_text/intro_method.md
ADDED
|
@@ -0,0 +1,102 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Introduction
|
| 2 |
+
|
| 3 |
+
Information extraction refers to the task of automatically extracting structured information from unstructured resources, benefiting a wide range of applications such as information retrieval and knowledge base population. Information extraction covers a great variety of tasks in natural language processing (NLP), such as open information extraction and relation classification. For example, given a sentence "Born in Glasgow, Fisher is a graduate of the London Opera Centre", open information extraction seeks to extract (Fisher; Born in; Glasgow), and "city\_of\_birth" is predicted as the
|
| 4 |
+
|
| 5 |
+
relation between a given pair of entities "Fisher" and "Glasgow" for relation classification.
|
| 6 |
+
|
| 7 |
+
Most current approaches design task-specific pipelines for different information extraction tasks. Yet, this presents two limitations for information extraction. First, since most of the approaches employ a task-specific model, it is difficult to leverage a single pipeline to solve many tasks or adapt a model trained on one task to another without changing any task-specific modules. Second, those supervised state-of-the-arts are trained on task-specific corpora to predict from a fixed set of task-specific categories, which restricts their usability since additional labeled data is needed to specify any other classes. Such task-specific labeled data is scarce in information extraction. For example, the largest training set for open information extraction contains only 3,200 sentences [\(Stanovsky et al.,](#page-9-0) [2018\)](#page-9-0). Motivated by this, we aim to solve information extraction tasks within the same framework in a task-agnostic setting.
|
| 8 |
+
|
| 9 |
+
In this paper, we propose a unified framework for information extraction. The basic idea is to treat every information extraction problem as a "text-to-triple" problem, i.e., translating input text to output triples. We successfully apply our framework to three information extraction tasks, greatly improving zero-shot performance on many datasets and sometimes even reaching competitiveness with the current state-of-the-art fully supervised approaches. Figure [1](#page-1-0) shows how different information extraction tasks are handled within our framework. The framework encodes task priors in the input text and decodes the output triples to finally produce task predictions. We achieve this by leveraging the same translation process on all tasks, the only difference among tasks being the input encoding. This is in contrast with previous approaches using task-specific models and datasets. The design of the common translation module for all tasks is important: by leveraging the task priors encoded in
|
| 10 |
+
|
| 11 |
+
<sup>1</sup>The code and datasets are available at [https://](https://github.com/cgraywang/deepex) [github.com/cgraywang/deepex](https://github.com/cgraywang/deepex).
|
| 12 |
+
|
| 13 |
+
<span id="page-1-0"></span>
|
| 14 |
+
|
| 15 |
+
Figure 1: Our DEEPEX translates between input text and output triples, and the output is then decoded into task predictions.
|
| 16 |
+
|
| 17 |
+
the input text, we enable the zero-shot transfer of the general knowledge that a pre-trained LM has about the task. We demonstrate that a simple pretraining task of predicting which relational triple goes with which text on a task-agnostic corpus further enhances the zero-shot capabilities on all tasks. To the best of our knowledge, this is the first framework to handle a variety of information extraction tasks in a zero-shot setting.
|
| 18 |
+
|
| 19 |
+
Our contributions are summarized below.
|
| 20 |
+
|
| 21 |
+
- 1. We introduce DEEPEX, a unified framework that solves information extraction tasks in a zero-shot setting. We cast information extractions as text-to-triple problems by incorporating the task priors in the input text and translating the input text to triples as output.
|
| 22 |
+
- 2. We apply our framework to (i) open information extraction, (ii) relation classification, and (iii) factual probe. In all tasks, we achieve competitive zero-shot performance to the current state-of-the-art including the fully supervised methods, and we achieve new state-of-the-art performance on open information extraction (OIE2016, WEB, NYT, and PENN) and factual probe (T-REx). For instance, our zero-shot approach significantly outperforms the supervised open information extraction by averaging 37.5 points in F1.
|
| 23 |
+
- 3. We also show that our framework delivers more interpretable results while achieving comparable performance on all tasks, thanks to the transparency of the text-to-triple translation.
|
| 24 |
+
|
| 25 |
+
# Method
|
| 26 |
+
|
| 27 |
+
We cast a suite of information extraction tasks into a text-to-triple translation framework. As shown in Figure [1,](#page-1-0) input and output are designed in a format that is appropriate for a given task. The translation process takes the input text and produces triples as output. The decoding step generates task predictions from the output. In this section, we describe the input and output format, the translation, and the decoding process. We use open information extraction (OIE) as a running example in this section. For OIE, we are given a sentence and asked to extract triples.
|
| 28 |
+
|
| 29 |
+
The input is a NP-chunked sentence, and the output is a set of triples. The NPs are encoded as task priors in the input. Below is an example.
|
| 30 |
+
|
| 31 |
+
> Input Born in GlasgowNP, FisherNP is a graduate of the London Opera CentreNP. Output (Fisher; Born in; Glasgow), (Fisher; is a graduate of; London Opera Centre).
|
| 32 |
+
|
| 33 |
+
NP denotes the noun phrase.
|
| 34 |
+
|
| 35 |
+
We aim to translate the above input text to output triples. Information extraction tasks lack high-quality training data, therefore training an end-to-end supervised approach [\(Paolini et al.,](#page-9-1) [2021\)](#page-9-1) is not feasible. Pre-trained language models (LM) (e.g., BERT [\(Devlin et al.,](#page-8-0) [2019\)](#page-8-0) and GPT [\(Brown](#page-8-1) [et al.,](#page-8-1) [2020\)](#page-8-1)) have demonstrated their zero-shot capabilities in a wide range of NLP tasks, thanks to the general information that they know about the tasks.
|
| 36 |
+
|
| 37 |
+
We therefore propose a zero-shot translation process consisting of two steps: generating and ranking, as shown in Figure [2.](#page-2-0) The generating stage produces general information about the task via pre-trained LMs, and the ranking stage looks for specific information about the task via a ranking model pre-trained on a task-agnostic corpus.
|
| 38 |
+
|
| 39 |
+
Generating The generating stage produces a set of candidate triples that contain general information about the task from pre-trained LMs. OIE is
|
| 40 |
+
|
| 41 |
+
<span id="page-2-0"></span>
|
| 42 |
+
|
| 43 |
+
Figure 2: Summary of our approach. The framework encodes task-relevant information in the input text and decodes the output triples to produce task predictions. The zero-shot translation first generates general information that a pre-trained language model has about the input, then ranks to find the output triples of interest to the task via a ranking model pre-trained on a task-agnostic relational corpus.
|
| 44 |
+
|
| 45 |
+
formulated as extracting a set of sequences in the input that are generally relevant to an argument pair (i.e., NP pair). We particularly use the attention scores in pre-trained LMs to measure the relevance between the sequence and the argument pair.
|
| 46 |
+
|
| 47 |
+
We frame the process as a search problem. Given an argument pair (e.g., "Fisher" and "London Opera Centre"), we aim to search for the sequences (e.g., "is a graduate of") with the largest attention scores connecting the pair. To compute a score for every possible sequence is computationally expensive, especially when the sequence length is large. Therefore the exhaustive search is intractable. We use beam search, which is an approximate strategy to explore the search space efficiently. Beam search maintains the k-best candidates. This means the time cost of beam search does not depend on the sequence length but the size of the beam and the average length of the candidates. The beam search starts with a task-specific start token [S]. At each step, beam search simply selects top-k next tokens with the largest attention scores, and just keeps k partial candidates with the highest scores, where k is the beam size. When a candidate produces a task-specific end token [E], the candidate is complete. For OIE, [S] and [E] refer to the argument pair, e.g., ([S] is "Fisher", and [E] refers to "London Opera Centre").
|
| 48 |
+
|
| 49 |
+
The above traditional beam search only allows searching sequences between [S] and [E]. To adapt beam search to produce more triples, we allow searching sequences: (i) left to both [S] and [E], and (ii) right to both [S] and [E]. This helps to improve the recall of the candidates. For example, a candidate triple (Fisher; Born in; Glasgow) is generated by looking at "Born in" on the
|
| 50 |
+
|
| 51 |
+
left in the above example. We also need to enable bidirectionality by running the search in both directions (left to right and right to left) following Wang et al. (2020). For OIE, we implement this by allowing every argument as both [S] and [E] regardless of its position in the input. For example, "Fisher" is [S] in (Fisher; Born in; Glasgow) given "Glasgow" appears before "Fisher" in the input.
|
| 52 |
+
|
| 53 |
+
Ranking The ranking stage finds triples that are of interest to the task via a ranking model pretrained on a task-agnostic relational corpus. For OIE, the generating stage produces k candidate triples for every argument pair. However, the sequences in the candidates are relevant to the argument pairs, not just in the relational aspect. The ranking stage aims to find the triples that specifically express the relational information between the argument pair, which is important for OIE.
|
| 54 |
+
|
| 55 |
+
We propose a contrastive model to rank the triples as illustrated in Figure 2. Given a batch of N (sentence, triple) pairs, the model is trained to predict which of the $N^2$ possible (sentence, triple) pairs across a batch actually appeared. The model learns a joint embedding space by training a base encoder BERT. The input sequence of the BERT encoder is in the format: [CLS] sentence [SEP] triple [SEP], which follows the standard input format of BERT. The goal is to maximize the cosine similarity of the sentence and triple embeddings of the N true pairs in the batch while minimizing the cosine similarity of the embeddings of the remaining $N^2 - N$ incorrect pairs. We optimize a cross-entropy loss over these similarity scores. The loss function for a positive pair is defined by l in Eq. 1.
|
| 56 |
+
|
| 57 |
+
$$l_{\text{sentence}} = -\log \frac{\exp(\text{sim}(\mathbf{u}_{i}, \mathbf{v}_{i}))}{\sum_{k=1}^{N} \exp(\text{sim}(\mathbf{u}_{i}, \mathbf{v}_{k}))}$$
|
| 58 |
+
$$l_{\text{triple}} = -\log \frac{\exp(\text{sim}(\mathbf{u}_{i}, \mathbf{v}_{i}))}{\sum_{k=1}^{N} \exp(\text{sim}(\mathbf{u}_{k}, \mathbf{v}_{i}))}$$
|
| 59 |
+
$$l = \frac{l_{\text{sentence}} + l_{\text{triple}}}{2}$$
|
| 60 |
+
(1)
|
| 61 |
+
|
| 62 |
+
<span id="page-3-0"></span>where $sim(\mathbf{u}, \mathbf{v}) = \frac{\mathbf{u}^{\mathsf{T}}\mathbf{v}}{\|\mathbf{u}\|\|\mathbf{v}\|}$ . For the *i*-th positive (sentence, triple) pair, $\mathbf{u}_i$ and $\mathbf{v}_i$ denote the sentence and triple embedding respectively.
|
| 63 |
+
|
| 64 |
+
We take advantage of the pre-trained BERT<sub>BASE</sub> as the base encoder. We further simplify the standard contrastive learning framework by removing the projection layer between the representation and the contrastive embedding space. Neither the linear (Radford et al., 2021) nor non-linear (Chen et al., 2020b) projection is used. This is because sentences and triples are unified in the same embedding space of BERT. We train the model on T-REx (Elsahar et al., 2019), which is a dataset of large-scale alignments between Wikipedia abstracts and Wikidata triples. T-REx contains a large number of sentence-triple pairs (11 million triples are paired with 6.2 million sentences). T-REx also reports an accuracy of 97.8% of the pairs.
|
| 65 |
+
|
| 66 |
+
The ranking model is task-agnostic. The ranking model takes the input in the same format for all tasks. At test time, the input text and each candidate triple from the generating stage is paired as the input to the ranking model. The candidate triples are ranked by the contrastive loss. We adopt the top-n candidate triples returned by the ranking model as the output. n varies across different tasks $^2$ . For the above OIE example, the output is the top-2 triples.
|
| 67 |
+
|
| 68 |
+
Once the output triples are produced, we decode the output triples to obtain task predictions. For OIE, the output triples serve as task predictions directly. No specific decoding strategy is needed.
|
| 69 |
+
|
| 70 |
+
The details are provided in Sec. 2.
|
| 71 |
+
|
| 72 |
+
For this task, we are given an input sentence with gold head and tail entities aiming to classify the relation type in a pre-defined category.
|
| 73 |
+
|
| 74 |
+
**Input and Output Format** The input is a sentence encoded with gold head and tail entities, and linked relation phrases. The output is a triple. An example is below.
|
| 75 |
+
|
| 76 |
+
$\begin{array}{cccc} \textbf{Input} & \underline{Born \ in}_{place\_of\_birth} & \underline{Glasgow}_{GOLD}, \\ \underline{Fisher}_{GOLD} & is \ a \ graduate \ of \ the \ London \\ Opera \ Centre. \end{array}$
|
| 77 |
+
|
| 78 |
+
Output (Fisher; place\_of\_birth; Glasgow).
|
| 79 |
+
|
| 80 |
+
GOLD denotes the gold entity. The linked relation phrases annotated with Wikidata predicates, e.g., Born in place of birth, are constructed as follows. We use an offline dictionary that maps the pre-defined relations to the Wikidata predicates. Such dictionaries are often provided either by Wikidata or third-parties. In all tested datasets, we can refer to either gold Wikidata or other high-quality resources for the dictionaries. We consider a sequence of tokens linked to a certain relation if the tokens are matched with the label or alias of the particular predicate in Wikidata. In the above example, "Born in" matches an alias of the Wikidata predicate "place\_of\_birth". In practice, some Wikidata predicates do not provide as many aliases as others. Inspired by Angeli et al. (2015), we follow the below procedure to add new aliases to resolve the imbalance issue: We first create a large candidate set of Wikipedia relations aligned to Wikidata predicates via distant supervision, then ask human annotators to filter out the wrong alignments. The remaining aligned relation phrases are added as new aliases of the Wikidata predicates.
|
| 81 |
+
|
| 82 |
+
**Relation-Constrained Translation** For the beam search in the generating stage of Sec. 2.2, [S] and [E] are the gold head and tail entities respectively. As the task requires the relations to be from a pre-defined category, using the beam search directly is not efficient. Allowing generating any token at each step might lead to sequences that do not match any pre-defined relations. Similar to De Cao et al. (2021), we use constrained beam search, which only decodes tokens belonging to a linked relation phrase. We take the top-n triples from the ranking model as the output.
|
| 83 |
+
|
| 84 |
+
**Decoding Relation** We take the Wikidata predicates of the output triples, and map the predicates back to the relations in the pre-defined category, which serve as the task predictions. In the above input/output example, "place\_of\_birth" is the Wikidata predicate in the output triple. It is mapped to
|
| 85 |
+
|
| 86 |
+
<sup>&</sup>lt;sup>2</sup>Please refer to Appendix A for details.
|
| 87 |
+
|
| 88 |
+
"city\_of\_birth" in the pre-defined relation category of one of the widely used relation classification datasets, TACRED. "city\_of\_birth" hence serves as the task prediction.
|
| 89 |
+
|
| 90 |
+
Given an input sentence with gold head entity name and relation name, the task aims to fill in the tail entity.
|
| 91 |
+
|
| 92 |
+
Input and Output Format The input is encoded as a NP-chunked sentence with gold head entity candidates and linked relation phrases. The output is a triple. An example is below.
|
| 93 |
+
|
| 94 |
+
Input Born inplace\_of\_birth GlasgowNP, FisherGOLD/NP is a graduate of the London Opera CentreNP.
|
| 95 |
+
|
| 96 |
+
Output (Fisher; place\_of\_birth; Glasgow).
|
| 97 |
+
|
| 98 |
+
GOLD/NP denotes the noun phrase that matches the gold head entity. Born inplace\_of\_birth represents a linked relation phrase annotated with a Wikidata predicate which is constructed in the same way as in Sec. [3.2.](#page-3-1)
|
| 99 |
+
|
| 100 |
+
Entity-Constrained Translation For the beam search, [S] and [E] are the gold head entity candidate and linked relation phrase respectively. Similar to the relation classification, we also constrain the search to generate possible tail entity sequences. We assume that NPs other than the gold head entity provide the set of candidate tail entities. To enable this, the search only decodes tokens belonging to the candidate NPs. In practice, we take the top-1 triple from the ranking model as the output.
|
| 101 |
+
|
| 102 |
+
Decoding Tail Entity We take the tail entities of the output triples as task predictions. For example, in the above output triple, "Glasgow" is decoded as the task prediction.
|
2110.02027/record.json
ADDED
|
@@ -0,0 +1,32 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
{
|
| 2 |
+
"arxiv_id": "2110.02027",
|
| 3 |
+
"month": "2021_10",
|
| 4 |
+
"year": 2022,
|
| 5 |
+
"conference": "ICML",
|
| 6 |
+
"title": "ProGCL: Rethinking Hard Negative Mining in Graph Contrastive Learning",
|
| 7 |
+
"arxiv_url": "https://arxiv.org/abs/2110.02027",
|
| 8 |
+
"source": {
|
| 9 |
+
"paper_dir": "/home/zling/lzl/ICML2026/Build_Dataset/data/2021_10/main_diagram_database/2110.02027",
|
| 10 |
+
"tex_dir": "/home/zling/lzl/ICML2026/Build_Dataset/data/2021_10/tex_files_extracted/2110.02027",
|
| 11 |
+
"paper_md": "/home/zling/lzl/ICML2026/Build_Dataset/data/2021_10/main_diagram_database/2110.02027/paper_text/paper.md",
|
| 12 |
+
"metadata_json": "/home/zling/lzl/ICML2026/Build_Dataset/data/2021_10/main_diagram_database/2110.02027/metadata.json",
|
| 13 |
+
"intro_method_from": "/home/zling/lzl/ICML2026/Build_Dataset/data/2021_10/main_diagram_database/2110.02027/paper_text/paper.md",
|
| 14 |
+
"intro_method_from_kind": "markdown"
|
| 15 |
+
},
|
| 16 |
+
"files": {
|
| 17 |
+
"main_drawio": "/home/zling/lzl/ICML2026/Build_Dataset/dataset/2110.02027/main_diagram/main_diagram.drawio",
|
| 18 |
+
"main_png": "/home/zling/lzl/ICML2026/Build_Dataset/dataset/2110.02027/main_diagram/main_diagram.png",
|
| 19 |
+
"main_pdf": "/home/zling/lzl/ICML2026/Build_Dataset/dataset/2110.02027/main_diagram/main_diagram.pdf",
|
| 20 |
+
"intro_method_md": "/home/zling/lzl/ICML2026/Build_Dataset/dataset/2110.02027/paper_text/intro_method.md",
|
| 21 |
+
"paper_pdf": "/home/zling/lzl/ICML2026/Build_Dataset/dataset/2110.02027/paper.pdf",
|
| 22 |
+
"latex": "/home/zling/lzl/ICML2026/Build_Dataset/dataset/2110.02027/latex_source"
|
| 23 |
+
},
|
| 24 |
+
"status": {
|
| 25 |
+
"copy_drawio": "exists",
|
| 26 |
+
"copy_png": "exists",
|
| 27 |
+
"diagram_pdf": "pdf_exists",
|
| 28 |
+
"intro_method": "exists",
|
| 29 |
+
"paper_pdf": "exists",
|
| 30 |
+
"latex": "exists"
|
| 31 |
+
}
|
| 32 |
+
}
|
2110.06539/record.json
ADDED
|
@@ -0,0 +1,32 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
{
|
| 2 |
+
"arxiv_id": "2110.06539",
|
| 3 |
+
"month": "2021_10",
|
| 4 |
+
"year": 2022,
|
| 5 |
+
"conference": "ICLR",
|
| 6 |
+
"title": "On Covariate Shift of Latent Confounders in Imitation and Reinforcement Learning",
|
| 7 |
+
"arxiv_url": "https://arxiv.org/abs/2110.06539",
|
| 8 |
+
"source": {
|
| 9 |
+
"paper_dir": "/home/zling/lzl/ICML2026/Build_Dataset/data/2021_10/main_diagram_database/2110.06539",
|
| 10 |
+
"tex_dir": "/home/zling/lzl/ICML2026/Build_Dataset/data/2021_10/tex_files_extracted/2110.06539",
|
| 11 |
+
"paper_md": "/home/zling/lzl/ICML2026/Build_Dataset/data/2021_10/main_diagram_database/2110.06539/paper_text/paper.md",
|
| 12 |
+
"metadata_json": "/home/zling/lzl/ICML2026/Build_Dataset/data/2021_10/main_diagram_database/2110.06539/metadata.json",
|
| 13 |
+
"intro_method_from": "/home/zling/lzl/ICML2026/Build_Dataset/data/2021_10/main_diagram_database/2110.06539/paper_text/paper.md",
|
| 14 |
+
"intro_method_from_kind": "markdown"
|
| 15 |
+
},
|
| 16 |
+
"files": {
|
| 17 |
+
"main_drawio": "/home/zling/lzl/ICML2026/Build_Dataset/dataset/2110.06539/main_diagram/main_diagram.drawio",
|
| 18 |
+
"main_png": "/home/zling/lzl/ICML2026/Build_Dataset/dataset/2110.06539/main_diagram/main_diagram.png",
|
| 19 |
+
"main_pdf": "/home/zling/lzl/ICML2026/Build_Dataset/dataset/2110.06539/main_diagram/main_diagram.pdf",
|
| 20 |
+
"intro_method_md": "/home/zling/lzl/ICML2026/Build_Dataset/dataset/2110.06539/paper_text/intro_method.md",
|
| 21 |
+
"paper_pdf": "/home/zling/lzl/ICML2026/Build_Dataset/dataset/2110.06539/paper.pdf",
|
| 22 |
+
"latex": "/home/zling/lzl/ICML2026/Build_Dataset/dataset/2110.06539/latex_source"
|
| 23 |
+
},
|
| 24 |
+
"status": {
|
| 25 |
+
"copy_drawio": "exists",
|
| 26 |
+
"copy_png": "exists",
|
| 27 |
+
"diagram_pdf": "pdf_exists",
|
| 28 |
+
"intro_method": "exists",
|
| 29 |
+
"paper_pdf": "exists",
|
| 30 |
+
"latex": "exists"
|
| 31 |
+
}
|
| 32 |
+
}
|
2110.13059/record.json
ADDED
|
@@ -0,0 +1,32 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
{
|
| 2 |
+
"arxiv_id": "2110.13059",
|
| 3 |
+
"month": "2021_10",
|
| 4 |
+
"year": 2022,
|
| 5 |
+
"conference": "ICLR",
|
| 6 |
+
"title": "Exploiting Redundancy: Separable Group Convolutional Networks on Lie Groups",
|
| 7 |
+
"arxiv_url": "https://arxiv.org/abs/2110.13059",
|
| 8 |
+
"source": {
|
| 9 |
+
"paper_dir": "/home/zling/lzl/ICML2026/Build_Dataset/data/2021_10/main_diagram_database/2110.13059",
|
| 10 |
+
"tex_dir": "/home/zling/lzl/ICML2026/Build_Dataset/data/2021_10/tex_files_extracted/2110.13059",
|
| 11 |
+
"paper_md": "/home/zling/lzl/ICML2026/Build_Dataset/data/2021_10/main_diagram_database/2110.13059/paper_text/paper.md",
|
| 12 |
+
"metadata_json": "/home/zling/lzl/ICML2026/Build_Dataset/data/2021_10/main_diagram_database/2110.13059/metadata.json",
|
| 13 |
+
"intro_method_from": "/home/zling/lzl/ICML2026/Build_Dataset/data/2021_10/main_diagram_database/2110.13059/paper_text/paper.md",
|
| 14 |
+
"intro_method_from_kind": "markdown"
|
| 15 |
+
},
|
| 16 |
+
"files": {
|
| 17 |
+
"main_drawio": "/home/zling/lzl/ICML2026/Build_Dataset/dataset/2110.13059/main_diagram/main_diagram.drawio",
|
| 18 |
+
"main_png": "/home/zling/lzl/ICML2026/Build_Dataset/dataset/2110.13059/main_diagram/main_diagram.png",
|
| 19 |
+
"main_pdf": "/home/zling/lzl/ICML2026/Build_Dataset/dataset/2110.13059/main_diagram/main_diagram.pdf",
|
| 20 |
+
"intro_method_md": "/home/zling/lzl/ICML2026/Build_Dataset/dataset/2110.13059/paper_text/intro_method.md",
|
| 21 |
+
"paper_pdf": "/home/zling/lzl/ICML2026/Build_Dataset/dataset/2110.13059/paper.pdf",
|
| 22 |
+
"latex": "/home/zling/lzl/ICML2026/Build_Dataset/dataset/2110.13059/latex_source"
|
| 23 |
+
},
|
| 24 |
+
"status": {
|
| 25 |
+
"copy_drawio": "ok",
|
| 26 |
+
"copy_png": "ok",
|
| 27 |
+
"diagram_pdf": "ok",
|
| 28 |
+
"intro_method": "ok",
|
| 29 |
+
"paper_pdf": "ok",
|
| 30 |
+
"latex": "ok"
|
| 31 |
+
}
|
| 32 |
+
}
|
2112.01853/record.json
ADDED
|
@@ -0,0 +1,32 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
{
|
| 2 |
+
"arxiv_id": "2112.01853",
|
| 3 |
+
"month": "2021_12",
|
| 4 |
+
"year": 2022,
|
| 5 |
+
"conference": "AAAI",
|
| 6 |
+
"title": "Episodic Policy Gradient Training",
|
| 7 |
+
"arxiv_url": "https://arxiv.org/abs/2112.01853",
|
| 8 |
+
"source": {
|
| 9 |
+
"paper_dir": "/home/zling/lzl/ICML2026/Build_Dataset/data/2021_12/main_diagram_database/2112.01853",
|
| 10 |
+
"tex_dir": "/home/zling/lzl/ICML2026/Build_Dataset/data/2021_12/tex_files_extracted/2112.01853",
|
| 11 |
+
"paper_md": "/home/zling/lzl/ICML2026/Build_Dataset/data/2021_12/main_diagram_database/2112.01853/paper_text/paper.md",
|
| 12 |
+
"metadata_json": "/home/zling/lzl/ICML2026/Build_Dataset/data/2021_12/main_diagram_database/2112.01853/metadata.json",
|
| 13 |
+
"intro_method_from": "/home/zling/lzl/ICML2026/Build_Dataset/data/2021_12/main_diagram_database/2112.01853/paper_text/paper.md",
|
| 14 |
+
"intro_method_from_kind": "markdown"
|
| 15 |
+
},
|
| 16 |
+
"files": {
|
| 17 |
+
"main_drawio": "/home/zling/lzl/ICML2026/Build_Dataset/dataset/2112.01853/main_diagram/main_diagram.drawio",
|
| 18 |
+
"main_png": "/home/zling/lzl/ICML2026/Build_Dataset/dataset/2112.01853/main_diagram/main_diagram.png",
|
| 19 |
+
"main_pdf": "/home/zling/lzl/ICML2026/Build_Dataset/dataset/2112.01853/main_diagram/main_diagram.pdf",
|
| 20 |
+
"intro_method_md": "/home/zling/lzl/ICML2026/Build_Dataset/dataset/2112.01853/paper_text/intro_method.md",
|
| 21 |
+
"paper_pdf": "/home/zling/lzl/ICML2026/Build_Dataset/dataset/2112.01853/paper.pdf",
|
| 22 |
+
"latex": "/home/zling/lzl/ICML2026/Build_Dataset/dataset/2112.01853/latex_source"
|
| 23 |
+
},
|
| 24 |
+
"status": {
|
| 25 |
+
"copy_drawio": "exists",
|
| 26 |
+
"copy_png": "exists",
|
| 27 |
+
"diagram_pdf": "pdf_exists",
|
| 28 |
+
"intro_method": "exists",
|
| 29 |
+
"paper_pdf": "exists",
|
| 30 |
+
"latex": "exists"
|
| 31 |
+
}
|
| 32 |
+
}
|
2112.10149/record.json
ADDED
|
@@ -0,0 +1,32 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
{
|
| 2 |
+
"arxiv_id": "2112.10149",
|
| 3 |
+
"month": "2021_12",
|
| 4 |
+
"year": 2022,
|
| 5 |
+
"conference": "AAAI",
|
| 6 |
+
"title": "Elastic-Link for Binarized Neural Networks",
|
| 7 |
+
"arxiv_url": "https://arxiv.org/abs/2112.10149",
|
| 8 |
+
"source": {
|
| 9 |
+
"paper_dir": "/home/zling/lzl/ICML2026/Build_Dataset/data/2021_12/main_diagram_database/2112.10149",
|
| 10 |
+
"tex_dir": "/home/zling/lzl/ICML2026/Build_Dataset/data/2021_12/tex_files_extracted/2112.10149",
|
| 11 |
+
"paper_md": "/home/zling/lzl/ICML2026/Build_Dataset/data/2021_12/main_diagram_database/2112.10149/paper_text/paper.md",
|
| 12 |
+
"metadata_json": "/home/zling/lzl/ICML2026/Build_Dataset/data/2021_12/main_diagram_database/2112.10149/metadata.json",
|
| 13 |
+
"intro_method_from": "/home/zling/lzl/ICML2026/Build_Dataset/data/2021_12/main_diagram_database/2112.10149/paper_text/paper.md",
|
| 14 |
+
"intro_method_from_kind": "markdown"
|
| 15 |
+
},
|
| 16 |
+
"files": {
|
| 17 |
+
"main_drawio": "/home/zling/lzl/ICML2026/Build_Dataset/dataset/2112.10149/main_diagram/main_diagram.drawio",
|
| 18 |
+
"main_png": "/home/zling/lzl/ICML2026/Build_Dataset/dataset/2112.10149/main_diagram/main_diagram.png",
|
| 19 |
+
"main_pdf": "/home/zling/lzl/ICML2026/Build_Dataset/dataset/2112.10149/main_diagram/main_diagram.pdf",
|
| 20 |
+
"intro_method_md": "/home/zling/lzl/ICML2026/Build_Dataset/dataset/2112.10149/paper_text/intro_method.md",
|
| 21 |
+
"paper_pdf": "/home/zling/lzl/ICML2026/Build_Dataset/dataset/2112.10149/paper.pdf",
|
| 22 |
+
"latex": "/home/zling/lzl/ICML2026/Build_Dataset/dataset/2112.10149/latex_source"
|
| 23 |
+
},
|
| 24 |
+
"status": {
|
| 25 |
+
"copy_drawio": "exists",
|
| 26 |
+
"copy_png": "exists",
|
| 27 |
+
"diagram_pdf": "pdf_exists",
|
| 28 |
+
"intro_method": "exists",
|
| 29 |
+
"paper_pdf": "exists",
|
| 30 |
+
"latex": "exists"
|
| 31 |
+
}
|
| 32 |
+
}
|
2112.11909/record.json
ADDED
|
@@ -0,0 +1,32 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
{
|
| 2 |
+
"arxiv_id": "2112.11909",
|
| 3 |
+
"month": "2021_12",
|
| 4 |
+
"year": 2020,
|
| 5 |
+
"conference": "AAAI",
|
| 6 |
+
"title": "Hypergraph Convolutional Network for Multi-Hop Knowledge Base Question Answering (Student Abstract)",
|
| 7 |
+
"arxiv_url": "https://arxiv.org/abs/2112.11909",
|
| 8 |
+
"source": {
|
| 9 |
+
"paper_dir": "/home/zling/lzl/ICML2026/Build_Dataset/data/2021_12/main_diagram_database/2112.11909",
|
| 10 |
+
"tex_dir": "/home/zling/lzl/ICML2026/Build_Dataset/data/2021_12/tex_files_extracted/2112.11909",
|
| 11 |
+
"paper_md": "/home/zling/lzl/ICML2026/Build_Dataset/data/2021_12/main_diagram_database/2112.11909/paper_text/paper.md",
|
| 12 |
+
"metadata_json": "/home/zling/lzl/ICML2026/Build_Dataset/data/2021_12/main_diagram_database/2112.11909/metadata.json",
|
| 13 |
+
"intro_method_from": "/home/zling/lzl/ICML2026/Build_Dataset/data/2021_12/main_diagram_database/2112.11909/paper_text/paper.md",
|
| 14 |
+
"intro_method_from_kind": "markdown"
|
| 15 |
+
},
|
| 16 |
+
"files": {
|
| 17 |
+
"main_drawio": "/home/zling/lzl/ICML2026/Build_Dataset/dataset/2112.11909/main_diagram/main_diagram.drawio",
|
| 18 |
+
"main_png": "/home/zling/lzl/ICML2026/Build_Dataset/dataset/2112.11909/main_diagram/main_diagram.png",
|
| 19 |
+
"main_pdf": "/home/zling/lzl/ICML2026/Build_Dataset/dataset/2112.11909/main_diagram/main_diagram.pdf",
|
| 20 |
+
"intro_method_md": "/home/zling/lzl/ICML2026/Build_Dataset/dataset/2112.11909/paper_text/intro_method.md",
|
| 21 |
+
"paper_pdf": "/home/zling/lzl/ICML2026/Build_Dataset/dataset/2112.11909/paper.pdf",
|
| 22 |
+
"latex": "/home/zling/lzl/ICML2026/Build_Dataset/dataset/2112.11909/latex_source"
|
| 23 |
+
},
|
| 24 |
+
"status": {
|
| 25 |
+
"copy_drawio": "exists",
|
| 26 |
+
"copy_png": "exists",
|
| 27 |
+
"diagram_pdf": "pdf_exists",
|
| 28 |
+
"intro_method": "exists",
|
| 29 |
+
"paper_pdf": "exists",
|
| 30 |
+
"latex": "exists"
|
| 31 |
+
}
|
| 32 |
+
}
|
2201.01666/record.json
ADDED
|
@@ -0,0 +1,32 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
{
|
| 2 |
+
"arxiv_id": "2201.01666",
|
| 3 |
+
"month": "2022_01",
|
| 4 |
+
"year": 2022,
|
| 5 |
+
"conference": "ICLR",
|
| 6 |
+
"title": "Sample Efficient Deep Reinforcement Learning via Uncertainty Estimation",
|
| 7 |
+
"arxiv_url": "https://arxiv.org/abs/2201.01666",
|
| 8 |
+
"source": {
|
| 9 |
+
"paper_dir": "/home/zling/lzl/ICML2026/Build_Dataset/data/2022_01/main_diagram_database/2201.01666",
|
| 10 |
+
"tex_dir": "/home/zling/lzl/ICML2026/Build_Dataset/data/2022_01/tex_files_extracted/2201.01666",
|
| 11 |
+
"paper_md": "/home/zling/lzl/ICML2026/Build_Dataset/data/2022_01/main_diagram_database/2201.01666/paper_text/paper.md",
|
| 12 |
+
"metadata_json": "/home/zling/lzl/ICML2026/Build_Dataset/data/2022_01/main_diagram_database/2201.01666/metadata.json",
|
| 13 |
+
"intro_method_from": "/home/zling/lzl/ICML2026/Build_Dataset/data/2022_01/main_diagram_database/2201.01666/paper_text/paper.md",
|
| 14 |
+
"intro_method_from_kind": "markdown"
|
| 15 |
+
},
|
| 16 |
+
"files": {
|
| 17 |
+
"main_drawio": "/home/zling/lzl/ICML2026/Build_Dataset/dataset/2201.01666/main_diagram/main_diagram.drawio",
|
| 18 |
+
"main_png": "/home/zling/lzl/ICML2026/Build_Dataset/dataset/2201.01666/main_diagram/main_diagram.png",
|
| 19 |
+
"main_pdf": "/home/zling/lzl/ICML2026/Build_Dataset/dataset/2201.01666/main_diagram/main_diagram.pdf",
|
| 20 |
+
"intro_method_md": "/home/zling/lzl/ICML2026/Build_Dataset/dataset/2201.01666/paper_text/intro_method.md",
|
| 21 |
+
"paper_pdf": "/home/zling/lzl/ICML2026/Build_Dataset/dataset/2201.01666/paper.pdf",
|
| 22 |
+
"latex": "/home/zling/lzl/ICML2026/Build_Dataset/dataset/2201.01666/latex_source"
|
| 23 |
+
},
|
| 24 |
+
"status": {
|
| 25 |
+
"copy_drawio": "exists",
|
| 26 |
+
"copy_png": "exists",
|
| 27 |
+
"diagram_pdf": "pdf_exists",
|
| 28 |
+
"intro_method": "exists",
|
| 29 |
+
"paper_pdf": "exists",
|
| 30 |
+
"latex": "exists"
|
| 31 |
+
}
|
| 32 |
+
}
|
2201.02233/record.json
ADDED
|
@@ -0,0 +1,32 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
{
|
| 2 |
+
"arxiv_id": "2201.02233",
|
| 3 |
+
"month": "2022_01",
|
| 4 |
+
"year": 2020,
|
| 5 |
+
"conference": "AAAI",
|
| 6 |
+
"title": "Consistent Video Style Transfer via Compound Regularization",
|
| 7 |
+
"arxiv_url": "https://arxiv.org/abs/2201.02233",
|
| 8 |
+
"source": {
|
| 9 |
+
"paper_dir": "/home/zling/lzl/ICML2026/Build_Dataset/data/2022_01/main_diagram_database/2201.02233",
|
| 10 |
+
"tex_dir": "/home/zling/lzl/ICML2026/Build_Dataset/data/2022_01/tex_files_extracted/2201.02233",
|
| 11 |
+
"paper_md": "/home/zling/lzl/ICML2026/Build_Dataset/data/2022_01/main_diagram_database/2201.02233/paper_text/paper.md",
|
| 12 |
+
"metadata_json": "/home/zling/lzl/ICML2026/Build_Dataset/data/2022_01/main_diagram_database/2201.02233/metadata.json",
|
| 13 |
+
"intro_method_from": "/home/zling/lzl/ICML2026/Build_Dataset/data/2022_01/main_diagram_database/2201.02233/paper_text/paper.md",
|
| 14 |
+
"intro_method_from_kind": "markdown"
|
| 15 |
+
},
|
| 16 |
+
"files": {
|
| 17 |
+
"main_drawio": "/home/zling/lzl/ICML2026/Build_Dataset/dataset/2201.02233/main_diagram/main_diagram.drawio",
|
| 18 |
+
"main_png": "/home/zling/lzl/ICML2026/Build_Dataset/dataset/2201.02233/main_diagram/main_diagram.png",
|
| 19 |
+
"main_pdf": "/home/zling/lzl/ICML2026/Build_Dataset/dataset/2201.02233/main_diagram/main_diagram.pdf",
|
| 20 |
+
"intro_method_md": "/home/zling/lzl/ICML2026/Build_Dataset/dataset/2201.02233/paper_text/intro_method.md",
|
| 21 |
+
"paper_pdf": "/home/zling/lzl/ICML2026/Build_Dataset/dataset/2201.02233/paper.pdf",
|
| 22 |
+
"latex": "/home/zling/lzl/ICML2026/Build_Dataset/dataset/2201.02233/latex_source"
|
| 23 |
+
},
|
| 24 |
+
"status": {
|
| 25 |
+
"copy_drawio": "ok",
|
| 26 |
+
"copy_png": "ok",
|
| 27 |
+
"diagram_pdf": "ok",
|
| 28 |
+
"intro_method": "ok",
|
| 29 |
+
"paper_pdf": "ok",
|
| 30 |
+
"latex": "ok"
|
| 31 |
+
}
|
| 32 |
+
}
|
2201.02263/record.json
ADDED
|
@@ -0,0 +1,32 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
{
|
| 2 |
+
"arxiv_id": "2201.02263",
|
| 3 |
+
"month": "2022_01",
|
| 4 |
+
"year": 2022,
|
| 5 |
+
"conference": "CVPR",
|
| 6 |
+
"title": "ITSA: An Information-Theoretic Approach to Automatic Shortcut Avoidance and Domain Generalization in Stereo Matching Networks",
|
| 7 |
+
"arxiv_url": "https://arxiv.org/abs/2201.02263",
|
| 8 |
+
"source": {
|
| 9 |
+
"paper_dir": "/home/zling/lzl/ICML2026/Build_Dataset/data/2022_01/main_diagram_database/2201.02263",
|
| 10 |
+
"tex_dir": "/home/zling/lzl/ICML2026/Build_Dataset/data/2022_01/tex_files_extracted/2201.02263",
|
| 11 |
+
"paper_md": "/home/zling/lzl/ICML2026/Build_Dataset/data/2022_01/main_diagram_database/2201.02263/paper_text/paper.md",
|
| 12 |
+
"metadata_json": "/home/zling/lzl/ICML2026/Build_Dataset/data/2022_01/main_diagram_database/2201.02263/metadata.json",
|
| 13 |
+
"intro_method_from": "/home/zling/lzl/ICML2026/Build_Dataset/data/2022_01/main_diagram_database/2201.02263/paper_text/paper.md",
|
| 14 |
+
"intro_method_from_kind": "markdown"
|
| 15 |
+
},
|
| 16 |
+
"files": {
|
| 17 |
+
"main_drawio": "/home/zling/lzl/ICML2026/Build_Dataset/dataset/2201.02263/main_diagram/main_diagram.drawio",
|
| 18 |
+
"main_png": "/home/zling/lzl/ICML2026/Build_Dataset/dataset/2201.02263/main_diagram/main_diagram.png",
|
| 19 |
+
"main_pdf": "/home/zling/lzl/ICML2026/Build_Dataset/dataset/2201.02263/main_diagram/main_diagram.pdf",
|
| 20 |
+
"intro_method_md": "/home/zling/lzl/ICML2026/Build_Dataset/dataset/2201.02263/paper_text/intro_method.md",
|
| 21 |
+
"paper_pdf": "/home/zling/lzl/ICML2026/Build_Dataset/dataset/2201.02263/paper.pdf",
|
| 22 |
+
"latex": "/home/zling/lzl/ICML2026/Build_Dataset/dataset/2201.02263/latex_source"
|
| 23 |
+
},
|
| 24 |
+
"status": {
|
| 25 |
+
"copy_drawio": "ok",
|
| 26 |
+
"copy_png": "ok",
|
| 27 |
+
"diagram_pdf": "ok",
|
| 28 |
+
"intro_method": "ok",
|
| 29 |
+
"paper_pdf": "ok",
|
| 30 |
+
"latex": "ok"
|
| 31 |
+
}
|
| 32 |
+
}
|