updated appendix
Browse files
README.md
CHANGED
|
@@ -108,7 +108,7 @@ For any questions or issues with the dataset, please contact the author at kilia
|
|
| 108 |
|
| 109 |
## Appendix
|
| 110 |
|
| 111 |
-
|
| 112 |
```python
|
| 113 |
from sklearn.model_selection import train_test_split
|
| 114 |
|
|
@@ -119,7 +119,6 @@ template_names = lexicographicDataWikidataSPARQL['template_name'].unique()
|
|
| 119 |
test_set = pd.DataFrame()
|
| 120 |
train_set = pd.DataFrame()
|
| 121 |
|
| 122 |
-
|
| 123 |
for template_name in template_names:
|
| 124 |
# get the samples for the template_name
|
| 125 |
samples = lexicographicDataWikidataSPARQL[lexicographicDataWikidataSPARQL['template_name'] == template_name]
|
|
@@ -135,7 +134,6 @@ for template_name in template_names:
|
|
| 135 |
print(f"{template_name} has more than 20 samples")
|
| 136 |
train, test = train_test_split(samples, test_size=20)
|
| 137 |
|
| 138 |
-
|
| 139 |
test_set = pd.concat([test_set, test])
|
| 140 |
train_set = pd.concat([train_set, train])
|
| 141 |
```
|
|
|
|
| 108 |
|
| 109 |
## Appendix
|
| 110 |
|
| 111 |
+
#### 1. Code for Generating the Train-Test Split on full_data in Python
|
| 112 |
```python
|
| 113 |
from sklearn.model_selection import train_test_split
|
| 114 |
|
|
|
|
| 119 |
test_set = pd.DataFrame()
|
| 120 |
train_set = pd.DataFrame()
|
| 121 |
|
|
|
|
| 122 |
for template_name in template_names:
|
| 123 |
# get the samples for the template_name
|
| 124 |
samples = lexicographicDataWikidataSPARQL[lexicographicDataWikidataSPARQL['template_name'] == template_name]
|
|
|
|
| 134 |
print(f"{template_name} has more than 20 samples")
|
| 135 |
train, test = train_test_split(samples, test_size=20)
|
| 136 |
|
|
|
|
| 137 |
test_set = pd.concat([test_set, test])
|
| 138 |
train_set = pd.concat([train_set, train])
|
| 139 |
```
|