init

- .gitattributes +5 -0
- README.md +1 -1
- data/nell.test.jsonl +1 -1
- data/nell.train.jsonl +2 -2
- data/nell.validation.jsonl +1 -1
- data/nell.vocab.clean.txt +3 -0
- data/nell_filter.test.jsonl +3 -0
- data/nell_filter.train.jsonl +3 -0
- data/nell_filter.validation.jsonl +3 -0
- data/nell_filter.vocab.txt +3 -0
- fewshot_link_prediction.py +1 -1
- generate_filtered_data.py +90 -0
- process.py +73 -4
.gitattributes
CHANGED
@@ -63,3 +63,8 @@ data/nell.vocab.txt filter=lfs diff=lfs merge=lfs -text
 data/wiki.test.jsonl filter=lfs diff=lfs merge=lfs -text
 data/wiki.train.jsonl filter=lfs diff=lfs merge=lfs -text
 data/wiki.validation.jsonl filter=lfs diff=lfs merge=lfs -text
+data/nell.vocab.clean.txt filter=lfs diff=lfs merge=lfs -text
+data/nell_filter.test.jsonl filter=lfs diff=lfs merge=lfs -text
+data/nell_filter.train.jsonl filter=lfs diff=lfs merge=lfs -text
+data/nell_filter.validation.jsonl filter=lfs diff=lfs merge=lfs -text
+data/nell_filter.vocab.txt filter=lfs diff=lfs merge=lfs -text
README.md
CHANGED
@@ -33,7 +33,7 @@ An example of `test` of `nell` looks as follows.
 }
 ```
 
-## Statistics
+## Statistics on the NELL test split
 
 - Entity Types
 
data/nell.test.jsonl
CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
+oid sha256:fa48d27aba4c3b82c2369313fc7a0436103fcb27115efc4159e8b50d988a6fa3
 size 510425
data/nell.train.jsonl
CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
-size
+oid sha256:bc4c73fdae5db374da7855e421176dc09462cc72366040d39ce42d7ec333460d
+size 1997185
data/nell.validation.jsonl
CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
+oid sha256:c44da469d07e6c45d8bd71cf84b8abc33b0d4ecf41d395c563f055db58ed0919
 size 222265
data/nell.vocab.clean.txt
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:af38d1a58200cf9a3aa0c0d5e76c30202eff02c0cbd353f1c92640fd3fb0d422
+size 884124
data/nell_filter.test.jsonl
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:7080aa24a120c206d15c89bdeb659613f98943e1a777dc0470d2ba220789f537
+size 328006
data/nell_filter.train.jsonl
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:d5c399247378d26aaf06b3d6bc4770eb861805527311b188aba9a9243b45b84b
+size 1303312
data/nell_filter.validation.jsonl
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:95da57e7d4674775756c8fdd3e262c1aebb094c65e4c0596a0ae0e859bf38c43
+size 190916
data/nell_filter.vocab.txt
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:52489b95fececef828e13cf875c86128ab3b8134c6bdbce7618d809ee59a75f5
+size 694324
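All of the data files above are Git LFS pointers rather than raw content: three lines giving the spec version, a SHA-256 of the actual payload, and its size in bytes. As a minimal sketch (not part of this commit), such a pointer can be parsed when the repo is cloned without `git lfs pull`:

```python
# Minimal sketch, not part of this commit: parse a three-line Git LFS
# pointer file of the form shown in the diffs above.
def parse_lfs_pointer(path: str) -> dict:
    fields = {}
    with open(path) as f:
        for line in f:
            key, _, value = line.strip().partition(" ")
            fields[key] = value  # keys: "version", "oid", "size"
    return fields

pointer = parse_lfs_pointer("data/nell_filter.test.jsonl")
print(pointer["oid"], pointer["size"])  # sha256:7080aa24... 328006
```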
fewshot_link_prediction.py
CHANGED
@@ -28,7 +28,7 @@ _CITATION = """
 
 _HOME_PAGE = "https://github.com/asahi417/relbert"
 _URL = f'https://huggingface.co/datasets/relbert/{_NAME}/resolve/main/data'
-_TYPES = ["nell", "wiki"]
+_TYPES = ["nell", "nell_filter", "wiki"]
 _URLS = {i: {
     str(datasets.Split.TRAIN): [f'{_URL}/{i}.train.jsonl'],
     str(datasets.Split.VALIDATION): [f'{_URL}/{i}.validation.jsonl'],
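With `nell_filter` added to `_TYPES`, the new config becomes loadable through the `datasets` library. A hedged usage sketch; the repository id below is an assumption inferred from the `relbert/{_NAME}` URL pattern and the loader script name, not confirmed by the diff:

```python
from datasets import load_dataset

# Assumed repo id: the loader script is fewshot_link_prediction.py and the data
# URLs point at huggingface.co/datasets/relbert/{_NAME}, so this id is a guess.
dataset = load_dataset("relbert/fewshot_link_prediction", "nell_filter")
print(dataset["test"][0])  # one link-prediction triple from the filtered split
```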
generate_filtered_data.py
ADDED
@@ -0,0 +1,90 @@
+import json
+
+
+non_entity_types = [
+    'academicfield',
+    'agent',
+    'agriculturalproduct',
+    'amphibian',
+    'animal',
+    'aquarium',
+    'arachnid',
+    'architect',
+    'arthropod',
+    'bakedgood',
+    'bathroomitem',
+    'bedroomitem',
+    'beverage',
+    'bird',
+    'blog',
+    'bodypart',
+    'bone',
+    'candy',
+    'cave',
+    'chemical',
+    'clothing',
+    'coffeedrink',
+    'condiment',
+    'crimeorcharge',
+    'crustacean',
+    'date',
+    'dateliteral',
+    'economicsector',
+    'fish',
+    'food',
+    'fruit',
+    'fungus',
+    'furniture',
+    'grain',
+    'hallwayitem',
+    'hobby',
+    'insect',
+    'invertebrate',
+    'jobposition',
+    'kitchenitem',
+    'landscapefeatures',
+    'legume',
+    'location',
+    'mammal',
+    'meat',
+    'mlsoftware',
+    'mollusk',
+    'month',
+    'nut',
+    'officebuildingroom',
+    'physiologicalcondition',
+    'plant',
+    'politicsissue',
+    'profession',
+    'professionalorganization',
+    'reptile',
+    'room',
+    'sport',
+    'tableitem',
+    'tradeunion',
+    'vegetable',
+    'vehicle',
+    'vertebrate',
+    'weapon',
+    'wine'
+]
+
+full_data = {}
+for s in ["train", "validation", "test"]:
+    with open(f"data/nell.{s}.jsonl") as f:
+        data = [json.loads(i) for i in f.read().split('\n') if len(i) > 0]
+    data = [i for i in data if i['head_type'] not in non_entity_types and i['tail_type'] not in non_entity_types]
+    with open(f"data/nell_filter.{s}.jsonl", "w") as f:
+        f.write('\n'.join([json.dumps(i) for i in data]))
+
+
+with open("data/nell.vocab.txt") as f:
+    types = [i for i in f.read().split('\n')]
+
+with open("data/nell.vocab.clean.txt") as f:
+    vocab = [i for i in f.read().split('\n')]
+vocab_type = [(a, b.split(":")[1]) for a, b in zip(vocab, types) if len(a) > 0 and len(b) > 0 and len(b.split(":")) > 2]
+vocab_new = [a for a, b in vocab_type if b not in non_entity_types]
+with open("data/nell_filter.vocab.txt", 'w') as f:
+    f.write('\n'.join(vocab_new))
+
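A quick way to check this script's output is to re-read one filtered split and confirm no excluded type survived. A sketch, assuming it runs in the same session as `generate_filtered_data.py` so `non_entity_types` is still in scope:

```python
import json

# Sketch: confirm data/nell_filter.test.jsonl contains no excluded types.
with open("data/nell_filter.test.jsonl") as f:
    rows = [json.loads(line) for line in f if line.strip()]
bad = [r for r in rows if r["head_type"] in non_entity_types or r["tail_type"] in non_entity_types]
print(f"{len(rows)} triples kept, {len(bad)} with excluded types (expect 0)")
```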
process.py
CHANGED
@@ -18,7 +18,75 @@ data_dir_nell = "NELL"
 data_dir_wiki = "Wiki"
 os.makedirs("data", exist_ok=True)
 
-short = ['alcs', "uk", "us", "usa"]
+short = ['alcs', "uk", "us", "usa", "npr", "nbc", "bbc", "cnn", "abc", "cbs", "nfl", "mlb", "nba", "nhl", "pga", "ncaa",
+         "wjhu", "pbs", "un"]
+non_entity_types = [
+    'academicfield',
+    'agent',
+    'agriculturalproduct',
+    'amphibian',
+    'animal',
+    'aquarium',
+    'arachnid',
+    'architect',
+    'arthropod',
+    'bakedgood',
+    'bathroomitem',
+    'bedroomitem',
+    'beverage',
+    'bird',
+    'blog',
+    'bodypart',
+    'bone',
+    'candy',
+    'cave',
+    'chemical',
+    'clothing',
+    'coffeedrink',
+    'condiment',
+    'crimeorcharge',
+    'crustacean',
+    'date',
+    'dateliteral',
+    'economicsector',
+    'fish',
+    'food',
+    'fruit',
+    'fungus',
+    'furniture',
+    'grain',
+    'hallwayitem',
+    'hobby',
+    'insect',
+    'invertebrate',
+    'jobposition',
+    'kitchenitem',
+    'landscapefeatures',
+    'legume',
+    'location',
+    'mammal',
+    'meat',
+    'mlsoftware',
+    'mollusk',
+    'month',
+    'nut',
+    'officebuildingroom',
+    'physiologicalcondition',
+    'plant',
+    'politicsissue',
+    'profession',
+    'professionalorganization',
+    'reptile',
+    'room',
+    'sport',
+    'tableitem',
+    'tradeunion',
+    'vegetable',
+    'vehicle',
+    'vertebrate',
+    'weapon',
+    'wine'
+]
 
 
 def clean(token):
@@ -27,9 +95,7 @@ def clean(token):
     token = token.replace("__", "")
     token = re.sub(r"00\d\Z", "", token)
     token = re.sub(r"\An(\d+)", r"\1", token)
-    if _type in [
-        "crustacean", "agriculturalproduct", "reptile", "mammal", "amphibian", "sport", "hobby", "vegetable",
-        "beverage", "fruit", "grain", "coffeedrink", ]:
+    if _type in non_entity_types:
         return token, _type
     new_token = []
     for _t in token.split(" "):
@@ -74,6 +140,9 @@ if __name__ == '__main__':
     vocab = read_vocab(f"{data_dir_nell}/ent2ids")
     with open("data/nell.vocab.txt", 'w') as f:
         f.write("\n".join(vocab))
+    vocab_clean = [clean(i)[0] if len(i.split(":")) > 2 else i for i in vocab]
+    with open("data/nell.vocab.clean.txt", 'w') as f:
+        f.write("\n".join(vocab_clean))
 
     vocab = read_vocab(f"{data_dir_wiki}/ent2ids")
     with open("data/wiki.vocab.txt", 'w') as f:
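The `len(i.split(":")) > 2` guard here and in `generate_filtered_data.py` implies NELL vocab entries of the form `concept:<type>:<surface>`, with `split(":")[1]` recovering the entity type. An illustration; the concrete entry string is hypothetical:

```python
# Hypothetical NELL vocab entry; the "concept:<type>:<surface>" shape is
# inferred from the split(":") guards in process.py and generate_filtered_data.py.
entry = "concept:mammal:grizzly_bear"
parts = entry.split(":")
if len(parts) > 2:                  # same guard used in both scripts
    entity_type = parts[1]          # -> "mammal"
    print(entity_type in non_entity_types)  # -> True: this entry would be filtered out
```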