|
|
import requests |
|
|
|
|
|
from datasets import Dataset, DatasetDict |
|
|
|
|
|
|
|
|
def load_bio_file(filepath_or_url):
    """Load a CoNLL-style .bio file and yield token/label sequences.

    Args:
        filepath_or_url: Local file path or http(s) URL of a .bio file in
            which each non-blank line is "<token> <ner_tag> [...]" and
            sentences are separated by blank lines or -DOCSTART- markers.

    Yields:
        dict: ``{"tokens": [...], "ner_tags": [...]}`` for each sentence.

    Raises:
        requests.HTTPError: if a URL is given and the download fails.
        OSError: if a local path is given and the file cannot be read.
    """
    if filepath_or_url.startswith("http"):
        # Timeout so a stalled server cannot hang the loader indefinitely.
        response = requests.get(filepath_or_url, timeout=30)
        response.raise_for_status()
        lines = response.text.splitlines()
    else:
        with open(filepath_or_url, encoding="utf-8") as f:
            lines = f.readlines()

    tokens, ner_tags = [], []

    for line in lines:
        line = line.strip()
        if not line or line.startswith("-DOCSTART-"):
            # Sentence boundary: flush the accumulated sentence, if any.
            if tokens:
                yield {
                    "tokens": tokens,
                    "ner_tags": ner_tags,
                }
                tokens, ner_tags = [], []
        else:
            splits = line.split()
            if len(splits) >= 2:
                # First column is the token, second the NER tag; any extra
                # columns are ignored.
                token, ner = splits[:2]
                tokens.append(token)
                ner_tags.append(ner)
            else:
                # A line missing either the token or the tag is skipped.
                print(f"Warning: malformed line skipped -> {line}")

    # Flush the final sentence when the file does not end with a blank line.
    if tokens:
        yield {
            "tokens": tokens,
            "ner_tags": ner_tags,
        }
|
|
|
|
|
|
|
|
def load_all_splits():
    """Download and parse the train, validation, and test splits.

    Returns:
        dict: split name -> list of ``{"tokens", "ner_tags"}`` sentence dicts.
    """
    split_urls = {
        "train": "https://raw.githubusercontent.com/brickee/HarveyNER/refs/heads/main/data/tweets/tweets.train.bio",
        "validation": "https://raw.githubusercontent.com/brickee/HarveyNER/refs/heads/main/data/tweets/tweets.dev.bio",
        "test": "https://raw.githubusercontent.com/brickee/HarveyNER/refs/heads/main/data/tweets/tweets.test.bio",
    }

    loaded = {}
    for name, location in split_urls.items():
        print(f"Loading {name} data from {location} ...")
        sentences = list(load_bio_file(location))
        loaded[name] = sentences
        print(f" Loaded {len(sentences)} sentences for {name}.")
    return loaded
|
|
|
|
|
|
|
|
def main():
    """Download all HarveyNER splits and publish them to the Hugging Face Hub."""
    splits = load_all_splits()
    print("\nExample from train:")
    print(splits["train"][0])

    # Convert each split's list of sentence dicts into a HF Dataset.
    hub_dataset = DatasetDict({
        name: Dataset.from_list(splits[name])
        for name in ('train', 'validation', 'test')
    })
    hub_dataset.push_to_hub('extraordinarylab/harvey-ner')
|
|
|
|
|
|
|
|
# Run the download-and-publish pipeline when executed as a script.
if __name__ == "__main__":


    main()