| from datasets import load_dataset | |
| import json | |
# Download/load the English Wikipedia dump, snapshot 2023-11-01, from the
# Hugging Face hub. Only the 'train' split is consumed below (url, text,
# title columns).
ds = load_dataset("wikimedia/wikipedia", "20231101.en")
def get_text(text, max_paragraphs=2):
    """Return at most the first *max_paragraphs* blank-line-separated
    paragraphs of *text*.

    Paragraphs are the chunks produced by splitting on "\n\n". If the
    input has fewer paragraphs than requested (including the empty
    string), it is returned unchanged.
    """
    # The original special-cased len(parts) == 1, but joining a
    # one-element slice returns the text unchanged anyway, so a single
    # expression covers every case.
    parts = text.split("\n\n")
    return "\n\n".join(parts[:max_paragraphs])
# Pull the three columns we export from the training split.
# Fix: the original also ran URLs through get_text() — a copy-paste slip
# from the text line. URLs contain no "\n\n", so get_text was a no-op on
# them; taking them verbatim is equivalent and no longer misleading.
url = list(ds['train']['url'])
# Keep only the lead (first two paragraphs) of each article body.
train = [get_text(t) for t in ds['train']['text']]
title = list(ds['train']['title'])
# Write one record per line. Titles and article texts are JSON-encoded so
# embedded newlines survive the line-oriented format; URLs are written
# verbatim (they never contain newlines).
# Fix: pass encoding="utf-8" explicitly — open()'s default encoding is
# locale-dependent, and Wikipedia text is full of non-ASCII characters,
# so relying on the locale can crash or corrupt the output.
with open("wikipedia-title-20231101-en.txt", "w", encoding="utf-8") as f:
    for record in title:
        print(json.dumps(record), file=f)
with open("wikipedia-text-20231101-en.txt", "w", encoding="utf-8") as f:
    for record in train:
        print(json.dumps(record), file=f)
with open("wikipedia-url-20231101-en.txt", "w", encoding="utf-8") as f:
    for record in url:
        print(record, file=f)