| | from bs4 import BeautifulSoup |
| | from datasets import load_dataset |
| |
|
def get_titles(file_path):
    """Extract article titles from a saved Wikipedia HTML list page.

    Parses the first ``<tbody>`` found in *file_path*, skips the first two
    rows (presumably table header rows — TODO confirm against the saved
    page), and collects the ``title`` attribute of every remaining ``<a>``
    tag.

    Args:
        file_path: Path to a local HTML file (e.g. ``'exzellent.txt'``).

    Returns:
        list[str]: Titles in document order; may contain duplicates.

    Raises:
        AttributeError: If the document contains no ``<tbody>`` element.
    """
    # Explicit encoding: the platform default could mis-decode the HTML.
    with open(file_path, 'r', encoding='utf-8') as f:
        html_content = f.read()
    soup = BeautifulSoup(html_content, 'html.parser')

    tbody = soup.find('tbody')

    # Skip the first two rows of the table (header rows, presumably).
    trs = tbody.find_all('tr')[2:]

    titles = []
    for tr in trs:
        # NOTE: the original also tested `tr is None` and pre-checked
        # `tr.find('a')`; both were redundant — find_all never yields None,
        # and a row without matching <a> tags simply contributes nothing.
        for a_tag in tr.find_all('a'):
            # `a_tag and` is kept deliberately: bs4 Tag truthiness depends
            # on the tag having contents, so empty anchors are skipped.
            if a_tag and 'title' in a_tag.attrs:
                titles.append(a_tag['title'])
    return titles
| |
|
if __name__ == '__main__':
    # Titles of the German "exzellente Artikel" list, deduplicated.
    titles_exzellent = get_titles('exzellent.txt')

    titles = list(set(titles_exzellent))
    with open('titles.txt', 'w', encoding='utf-8') as f:
        f.writelines(title + '\n' for title in titles)

    dataset = load_dataset("graelo/wikipedia", "20230901.de", split="train")

    # Set for O(1) membership tests — the lambda runs once per example
    # across 64 worker processes; list membership was O(len(titles)) each.
    title_set = set(titles)
    dataset = dataset.filter(lambda example: example['title'] in title_set, num_proc=64)

    # Coverage report must run BEFORE the map below removes the 'title' column.
    used_title = [example['title'] for example in dataset]
    used_set = set(used_title)
    non_used_title = [title for title in titles if title not in used_set]
    print(f'Number of used titles: {len(used_title)}')
    print(f'Number of non used titles: {len(non_used_title)}')
    print(non_used_title[:20])

    # BUG FIX: Dataset.map is not in-place — the original discarded the
    # returned dataset, so the "# <title>" heading was never prepended and
    # the 'title' column was never removed before pushing.
    dataset = dataset.map(
        lambda x: {'text': f"# {x['title']}\n\n{x['text']}"},
        remove_columns=['title'],
        num_proc=64,
    )

    dataset.push_to_hub("LeoLM/wiki_de_exzellent", private=True)
| |
|