| | import os |
| | from datasets import load_dataset |
| | import matplotlib.pyplot as plt |
| | from collections import Counter, defaultdict |
| |
|
| | |
# Load every compressed JSONL shard under files/ as a single "train" split.
data_files_pattern = "files/*.jsonl.gz"
dataset = load_dataset(
    "json",
    data_files=data_files_pattern,
    split="train",
    encoding="utf-8",
)

# Total number of documents in the corpus.
print(len(dataset))
| |
|
| | |
# Output directory for all generated figures (idempotent).
os.makedirs("images", exist_ok=True)

# Per-language document frequency; Counter consumes the generator directly,
# which is equivalent to incrementing cnt[row["lang"]] row by row.
cnt = Counter(row["lang"] for row in dataset)
| |
|
| | |
# Bar chart: number of documents per language, over ALL languages.
langs = list(cnt.keys())
freqs = [cnt[lang] for lang in langs]

fig, ax = plt.subplots(figsize=(10, 4))
ax.bar(langs, freqs)
ax.tick_params(axis="x", labelrotation=90)  # language codes are long; rotate
ax.set_ylabel("# documents")
ax.set_title("Number of documents per language")
fig.tight_layout()
fig.savefig("images/nb_documents.png")
plt.close(fig)
| |
|
| | |
# Partition languages into high-/low-volume groups around the threshold.
# Every language lands in exactly one of the two dicts.
threshold = 10000
high = {}
low = {}
for lang, n_docs in cnt.items():
    bucket = high if n_docs > threshold else low
    bucket[lang] = n_docs
| |
|
| | |
# Stacked figure: high-volume languages on top, low-volume below.
fig, axes = plt.subplots(nrows=2, ncols=1, figsize=(12, 8))

if high:
    langs, freqs = zip(*high.items())
    axes[0].bar(langs, freqs)
    # FIX: set_xticklabels() without a prior set_xticks() emits a UserWarning
    # and can silently mislabel ticks in recent matplotlib. bar() already set
    # the categorical labels; we only need to rotate them.
    axes[0].tick_params(axis="x", labelrotation=90)
    axes[0].set_ylabel("# documents")
    axes[0].set_title("Languages with more than 10,000 documents")

if low:
    langs, freqs = zip(*low.items())
    axes[1].bar(langs, freqs)
    axes[1].tick_params(axis="x", labelrotation=90)  # same fix as above
    axes[1].set_ylabel("# documents")
    axes[1].set_title("Languages with 10,000 or fewer documents")

plt.tight_layout()
plt.savefig("images/nb_documents_combined.png")
plt.close()
| |
|
| | |
# Collect document lengths per language for both groups in a SINGLE pass over
# the dataset (the original code made one full pass per group, doubling the
# iteration cost on a large corpus).
low_languages = set(low)
high_languages = set(high)

text_lengths_low = defaultdict(list)
text_lengths_high = defaultdict(list)

for row in dataset:
    lang = row["lang"]
    # Guard: treat a missing/empty/None text field as length 0.
    text_len = len(row["text"]) if row["text"] else 0
    # high/low partition cnt's keys, so each row belongs to exactly one group.
    if lang in low_languages:
        text_lengths_low[lang].append(text_len)
    elif lang in high_languages:
        text_lengths_high[lang].append(text_len)


def _save_length_boxplot(lengths_by_lang, out_path):
    """Save a per-language boxplot of document lengths (outliers hidden) to out_path."""
    langs = list(lengths_by_lang.keys())
    data = [lengths_by_lang[lang] for lang in langs]
    plt.figure(figsize=(12, 6))
    # NOTE(review): matplotlib >= 3.9 renames boxplot's ``labels`` parameter to
    # ``tick_labels`` — switch once the minimum supported version allows.
    plt.boxplot(data, labels=langs, showfliers=False)
    plt.xticks(rotation=90)
    plt.ylabel("Document length")
    plt.title("Document length per language")
    plt.tight_layout()
    plt.savefig(out_path)
    plt.close()


_save_length_boxplot(text_lengths_low, "images/boxplot_low.png")
_save_length_boxplot(text_lengths_high, "images/boxplot_high.png")
| |
|
# Report the longest document across both groups.
# FIX: the original called max() separately on each group, which raises
# ValueError when a group is empty (e.g. no language exceeds the threshold).
# max(..., default=0) handles empty groups gracefully.
all_length_lists = list(text_lengths_high.values()) + list(text_lengths_low.values())
overall_max = max((max(lengths) for lengths in all_length_lists), default=0)
print("Maximum text length overall:", overall_max)
| |
|
| | |
# Count distinct EuroVoc concepts and concept ids across the whole corpus.
all_concepts = set()
all_concept_ids = set()

for row in dataset:
    # Rows without the field contribute nothing (default empty list).
    all_concepts.update(row.get("eurovoc_concepts", []))
    all_concept_ids.update(row.get("eurovoc_concepts_ids", []))

print("Number of unique eurovoc_concepts:", len(all_concepts))
print("Number of unique eurovoc_concepts_ids:", len(all_concept_ids))