import os
from datasets import load_dataset
import matplotlib.pyplot as plt
from collections import Counter, defaultdict
# Load the JSONL corpus (gzip-compressed shards) as a single "train" split.
dataset = load_dataset(
    "json",
    data_files="files/*.jsonl.gz",
    split="train",
    encoding="utf-8",
)

# Report the corpus size.
print(len(dataset))  # 5820634

# Ensure the output directory for all figures exists.
os.makedirs("images", exist_ok=True)
# Tally how many documents exist per language code in one pass.
cnt = Counter(row["lang"] for row in dataset)
# Bar chart of the overall per-language document counts.
languages = list(cnt.keys())
counts = [cnt[lang] for lang in languages]
plt.figure(figsize=(10, 4))
plt.bar(languages, counts)
plt.xticks(rotation=90)
plt.ylabel("# documents")
plt.title("Number of documents per language")
plt.tight_layout()
plt.savefig("images/nb_documents.png")
plt.close()
# Partition languages into high-/low-frequency groups around a document-count
# threshold, then plot each group on its own subplot of a shared figure.
threshold = 10000
high = {k: v for k, v in cnt.items() if v > threshold}
low = {k: v for k, v in cnt.items() if v <= threshold}

fig, axes = plt.subplots(nrows=2, ncols=1, figsize=(12, 8))

# High-frequency languages (top subplot).
if high:
    langs, freqs = zip(*high.items())
    axes[0].bar(langs, freqs)
    # tick_params rotates the existing categorical tick labels in place.
    # The original set_xticklabels(langs, rotation=90) call without a prior
    # set_xticks raises matplotlib's "FixedFormatter should only be used
    # together with FixedLocator" warning and can mislabel ticks.
    axes[0].tick_params(axis="x", labelrotation=90)
    axes[0].set_ylabel("# documents")
    axes[0].set_title("Languages with more than 10,000 documents")

# Low-frequency languages (bottom subplot).
if low:
    langs, freqs = zip(*low.items())
    axes[1].bar(langs, freqs)
    axes[1].tick_params(axis="x", labelrotation=90)
    axes[1].set_ylabel("# documents")
    axes[1].set_title("Languages with 10,000 or fewer documents")

plt.tight_layout()
plt.savefig("images/nb_documents_combined.png")
plt.close()
def _plot_length_boxplot(lengths_by_lang, out_path):
    """Save a per-language boxplot of document lengths to *out_path*.

    lengths_by_lang maps language code -> list of document lengths.
    Outliers are hidden so the boxes remain readable despite the very
    long tail of document sizes.
    """
    langs = list(lengths_by_lang.keys())
    data = [lengths_by_lang[lang] for lang in langs]
    plt.figure(figsize=(12, 6))
    # NOTE(review): `labels=` was renamed `tick_labels` in matplotlib 3.9;
    # kept as-is for compatibility with the version this script targets.
    plt.boxplot(data, labels=langs, showfliers=False)  # hide extreme outliers for clarity
    plt.xticks(rotation=90)
    plt.ylabel("Document length")
    plt.title("Document length per language")
    plt.tight_layout()
    plt.savefig(out_path)
    plt.close()

# Collect text lengths for BOTH groups in a single pass over the ~5.8M-row
# dataset. The original code iterated the full dataset once per group (and
# its second pass was mislabeled "low-frequency" in a comment).
low_languages = set(low.keys())
high_languages = set(high.keys())
text_lengths_low = defaultdict(list)
text_lengths_high = defaultdict(list)
for row in dataset:
    lang = row["lang"]
    # Empty/None text counts as length 0.
    text_len = len(row["text"]) if row["text"] else 0
    if lang in low_languages:
        text_lengths_low[lang].append(text_len)
    elif lang in high_languages:
        text_lengths_high[lang].append(text_len)

_plot_length_boxplot(text_lengths_low, "images/boxplot_low.png")
_plot_length_boxplot(text_lengths_high, "images/boxplot_high.png")
# Report the longest document observed in either group. `default=0` guards
# against a ValueError when a group is empty (e.g. no language fell on one
# side of the threshold); the original max() would raise in that case.
overall_max_high = max((max(lengths) for lengths in text_lengths_high.values()), default=0)
overall_max_low = max((max(lengths) for lengths in text_lengths_low.values()), default=0)
print("Maximum text length overall:", max(overall_max_high, overall_max_low))  # 99,996,139
# Gather the unique EuroVoc concept labels and concept ids across the corpus.
all_concepts = set()
all_concept_ids = set()
for row in dataset:
    # Rows missing either field contribute nothing (default empty list).
    all_concepts.update(row.get("eurovoc_concepts", []))
    all_concept_ids.update(row.get("eurovoc_concepts_ids", []))
print("Number of unique eurovoc_concepts:", len(all_concepts))  # 7097
print("Number of unique eurovoc_concepts_ids:", len(all_concept_ids))  # 7049