Datasets:

Modalities:
Text
Formats:
json
ArXiv:
License:
EuroVoc / scripts / analyse.py
apapagi's picture
Upload folder using huggingface_hub
437e44a verified
import os
from datasets import load_dataset
import matplotlib.pyplot as plt
from collections import Counter, defaultdict
# Load every compressed JSONL shard under files/ as a single training split.
dataset = load_dataset(
    'json',
    data_files='files/*.jsonl.gz',
    split='train',
    encoding='utf-8',
)

# Report the corpus size up front (expected: 5820634 documents).
print(len(dataset))
# Make sure the output directory for all generated figures exists.
os.makedirs("images", exist_ok=True)

# Tally how many documents each language contributes.
cnt = Counter(row["lang"] for row in dataset)

# Bar chart of the full per-language document counts.
labels, counts = zip(*cnt.items())
plt.figure(figsize=(10, 4))
plt.bar(labels, counts)
plt.xticks(rotation=90)
plt.ylabel("# documents")
plt.title("Number of documents per language")
plt.tight_layout()
plt.savefig("images/nb_documents.png")
plt.close()
# Split languages into two groups around a document-count threshold so the
# combined figure stays readable (high-volume bars would otherwise dwarf the
# low-volume ones on a shared axis).
threshold = 10000
high = {k: v for k, v in cnt.items() if v > threshold}
low = {k: v for k, v in cnt.items() if v <= threshold}

# One figure with 2 vertically stacked subplots: high-volume on top,
# low-volume below.
fig, axes = plt.subplots(nrows=2, ncols=1, figsize=(12, 8))

# High-frequency languages
if high:
    langs, freqs = zip(*high.items())
    axes[0].bar(langs, freqs)
    # Fix tick positions before assigning labels: calling set_xticklabels()
    # alone pairs a FixedFormatter with an AutoLocator, which matplotlib
    # warns about and which can mislabel the bars.
    axes[0].set_xticks(range(len(langs)))
    axes[0].set_xticklabels(langs, rotation=90)
    axes[0].set_ylabel("# documents")
    axes[0].set_title("Languages with more than 10,000 documents")

# Low-frequency languages
if low:
    langs, freqs = zip(*low.items())
    axes[1].bar(langs, freqs)
    axes[1].set_xticks(range(len(langs)))
    axes[1].set_xticklabels(langs, rotation=90)
    axes[1].set_ylabel("# documents")
    axes[1].set_title("Languages with 10,000 or fewer documents")

plt.tight_layout()
plt.savefig("images/nb_documents_combined.png")
plt.close()
# Distribution of document lengths for the low-volume languages.
low_languages = set(low)
text_lengths_low = defaultdict(list)

# Single pass over the corpus, recording one length per matching row.
# A missing/empty text counts as length 0 (len() would fail on None).
for row in dataset:
    lang = row["lang"]
    if lang in low_languages:
        text = row["text"]
        text_lengths_low[lang].append(len(text) if text else 0)

# One box per language; extreme outliers hidden so the boxes stay legible.
langs = list(text_lengths_low)
data = [text_lengths_low[name] for name in langs]

plt.figure(figsize=(12, 6))
plt.boxplot(data, labels=langs, showfliers=False)
plt.xticks(rotation=90)
plt.ylabel("Document length")
plt.title("Document length per language")
plt.tight_layout()
plt.savefig("images/boxplot_low.png")
plt.close()
# Same length analysis, now for the high-frequency languages.
high_languages = set(high)
text_lengths_high = defaultdict(list)

# Single pass over the corpus; empty/missing text recorded as length 0.
for row in dataset:
    lang = row["lang"]
    if lang in high_languages:
        text = row["text"]
        text_lengths_high[lang].append(len(text) if text else 0)

# One box per language; extreme outliers hidden so the boxes stay legible.
langs = list(text_lengths_high)
data = [text_lengths_high[name] for name in langs]

plt.figure(figsize=(12, 6))
plt.boxplot(data, labels=langs, showfliers=False)
plt.xticks(rotation=90)
plt.ylabel("Document length")
plt.title("Document length per language")
plt.tight_layout()
plt.savefig("images/boxplot_high.png")
plt.close()
# Report the single longest document across both groups. default=0 (plus the
# per-language guard) keeps this from raising ValueError when a threshold
# bucket collected no lengths at all — the original max() over an empty
# .values() would crash.
overall_max_high = max((max(lengths) for lengths in text_lengths_high.values() if lengths), default=0)
overall_max_low = max((max(lengths) for lengths in text_lengths_low.values() if lengths), default=0)
print("Maximum text length overall:", max(overall_max_high, overall_max_low))  # 99,996,139
# Gather the vocabulary of EuroVoc labels (names and ids) seen anywhere in
# the corpus. .get() with a default tolerates rows missing either field.
all_concepts = set()
all_concept_ids = set()
for row in dataset:
    all_concepts.update(row.get("eurovoc_concepts", []))
    all_concept_ids.update(row.get("eurovoc_concepts_ids", []))

print("Number of unique eurovoc_concepts:", len(all_concepts))  # 7097
print("Number of unique eurovoc_concepts_ids:", len(all_concept_ids))  # 7049