Datasets:
Tasks:
Text Classification
Modalities:
Text
Formats:
json
Languages:
Vietnamese
Size:
1K - 10K
DOI:
License:
File size: 2,774 Bytes
21fc01c ff2289d 0a474d0 ff2289d 0a474d0 21fc01c ff2289d 21fc01c ff2289d 21fc01c ff2289d 21fc01c ff2289d 0a474d0 21fc01c ff2289d 21fc01c ff2289d 21fc01c ff2289d 21fc01c ff2289d 21fc01c ff2289d 21fc01c ff2289d 21fc01c ff2289d 21fc01c ff2289d 21fc01c ff2289d 21fc01c ff2289d 21fc01c ff2289d 21fc01c |
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 |
#!/usr/bin/env python3
"""Generate statistics for the UTS2017_Bank dataset."""
import json
import statistics as stats
from collections import Counter
from pathlib import Path
def load_jsonl(file_path):
    """Load a JSONL file and return its records as a list.

    Args:
        file_path: str or Path to a UTF-8 encoded JSON-lines file.

    Returns:
        List of parsed JSON objects, one per non-empty line.

    Blank lines (e.g. a trailing newline at end of file) are skipped;
    the original implementation passed the empty string to json.loads,
    which raises json.JSONDecodeError.
    """
    items = []
    with open(file_path, encoding="utf-8") as f:
        for line in f:
            line = line.strip()
            if line:  # tolerate blank/trailing lines in the file
                items.append(json.loads(line))
    return items
def text_stats(items):
    """Summarize word counts over the "text" field of *items*.

    Args:
        items: iterable of dicts, each with a "text" string key.

    Returns:
        Dict with "avg", "min", "max", and "median" word counts.
    """
    counts = [len(record["text"].split()) for record in items]
    summary = {"avg": stats.mean(counts)}
    summary["min"] = min(counts)
    summary["max"] = max(counts)
    summary["median"] = stats.median(counts)
    return summary
def print_subset_stats(subset_name, emoji):
    """Print statistics for a dataset subset.

    Reads data/<subset_name>/train.jsonl and data/<subset_name>/test.jsonl,
    then prints the example count, word-length summary, and a
    subset-specific breakdown (labels / sentiments / multi-aspect count)
    for each split.
    """
    print(f"\n{emoji} {subset_name.upper()} SUBSET")
    print("-" * 40)
    for split in ("train", "test"):
        records = load_jsonl(Path(f"data/{subset_name}/{split}.jsonl"))
        print(f"\n{split.capitalize()}: {len(records)} examples")
        # Word-length summary for this split.
        words = text_stats(records)
        print(f" Words: avg={words['avg']:.1f}, range={words['min']}-{words['max']}")
        # Subset-specific breakdown.
        if subset_name == "classification":
            label_counts = Counter(rec["label"] for rec in records)
            top = ", ".join(f"{k}({v})" for k, v in label_counts.most_common(3))
            print(f" Top labels: {top}")
        elif subset_name == "sentiment":
            sentiment_counts = Counter(rec["sentiment"] for rec in records)
            listed = ", ".join(f"{k}({v})" for k, v in sentiment_counts.most_common())
            print(f" Sentiments: {listed}")
        elif subset_name == "aspect_sentiment":
            multi = sum(1 for rec in records if len(rec["aspects"]) > 1)
            print(f" Multi-aspect: {multi}/{len(records)} examples")
def main():
    """Generate and display statistics for every dataset subset."""
    print("📊 UTS2017_Bank Dataset Statistics")
    print("=" * 50)
    # Overall counts are taken from the classification subset's splits.
    n_train = len(load_jsonl("data/classification/train.jsonl"))
    n_test = len(load_jsonl("data/classification/test.jsonl"))
    print(f"\n📈 OVERALL: {n_train + n_test} examples ({n_train} train, {n_test} test)")
    # Per-subset breakdowns.
    for subset, icon in (
        ("classification", "🏷️"),
        ("sentiment", "😊"),
        ("aspect_sentiment", "🎯"),
    ):
        print_subset_stats(subset, icon)
    # Show how each configuration is loaded with the datasets library.
    print("\n💡 USAGE:")
    for config in ("classification", "sentiment", "aspect_sentiment"):
        print(f" load_dataset('undertheseanlp/UTS2017_Bank', '{config}')")


if __name__ == "__main__":
    main()
|