Datasets:
Tasks:
Text Classification
Modalities:
Text
Formats:
json
Languages:
Vietnamese
Size:
1K - 10K
DOI:
License:
Vu Anh
committed on
Commit
·
4624208
1
Parent(s):
5992786
Update validation script to test HuggingFace Hub loading
Browse files
- Changed to load from 'undertheseanlp/UTS2017_Bank' Hub repository
- Removed local file fallback - focus on Hub validation
- Updated usage instructions to show Hub loading syntax
- Removed redundant try-except handling
- validate_dataset.py +8 -18
validate_dataset.py
CHANGED
|
@@ -164,38 +164,28 @@ def validate_data_content():
|
|
| 164 |
|
| 165 |
|
| 166 |
def validate_huggingface_loading():
|
| 167 |
-
"""Validate that the dataset can be loaded
|
| 168 |
|
| 169 |
print("\n" + "=" * 60)
|
| 170 |
-
print("VALIDATING HUGGINGFACE LOADING")
|
| 171 |
print("=" * 60)
|
| 172 |
|
| 173 |
-
# Try loading each configuration
|
| 174 |
configs = ["classification", "sentiment", "aspect_sentiment"]
|
| 175 |
|
| 176 |
for config in configs:
|
| 177 |
try:
|
| 178 |
-
print(f"\n🤗 Loading config: {config}")
|
| 179 |
-
|
| 180 |
-
|
| 181 |
-
|
| 182 |
-
"json",
|
| 183 |
-
data_files={
|
| 184 |
-
"train": f"data/{config}/train.jsonl",
|
| 185 |
-
"test": f"data/{config}/test.jsonl"
|
| 186 |
-
}
|
| 187 |
-
)
|
| 188 |
-
|
| 189 |
-
print(" ✓ Successfully loaded")
|
| 190 |
print(f" ✓ Train: {len(dataset['train'])} examples")
|
| 191 |
print(f" ✓ Test: {len(dataset['test'])} examples")
|
| 192 |
|
| 193 |
-
# Validate first example
|
| 194 |
sample = dataset["train"][0]
|
| 195 |
print(f" ✓ Sample keys: {list(sample.keys())}")
|
| 196 |
|
| 197 |
except Exception as e:
|
| 198 |
-
print(f" ❌ Failed to load {config}: {e}")
|
| 199 |
|
| 200 |
|
| 201 |
def validate_data_consistency():
|
|
@@ -300,7 +290,7 @@ if __name__ == "__main__":
|
|
| 300 |
print("✅ VALIDATION COMPLETE")
|
| 301 |
print("=" * 60)
|
| 302 |
print("🎉 Dataset appears to be properly created and formatted!")
|
| 303 |
-
print("💡 You can now use: load_dataset('
|
| 304 |
|
| 305 |
else:
|
| 306 |
print("\n❌ VALIDATION FAILED")
|
|
|
|
| 164 |
|
| 165 |
|
| 166 |
def validate_huggingface_loading():
|
| 167 |
+
"""Validate that the dataset can be loaded from HuggingFace Hub."""
|
| 168 |
|
| 169 |
print("\n" + "=" * 60)
|
| 170 |
+
print("VALIDATING HUGGINGFACE HUB LOADING")
|
| 171 |
print("=" * 60)
|
| 172 |
|
|
|
|
| 173 |
configs = ["classification", "sentiment", "aspect_sentiment"]
|
| 174 |
|
| 175 |
for config in configs:
|
| 176 |
try:
|
| 177 |
+
print(f"\n🤗 Loading from Hub - config: {config}")
|
| 178 |
+
dataset = load_dataset("undertheseanlp/UTS2017_Bank", config)
|
| 179 |
+
|
| 180 |
+
print(" ✓ Successfully loaded from HuggingFace Hub")
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 181 |
print(f" ✓ Train: {len(dataset['train'])} examples")
|
| 182 |
print(f" ✓ Test: {len(dataset['test'])} examples")
|
| 183 |
|
|
|
|
| 184 |
sample = dataset["train"][0]
|
| 185 |
print(f" ✓ Sample keys: {list(sample.keys())}")
|
| 186 |
|
| 187 |
except Exception as e:
|
| 188 |
+
print(f" ❌ Failed to load {config} from Hub: {e}")
|
| 189 |
|
| 190 |
|
| 191 |
def validate_data_consistency():
|
|
|
|
| 290 |
print("✅ VALIDATION COMPLETE")
|
| 291 |
print("=" * 60)
|
| 292 |
print("🎉 Dataset appears to be properly created and formatted!")
|
| 293 |
+
print("💡 You can now use: load_dataset('undertheseanlp/UTS2017_Bank', config_name)")
|
| 294 |
|
| 295 |
else:
|
| 296 |
print("\n❌ VALIDATION FAILED")
|