# uploads/synthgen/check_json_validity.py
# NOTE: the following provenance lines were scraped from the Hugging Face file
# listing and are kept here as comments so the module remains valid Python:
#   EdwardSJ151's picture — "Upload folder using huggingface_hub" — commit 3af75d1 verified
from datasets import load_dataset, Dataset, DatasetDict
from collections import Counter
from typing import Any, Dict, List, Optional, Tuple
import json
EXPECTED_COLUMNS = ["messages"]
EXPECTED_MESSAGE_KEYS = {"role", "content"}
ALLOWED_ROLES = {"user", "assistant", "system"}
def check_columns(split_name: str, ds) -> List[str]:
issues: List[str] = []
cols = list(ds.column_names)
if cols != EXPECTED_COLUMNS:
issues.append(
f"[{split_name}] Unexpected columns {cols}, expected {EXPECTED_COLUMNS}"
)
return issues
def validate_message_structure(
msg: Any,
split_name: str,
idx: int,
msg_idx: int,
) -> List[str]:
issues: List[str] = []
prefix = f"[{split_name}][example {idx}][message {msg_idx}]"
if not isinstance(msg, dict):
issues.append(f"{prefix} Message is not a dict. Got type={type(msg)}")
return issues
keys = set(msg.keys())
missing = EXPECTED_MESSAGE_KEYS - keys
extra = keys - EXPECTED_MESSAGE_KEYS
if missing:
issues.append(f"{prefix} Missing keys: {sorted(missing)}")
if extra:
issues.append(f"{prefix} Extra keys: {sorted(extra)}")
role = msg.get("role", None)
if not isinstance(role, str):
issues.append(f"{prefix} 'role' is not str. Got type={type(role)} value={role!r}")
else:
if role not in ALLOWED_ROLES:
issues.append(f"{prefix} 'role' has unexpected value: {role!r}")
content = msg.get("content", None)
if not isinstance(content, str):
issues.append(
f"{prefix} 'content' is not str. Got type={type(content)} value={content!r}"
)
else:
if content.strip() == "":
issues.append(f"{prefix} 'content' is empty or whitespace only")
return issues
def validate_conversation_level(
messages: Any,
split_name: str,
idx: int,
enforce_turn_pattern: bool = True,
) -> List[str]:
issues: List[str] = []
prefix = f"[{split_name}][example {idx}]"
if not isinstance(messages, list):
issues.append(f"{prefix} 'messages' is not a list. Got type={type(messages)}")
return issues
if len(messages) == 0:
issues.append(f"{prefix} 'messages' is an empty list")
return issues
if not enforce_turn_pattern:
return issues
roles = [m.get("role") for m in messages if isinstance(m, dict)]
first_non_system_role: Optional[str] = None
for r in roles:
if r != "system":
first_non_system_role = r
break
if first_non_system_role is None:
issues.append(f"{prefix} All roles are 'system', no user or assistant")
elif first_non_system_role != "user":
issues.append(
f"{prefix} First non system role is {first_non_system_role!r}, expected 'user'"
)
if roles and roles[-1] != "assistant":
issues.append(
f"{prefix} Last role is {roles[-1]!r}, expected 'assistant' for training"
)
roles_wo_system = [r for r in roles if r != "system"]
for i in range(len(roles_wo_system) - 1):
if roles_wo_system[i] == roles_wo_system[i + 1]:
issues.append(
f"{prefix} Non alternating roles around positions {i} and {i+1}: "
f"{roles_wo_system[i]!r}, {roles_wo_system[i+1]!r}"
)
break
return issues
def validate_split(
split_name: str,
ds,
max_issue_examples: int = 50,
enforce_turn_pattern: bool = True,
) -> Dict[str, Any]:
issues: List[str] = []
role_counts: Counter = Counter()
num_examples_with_issues = 0
issues.extend(check_columns(split_name, ds))
for idx, example in enumerate(ds):
example_issues: List[str] = []
if not isinstance(example, dict):
example_issues.append(
f"[{split_name}][example {idx}] Example is not a dict. Got type={type(example)}"
)
if example_issues:
issues.extend(example_issues)
num_examples_with_issues += 1
continue
if "messages" not in example:
example_issues.append(
f"[{split_name}][example {idx}] Missing key 'messages' in example keys={list(example.keys())}"
)
if example_issues:
issues.extend(example_issues)
num_examples_with_issues += 1
continue
messages = example["messages"]
example_issues.extend(
validate_conversation_level(
messages,
split_name,
idx,
enforce_turn_pattern=enforce_turn_pattern,
)
)
if isinstance(messages, list):
for msg_idx, msg in enumerate(messages):
example_issues.extend(
validate_message_structure(
msg,
split_name,
idx,
msg_idx,
)
)
if isinstance(msg, dict):
role = msg.get("role", None)
if isinstance(role, str):
role_counts[role] += 1
if example_issues:
num_examples_with_issues += 1
if num_examples_with_issues <= max_issue_examples:
issues.extend(example_issues)
return {
"issues": issues,
"role_counts": role_counts,
"num_examples": len(ds),
"num_examples_with_issues": num_examples_with_issues,
}
def try_fix_example(example: Dict[str, Any]) -> Optional[Dict[str, Any]]:
"""
Try generic fixes. If still structurally bad, return None so that it can be dropped.
Fixes:
- ensure messages is a list and non empty
- drop non dict messages
- ensure role and content exist and are strings
- drop messages with empty content
- restrict roles to allowed set with simple fallback
- enforce first non system role is user
- enforce last role is assistant
- enforce alternation ignoring system
"""
if not isinstance(example, dict):
return None
if "messages" not in example:
return None
messages = example["messages"]
if not isinstance(messages, list) or len(messages) == 0:
return None
new_messages: List[Dict[str, Any]] = []
repaired = False
for msg in messages:
if not isinstance(msg, dict):
repaired = True
continue
msg = dict(msg)
if "role" not in msg or not isinstance(msg["role"], str):
repaired = True
if not new_messages:
msg["role"] = "user"
else:
prev_role = new_messages[-1].get("role", "user")
msg["role"] = "assistant" if prev_role != "assistant" else "user"
if "content" not in msg or not isinstance(msg["content"], str):
repaired = True
msg["content"] = str(msg.get("content", ""))
msg["content"] = msg["content"].strip()
if msg["content"] == "":
repaired = True
continue
role = msg["role"]
if role not in ALLOWED_ROLES:
repaired = True
lower = role.lower()
if lower in ALLOWED_ROLES:
msg["role"] = lower
else:
msg["role"] = "user"
new_messages.append(msg)
if not new_messages:
return None
roles = [m["role"] for m in new_messages]
non_sys_indices = [i for i, r in enumerate(roles) if r != "system"]
if not non_sys_indices:
return None
first_ns_idx = non_sys_indices[0]
if new_messages[first_ns_idx]["role"] != "user":
repaired = True
new_messages[first_ns_idx]["role"] = "user"
if new_messages[-1]["role"] != "assistant":
repaired = True
new_messages[-1]["role"] = "assistant"
last_ns_role = None
for m in new_messages:
if m["role"] == "system":
continue
if last_ns_role is None:
last_ns_role = m["role"]
continue
if m["role"] == last_ns_role:
repaired = True
m["role"] = "assistant" if last_ns_role != "assistant" else "user"
last_ns_role = m["role"]
if not new_messages:
return None
fixed_example = dict(example)
fixed_example["messages"] = new_messages
return fixed_example
def clean_split(
split_name: str,
ds,
enforce_turn_pattern: bool = True,
) -> Tuple[Dataset, int, int]:
"""
Returns:
- cleaned Dataset
- number of fixed examples
- number of dropped examples
"""
cleaned_examples: List[Dict[str, Any]] = []
fixed_count = 0
dropped_count = 0
for idx, example in enumerate(ds):
original_example = example
fixed_example = try_fix_example(original_example)
if fixed_example is None:
dropped_count += 1
continue
tmp_ds = Dataset.from_list([fixed_example])
result = validate_split(
split_name=f"{split_name}_tmp",
ds=tmp_ds,
max_issue_examples=0,
enforce_turn_pattern=enforce_turn_pattern,
)
if result["num_examples_with_issues"] > 0:
dropped_count += 1
continue
if fixed_example != original_example:
fixed_count += 1
cleaned_examples.append(fixed_example)
cleaned_ds = Dataset.from_list(cleaned_examples)
return cleaned_ds, fixed_count, dropped_count
def print_validation_summary(split_name: str, result: Dict[str, Any]) -> None:
total = result["num_examples"]
num_with_issues = result["num_examples_with_issues"]
pct = (num_with_issues / total * 100.0) if total > 0 else 0.0
print(f"Split: {split_name}")
print(f" Total examples: {total}")
print(f" Examples with issues: {num_with_issues} ({pct:.2f}%)")
print(f" Role counts: {result['role_counts']}")
if result["issues"]:
print(f" Showing up to {len(result['issues'])} logged issues for {split_name}:")
for issue in result["issues"]:
print(" -", issue)
else:
print(f" No issues found in split {split_name}.")
def main():
dataset = load_dataset("cemig-ceia/CemigConvo_v0")
original_summary: Dict[str, Any] = {}
print("Initial validation")
for split_name, ds in dataset.items():
print(f"Validating split: {split_name} (num_rows={len(ds)})")
result = validate_split(split_name, ds)
print_validation_summary(split_name, result)
original_summary[split_name] = {
"num_examples": result["num_examples"],
"num_examples_with_issues": result["num_examples_with_issues"],
"role_counts": dict(result["role_counts"]),
}
cleaned_splits: Dict[str, Dataset] = {}
cleaning_stats: Dict[str, Any] = {}
print("\nCleaning dataset")
for split_name, ds in dataset.items():
print(f"Cleaning split: {split_name}")
cleaned_ds, fixed_count, dropped_count = clean_split(split_name, ds)
cleaned_splits[split_name] = cleaned_ds
cleaning_stats[split_name] = {
"fixed_examples": fixed_count,
"dropped_examples": dropped_count,
"original_examples": len(ds),
"cleaned_examples": len(cleaned_ds),
}
print(
f" Fixed examples: {fixed_count}, dropped examples: {dropped_count}, "
f"final size: {len(cleaned_ds)} (from {len(ds)})"
)
cleaned_dataset = DatasetDict(cleaned_splits)
print("\nValidation after cleaning")
cleaned_summary: Dict[str, Any] = {}
for split_name, ds in cleaned_dataset.items():
print(f"Validating cleaned split: {split_name} (num_rows={len(ds)})")
result = validate_split(split_name, ds)
print_validation_summary(split_name, result)
cleaned_summary[split_name] = {
"num_examples": result["num_examples"],
"num_examples_with_issues": result["num_examples_with_issues"],
"role_counts": dict(result["role_counts"]),
}
summary = {
"original": original_summary,
"cleaned": cleaned_summary,
"cleaning_stats": cleaning_stats,
}
with open(
"cemig_format_validation_and_cleaning_summary.json",
"w",
encoding="utf_8",
) as f:
json.dump(summary, f, indent=2, ensure_ascii=False)
print("\nWriting cleaned splits to JSONL")
for split_name, ds in cleaned_dataset.items():
out_path = f"cemig_clean_{split_name}.jsonl"
print(f" Writing cleaned split to JSONL: {out_path}")
ds.to_json(
out_path,
lines=True,
force_ascii=False,
)
cleaned_dataset.save_to_disk("cemig_clean")
print("\nSummary written to cemig_format_validation_and_cleaning_summary.json")
print("Cleaned dataset saved to 'cemig_clean' and JSONL files per split")
if __name__ == "__main__":
main()