| import re |
| import subprocess |
| from itertools import combinations |
| from pathlib import Path |
|
|
| import pandas as pd |
|
|
# Keep prompting until the user supplies a path that is an existing directory.
CSV_DIR = None
while CSV_DIR is None:
    raw = input("Enter the full path to the CSV directory: ").strip()
    if raw:
        candidate = Path(raw)
        if candidate.is_dir():
            CSV_DIR = candidate
|
|
# Destination file for the full textual report (mirrors everything printed).
OUTPUT_FILE = Path("./schema_comparison_output.txt")
# Number of rows sampled from each CSV when inferring its schema.
N_ROWS = 1000
# Number of example values shown per mismatched column.
SAMPLE_ROWS = 5


# Every line emitted via output() is also collected here for the report file.
output_lines = []
|
|
|
|
def output(*args, **kwargs):
    """Print *args* like print() would, and record the joined line.

    The rendered line is appended to the module-level ``output_lines`` so the
    whole report can be written to disk at the end of the run. ``kwargs`` are
    forwarded to print() only (they do not affect what is recorded).
    """
    text = " ".join(map(str, args))
    print(text, **kwargs)
    output_lines.append(text)
|
|
|
|
| def extract_date(filename: str) -> str: |
| match = re.match(r"^(.*?_\d{2}_\d{2}_\d{4})", filename) |
| return match.group(1) if match else None |
|
|
|
|
def get_row_count(filepath: Path) -> int:
    """Return the number of data rows in *filepath* (newline count minus one).

    The original implementation shelled out to ``wc -l`` without checking the
    return code: on Windows, or whenever ``wc`` failed, the empty stdout made
    the int() parse blow up with an unrelated error.  This version counts
    newline bytes directly in buffered 1 MiB chunks, which matches ``wc -l``
    semantics exactly, is portable, and raises a plain OSError on bad paths.

    Args:
        filepath: Path to a CSV file whose first line is a header.

    Returns:
        Newline count minus 1 (the header), i.e. the number of data rows.
    """
    newlines = 0
    with open(filepath, "rb") as f:
        # iter() with a sentinel reads until EOF without a manual while-loop.
        for chunk in iter(lambda: f.read(1 << 20), b""):
            newlines += chunk.count(b"\n")
    return newlines - 1
|
|
|
|
def load_schema(filepath: Path, nrows: int = N_ROWS) -> tuple[dict, pd.DataFrame]:
    """Read up to *nrows* rows of a CSV and infer its schema.

    Args:
        filepath: CSV file to sample.
        nrows: Maximum number of rows to read (defaults to module N_ROWS).

    Returns:
        A ``({column_name: dtype}, sampled_dataframe)`` pair.  If pandas cannot
        read the file for any reason, a warning is emitted via output() and an
        empty schema with an empty DataFrame is returned instead of raising.
    """
    try:
        sample = pd.read_csv(filepath, nrows=nrows)
    except Exception as exc:
        output(f"Warning: Could not read {filepath.name}: {exc}")
        return {}, pd.DataFrame()
    return dict(sample.dtypes), sample
|
|
|
|
def compare_schemas(
    schema1: dict,
    df1: pd.DataFrame,
    name1: str,
    schema2: dict,
    df2: pd.DataFrame,
    name2: str,
):
    """Compare two column->dtype schemas.

    The DataFrame and name arguments are accepted for signature compatibility
    with the call sites but are not consulted here; only the schema dicts are.
    dtypes are compared by their string form so numpy dtype objects and plain
    strings inter-compare consistently.

    Returns:
        ``(True, None, None, None)`` when the schemas agree, otherwise
        ``(False, missing_in_1, missing_in_2, dtype_mismatches)`` where the
        two sets hold column names absent from file 1 / file 2 respectively
        and the dict maps shared columns to their ``(dtype1, dtype2)`` pair.
    """
    columns1, columns2 = set(schema1), set(schema2)

    absent_from_1 = columns2 - columns1
    absent_from_2 = columns1 - columns2
    mismatched = {
        col: (schema1[col], schema2[col])
        for col in columns1 & columns2
        if str(schema1[col]) != str(schema2[col])
    }

    if absent_from_1 or absent_from_2 or mismatched:
        return False, absent_from_1, absent_from_2, mismatched
    return True, None, None, None
|
|
|
|
def print_mismatch(
    name1: str,
    name2: str,
    missing_in_1: set,
    missing_in_2: set,
    dtype_mismatches: dict,
    df1: pd.DataFrame,
    df2: pd.DataFrame,
):
    """Report column and dtype differences between two files via output().

    Emits a banner, any columns unique to either file, and — when dtypes
    differ — the mismatching pairs plus the first SAMPLE_ROWS values of each
    affected column from both DataFrames.
    """
    banner = "=" * 60
    output(f"\n{banner}")
    output(f"MISMATCH: {name1} <-> {name2}")
    output(f"{banner}")

    if missing_in_1:
        output(f"\nColumns in {name2} but not in {name1}: {missing_in_1}")
    if missing_in_2:
        output(f"\nColumns in {name1} but not in {name2}: {missing_in_2}")

    if not dtype_mismatches:
        return

    output("\n--- DTYPE MISMATCHES ---")
    for col, (dt1, dt2) in dtype_mismatches.items():
        output(f" {col}: {name1}={dt1}, {name2}={dt2}")

    output("\n--- SAMPLE DATA FOR MISMATCHED COLUMNS ---")
    for col in dtype_mismatches:
        output(f"\n--- {col} ---")
        output(f"{name1} ({SAMPLE_ROWS} rows):")
        output(df1[col].head(SAMPLE_ROWS).to_list())
        output(f"{name2} ({SAMPLE_ROWS} rows):")
        output(df2[col].head(SAMPLE_ROWS).to_list())
|
|
|
|
def main():
    """Group CSVs in CSV_DIR by filename date stamp, compare schemas per group,
    print per-file statistics, and write the full report to OUTPUT_FILE.

    Changes from the original: the no-op ``global output_lines`` declaration is
    dropped (the list is only mutated via append, never rebound), and the
    report is written with an explicit UTF-8 encoding so non-ASCII column
    names cannot crash write_text() under a non-UTF-8 platform default.
    """
    csv_files = list(CSV_DIR.glob("*.csv"))

    # Bucket files by the date embedded in their name; files without a
    # recognizable date stamp are ignored entirely.
    date_groups = {}
    for f in csv_files:
        date = extract_date(f.name)
        if date:
            date_groups.setdefault(date, []).append(f)

    # name -> {"row_count": ..., "col_count": ...}, filled for every file seen.
    file_stats = {}

    for date, files in sorted(date_groups.items()):
        if len(files) < 2:
            # Nothing to compare against, but still collect stats for the
            # summary table at the end.
            output(f"\nSkipping {date} (only 1 file: {files[0].name})")
            for f in files:
                schema, df = load_schema(f)
                file_stats[f.name] = {
                    "row_count": get_row_count(f),
                    "col_count": len(df.columns),
                }
            continue

        output(f"\n{'#' * 60}")
        output(f"DATE GROUP: {date}")

        row_counts = {}
        schemas = {}
        dataframes = {}

        for f in files:
            row_counts[f.name] = get_row_count(f)
            file_stats[f.name] = {
                "row_count": row_counts[f.name],
                # Overwritten below once the schema sample is loaded.
                "col_count": 0,
            }
            schema, df = load_schema(f)
            schemas[f.name] = schema
            dataframes[f.name] = df
            file_stats[f.name]["col_count"] = len(df.columns)

        file_info = ", ".join(f"{f.name} ({row_counts[f.name]:,} rows)" for f in files)
        output(f"Files: {file_info}")
        output(f"{'#' * 60}")

        # Pairwise comparison across every file in the group.
        has_mismatch = False
        for f1, f2 in combinations(files, 2):
            name1, name2 = f1.name, f2.name
            is_match, missing_in_1, missing_in_2, dtype_mismatches = compare_schemas(
                schemas[name1],
                dataframes[name1],
                name1,
                schemas[name2],
                dataframes[name2],
                name2,
            )

            if not is_match:
                has_mismatch = True
                print_mismatch(
                    name1,
                    name2,
                    missing_in_1,
                    missing_in_2,
                    dtype_mismatches,
                    dataframes[name1],
                    dataframes[name2],
                )

        if not has_mismatch:
            output("\nAll files in this group have matching schemas.")

    # Summary table covering every file, regardless of its group's size.
    output(f"\n{'#' * 60}")
    output("FILE STATISTICS")
    output(f"{'#' * 60}")
    output(f"\n{'File':<60} {'Rows':>12} {'Columns':>10}")
    output("-" * 82)
    grand_total_rows = 0
    for name, stats in sorted(file_stats.items()):
        output(f"{name:<60} {stats['row_count']:>12,} {stats['col_count']:>10}")
        grand_total_rows += stats["row_count"]
    output("-" * 82)
    output(f"{'TOTAL':<60} {grand_total_rows:>12,}")

    OUTPUT_FILE.write_text("\n".join(output_lines), encoding="utf-8")
    print(f"\nOutput saved to {OUTPUT_FILE}")
|
|
|
|
| if __name__ == "__main__": |
| main() |
|
|