|
|
|
|
|
import argparse |
|
|
import json |
|
|
import os |
|
|
import random |
|
|
from typing import Dict, Any, Optional, Tuple |
|
|
|
|
|
import pandas as pd |
|
|
import numpy as np |
|
|
from tqdm import tqdm |
|
|
|
|
|
|
|
|
def try_read_csv(path: str) -> pd.DataFrame:
    """Read a CSV file, trying UTF-8 first and falling back to latin-1.

    Args:
        path: Filesystem path to the CSV file.

    Returns:
        The parsed DataFrame.

    Raises:
        Any non-decoding pandas error (e.g. FileNotFoundError,
        ParserError) propagates unchanged.
    """
    print(f"📂 Reading: {path}")
    try:
        df = pd.read_csv(path, encoding="utf-8")
        print(" ✅ UTF-8")
        return df
    except UnicodeDecodeError:
        # BUG FIX: the previous `except Exception` also caught unrelated
        # failures (missing file, malformed CSV) and pointlessly retried
        # them with latin-1; only a decoding failure warrants the retry.
        df = pd.read_csv(path, encoding="latin-1")
        print(" ✅ latin-1")
        return df
|
|
|
|
|
|
|
|
def canonical_country(name: Any) -> Any:
    """Normalize a raw country value to its canonical catalog spelling.

    Args:
        name: Raw country value; may be NaN, None, or a blank string.

    Returns:
        The canonical country name as a string, or ``np.nan`` when the
        input is missing or whitespace-only.
    """
    # Treat NaN / None / whitespace-only values as missing.
    if pd.isna(name) or not str(name).strip():
        return np.nan
    n = str(name).strip().lower()
    # Common aliases mapped to the spelling used by the city catalog.
    synonyms = {
        "usa": "United States", "us": "United States", "u.s.": "United States",
        "uk": "United Kingdom", "u.k.": "United Kingdom",
        "south korea": "Korea, South", "republic of korea": "Korea, South",
        "russian federation": "Russia",
        "uae": "United Arab Emirates", "u.a.e.": "United Arab Emirates",
    }
    # Unknown names fall back to the stripped original, case preserved.
    # (Removed: dead commented-out per-row logging left as a bare
    # triple-quoted string expression.)
    return synonyms.get(n, str(name).strip())
|
|
|
|
|
|
|
|
def load_city_catalog(json_path: str) -> Dict[str, list]:
    """Load the country → cities catalog, dropping unusable entries.

    Args:
        json_path: Path to a JSON object mapping country names to lists
            of city names.

    Returns:
        Only the entries whose value is a non-empty list.
    """
    print(f"📚 Loading catalog: {json_path}")
    with open(json_path, "r", encoding="utf-8") as f:
        raw = json.load(f)

    # Keep only countries that actually offer at least one city.
    usable: Dict[str, list] = {}
    for country, cities in raw.items():
        if isinstance(cities, list) and len(cities) > 0:
            usable[country] = cities

    print(f" ✅ {len(usable)} countries with cities")
    return usable
|
|
|
|
|
|
|
|
def pick_random_city_country(catalog: Dict[str, list], rng: random.Random) -> tuple[str, str]:
    """Draw a random (city, country) pair from the catalog.

    A country is chosen uniformly first, then one of its cities; the
    city is returned stripped of surrounding whitespace.
    """
    countries = list(catalog)
    chosen_country = rng.choice(countries)
    chosen_city = rng.choice(catalog[chosen_country])
    return chosen_city.strip(), chosen_country
|
|
|
|
|
|
|
|
def repair_location_in_place(df: pd.DataFrame, catalog_path: str) -> Tuple[pd.DataFrame, Dict[str, Any]]:
    """Ensure every row's 'Location' holds a valid 'city, country' pair.

    Rows are split on the LAST comma into city/country. The country is
    canonicalized and validated against the catalog; the city is
    validated (case-insensitively) against that country's city list.
    Invalid or missing parts are replaced with catalog values drawn from
    a fixed-seed RNG so runs are reproducible.

    Args:
        df: Input frame; must contain a 'Location' column.
        catalog_path: Path to the country → cities JSON catalog.

    Returns:
        A (repaired copy of df, actions summary dict) tuple. The summary
        keys are: catalog_path, country_fixed, city_fixed,
        both_missing_filled, total_rows.
    """
    print("🔧 Repairing Location → always ensure 'city, country' pair...")
    out = df.copy()
    catalog = load_city_catalog(catalog_path)
    rng = random.Random(42)  # fixed seed → reproducible repairs

    # Split on the last comma so city names containing commas survive.
    s = out["Location"].astype(str).str.strip()
    parts = s.str.rsplit(',', n=1, expand=True)
    if parts.shape[1] > 1:
        city_raw = parts.iloc[:, 0].str.strip().replace("", np.nan)
        country_raw = parts.iloc[:, 1].str.strip().replace("", np.nan)
    else:
        # No row contained a comma → no usable city/country split at all.
        city_raw = pd.Series([np.nan] * len(out))
        country_raw = pd.Series([np.nan] * len(out))

    country_canonical = country_raw.apply(canonical_country)

    # Lowercase city sets built lazily, once per country (the previous
    # version rebuilt the set on every row: O(rows × cities)).
    city_sets: Dict[str, set] = {}

    final_city = []
    final_country = []
    city_fixed = 0
    country_fixed = 0
    both_missing_fixed = 0

    print(" 🔄 Processing rows...")
    for idx in tqdm(range(len(out)), ncols=80):
        ctry = country_canonical.iloc[idx]
        city = city_raw.iloc[idx]
        country_ok = pd.notna(ctry) and ctry in catalog

        if not country_ok:
            # Country is missing or not in the catalog → replace the
            # whole pair with a random valid one.
            if pd.isna(ctry) and pd.isna(city):
                both_missing_fixed += 1
            else:
                # BUG FIX: rows with a missing-but-present-city country
                # were previously not counted at all.
                country_fixed += 1
            new_city, new_country = pick_random_city_country(catalog, rng)
            final_city.append(new_city)
            final_country.append(new_country)
            continue

        # Country is valid; validate (or repair) the city within it.
        # BUG FIX: the previous version discarded a VALID country when
        # only the city was missing, replacing both with a random pair.
        if ctry not in city_sets:
            city_sets[ctry] = {c.strip().lower() for c in catalog[ctry]}

        if pd.notna(city) and str(city).strip().lower() in city_sets[ctry]:
            final_city.append(str(city).strip())
        else:
            final_city.append(rng.choice(catalog[ctry]).strip())
            city_fixed += 1
        final_country.append(ctry)

    new_location = [f"{c}, {co}" for c, co in zip(final_city, final_country)]
    out["Location"] = new_location

    actions = {
        "catalog_path": catalog_path,
        "country_fixed": int(country_fixed + both_missing_fixed),
        "city_fixed": int(city_fixed + both_missing_fixed),
        "both_missing_filled": int(both_missing_fixed),
        "total_rows": len(out),
    }
    print(f" ✅ Done. City fixed: {city_fixed}, Country fixed: {country_fixed}, Both missing → random pair: {both_missing_fixed}")
    return out, actions
|
|
|
|
|
|
|
|
def main():
    """CLI entry point: read the CSV, repair Location, save, and report."""
    print("🚀 Starting CSV Repair (ensure city + country in Location)")
    parser = argparse.ArgumentParser()
    parser.add_argument("-i", "--input", default="HR_Data_Clean_2020_2025.csv")
    parser.add_argument("-o", "--cleaned-csv", default="cleaned.csv")
    parser.add_argument("--catalog", default="report/provinces_by_country.json")
    args = parser.parse_args()

    frame = try_read_csv(args.input)
    print(f" 📊 Rows: {len(frame):,}, Columns: {list(frame.columns)}")

    # The repair step is meaningless without the target column.
    if "Location" not in frame.columns:
        raise ValueError("❌ 'Location' column not found")

    cleaned, actions = repair_location_in_place(frame, catalog_path=args.catalog)

    print(f"💾 Saving cleaned CSV → {args.cleaned_csv}")
    cleaned.to_csv(args.cleaned_csv, index=False)

    # Persist a machine-readable summary next to the catalog.
    report_path = "report/location_repair_report.json"
    os.makedirs("report", exist_ok=True)
    with open(report_path, "w", encoding="utf-8") as fh:
        json.dump({"input": args.input, "actions": actions}, fh, indent=2, ensure_ascii=False)

    banner = "═" * 50
    print("\n" + banner)
    print("✅ REPAIR SUMMARY")
    print(f" Input: {args.input}")
    print(f" Output: {args.cleaned_csv}")
    print(f" Country fixed: {actions['country_fixed']:,}")
    print(f" City fixed: {actions['city_fixed']:,}")
    print(f" Both missing → pair: {actions['both_missing_filled']:,}")
    print(f" Report: {report_path}")
    print(banner)
|
|
|
|
|
|
|
|
# Run the repair pipeline only when executed as a script, so the module
# can be imported (e.g. for testing) without side effects.
if __name__ == "__main__":


    main()
|
|
|