| """Regenerate ML artifacts inside Docker to ensure pickle compatibility.""" |
|
|
import json
import os
import sys
import warnings

import joblib
import numpy as np
import pandas as pd
|
|
warnings.filterwarnings("ignore")
np.random.seed(42)
|
|
# Make the project root importable whether this runs as a script or via exec().
_script_dir = os.path.dirname(os.path.abspath(__file__)) if "__file__" in globals() else os.getcwd()
sys.path.insert(0, _script_dir)
from app.services.generators import GENERATORS
from app.services.feature_engine import engineer_features
from app.config import FEATURE_COLS
|
|
from sklearn.preprocessing import RobustScaler
from sklearn.cluster import KMeans
from sklearn.decomposition import PCA
from sklearn.metrics import silhouette_score
from xgboost import XGBClassifier
import umap
|
|
ARTIFACTS_DIR = os.path.join(_script_dir, "app", "artifacts")
os.makedirs(ARTIFACTS_DIR, exist_ok=True)
|
|
print("Generating dataset...")
counts = {
    "normal_salaried_employee": 600, "normal_freelancer": 350, "normal_student": 450,
    "normal_retiree": 350, "normal_small_business": 300, "normal_high_net_worth": 200,
    "normal_young_professional": 400, "normal_family_household": 350,
    "mule_rapid_passthrough": 130, "mule_structuring_smurfing": 100,
    "mule_funnel_collector": 90, "mule_dormant_burst": 110,
    "mule_recruit_escalation": 120, "mule_round_trip": 100,
    "mule_crypto_cashout": 120, "mule_layering_chain": 110,
    "mule_micro_structuring": 130, "mule_ghost_payroll": 140,
    "mule_onboarding_burst": 120, "mule_device_mule": 110,
}
all_records = []
for btype, count in counts.items():
    print(f" {btype}: {count}")
    all_records += GENERATORS[btype](count)
|
|
df = pd.DataFrame(all_records)
df["timestamp"] = pd.to_datetime(df["timestamp"])
df = df.sort_values("timestamp").reset_index(drop=True)
df["day_of_week"] = df["timestamp"].dt.dayofweek
df["hour"] = df["timestamp"].dt.hour
df["is_weekend"] = df["day_of_week"].isin([5, 6]).astype(int)
df["category"] = df["label"].apply(lambda x: "mule" if x.startswith("mule_") else "normal")
print(f" Total: {len(df):,} txns, {df['account_id'].nunique():,} accounts")
|
|
print("Engineering features...")
features_df = engineer_features(df)
label_cat = df.groupby("account_id").agg(label=("label", "first"), category=("category", "first"))
features_df = features_df.join(label_cat)
feature_cols = [c for c in FEATURE_COLS if c in features_df.columns]
X = features_df[feature_cols].fillna(0).values
|
|
print("Fitting scaler...")
scaler = RobustScaler()
X_scaled = scaler.fit_transform(X)
X_scaled = np.nan_to_num(X_scaled, nan=0.0, posinf=0.0, neginf=0.0)
joblib.dump(scaler, os.path.join(ARTIFACTS_DIR, "scaler.joblib"))
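# Consumers are expected to restore this with joblib inside the same image, e.g.
# (illustrative):
#   scaler = joblib.load(os.path.join(ARTIFACTS_DIR, "scaler.joblib"))
#   X_new_scaled = scaler.transform(X_new)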
|
|
print("Fitting PCA...")
pca2 = PCA(n_components=2)
pca2.fit(X_scaled)
joblib.dump(pca2, os.path.join(ARTIFACTS_DIR, "pca2.joblib"))
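# Diagnostic: how much variance the 2-D linear projection retains.
print(f" PCA explained variance: {pca2.explained_variance_ratio_.sum():.3f}")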
|
|
print("Fitting UMAP...")
reducer = umap.UMAP(n_components=2, n_neighbors=30, min_dist=0.3, random_state=42)
X_umap = reducer.fit_transform(X_scaled)
joblib.dump(reducer, os.path.join(ARTIFACTS_DIR, "umap_reducer.joblib"))
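# Note: pickled UMAP models are sensitive to the umap-learn/numba versions,
# hence refitting inside the same Docker image that will load this artifact.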
|
|
print("Fitting KMeans...")
K_range = range(2, 16)
sil_scores = []
for k in K_range:
    km = KMeans(n_clusters=k, n_init=10, random_state=42)
    labs = km.fit_predict(X_scaled)
    sil_scores.append(silhouette_score(X_scaled, labs))
best_k = list(K_range)[np.argmax(sil_scores)]
print(f" Best k = {best_k}")
kmeans = KMeans(n_clusters=best_k, n_init=10, random_state=42)
kmeans.fit(X_scaled)
joblib.dump(kmeans, os.path.join(ARTIFACTS_DIR, "kmeans.joblib"))
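# Diagnostic: silhouette of the chosen k (higher is better, max 1.0).
print(f" Silhouette at k={best_k}: {max(sil_scores):.3f}")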
|
|
print("Training XGBoost classifier...")
y_binary = (features_df["category"] == "mule").astype(int).values
classifier = XGBClassifier(
    n_estimators=300, max_depth=5, learning_rate=0.1,
    subsample=0.8, colsample_bytree=0.8,
    # Upweight the rare mule class; guard against division by zero.
    scale_pos_weight=sum(y_binary == 0) / max(sum(y_binary == 1), 1),
    random_state=42, eval_metric="logloss",
)
# Trees are scale-invariant, so the classifier trains on raw (unscaled) features.
classifier.fit(X, y_binary)
print(f" Train accuracy: {classifier.score(X, y_binary):.3f}")
joblib.dump(classifier, os.path.join(ARTIFACTS_DIR, "classifier.joblib"))
# Save the same model under both artifact names expected downstream.
joblib.dump(classifier, os.path.join(ARTIFACTS_DIR, "surrogate_model.joblib"))
|
|
bg_indices = np.random.RandomState(42).choice(len(X), size=min(200, len(X)), replace=False)
np.save(os.path.join(ARTIFACTS_DIR, "shap_background.npy"), X[bg_indices])
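# Illustrative downstream use (assumes the shap package is installed):
#   import shap
#   bg = np.load(os.path.join(ARTIFACTS_DIR, "shap_background.npy"))
#   explainer = shap.TreeExplainer(classifier, data=bg)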
|
|
print("Computing metadata...")
cluster_labels = kmeans.predict(X_scaled)
features_df["cluster"] = cluster_labels
features_df["umap_1"] = X_umap[:, 0]
features_df["umap_2"] = X_umap[:, 1]
|
|
normal_mask = features_df["category"] == "normal"
normal_centroid = X_scaled[normal_mask.values].mean(axis=0).tolist()
normal_distances = np.linalg.norm(X_scaled[normal_mask.values] - np.array(normal_centroid), axis=1)
max_normal_distance = float(np.percentile(normal_distances, 95))
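# Interpretation: accounts whose scaled-feature distance from the normal centroid
# exceeds this 95th-percentile radius can be flagged as anomalous downstream.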
|
|
clusters_meta = {}
for c in range(best_k):
    c_mask = features_df["cluster"] == c
    c_data = features_df[c_mask]
    mule_pct = float((c_data["category"] == "mule").mean())
    clusters_meta[str(c)] = {
        "size": int(c_mask.sum()),
        "mule_pct": round(mule_pct, 4),
        "dominant": "mule" if mule_pct > 0.5 else "normal",
    }
|
|
cluster_metadata = {
    "best_k": best_k,
    "clusters": clusters_meta,
    "normal_centroid_scaled": normal_centroid,
    "max_normal_distance": max_normal_distance,
    "feature_cols": feature_cols,
}
with open(os.path.join(ARTIFACTS_DIR, "cluster_metadata.json"), "w") as f:
    json.dump(cluster_metadata, f, indent=2)
|
|
normal_features = features_df[normal_mask][feature_cols]
baseline = {
    "means": normal_features.mean().to_dict(),
    "stds": normal_features.std().fillna(0).to_dict(),
    "mins": features_df[feature_cols].min().to_dict(),
    "maxs": features_df[feature_cols].max().to_dict(),
}
# json can't serialize numpy scalars, so coerce everything to plain floats.
for key in baseline:
    baseline[key] = {k: float(v) for k, v in baseline[key].items()}
with open(os.path.join(ARTIFACTS_DIR, "baseline_features.json"), "w") as f:
    json.dump(baseline, f, indent=2)
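# The baseline makes z-score style comparisons possible downstream, e.g.
# (illustrative, guarding zero-variance features):
#   z = (value - baseline["means"][col]) / (baseline["stds"][col] or 1.0)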
|
|
umap_points = [
    {"x": round(float(row["umap_1"]), 4), "y": round(float(row["umap_2"]), 4),
     "category": row["category"], "label": row["label"]}
    for _, row in features_df.iterrows()
]
with open(os.path.join(ARTIFACTS_DIR, "existing_umap_coords.json"), "w") as f:
    json.dump(umap_points, f)
|
|
print("Saving CSV...")
df.to_csv(os.path.join(ARTIFACTS_DIR, "synthetic_transactions.csv"), index=False)


print("Done!")
|
|