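"""Flask service that normalizes CMS-style claims CSVs onto a common schema
and flags potential duplicate billing via three rules (A: exact duplicates,
B: too-frequent billing, C: overlapping claim periods), caching both the
normalized data and each rule's output as Parquet files."""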
from flask import Flask, request, jsonify, send_file
import os
import pandas as pd
from dateutil.parser import parse
import tempfile
import io
# -------- CACHE SETUP --------
CACHE_DIR = os.path.join(tempfile.gettempdir(), "cache")
os.makedirs(CACHE_DIR, exist_ok=True)
NORMALIZED_CACHE = os.path.join(CACHE_DIR, "normalized_claims.parquet")
RULE_A_CACHE = os.path.join(CACHE_DIR, "rule_A.parquet")
RULE_B_CACHE = os.path.join(CACHE_DIR, "rule_B.parquet")
RULE_C_CACHE = os.path.join(CACHE_DIR, "rule_C.parquet")
SHORT_WINDOW_DAYS = 7
app = Flask(__name__)
# -------- HELPERS --------
def _safe_parse_date(x):
    """Parse a raw date value into a datetime, returning NaT on failure."""
    if pd.isna(x):
        return pd.NaT
    try:
        return parse(str(x), dayfirst=False, yearfirst=True, fuzzy=True)
    except Exception:
        return pd.NaT
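# e.g. "20080101", "2008-01-01" and "01/02/2008" all parse (the last as
# January 2, since dayfirst=False); unparseable values come back as NaT.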
def _first_existing(df, cols):
    """Return the first column from `cols` present in df (case-insensitive)."""
    df_cols = {c.lower(): c for c in df.columns}
    for c in cols:
        if c.lower() in df_cols:
            return df_cols[c.lower()]
    return None
# -------- NORMALIZATION --------
def normalize_claims(df, source_name="uploaded_file"):
    """Map heterogeneous claim-file columns onto a common schema."""
    bene_col = _first_existing(df, ["DESYNPUF_ID", "BENE_ID", "BENEFICIARY_ID"])
    clm_col = _first_existing(df, ["CLM_ID", "CLAIM_ID"])
    from_col = _first_existing(df, ["CLM_FROM_DT", "SRVC_BGN_DT", "SRVC_BGN_DATE", "LINE_SRVC_DT"])
    thru_col = _first_existing(df, ["CLM_THRU_DT", "SRVC_END_DT", "SRVC_END_DATE"])
    line_dt = _first_existing(df, ["LINE_SRVC_DT"])
    prov_col = _first_existing(df, ["PRF_PHYSN_NPI", "AT_PHYSN_NPI", "OP_PHYSN_NPI", "ORG_NPI_NUM", "PRVDR_NUM", "NPI", "PROVIDER_ID"])
    proc_col = _first_existing(df, ["HCPCS_CD", "CPT_CODE", "PRCDR_CD", "PRCDR1_CD", "REV_CNTR_HCPCS_CD", "PROCEDURE_CODE"])
    type_col = _first_existing(df, ["NCH_CLM_TYPE_CD", "CLM_TYPE", "FILE_TYPE"])
    # index=df.index keeps the constructor valid even if every field falls
    # back to a scalar pd.NA (an all-scalar dict would otherwise raise).
    out = pd.DataFrame({
        "beneficiary_id": df[bene_col] if bene_col else pd.NA,
        "claim_id": df[clm_col] if clm_col else pd.NA,
        "start_date_raw": df[from_col] if from_col else df[line_dt] if line_dt else pd.NA,
        "end_date_raw": df[thru_col] if thru_col else df[line_dt] if line_dt else pd.NA,
        "service_date_raw": df[line_dt] if line_dt else df[from_col] if from_col else pd.NA,
        "provider_id": df[prov_col] if prov_col else pd.NA,
        "procedure_code": df[proc_col] if proc_col else pd.NA,
        "claim_type": df[type_col] if type_col else pd.NA,
        "source_file": source_name,
    }, index=df.index)
    out["start_date"] = out["start_date_raw"].apply(_safe_parse_date)
    out["end_date"] = out["end_date_raw"].apply(_safe_parse_date)
    out["service_date"] = out["service_date_raw"].apply(_safe_parse_date)
    # Single-day claims: a missing end date falls back to the start date.
    out.loc[out["end_date"].isna() & out["start_date"].notna(), "end_date"] = out["start_date"]
    for c in ["beneficiary_id", "claim_id", "provider_id", "procedure_code", "claim_type"]:
        out[c] = out[c].astype(str).str.strip().str.upper()
    # astype(str) renders missing values as "NAN" (NumPy NaN) or "<NA>" (pd.NA),
    # so filter out both string placeholders, not just "NAN".
    out = out[out["beneficiary_id"].notna() & ~out["beneficiary_id"].isin(["NAN", "<NA>"])]
    out = out[out["service_date"].notna() | out["start_date"].notna() | out["end_date"].notna()]
    out["service_date"] = out["service_date"].fillna(out["start_date"])
    return out[[
        "beneficiary_id", "claim_id", "service_date", "start_date", "end_date",
        "provider_id", "procedure_code", "claim_type", "source_file",
    ]].reset_index(drop=True)
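# Example (hypothetical CMS DE-SynPUF-style extract): a frame with columns
# DESYNPUF_ID, CLM_ID, CLM_FROM_DT, CLM_THRU_DT, HCPCS_CD and PRF_PHYSN_NPI
# normalizes to one row per claim line with parsed service/start/end dates.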
# -------- RULES --------
def _force_count_col(df):
    """Rename the size column emitted by groupby().size() to a stable 'count'."""
    return df.rename(columns={df.columns[-1]: "count"})
def rule_A_exact_duplicates(claims):
    """Flag the same (beneficiary, procedure, service date) billed more than once."""
    key = ["beneficiary_id", "procedure_code", "service_date"]
    # as_index=False already yields a flat frame; the original extra
    # reset_index() injected a stray "index" column into the output.
    dup = claims.dropna(subset=key).groupby(key, as_index=False).size()
    dup = _force_count_col(dup)
    dup = dup[dup["count"] > 1]
    return dup.merge(claims, on=key, how="left")
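# Example: three rows with the same beneficiary, HCPCS code and service date
# yield count == 3, and the merge returns all three original claim lines.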
def rule_B_too_frequent_billing(claims, days=7):
    """Flag repeat billing of the same procedure by the same provider within `days`."""
    df = claims.dropna(subset=["beneficiary_id", "provider_id", "procedure_code", "service_date"]).copy()
    df = df.sort_values(["beneficiary_id", "provider_id", "procedure_code", "service_date"])
    df["prev_service_date"] = df.groupby(["beneficiary_id", "provider_id", "procedure_code"])["service_date"].shift(1)
    df["days_since_prev"] = (df["service_date"] - df["prev_service_date"]).dt.days
    return df[df["prev_service_date"].notna() & (df["days_since_prev"] >= 0) & (df["days_since_prev"] <= days)]
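# Example: visits on 2008-01-01 and 2008-01-04 for the same beneficiary,
# provider and code give days_since_prev == 3, so the second row is flagged.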
def rule_C_overlapping_fast(claims):
    """Flag pairs of claims for one beneficiary/procedure with overlapping date ranges."""
    df = claims.dropna(subset=["beneficiary_id", "procedure_code", "start_date", "end_date"]).copy()
    results = []
    for (bene, proc), group in df.groupby(["beneficiary_id", "procedure_code"]):
        group = group.sort_values("start_date")
        active = []  # sweep line: claims whose interval is still open
        for _, row in group.iterrows():
            # Drop claims that ended before this one starts; the rest overlap it.
            active = [a for a in active if a["end_date"] >= row["start_date"]]
            for a in active:
                results.append({
                    "claim_id_a": a["claim_id"], "claim_id_b": row["claim_id"],
                    "beneficiary_id": bene, "procedure_code": proc,
                    "start_date_a": a["start_date"], "end_date_a": a["end_date"],
                    "start_date_b": row["start_date"], "end_date_b": row["end_date"],
                    "provider_id_a": a["provider_id"], "provider_id_b": row["provider_id"],
                })
            active.append(row.to_dict())
    return pd.DataFrame(results)
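# Example: claims 2008-01-01..2008-01-10 and 2008-01-05..2008-01-07 for one
# beneficiary/code overlap, so one (claim_id_a, claim_id_b) pair is emitted.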
# -------- API ROUTES --------
@app.route("/process", methods=["POST"])
def process_claims():
    files = request.files.getlist("files")  # getlist returns [] when the key is absent
    if files:  # new upload: normalize, run the rules, refresh the cache
        frames = []
        for f in files:
            if not f.filename.lower().endswith(".csv"):
                continue
            try:
                df = pd.read_csv(f, dtype=str, low_memory=False, encoding_errors="ignore")
                frames.append(normalize_claims(df, f.filename))
            except Exception as e:
                return jsonify({"error": f"Failed to read {f.filename}: {e}"}), 400
        if not frames:
            return jsonify({"error": "No valid CSV files found"}), 400
        claims = pd.concat(frames, ignore_index=True).drop_duplicates()
        claims["procedure_code"] = claims["procedure_code"].replace(["", "NAN", "<NA>"], pd.NA)
        claims.to_parquet(NORMALIZED_CACHE, index=False)
        # Compute each rule once and cache the results.
        rule_A_exact_duplicates(claims).to_parquet(RULE_A_CACHE, index=False)
        rule_B_too_frequent_billing(claims, days=SHORT_WINDOW_DAYS).to_parquet(RULE_B_CACHE, index=False)
        rule_C_overlapping_fast(claims).to_parquet(RULE_C_CACHE, index=False)
    else:  # no upload: serve the summary from the cache
        if not os.path.exists(NORMALIZED_CACHE):
            return jsonify({"error": "No cached data available. Upload CSVs first."}), 400
        claims = pd.read_parquet(NORMALIZED_CACHE)
    # Summaries from cache
    dup_A = pd.read_parquet(RULE_A_CACHE) if os.path.exists(RULE_A_CACHE) else pd.DataFrame()
    dup_B = pd.read_parquet(RULE_B_CACHE) if os.path.exists(RULE_B_CACHE) else pd.DataFrame()
    dup_C = pd.read_parquet(RULE_C_CACHE) if os.path.exists(RULE_C_CACHE) else pd.DataFrame()
    summary = {
        "Rule A": int(dup_A["claim_id"].nunique()) if not dup_A.empty else 0,
        "Rule B": int(dup_B["claim_id"].nunique()) if not dup_B.empty else 0,
        "Rule C": len(dup_C),
        "Total Claims": len(claims),
    }
    return jsonify(summary)
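# Example request (assuming the app runs on the default port 7860):
#   curl -F "files=@carrier_claims.csv" http://localhost:7860/process
# An empty POST re-reads the cached results instead of recomputing:
#   curl -X POST http://localhost:7860/process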
# -------- DOWNLOAD ENDPOINT --------
@app.route("/download/<rule>", methods=["GET"])
def download(rule):
    mapping = {
        "normalized": NORMALIZED_CACHE,
        "rule_A": RULE_A_CACHE,
        "rule_B": RULE_B_CACHE,
        "rule_C": RULE_C_CACHE,
    }
    if rule not in mapping:
        return jsonify({"error": f"Unknown rule: {rule}"}), 400
    if not os.path.exists(mapping[rule]):
        return jsonify({"error": f"No cached file for {rule}"}), 404
    fmt = request.args.get("format", "csv").lower()
    df = pd.read_parquet(mapping[rule])
    buf = io.BytesIO()  # pandas >= 1.2 can write CSV to a binary buffer
    if fmt == "csv":
        df.to_csv(buf, index=False)
        buf.seek(0)
        return send_file(buf, mimetype="text/csv", as_attachment=True, download_name=f"{rule}.csv")
    elif fmt == "parquet":
        df.to_parquet(buf, index=False)
        buf.seek(0)
        return send_file(buf, mimetype="application/octet-stream", as_attachment=True, download_name=f"{rule}.parquet")
    else:
        return jsonify({"error": "Format must be csv or parquet"}), 400
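# Example (assuming the default port):
#   curl -o rule_A.csv "http://localhost:7860/download/rule_A?format=csv"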
if __name__ == "__main__":
    app.run(host="0.0.0.0", port=7860, debug=False)