import gc
import logging
import time
from pathlib import Path

import numpy as np
import pandas as pd

from gomez_cloud.utils.date_utils import iterate_days

logging.basicConfig(
    level=logging.INFO,
    format="%(asctime)s | %(levelname)s | %(name)s | %(message)s",
)
log = logging.getLogger(__name__)


def _haversine_m(lat1, lon1, lat2, lon2):
    """Vectorised haversine distance in meters."""
    R = 6371000.0  # mean Earth radius
    phi1 = np.radians(lat1)
    phi2 = np.radians(lat2)
    dphi = np.radians(lat2 - lat1)
    dl = np.radians(lon2 - lon1)
    a = np.sin(dphi / 2) ** 2 + np.cos(phi1) * np.cos(phi2) * np.sin(dl / 2) ** 2
    return 2 * R * np.arcsin(np.sqrt(a))
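# Sanity check for _haversine_m (illustrative, not part of the pipeline): one
# degree of longitude at the equator is 2*pi*R/360 ≈ 111,195 m, so
# _haversine_m(0.0, 0.0, 0.0, 1.0) should return ~111195.0.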


def _meters_to_deg(lat_deg, dx_m, dy_m):
    """Convert local meter offsets (east=dx, north=dy) to (dlat, dlon) degrees at the given latitude."""
    lat_rad = np.radians(lat_deg)
    m_per_deg_lat = 111_320.0
    m_per_deg_lon = 111_320.0 * np.cos(lat_rad)  # a degree of longitude shrinks toward the poles
    dlat = dy_m / m_per_deg_lat
    dlon = dx_m / m_per_deg_lon
    return dlat, dlon
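# Worked example for _meters_to_deg (illustrative): at latitude 45 deg, a
# metre of easting is 1 / (111_320 * cos(45 deg)) ≈ 1.27e-5 deg of longitude,
# so _meters_to_deg(45.0, 1000.0, 0.0) returns roughly (0.0, 0.0127).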


def jitter_points(df, lat_col, lon_col, radius_m=50, seed=42, shuffle=True):
    """
    Add uniform random jitter within a circle of radius_m meters.

    Returns a new DataFrame with columns <lat_col>_jit, <lon_col>_jit.
    """
    rng = np.random.default_rng(seed)
    n = len(df)

    # Sample uniformly over the disc: r = R * sqrt(u) keeps the point density
    # constant in area (a plain r = R * u would cluster points at the centre).
    u = rng.random(n)
    r = radius_m * np.sqrt(u)
    theta = rng.random(n) * 2 * np.pi
    dx = r * np.cos(theta)
    dy = r * np.sin(theta)

    lat = df[lat_col].to_numpy(dtype=float, copy=False)
    dlat, dlon = _meters_to_deg(lat, dx, dy)

    out = df.copy()
    out[f"{lat_col}_jit"] = lat + dlat
    out[f"{lon_col}_jit"] = df[lon_col].to_numpy(dtype=float, copy=False) + dlon

    if shuffle:
        out = out.sample(frac=1.0, random_state=seed).reset_index(drop=True)
    return out
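# Minimal usage sketch for jitter_points (hypothetical data, not pipeline
# names): every jittered point should stay within radius_m of its original.
#
#   demo = pd.DataFrame({"latitude": [51.50, 51.51], "longitude": [-0.12, -0.13]})
#   jit = jitter_points(demo, "latitude", "longitude", radius_m=50, shuffle=False)
#   d = _haversine_m(demo["latitude"], demo["longitude"],
#                    jit["latitude_jit"], jit["longitude_jit"])
#   assert (d <= 50 + 1e-6).all()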


def jitter_signal(series, sigma_db=2.0, seed=42, clip=(-120, -20)):
    """
    Add small Gaussian noise (dBm) to signal, robust to strings like '-083'.
    """
    rng = np.random.default_rng(seed)
    sig = pd.to_numeric(series, errors="coerce")  # unparseable values become NaN
    noise = rng.normal(0.0, sigma_db, size=len(sig))
    return (sig + noise).clip(clip[0], clip[1])
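# Usage sketch for jitter_signal (illustrative): string-coded readings survive,
# unparseable ones become NaN, and everything is clipped to the dBm range.
#
#   jitter_signal(pd.Series(["-083", "-90", "n/a"]), sigma_db=2.0)
#   # -> approx [-83 + noise, -90 + noise, NaN], clipped to [-120, -20]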


def displacement_stats(orig_lat, orig_lon, jit_lat, jit_lon):
    """Summary statistics, in meters, of how far each point was displaced."""
    d = _haversine_m(orig_lat, orig_lon, jit_lat, jit_lon)
    return {
        "n": d.size,
        "mean_m": float(np.nanmean(d)),
        "p50_m": float(np.nanpercentile(d, 50)),
        "p90_m": float(np.nanpercentile(d, 90)),
        "p95_m": float(np.nanpercentile(d, 95)),
        "max_m": float(np.nanmax(d)),
    }
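# Back-of-envelope expectation (a math fact, not measured output): for uniform
# jitter on a disc of radius R, the mean displacement is 2R/3 and the median is
# R/sqrt(2), so radius_m=50 should yield mean_m ≈ 33.3 and p50_m ≈ 35.4.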


def js_distance_2d(orig_lat, orig_lon, jit_lat, jit_lon, bins=100, eps=1e-12):
    """
    Jensen–Shannon distance between 2D (lat, lon) distributions via shared 2D histograms.

    Uses base-2 logs so the returned sqrt(JS divergence) lies in [0, 1]:
    0 = identical, 1 = completely disjoint.
    """
    lat_all = np.concatenate([orig_lat, jit_lat])
    lon_all = np.concatenate([orig_lon, jit_lon])
    lat_edges = np.linspace(lat_all.min(), lat_all.max(), bins + 1)
    lon_edges = np.linspace(lon_all.min(), lon_all.max(), bins + 1)

    H1, _, _ = np.histogram2d(orig_lat, orig_lon, bins=[lat_edges, lon_edges])
    H2, _, _ = np.histogram2d(jit_lat, jit_lon, bins=[lat_edges, lon_edges])

    # Smooth with eps so empty bins never produce log(0), then normalise.
    P = H1.ravel() + eps
    P /= P.sum()
    Q = H2.ravel() + eps
    Q /= Q.sum()
    M = 0.5 * (P + Q)

    def kl(p, q):
        # Base-2 KL divergence; natural log would cap the JS divergence at
        # ln(2) and break the documented [0, 1] range.
        return np.sum(p * np.log2(p / q))

    js_div = 0.5 * kl(P, M) + 0.5 * kl(Q, M)
    return float(np.sqrt(js_div))
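# Sanity check for js_distance_2d (illustrative): identical samples score ~0,
# fully separated clusters score ~1.
#
#   a = np.random.default_rng(0).normal(size=1000)
#   js_distance_2d(a, a, a, a)              # -> 0.0
#   js_distance_2d(a, a, a + 100, a + 100)  # -> ~1.0 (no shared bins)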


def ks_1d_marginals(orig, jit):
    """Kolmogorov–Smirnov D statistic for two 1D samples (plain numpy implementation)."""
    x = np.sort(np.asarray(orig))
    y = np.sort(np.asarray(jit))

    # Evaluate both empirical CDFs on the union of observed values and take
    # the largest vertical gap between them (np.unique also sorts).
    grid = np.unique(np.concatenate([x, y]))
    Fx = np.searchsorted(x, grid, side="right") / x.size
    Fy = np.searchsorted(y, grid, side="right") / y.size
    return float(np.max(np.abs(Fx - Fy)))
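# Usage sketch for ks_1d_marginals (illustrative): D stays small for samples
# from the same distribution and approaches 1 for disjoint ones.
#
#   rng = np.random.default_rng(0)
#   ks_1d_marginals(rng.normal(size=500), rng.normal(size=500))      # small (~0.05)
#   ks_1d_marginals(rng.normal(size=500), rng.normal(10, size=500))  # ~1.0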


def main() -> None:
    start_all = time.time()

    partitions = iterate_days(first="2025-03-01", last="2025-06-30")

    output_dir = Path(...)
    output_dir.mkdir(parents=True, exist_ok=True)

    base_uri = ...
    storage_opts = {"token": "cloud"}

    # Rows accumulate per day and are flushed to CSV every 14 processed days;
    # the "week_*" names are kept even though each chunk covers two weeks.
    week_buffers = []
    week_idx = 1
    day_idx = 0
    total_rows_written = 0
    total_days_processed = 0

    for day in partitions:
        t0 = time.time()
        uri = f"{base_uri}/day={day}"

        try:
            df = pd.read_parquet(uri, storage_options=storage_opts)
        except FileNotFoundError:
            log.warning("Partition not found (skipping): %s", uri)
            continue
        except Exception as e:
            log.exception("Failed to read partition %s: %s", uri, e)
            continue

        log.info("Loaded %s rows x %s cols from %s", len(df), len(df.columns), uri)

        try:
            pings_jit = jitter_points(
                df,
                lat_col="latitude",
                lon_col="longitude",
                radius_m=20,
                seed=456,
                shuffle=True,
            )

            pings_jit["signal_level_jit"] = jitter_signal(
                df["signal_level"], sigma_db=2.0, seed=456
            )

            # Swap the jittered columns in under the original names.
            pings_jit = (
                pings_jit.drop(columns=["latitude", "longitude", "signal_level"])
                .rename(
                    columns={
                        "latitude_jit": "latitude",
                        "longitude_jit": "longitude",
                        "signal_level_jit": "signal_level",
                    }
                )
            )

            # Null out every measurement type except the one category we keep.
            if "measurement_type_name" in pings_jit.columns:
                mtype = pings_jit["measurement_type_name"]
                pings_jit["measurement_type_name"] = mtype.where(
                    mtype == "Full Service Loss (>120s)", None
                )
            else:
                log.warning("Column 'measurement_type_name' missing in partition %s", day)

            log.info("Transformed day=%s → %s rows", day, len(pings_jit))

            week_buffers.append(pings_jit)
            total_days_processed += 1
            day_idx += 1

        except Exception as e:
            log.exception("Transform failed for day=%s: %s", day, e)
            continue
        finally:
            # Release the raw partition frame. The finally clause runs even on
            # `continue`, so deleting df here (and only here) avoids a second
            # `del df` that would raise UnboundLocalError on the error path.
            del df
            gc.collect()

        # Flush the accumulated frames to one CSV part every 14 processed days.
        if day_idx % 14 == 0:
            try:
                week_df = pd.concat(week_buffers, ignore_index=True)
                out_path = output_dir / f"np_extract_part_{week_idx}.csv"
                week_df.to_csv(out_path, index=False)
                total_rows_written += len(week_df)
                log.info(
                    "Wrote part %d: %s rows to %s (elapsed %.2fs)",
                    week_idx, len(week_df), out_path, time.time() - t0,
                )
                del week_df
            finally:
                week_buffers.clear()
                week_idx += 1
                gc.collect()

        log.info("Processed day=%s in %.2fs", day, time.time() - t0)

    # Flush whatever remains after the last full 14-day chunk, using the same
    # output path pattern as the in-loop writer so the log matches the file.
    if week_buffers:
        week_df = pd.concat(week_buffers, ignore_index=True)
        out_path = output_dir / f"np_extract_part_{week_idx}.csv"
        week_df.to_csv(out_path, index=False)
        total_rows_written += len(week_df)
        log.info(
            "Wrote FINAL part %d: %s rows to %s",
            week_idx, len(week_df), out_path,
        )
        week_buffers.clear()
        del week_df
        gc.collect()

    log.info(
        "Done. Days processed: %d | Rows written: %d | Total time: %.2fs",
        total_days_processed, total_rows_written, time.time() - start_all,
    )


if __name__ == "__main__":
    main()