Datasets:
Upload 10 files (#1)
Browse files — commit "Upload 10 files" (58e4e812f57cbe57b3d3ded45c9001af6114566e)
Co-authored-by: Tom Freeman <TomFreeman3@users.noreply.huggingface.co>
- .gitattributes +9 -0
- np_extract_part_1.csv +3 -0
- np_extract_part_2.csv +3 -0
- np_extract_part_3.csv +3 -0
- np_extract_part_4.csv +3 -0
- np_extract_part_5.csv +3 -0
- np_extract_part_6.csv +3 -0
- np_extract_part_7.csv +3 -0
- np_extract_part_8.csv +3 -0
- np_extract_part_9.csv +3 -0
- obfs_np.py +248 -0
.gitattributes
CHANGED
|
@@ -57,3 +57,12 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
|
|
| 57 |
# Video files - compressed
|
| 58 |
*.mp4 filter=lfs diff=lfs merge=lfs -text
|
| 59 |
*.webm filter=lfs diff=lfs merge=lfs -text
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 57 |
# Video files - compressed
|
| 58 |
*.mp4 filter=lfs diff=lfs merge=lfs -text
|
| 59 |
*.webm filter=lfs diff=lfs merge=lfs -text
|
| 60 |
+
np_extract_part_1.csv filter=lfs diff=lfs merge=lfs -text
|
| 61 |
+
np_extract_part_2.csv filter=lfs diff=lfs merge=lfs -text
|
| 62 |
+
np_extract_part_3.csv filter=lfs diff=lfs merge=lfs -text
|
| 63 |
+
np_extract_part_4.csv filter=lfs diff=lfs merge=lfs -text
|
| 64 |
+
np_extract_part_5.csv filter=lfs diff=lfs merge=lfs -text
|
| 65 |
+
np_extract_part_6.csv filter=lfs diff=lfs merge=lfs -text
|
| 66 |
+
np_extract_part_7.csv filter=lfs diff=lfs merge=lfs -text
|
| 67 |
+
np_extract_part_8.csv filter=lfs diff=lfs merge=lfs -text
|
| 68 |
+
np_extract_part_9.csv filter=lfs diff=lfs merge=lfs -text
|
np_extract_part_1.csv
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:8641a28bc92616fff4c43f26328176c0729be2fa14fd9b35b5c9dd3f81f348bf
|
| 3 |
+
size 522828567
|
np_extract_part_2.csv
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:e540e7761621dc8daf57ecb1fc1cff66fa379f3ce8c4bf024fab60d021d5f840
|
| 3 |
+
size 515660719
|
np_extract_part_3.csv
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:f0a7fc7e45b5f825560149b561bf4297c3b2fb15c63936d6043a6ca08438408f
|
| 3 |
+
size 512904154
|
np_extract_part_4.csv
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:9562da5857e85317e04df9240f76f6318675e9f0e52cdfa70c2ec3eb31d0f02b
|
| 3 |
+
size 481844515
|
np_extract_part_5.csv
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:3da3eab2b40c8288fb404397cfd33d5144b82adec45db29035f7818fba41d1bf
|
| 3 |
+
size 476040385
|
np_extract_part_6.csv
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:ddc420ff844d1f6a92f27dc77fa482cd103af678f70626359095195af78d4f22
|
| 3 |
+
size 512294051
|
np_extract_part_7.csv
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:7ba56e238855a8cb4a734d83f4c43ed659220a1b426451ebd838fe2277118fe8
|
| 3 |
+
size 502468054
|
np_extract_part_8.csv
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:14cd711447a98e5dc5235c8b14022933838137fdeda05fe3f078f0b47cb76ca6
|
| 3 |
+
size 526161322
|
np_extract_part_9.csv
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:1f9cc9627a0094a33c30aa18428a3f8aec15169e8421c2245a0556028f6eeba0
|
| 3 |
+
size 364490746
|
obfs_np.py
ADDED
|
@@ -0,0 +1,248 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import gc
|
| 2 |
+
import logging
|
| 3 |
+
import time
|
| 4 |
+
from pathlib import Path
|
| 5 |
+
|
| 6 |
+
import numpy as np
|
| 7 |
+
import pandas as pd
|
| 8 |
+
from gomez_cloud.utils.date_utils import iterate_days
|
| 9 |
+
|
| 10 |
+
logging.basicConfig(
|
| 11 |
+
level=logging.INFO,
|
| 12 |
+
format="%(asctime)s | %(levelname)s | %(name)s | %(message)s",
|
| 13 |
+
)
|
| 14 |
+
log = logging.getLogger(__name__)
|
| 15 |
+
|
| 16 |
+
|
| 17 |
+
def _haversine_m(lat1, lon1, lat2, lon2):
|
| 18 |
+
"""Vectorised haversine distance in meters."""
|
| 19 |
+
R = 6371000.0
|
| 20 |
+
phi1 = np.radians(lat1); phi2 = np.radians(lat2)
|
| 21 |
+
dphi = np.radians(lat2 - lat1); dl = np.radians(lon2 - lon1)
|
| 22 |
+
a = np.sin(dphi/2)**2 + np.cos(phi1)*np.cos(phi2)*np.sin(dl/2)**2
|
| 23 |
+
return 2*R*np.arcsin(np.sqrt(a))
|
| 24 |
+
|
| 25 |
+
|
| 26 |
+
def _meters_to_deg(lat_deg, dx_m, dy_m):
|
| 27 |
+
"""Convert local meter offsets (east=dx, north=dy) to lon/lat degrees at given latitude."""
|
| 28 |
+
lat_rad = np.radians(lat_deg)
|
| 29 |
+
m_per_deg_lat = 111_320.0
|
| 30 |
+
m_per_deg_lon = 111_320.0 * np.cos(lat_rad)
|
| 31 |
+
dlat = dy_m / m_per_deg_lat
|
| 32 |
+
dlon = dx_m / m_per_deg_lon
|
| 33 |
+
return dlat, dlon
|
| 34 |
+
|
| 35 |
+
|
| 36 |
+
def jitter_points(df, lat_col, lon_col, radius_m=50, seed=42, shuffle=True):
    """Return a copy of *df* with jittered coordinate columns appended.

    Each point is displaced to a uniformly random location inside a disc of
    *radius_m* meters centred on the original point. Jittered positions are
    stored in ``<lat_col>_jit`` / ``<lon_col>_jit``; the original columns are
    untouched. When *shuffle* is True the rows are also reshuffled
    (reproducibly, via *seed*) to break row-order linkage.
    """
    rng = np.random.default_rng(seed)
    n = len(df)

    # Uniform sampling over a disc: radius = R*sqrt(U), angle = 2*pi*U.
    # The sqrt keeps density uniform in area instead of clustering centrally.
    radii_m = radius_m * np.sqrt(rng.random(n))
    angles = rng.random(n) * 2 * np.pi
    east_m = radii_m * np.cos(angles)
    north_m = radii_m * np.sin(angles)

    lat_vals = df[lat_col].to_numpy(dtype=float, copy=False)
    lon_vals = df[lon_col].to_numpy(dtype=float, copy=False)

    # Meter offsets -> degree offsets at each point's latitude
    # (~111,320 m per degree latitude; longitude scale shrinks with cos).
    meters_per_degree = 111_320.0
    dlat = north_m / meters_per_degree
    dlon = east_m / (meters_per_degree * np.cos(np.radians(lat_vals)))

    result = df.copy()
    result[f"{lat_col}_jit"] = lat_vals + dlat
    result[f"{lon_col}_jit"] = lon_vals + dlon

    if shuffle:
        result = result.sample(frac=1.0, random_state=seed).reset_index(drop=True)
    return result
|
| 60 |
+
|
| 61 |
+
|
| 62 |
+
def jitter_signal(series, sigma_db=2.0, seed=42, clip=(-120, -20)):
    """Perturb a signal-strength column with Gaussian noise (dBm).

    *series* may contain zero-padded strings such as ``'-083'``; values are
    coerced to numbers first (unparseable entries become NaN). Noise with
    standard deviation *sigma_db* is added and the result is clamped to the
    *clip* range. Returns a new numeric Series.
    """
    rng = np.random.default_rng(seed)
    numeric = pd.to_numeric(series, errors="coerce")  # "-083" -> -83
    noisy = numeric + rng.normal(0.0, sigma_db, size=len(numeric))
    lo, hi = clip
    return noisy.clip(lo, hi)
|
| 71 |
+
|
| 72 |
+
|
| 73 |
+
# ----- difference metrics -----
|
| 74 |
+
def displacement_stats(orig_lat, orig_lon, jit_lat, jit_lon):
    """Summarise per-point displacement (meters) between two coordinate sets.

    Computes the great-circle distance from each original point to its
    jittered counterpart and returns a dict with the count plus
    mean / p50 / p90 / p95 / max, ignoring NaNs.
    """
    # Haversine distance in meters, vectorised (inlined for self-containment).
    half_chord = (
        np.sin(np.radians(jit_lat - orig_lat) / 2) ** 2
        + np.cos(np.radians(orig_lat)) * np.cos(np.radians(jit_lat))
        * np.sin(np.radians(jit_lon - orig_lon) / 2) ** 2
    )
    dist_m = 2 * 6371000.0 * np.arcsin(np.sqrt(half_chord))

    return {
        "n": dist_m.size,
        "mean_m": float(np.nanmean(dist_m)),
        "p50_m": float(np.nanpercentile(dist_m, 50)),
        "p90_m": float(np.nanpercentile(dist_m, 90)),
        "p95_m": float(np.nanpercentile(dist_m, 95)),
        "max_m": float(np.nanmax(dist_m)),
    }
|
| 84 |
+
|
| 85 |
+
|
| 86 |
+
def js_distance_2d(orig_lat, orig_lon, jit_lat, jit_lon, bins=100, eps=1e-12):
    """Jensen-Shannon distance between two 2D (lat, lon) distributions.

    Both point sets are binned onto a shared ``bins`` x ``bins`` grid spanning
    their combined extent; the JS divergence between the normalised histograms
    is computed with base-2 logarithms, so the returned distance (the square
    root of the divergence) genuinely lies in [0, 1]:
    0 = identical distributions, 1 = fully disjoint support.

    Bug fix: the previous implementation used natural logarithms, capping the
    distance at sqrt(ln 2) ~= 0.83 and contradicting the documented 0..1 range.
    """
    lat_all = np.concatenate([orig_lat, jit_lat])
    lon_all = np.concatenate([orig_lon, jit_lon])
    lat_edges = np.linspace(lat_all.min(), lat_all.max(), bins + 1)
    lon_edges = np.linspace(lon_all.min(), lon_all.max(), bins + 1)

    H1, _, _ = np.histogram2d(orig_lat, orig_lon, bins=[lat_edges, lon_edges])
    H2, _, _ = np.histogram2d(jit_lat, jit_lon, bins=[lat_edges, lon_edges])

    # eps-smoothing keeps every cell strictly positive so the logs are defined.
    P = H1.ravel() + eps
    P /= P.sum()
    Q = H2.ravel() + eps
    Q /= Q.sum()
    M = 0.5 * (P + Q)

    def _kl2(p, q):
        # Base-2 KL divergence; both inputs are already eps-smoothed.
        return np.sum(p * np.log2(p / q))

    js_div = 0.5 * _kl2(P, M) + 0.5 * _kl2(Q, M)
    # Floating-point noise can nudge the divergence a hair outside [0, 1].
    return float(np.sqrt(np.clip(js_div, 0.0, 1.0)))
|
| 107 |
+
|
| 108 |
+
|
| 109 |
+
def ks_1d_marginals(orig, jit):
    """Two-sample Kolmogorov-Smirnov statistic D for 1D samples.

    Evaluates both empirical CDFs on the union of observed values and
    returns the largest absolute gap: 0 = identical samples, 1 = disjoint.
    Pure-NumPy implementation (statistic only, no p-value).
    """
    sorted_a = np.sort(np.asarray(orig))
    sorted_b = np.sort(np.asarray(jit))
    # Merged support grid where the ECDF gap can attain its maximum.
    support = np.sort(np.unique(np.concatenate([sorted_a, sorted_b])))
    ecdf_a = np.searchsorted(sorted_a, support, side="right") / sorted_a.size
    ecdf_b = np.searchsorted(sorted_b, support, side="right") / sorted_b.size
    return float(np.abs(ecdf_a - ecdf_b).max())
|
| 118 |
+
|
| 119 |
+
|
| 120 |
+
def main() -> None:
    """Obfuscation pipeline: jitter per-day partitions and write CSV parts.

    For each daily partition: read the parquet, jitter location (20 m disc)
    and signal level (2 dB Gaussian), replace the original columns with the
    jittered ones, redact every measurement type except
    'Full Service Loss (>120s)', buffer the result, and flush one CSV to
    *output_dir* per 14 processed days (plus a final partial flush).

    Bug fixes vs. the previous version:
    - the transform `except` branch deleted `df` and then `continue`d into
      the `finally`, which deleted `df` again -> NameError on any failure;
    - the final flush wrote to an unrelated hard-coded personal path and
      logged `out_path`, which is unbound when fewer than 14 days ran
      -> NameError; it now mirrors the periodic flush;
    - the periodic flush's try/finally cleared the buffer and bumped
      `week_idx` even when concat/write failed (and `del week_df` raised if
      concat failed); a failed flush now propagates without corrupting state.
    """
    start_all = time.time()

    partitions = iterate_days(first="2025-03-01", last="2025-06-30")

    # NOTE(review): '...' placeholders are deliberately redacted config —
    # fill in a real local path and GCS base URI before running.
    output_dir = Path(...)
    output_dir.mkdir(parents=True, exist_ok=True)

    base_uri = ...
    storage_opts = {"token": "cloud"}  # ADC via gcsfs

    week_buffers = []        # transformed day frames awaiting flush
    week_idx = 1             # suffix of the next output CSV part
    day_idx = 0              # successfully transformed days (drives flush cadence)
    total_rows_written = 0
    total_days_processed = 0

    for day in partitions:
        t0 = time.time()
        uri = f"{base_uri}/day={day}"

        try:
            df = pd.read_parquet(uri, storage_options=storage_opts)
        except FileNotFoundError:
            log.warning("Partition not found (skipping): %s", uri)
            continue
        except Exception as e:
            log.exception("Failed to read partition %s: %s", uri, e)
            continue

        log.info("Loaded %s rows x %s cols from %s", len(df), len(df.columns), uri)

        try:
            # Jitter location
            pings_jit = jitter_points(
                df,
                lat_col="latitude",
                lon_col="longitude",
                radius_m=20,
                seed=456,
                shuffle=True,
            )
            # Jitter signal
            pings_jit["signal_level_jit"] = jitter_signal(
                df["signal_level"], sigma_db=2.0, seed=456
            )

            # Replace originals with jittered
            pings_jit = (
                pings_jit.drop(columns=["latitude", "longitude", "signal_level"])
                .rename(
                    columns={
                        "latitude_jit": "latitude",
                        "longitude_jit": "longitude",
                        "signal_level_jit": "signal_level",
                    }
                )
            )

            # Keep only 'Full Service Loss (>120s)', else set to None
            if "measurement_type_name" in pings_jit.columns:
                pings_jit["measurement_type_name"] = pings_jit["measurement_type_name"].apply(
                    lambda x: x if x == "Full Service Loss (>120s)" else None
                )
            else:
                log.warning("Column 'measurement_type_name' missing in partition %s", day)

            log.info(
                "Transformed day=%s → %s rows", day, len(pings_jit)
            )

            week_buffers.append(pings_jit)
            total_days_processed += 1
            day_idx += 1
        except Exception as e:
            log.exception("Transform failed for day=%s: %s", day, e)
            # No 'del df' here: the finally clause below frees it exactly once.
            continue
        finally:
            # free the original df ASAP
            del df
            gc.collect()

        # Flush every 14 days
        if day_idx % 14 == 0:
            week_df = pd.concat(week_buffers, ignore_index=True)
            out_path = output_dir / f"np_extract_part_{week_idx}.csv"
            week_df.to_csv(out_path, index=False)
            total_rows_written += len(week_df)
            log.info(
                "Wrote week %d: %s rows to %s (elapsed %.2fs)",
                week_idx, len(week_df), out_path, time.time() - t0
            )
            # Only reset state after a successful write.
            week_buffers.clear()
            week_idx += 1
            del week_df  # encourage memory to return
            gc.collect()

        log.info("Processed day=%s in %.2fs", day, time.time() - t0)

    # Final partial week flush — same destination scheme as the periodic flush
    if week_buffers:
        week_df = pd.concat(week_buffers, ignore_index=True)
        out_path = output_dir / f"np_extract_part_{week_idx}.csv"
        week_df.to_csv(out_path, index=False)
        total_rows_written += len(week_df)
        log.info(
            "Wrote FINAL part %d: %s rows to %s",
            week_idx, len(week_df), out_path
        )
        week_buffers.clear()
        del week_df
        gc.collect()

    log.info(
        "Done. Days processed: %d | Rows written: %d | Total time: %.2fs",
        total_days_processed, total_rows_written, time.time() - start_all
    )
|
| 245 |
+
|
| 246 |
+
|
| 247 |
+
# Script entry point: run the full obfuscation pipeline when executed directly.
if __name__ == "__main__":
    main()
|