Integral — ML Verification Notebook + React GraphQL Integration + Map UI
This document contains three deliverables you requested: (A) ML verification notebook (pothole detection & verification), (B) React dashboard GraphQL queries & subscription wiring, and (C) Map UI (Leaflet) integration. Each section includes runnable code, dependency lists, and quickstart instructions.
A — ML Verification Notebook (Jupyter, Python)
Purpose: Train a pothole detection model (segmentation + bbox) and produce a confidence score per detection for the verification-service. Exports inference results to the detection-service endpoint or writes directly to the Postgres objects table.
Notes: Use labeled images from vehicle dashcams, crowd-sourced uploads, and satellite tiles. This notebook uses PyTorch + torchvision and a simple U-Net-style segmentation model; per-detection confidence is derived from the mean predicted mask probability.
Dependencies
pip install torch torchvision torchaudio --index-url https://download.pytorch.org/whl/cu118 # or CPU wheel
pip install jupyterlab opencv-python scikit-learn scipy geopandas rasterio shapely matplotlib tqdm requests
pip install albumentations
Notebook (save as ml_verification.ipynb — here shown as linear Python cells)
# cell 1: imports
import os
import json
from pathlib import Path
import random
import numpy as np
import cv2
import torch
import torch.nn as nn
from torch.utils.data import Dataset, DataLoader
import torchvision.transforms as T
import albumentations as A
from sklearn.model_selection import train_test_split
from tqdm import tqdm
import requests
# cell 2: config
DATA_DIR = Path('data')
IMAGES_DIR = DATA_DIR/'images'
MASKS_DIR = DATA_DIR/'masks' # segmentation masks where potholes are marked
BATCH_SIZE = 8
NUM_EPOCHS = 20
DEVICE = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
MODEL_DIR = Path('model')
MODEL_DIR.mkdir(parents=True, exist_ok=True)
# cell 3: dataset
class PotholeDataset(Dataset):
def __init__(self, image_paths, mask_paths, transforms=None):
self.images = image_paths
self.masks = mask_paths
self.transforms = transforms
def __len__(self):
return len(self.images)
def __getitem__(self, idx):
img = cv2.imread(str(self.images[idx]), cv2.IMREAD_COLOR)
img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
mask = cv2.imread(str(self.masks[idx]), cv2.IMREAD_GRAYSCALE)
# normalize mask to 0/1
mask = (mask > 127).astype('float32')
if self.transforms:
augmented = self.transforms(image=img, mask=mask)
img = augmented['image']
mask = augmented['mask']
img = img.astype('float32') / 255.0
img = np.transpose(img, (2,0,1))
img_t = torch.tensor(img, dtype=torch.float32)
mask_t = torch.tensor(mask, dtype=torch.float32).unsqueeze(0)
return img_t, mask_t
# cell 4: simple U-Net model
class DoubleConv(nn.Module):
def __init__(self, in_c, out_c):
super().__init__()
self.net = nn.Sequential(
nn.Conv2d(in_c, out_c, 3, padding=1),
nn.ReLU(inplace=True),
nn.Conv2d(out_c, out_c, 3, padding=1),
nn.ReLU(inplace=True)
)
def forward(self,x): return self.net(x)
class UNet(nn.Module):
def __init__(self, n_channels=3, n_classes=1):
super().__init__()
self.enc1 = DoubleConv(n_channels, 64)
self.pool = nn.MaxPool2d(2)
self.enc2 = DoubleConv(64,128)
self.enc3 = DoubleConv(128,256)
self.enc4 = DoubleConv(256,512)
self.up3 = nn.ConvTranspose2d(512,256,2,stride=2)
self.dec3 = DoubleConv(512,256)
self.up2 = nn.ConvTranspose2d(256,128,2,stride=2)
self.dec2 = DoubleConv(256,128)
self.up1 = nn.ConvTranspose2d(128,64,2,stride=2)
self.dec1 = DoubleConv(128,64)
self.final = nn.Conv2d(64, n_classes, 1)
def forward(self,x):
e1 = self.enc1(x)
e2 = self.enc2(self.pool(e1))
e3 = self.enc3(self.pool(e2))
e4 = self.enc4(self.pool(e3))
d3 = self.dec3(torch.cat([self.up3(e4), e3], dim=1))
d2 = self.dec2(torch.cat([self.up2(d3), e2], dim=1))
d1 = self.dec1(torch.cat([self.up1(d2), e1], dim=1))
out = self.final(d1)
return torch.sigmoid(out)
# cell 5: prepare data lists (you must provide matching images & masks filenames)
image_files = sorted(list((IMAGES_DIR).glob('*.jpg')))
mask_files = sorted(list((MASKS_DIR).glob('*.png')))
train_imgs, val_imgs, train_masks, val_masks = train_test_split(image_files, mask_files, test_size=0.2, random_state=42)
transform = A.Compose([
A.Resize(256,256),
A.HorizontalFlip(p=0.5),
A.RandomBrightnessContrast(p=0.3),
])
train_ds = PotholeDataset(train_imgs, train_masks, transforms=transform)
val_ds = PotholeDataset(val_imgs, val_masks, transforms=A.Compose([A.Resize(256,256)]))
train_loader = DataLoader(train_ds, batch_size=BATCH_SIZE, shuffle=True)
val_loader = DataLoader(val_ds, batch_size=BATCH_SIZE, shuffle=False)
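A quick sanity check before training (a minimal sketch): the split in cell 5 assumes image_files[i] and mask_files[i] describe the same scene, which two independent sorted() calls only guarantee when filenames pair up by stem.
# cell 5b: verify image/mask pairing (assumes each mask reuses its image's filename stem)
image_stems = {p.stem for p in image_files}
mask_stems = {p.stem for p in mask_files}
missing = image_stems - mask_stems
assert not missing, f'images without masks: {sorted(missing)[:5]}'
assert len(image_files) == len(mask_files), 'image/mask count mismatch'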
# cell 6: training loop
model = UNet().to(DEVICE)
optimizer = torch.optim.Adam(model.parameters(), lr=1e-4)
criterion = nn.BCELoss()
for epoch in range(NUM_EPOCHS):
model.train()
running_loss = 0.0
for imgs, masks in tqdm(train_loader, desc=f'train {epoch}'):
imgs = imgs.to(DEVICE); masks = masks.to(DEVICE)
preds = model(imgs)
loss = criterion(preds, masks)
optimizer.zero_grad(); loss.backward(); optimizer.step()
running_loss += loss.item()
print(f'Epoch {epoch} loss {running_loss/len(train_loader):.4f}')
# validation
model.eval()
val_loss = 0.0
with torch.no_grad():
for imgs, masks in val_loader:
imgs = imgs.to(DEVICE); masks = masks.to(DEVICE)
preds = model(imgs)
val_loss += criterion(preds, masks).item()
print(f'Val loss {val_loss/len(val_loader):.4f}')
torch.save(model.state_dict(), MODEL_DIR/f'unet_epoch_{epoch}.pt')
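Validation BCE can look deceptively low on sparse masks (potholes cover few pixels, so predicting all-background already scores well). A Dice-score helper you could fold into the validation loop (a minimal sketch, not wired in above):
# optional: Dice overlap metric for validation (1.0 = perfect mask overlap)
def dice_score(pred, target, threshold=0.5, eps=1e-6):
    pred_bin = (pred > threshold).float()
    inter = (pred_bin * target).sum()
    return float((2 * inter + eps) / (pred_bin.sum() + target.sum() + eps))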
# cell 7: inference helper -> produce detections
import scipy.ndimage as ndi
def infer_and_extract(img_path, model, threshold=0.3, min_area=50):
    img = cv2.imread(str(img_path))
    h0, w0 = img.shape[:2]
    # match the training preprocessing: BGR -> RGB, resize, scale to [0, 1]
    img_r = cv2.cvtColor(cv2.resize(img, (256, 256)), cv2.COLOR_BGR2RGB)
    x = np.transpose(img_r.astype('float32') / 255.0, (2, 0, 1))
    x = torch.tensor(x).unsqueeze(0).to(DEVICE)
    model.eval()
    with torch.no_grad():
        pred = model(x)[0, 0].cpu().numpy()
    # resize the probability map back to the original resolution so the
    # pixel coordinates below index the same grid
    prob = cv2.resize(pred, (w0, h0))
    mask = (prob > threshold).astype('uint8')
labeled, n = ndi.label(mask)
detections = []
for region in range(1,n+1):
ys, xs = np.where(labeled==region)
if len(xs) < min_area: continue
x_min, x_max = xs.min(), xs.max()
y_min, y_max = ys.min(), ys.max()
area = len(xs)
        conf = prob[ys, xs].mean()  # mean predicted probability over the region
detections.append({
'bbox': [int(x_min), int(y_min), int(x_max), int(y_max)],
'area': int(area),
'confidence': float(conf)
})
return detections, mask
# cell 8: export detections -> call detection-service
from datetime import datetime, timezone
DETECTION_API = os.getenv('DETECTION_API', 'http://localhost:3001/detect')
def export_detection(location, image_url, detections, provenance):
# choose highest confidence detection
if not detections: return None
top = max(detections, key=lambda d: d['confidence'])
payload = {
'namespace':'satellite',
'type':'pothole-detection',
        'timestamp': datetime.now(timezone.utc).isoformat(),
        'location': location,
        'severity': min(5, max(1, top['area'] // 500)),  # crude area-based heuristic
'images':[image_url],
'provenance': provenance
}
try:
r = requests.post(DETECTION_API, json=payload, timeout=5)
return r.json()
except Exception as e:
print('export failed', e)
return None
# usage example
if __name__ == '__main__':
    model.load_state_dict(torch.load(MODEL_DIR/f'unet_epoch_{NUM_EPOCHS-1}.pt', map_location=DEVICE))
test_img = 'data/ground/test1.jpg'
dets, mask = infer_and_extract(test_img, model)
print(dets)
# export with a dummy location/provenance
print(export_detection({'lat':-29.12,'lon':26.22}, 'https://example.com/test1.jpg', dets, {'source':'ml-run','license':'CC0'}))
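The section-A purpose also mentions writing detections straight to the Postgres objects table instead of going through the detection-service. A hedged sketch with psycopg2 (pip install psycopg2-binary); the objects table layout used here (namespace, type, payload jsonb) is an assumption, so adjust it to your actual schema:
# cell 9 (optional): direct Postgres export (sketch; table/column names are assumptions)
import psycopg2
from psycopg2.extras import Json

def export_to_postgres(dsn, location, image_url, detections, provenance):
    if not detections:
        return None
    top = max(detections, key=lambda d: d['confidence'])
    payload = {
        'location': location,
        'bbox': top['bbox'],
        'confidence': top['confidence'],
        'images': [image_url],
        'provenance': provenance,
    }
    # the connection context manager wraps the insert in a transaction
    with psycopg2.connect(dsn) as conn, conn.cursor() as cur:
        cur.execute(
            'INSERT INTO objects (namespace, type, payload) VALUES (%s, %s, %s) RETURNING id',
            ('satellite', 'pothole-detection', Json(payload)),
        )
        return cur.fetchone()[0]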
B — React Dashboard: GraphQL queries & subscriptions (Apollo Client)
Goal: Wire the dashboard to the Apollo Server created earlier. Provide query examples, subscription usage, and UI integration (hooks + state).
Dependencies
npm install @apollo/client graphql graphql-ws
npm install leaflet react-leaflet
Note:
graphql-ws is the modern WebSocket transport and is what the GraphQLWsLink setup below expects; subscriptions-transport-ws is the legacy protocol, only needed if your server still speaks it.
Apollo client setup (src/apollo.js)
import { ApolloClient, InMemoryCache, HttpLink, split } from '@apollo/client';
import { GraphQLWsLink } from '@apollo/client/link/subscriptions';
import { createClient } from 'graphql-ws';
import { getMainDefinition } from '@apollo/client/utilities';
const httpLink = new HttpLink({ uri: 'http://localhost:4000/graphql' });
const wsLink = new GraphQLWsLink(createClient({ url: 'ws://localhost:4000/graphql' }));
const splitLink = split(
({ query }) => {
const def = getMainDefinition(query);
return def.kind === 'OperationDefinition' && def.operation === 'subscription';
},
wsLink,
httpLink
);
export const client = new ApolloClient({
link: splitLink,
cache: new InMemoryCache(),
});
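For the hooks below to work, the app root must be wrapped in an ApolloProvider. A minimal sketch, assuming a React 18 CRA-style entry point (src/index.js); with React 17 use ReactDOM.render instead:
import React from 'react';
import ReactDOM from 'react-dom/client';
import { ApolloProvider } from '@apollo/client';
import { client } from './apollo';
import App from './App';

ReactDOM.createRoot(document.getElementById('root')).render(
  <ApolloProvider client={client}>
    <App />
  </ApolloProvider>
);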
Queries & Subscriptions (src/graphql/queries.js)
import { gql } from '@apollo/client';
export const LIST_FAULTS = gql`
query ListInfraFaults($limit:Int,$offset:Int){
listInfraFaults(limit:$limit,offset:$offset){
id namespace type timestamp location severity confirmed images provenance
}
}
`;
export const FAULT_CREATED = gql`
subscription { faultCreated { id namespace type timestamp location severity confirmed images provenance } }
`;
export const FAULT_CONFIRMED = gql`
subscription { faultConfirmed { id confirmed } }
`;
export const PAYOUT_UPDATED = gql`
subscription { payoutUpdated { id faultId amountMinorUnits currency payeeId status txRef } }
`;
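PAYOUT_UPDATED is defined above but not wired anywhere below; a minimal hook sketch that keeps a rolling feed of payout events (src/hooks/usePayouts.js, file name assumed):
import { useState } from 'react';
import { useSubscription } from '@apollo/client';
import { PAYOUT_UPDATED } from '../graphql/queries';

export function usePayoutFeed() {
  const [payouts, setPayouts] = useState([]);
  useSubscription(PAYOUT_UPDATED, {
    onSubscriptionData: ({ subscriptionData }) => {
      const p = subscriptionData.data && subscriptionData.data.payoutUpdated;
      // keep only the most recent 100 events
      if (p) setPayouts(prev => [p, ...prev].slice(0, 100));
    }
  });
  return payouts;
}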
React hook usage (src/hooks/useFaults.js)
import { gql, useQuery, useSubscription } from '@apollo/client';
import { LIST_FAULTS, FAULT_CREATED, FAULT_CONFIRMED } from '../graphql/queries';
export function useFaults() {
const { data, loading, error, fetchMore } = useQuery(LIST_FAULTS, { variables: { limit: 50, offset: 0 } });
useSubscription(FAULT_CREATED, {
onSubscriptionData: ({ client, subscriptionData }) => {
const newFault = subscriptionData.data.faultCreated;
// optional: update cache or refetch
client.cache.modify({
fields: {
listInfraFaults(existing = []) {
const newRef = client.cache.writeFragment({
data: newFault,
            fragment: gql`fragment NewFault on InfrastructureFault { id namespace type timestamp location severity confirmed images provenance }`
});
return [newRef, ...existing];
}
}
});
}
});
useSubscription(FAULT_CONFIRMED, {
onSubscriptionData: ({ client, subscriptionData }) => {
const changed = subscriptionData.data.faultConfirmed;
// update cache entry
client.cache.modify({ id: client.cache.identify({ __typename: 'InfrastructureFault', id: changed.id }), fields: { confirmed() { return changed.confirmed; } } });
}
});
return { data, loading, error, fetchMore };
}
Wiring into dashboard component (snippet)
import React from 'react';
import { useFaults } from './hooks/useFaults';
export default function InfraPanel(){
const { data, loading } = useFaults();
  if (loading || !data) return <div>Loading...</div>;
return (
<div>
{data.listInfraFaults.map(f => (
<div key={f.id} className="card">
<h4>{f.type} — severity {f.severity}</h4>
<p>Confirmed: {String(f.confirmed)}</p>
</div>
))}
</div>
);
}
C — Map UI (Leaflet + React-Leaflet) with Satellite overlays
Goal: Display pothole markers, heatmap, and satellite overlays (Sentinel tile layers or Mapbox). Clicking a marker opens verification panel and create/settle actions.
Dependencies
npm install leaflet react-leaflet leaflet.heat
Also add CSS in your app root for leaflet:
/* index.css */
.leaflet-container { height: 100%; width: 100%; }
Map component (src/components/MapView.jsx)
import React, { useEffect, useState } from 'react';
import { MapContainer, TileLayer, Marker, Popup, Circle } from 'react-leaflet';
import 'leaflet/dist/leaflet.css';
import L from 'leaflet';
// fix default icon issues in many bundlers
delete L.Icon.Default.prototype._getIconUrl;
L.Icon.Default.mergeOptions({
iconRetinaUrl: require('leaflet/dist/images/marker-icon-2x.png'),
iconUrl: require('leaflet/dist/images/marker-icon.png'),
shadowUrl: require('leaflet/dist/images/marker-shadow.png')
});
import { useFaults } from '../hooks/useFaults';
export default function MapView(){
const { data } = useFaults();
const [center] = useState([-29.12,26.22]);
return (
<MapContainer center={center} zoom={13} style={{height:'600px'}}>
<TileLayer
attribution='© OpenStreetMap contributors'
url='https://{s}.tile.openstreetmap.org/{z}/{x}/{y}.png'
/>
{/* Optional Satellite overlay with Mapbox (requires token): */}
{/* <TileLayer url={`https://api.mapbox.com/styles/v1/mapbox/satellite-v9/tiles/{z}/{x}/{y}?access_token=${process.env.MAPBOX_TOKEN}`} /> */}
{data && data.listInfraFaults.map(f => (
<Marker key={f.id} position={[f.location.lat,f.location.lon]}>
<Popup>
<div>
<strong>{f.type}</strong>
<p>Severity: {f.severity}</p>
<p>Confirmed: {String(f.confirmed)}</p>
{f.images && f.images.length>0 && <img src={f.images[0]} alt="pothole" style={{width:'200px'}}/>}
<div>
            <button onClick={()=>{/* call confirm mutation; see useConfirmFault sketch below */}}>Confirm</button>
</div>
</div>
</Popup>
</Marker>
))}
</MapContainer>
);
}
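The Confirm button above is a stub. A hedged sketch of the mutation wiring; CONFIRM_FAULT assumes the Apollo server exposes a confirmFault(id: ID!) mutation returning the updated fault, so adjust the name and signature to your schema (src/hooks/useConfirmFault.js):
import { gql, useMutation } from '@apollo/client';

const CONFIRM_FAULT = gql`
  mutation ConfirmFault($id: ID!) {
    confirmFault(id: $id) { id confirmed }
  }
`;

export function useConfirmFault() {
  const [confirmFault, { loading, error }] = useMutation(CONFIRM_FAULT);
  // selecting id + confirmed lets Apollo update the cached fault automatically
  return { confirm: (id) => confirmFault({ variables: { id } }), loading, error };
}
Inside MapView, call const { confirm } = useConfirmFault(); and change the button to onClick={() => confirm(f.id)}.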
Heatmap (optional)
Use the leaflet.heat plugin and convert faults into weighted points by severity, as sketched below.
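A minimal sketch of that conversion as a react-leaflet component (src/components/HeatLayer.jsx); severity 1-5 is normalized into the heat intensity, and the component is rendered inside MapContainer:
import { useEffect } from 'react';
import { useMap } from 'react-leaflet';
import L from 'leaflet';
import 'leaflet.heat'; // plugin attaches L.heatLayer to the leaflet global

export default function HeatLayer({ faults = [] }) {
  const map = useMap();
  useEffect(() => {
    // leaflet.heat expects [lat, lng, intensity] triples
    const points = faults.map(f => [f.location.lat, f.location.lon, f.severity / 5]);
    const layer = L.heatLayer(points, { radius: 25, blur: 15 });
    layer.addTo(map);
    return () => { map.removeLayer(layer); };
  }, [faults, map]);
  return null;
}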
Deployment & Running
- Start Postgres + Redis + the Apollo server (see the earlier integral-apollo-server canvas doc) and run migrations.
- Start the React app (npm start) with the Apollo client configured for http://localhost:4000/graphql over HTTP and ws://localhost:4000/graphql over WebSocket.
- Prepare training data and run ml_verification.ipynb to train a model and export detections to the detection-service endpoint.
- Use the dashboard to watch faults appear in real time and the map UI to verify faults and settle payouts.
Next suggestions
- I can export the ML notebook as an actual .ipynb file and attach it for download.
- I can generate the full React project files (components, hooks, package.json) in the canvas and zip them.
- I can produce a small sample dataset (synthetic images + masks) so you can run training quickly.
Which of those would you like me to produce now?