|
|
|
|
|
import pandas as pd
|
|
|
import numpy as np
|
|
|
import json
|
|
|
import sys
|
|
|
from scipy.signal import find_peaks
|
|
|
|
|
|
|
|
|
# Ensure console output can render the non-ASCII symbols (µΩ) used in fault
# descriptions. Guarded because:
#   - the reported encoding may differ only in case (e.g. 'UTF-8'), and
#   - redirected/wrapped streams (pipes, test harness captures) may not
#     expose reconfigure(), which exists only on io.TextIOWrapper.
if hasattr(sys.stdout, 'reconfigure') and (sys.stdout.encoding or '').lower() != 'utf-8':
    sys.stdout.reconfigure(encoding='utf-8')
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
# ============================================================================
# Tuning constants for the rule-based DCRM classifier.
# Resistances are in µΩ, times in ms, currents in A (units inferred from how
# the values are formatted into fault descriptions below -- TODO confirm
# against the measurement spec).
# ============================================================================

# --- Gross resistance levels ------------------------------------------------
R_OPEN_THRESHOLD = 1_000_000      # resistance treated as a fully open circuit
R_ARCING_THRESHOLD = 1000         # below this at least the arcing contact is engaged
R_MAIN_THRESHOLD = 600            # below this the main contact is carrying current

# --- Healthy main-contact plateau -------------------------------------------
R_HEALTHY_MEAN_IDEAL = 35         # ideal plateau mean
R_HEALTHY_MAX = 70                # plateau mean above this is no longer healthy
R_HEALTHY_STD_MAX = 15            # plateau std above this is no longer smooth

# --- Main-contact wear severity ladder (plateau mean) -----------------------
R_WEAR_EARLY_MIN = 70
R_WEAR_MODERATE_MIN = 100
R_WEAR_SEVERE_MIN = 180
R_WEAR_CRITICAL_MIN = 280
# Matching plateau-std ladder (surface roughness / pitting)
WEAR_STD_EARLY = 15
WEAR_STD_MODERATE = 25
WEAR_STD_SEVERE = 45

# --- Misalignment (telegraph / square-wave) signatures ----------------------
MISALIGNMENT_JUMP_MIN = 120       # single-sample step counted as a telegraph jump
MISALIGNMENT_COUNT_MIN = 6        # jumps needed before misalignment is scored
MISALIGNMENT_JUMP_RATIO = 0.15    # jumps per sample of the main plateau
MISALIGNMENT_STD_MIN = 70         # plateau std accompanying a telegraph pattern
SHELF_DETECTION_THRESHOLD = 80    # plateau range needed for "stepped shelves"
SQUARE_WAVE_DUTY_CYCLE = 0.3      # nominal duty cycle of a square-wave pattern

# --- Arcing-contact wear (spike) thresholds ---------------------------------
ARCING_SPIKE_CRITICAL = 8000      # spike amplitude ladder for arc flashes
ARCING_SPIKE_SEVERE = 5000
ARCING_SPIKE_MODERATE = 3000
ARCING_SPIKE_COUNT_CRITICAL = 4   # critical spikes needed for a CRITICAL verdict
ARCING_SPIKE_COUNT_SEVERE = 3     # severe spikes needed for a SEVERE verdict
ARCING_INSTABILITY_STD = 700      # arcing-zone std indicating instability
SPIKE_WIDTH_THRESHOLD = 3         # consecutive samples for a "sustained" spike

# --- Timing / bounce signatures ---------------------------------------------
ASYMMETRY_RATIO_MODERATE = 1.6    # opening/closing duration ratio thresholds
ASYMMETRY_RATIO_SEVERE = 2.2
ASYMMETRY_RATIO_CRITICAL = 3.0
BOUNCE_PROMINENCE = 500           # scipy find_peaks prominence for contact bounces
BOUNCE_SINUSOIDAL_FREQ = 10       # minimum samples before oscillation scoring
PHASE3_REDUCTION_RATIO = 0.65     # main-contact dwell shorter than this fraction
                                  # of nominal indicates reduced engagement

# --- Nominal operating KPIs --------------------------------------------------
CLOSING_TIME_NOM = (80, 100)      # (min, max) nominal closing time
OPENING_TIME_NOM = (30, 40)       # (min, max) nominal opening time
CONTACT_SPEED_NOM = (4.5, 6.5)    # (min, max) nominal contact speed -- presumably m/s
TIMING_DEVIATION_THRESHOLD = 0.20 # fractional deviation tolerated around nominal

# --- Contact bounce limits ----------------------------------------------------
BOUNCE_COUNT_THRESHOLD = 5
BOUNCE_AMPLITUDE = 100

# --- SF6 gas system (used by secondary classifiers) --------------------------
SF6_PRESSURE_NOM = (5.5, 6.5)
SF6_PRESSURE_CRITICAL = 5.0
ARC_QUENCH_DURATION_MAX = 25

# --- Linkage/rod stutter detection -------------------------------------------
STUTTER_COUNT_MIN = 3
STUTTER_DURATION_MIN = 10

# --- DLRO (static resistance) cross-checks -----------------------------------
DLRO_HEALTHY_MAX = 50
DLRO_MODERATE = 80
DLRO_CRITICAL = 100
FIXED_CONTACT_STD_MAX = 15

# --- Coil currents ------------------------------------------------------------
CLOSE_COIL_CURRENT_MIN = 2.0      # below this the close coil is considered damaged
TRIP_COIL_CURRENT_MIN = 2.0       # below this a trip coil is considered damaged
COIL_CURRENT_NOM = (4.0, 7.0)     # nominal peak coil current range
|
|
|
|
|
|
|
|
|
|
|
|
def standardize_input(df: pd.DataFrame) -> pd.DataFrame:
    """
    Normalize a raw DCRM CSV frame into a single-row frame of 401 samples.

    Returns a DataFrame with one row and columns T_0...T_400 containing
    Resistance values (uOhm).

    Two layouts are accepted:
      * vertical:   >=401 rows with a 'Resistance' column (one sample per row)
      * horizontal: >=401 columns (one sample per column); a 'Resistance'
        column is not required in this layout

    Raises:
        KeyError:   if neither layout matches and there is no 'Resistance'
                    column at all.
        ValueError: if a 'Resistance' column exists but fewer than 401
                    samples are available.

    Note: the previous implementation selected ``df[['Resistance']]`` before
    checking the layout, which forced the frame to one column and made the
    documented horizontal branch unreachable. Layout is now detected first.
    """
    cols = [f"T_{i}" for i in range(401)]

    # Vertical layout: one sample per row in the 'Resistance' column.
    if 'Resistance' in df.columns and df.shape[0] >= 401:
        values = df['Resistance'].iloc[:401].values.reshape(1, -1)
        return pd.DataFrame(values, columns=cols)

    # Horizontal layout: samples spread across >=401 columns.
    if df.shape[1] >= 401:
        out = df.iloc[:, :401].copy()
        out.columns = cols
        return out

    # Neither layout matched -- report the most specific problem.
    if 'Resistance' not in df.columns:
        raise KeyError("CSV must contain a 'Resistance' column.")

    raise ValueError(f"Input shape {df.shape} invalid. Expected 401 Resistance points.")
|
|
|
|
|
|
|
|
|
def analyze_dcrm_advanced(row_values, kpis=None):
    """
    Production-Grade DCRM Analysis Engine with Full KPI Support
    ============================================================
    Detects ALL 12 defect classes:
    1. Healthy
    2. Main Contact Wear
    3. Arcing Contact Wear
    4. Main Contact Misalignment
    5. Arcing Contact Misalignment
    6. Operating Mechanism Malfunction
    7. Damping System Fault
    8. SF6 Pressure Leakage
    9. Linkage/Rod Obstruction
    10. Fixed Contact Damage
    11. Close Coil Damage
    12. Trip Coil Damage

    Args:
        row_values: DCRM waveform (401 time points, resistance in µΩ)
        kpis: Optional dictionary with KPIs (Closing Time, Opening Time, Contact Speed,
              SF6 Pressure, DLRO, Close Coil Current, Trip Coil 1/2 Currents, etc.)

    Returns:
        Dict (JSON-serializable) whose "Fault_Detection" list carries every
        defect with confidence >50% (the code filters at 50, not 75),
        plus "overall_health_assessment" per subsystem and a
        "classifications" array with confidence scores for all 12 classes.
    """
    arr = np.array(row_values, dtype=float)

    # KPIs are optional; downstream classifiers use dict.get with defaults.
    if kpis is None:
        kpis = {}

    # Split the trace into the 5 operating phases (open/closing/main/opening/open).
    phases = detect_five_phases(arr)

    # Phase detection returns None when the breaker never closed (or the
    # record is unusable); report that as a single critical finding.
    if phases is None:
        return {
            "Fault_Detection": [_build_result(
                "Open Circuit or Invalid Data",
                "100.00 %",
                "Critical",
                "Breaker did not close properly or data is corrupted"
            )],
            "overall_health_assessment": {
                "Contacts (moving & arcing)": "High Risk",
                "SF6 Gas Chamber": "Normal",
                "Operating Mechanism": "High Risk",
                "Coil": "Normal"
            }
        }

    # Slice the raw trace per phase. phase1/phase5 are currently unused by the
    # feature extractor but kept for clarity/symmetry.
    phase1_open = arr[phases['phase1_start']:phases['phase1_end']]
    phase2_closing = arr[phases['phase2_start']:phases['phase2_end']]
    phase3_main = arr[phases['phase3_start']:phases['phase3_end']]
    phase4_opening = arr[phases['phase4_start']:phases['phase4_end']]
    phase5_open = arr[phases['phase5_start']:phases['phase5_end']]

    # Scalar waveform features feeding the rule-based classifiers.
    features = extract_features(phase3_main, phase2_closing, phase4_opening, phases)

    # Primary = contact/coil defects; secondary = mechanical/operational defects.
    primary_faults = classify_primary_faults(features, phases, kpis)

    secondary_faults = classify_secondary_faults(features, phases, kpis, primary_faults)

    all_faults = primary_faults + secondary_faults

    # Keep only findings with confidence strictly above 50%.
    # Each fault dict carries its confidence as a string like "87.50 %".
    high_prob_faults = []
    for fault in all_faults:
        prob_str = fault['Confidence'].replace('%', '').strip()
        prob_val = float(prob_str)
        if prob_val > 50.0:
            high_prob_faults.append(fault)

    # Most-confident finding first.
    high_prob_faults.sort(key=lambda x: float(x['Confidence'].replace('%', '').strip()), reverse=True)

    # Never return an empty findings list: emit an explicit "Inconclusive"
    # entry so callers always have something to display.
    if not high_prob_faults:
        healthy_desc = f"Insufficient evidence for any specific defect. Main Contact: Mean={features['main_mean']:.1f} µΩ, Std={features['main_std']:.1f} µΩ. All defect probabilities <50%."
        high_prob_faults.append(_build_result(
            "Inconclusive",
            "45.00 %",
            "Low",
            healthy_desc
        ))

    # Per-subsystem rollup, worst-wins ("High Risk" is sticky once set).
    overall_health = {
        "Contacts (moving & arcing)": "Normal",
        "SF6 Gas Chamber": "Normal",
        "Operating Mechanism": "Normal",
        "Coil": "Normal"
    }

    for fault in high_prob_faults:
        name = fault['defect_name'].lower()
        severity = fault['Severity'].lower()
        probability = float(fault['Confidence'].replace('%', '').strip())

        # Map (probability, severity) to a subsystem risk bucket.
        if probability >= 85 and severity in ["high", "critical"]:
            risk = "High Risk"
        elif probability >= 70:
            risk = "Moderate Risk"
        elif probability >= 50:
            risk = "Low Risk"
        else:
            risk = "Normal"

        # Keyword routing of a fault name to its subsystem. NOTE: a later,
        # lower-risk fault can overwrite a "Moderate Risk" with "Low Risk";
        # only "High Risk" is protected from downgrade.
        if any(x in name for x in ["main contact", "arcing contact", "contact wear", "contact misalignment", "fixed contact"]):
            if overall_health["Contacts (moving & arcing)"] != "High Risk":
                overall_health["Contacts (moving & arcing)"] = risk

        if "sf6" in name or "pressure" in name:
            if overall_health["SF6 Gas Chamber"] != "High Risk":
                overall_health["SF6 Gas Chamber"] = risk

        if any(x in name for x in ["operating mechanism", "damping", "linkage", "rod"]):
            if overall_health["Operating Mechanism"] != "High Risk":
                overall_health["Operating Mechanism"] = risk

        if "coil" in name:
            if overall_health["Coil"] != "High Risk":
                overall_health["Coil"] = risk

    # Fixed catalog of the 12 classes; keys must match the defect_name strings
    # produced by the classifiers.
    class_probabilities = {
        "Healthy": 0.0,
        "Main Contact Wear": 0.0,
        "Arcing Contact Wear": 0.0,
        "Main Contact Misalignment": 0.0,
        "Arcing Contact Misalignment": 0.0,
        "Operating Mechanism Malfunction": 0.0,
        "Damping System Fault": 0.0,
        "Pressure System Leakage (SF6 Gas Chamber)": 0.0,
        "Linkage/Connecting Rod Obstruction/Damage": 0.0,
        "Fixed Contact Damage/Deformation": 0.0,
        "Close Coil Damage": 0.0,
        "Trip Coil Damage": 0.0
    }

    # Fill in scores from ALL faults (not just the >50% ones) so every class
    # gets its true probability. NOTE(review): an unexpected defect_name from
    # a classifier would silently add a 13th entry here -- verify classifier
    # names stay in sync with this catalog.
    for fault in all_faults:
        name = fault['defect_name']
        prob_str = fault['Confidence'].replace('%', '').strip()
        prob_val = float(prob_str) / 100.0
        class_probabilities[name] = prob_val

    # Flatten into the list-of-dicts shape the API consumers expect.
    classifications = []
    for class_name, confidence in class_probabilities.items():
        classifications.append({
            "Class": class_name,
            "Confidence": round(confidence, 4)
        })

    result = {
        "Fault_Detection": high_prob_faults,
        "overall_health_assessment": overall_health,
        "classifications": classifications
    }

    return result
|
|
|
|
|
|
|
|
|
def detect_five_phases(arr):
    """
    Segment a DCRM trace into its five operating phases.

    Phase layout: 1) open (pre-close), 2) closing/arcing, 3) main contact
    carrying, 4) opening/arcing, 5) open (post-trip). Phases are contiguous:
    each phase starts exactly where the previous one ends.

    Args:
        arr: 1-D numpy array of resistance samples.

    Returns:
        Dict mapping 'phaseN_start'/'phaseN_end' to slice indices, or None
        when fewer than 20 samples ever drop below the contact threshold
        (breaker never closed properly, or the record is unusable).
    """
    n = len(arr)

    # Indices where any contact set (arcing or main) is engaged.
    touching = np.flatnonzero(arr < R_ARCING_THRESHOLD)
    if touching.size < 20:
        return None

    first_touch = touching[0]
    last_touch = touching[-1]

    # Indices where the low-resistance main contact is carrying current.
    carrying = np.flatnonzero(arr < R_MAIN_THRESHOLD)

    def _layout(p1_end, p2_end, p3_end, p4_end):
        # Build the contiguous 5-phase index map from the four boundaries.
        return {
            'phase1_start': 0,
            'phase1_end': p1_end,
            'phase2_start': p1_end,
            'phase2_end': p2_end,
            'phase3_start': p2_end,
            'phase3_end': p3_end,
            'phase4_start': p3_end,
            'phase4_end': p4_end,
            'phase5_start': p4_end,
            'phase5_end': n,
        }

    if len(carrying) < 5:
        # Main contact never meaningfully engaged: collapse phase 3 to an
        # empty span at the end of the contact window.
        return _layout(first_touch, last_touch, last_touch, last_touch + 1)

    # Normal case: phase boundaries come from the first/last main-contact
    # sample inside the overall contact window.
    return _layout(first_touch, carrying[0], carrying[-1], last_touch + 1)
|
|
|
|
|
|
|
|
|
def extract_features(seg_main, seg_closing, seg_opening, phases):
    """
    ULTRA-OPTIMIZED Feature Extraction with Micro-Level Waveform Analysis
    =====================================================================
    Detects:
    - Square wave patterns (misalignment)
    - Sinusoidal oscillations (damping/bounce)
    - Impulse spikes (arcing wear)
    - Grassy noise (contact wear)
    - Telegraph jumps (mechanical defects)
    - DC offset shifts (fixed contact issues)

    Args:
        seg_main:    phase-3 samples (main contact carrying current)
        seg_closing: phase-2 samples (closing / arcing-in)
        seg_opening: phase-4 samples (opening / arcing-out)
        phases:      phase index dict (currently unused here)

    Returns:
        Flat dict of scalar features consumed by the classifiers. Every key
        is always present (fallback values are set for empty segments).
    """
    features = {}

    # ------------------------------------------------------------------
    # Main-contact plateau features (phase 3)
    # ------------------------------------------------------------------
    if len(seg_main) > 0:
        # Basic plateau statistics.
        features['main_mean'] = float(np.mean(seg_main))
        features['main_median'] = float(np.median(seg_main))
        features['main_std'] = float(np.std(seg_main))
        features['main_min'] = float(np.min(seg_main))
        features['main_max'] = float(np.max(seg_main))
        features['main_range'] = float(features['main_max'] - features['main_min'])

        # Sample-to-sample steps drive the telegraph/noise features below.
        diffs = np.diff(seg_main)
        abs_diffs = np.abs(diffs)

        # Telegraph jumps: large single-sample steps (misalignment signature).
        sharp_edges = np.sum(abs_diffs > MISALIGNMENT_JUMP_MIN)
        features['telegraph_jumps'] = int(sharp_edges)
        # (the inline guard is redundant inside this branch but kept as-is)
        features['jump_ratio'] = float(sharp_edges / len(seg_main) if len(seg_main) > 0 else 0)

        # Square-wave detection: with a wide range, the fraction of time spent
        # above the median approximates a duty cycle.
        if features['main_range'] > 100:
            threshold = features['main_median']
            high_time = np.sum(seg_main > threshold)
            duty_cycle = high_time / len(seg_main)
            features['square_wave_duty'] = float(duty_cycle)

            # A duty cycle well away from 0/1 suggests two alternating levels.
            features['is_square_wave'] = 1 if 0.2 < duty_cycle < 0.8 else 0
        else:
            features['square_wave_duty'] = 0.5
            features['is_square_wave'] = 0

        # Oscillation score for damping/bounce patterns.
        # NOTE(review): np.correlate(x, x, mode='valid')[0] is the zero-lag
        # autocorrelation (sum of squares), which is always >= 0 and carries
        # no frequency information -- verify this metric actually
        # discriminates oscillation.
        if len(seg_main) > 20:
            detrended = seg_main - np.mean(seg_main)

            if len(detrended) > BOUNCE_SINUSOIDAL_FREQ:
                autocorr = np.correlate(detrended[:min(100, len(detrended))],
                                        detrended[:min(100, len(detrended))], mode='valid')[0]
                features['oscillation_score'] = float(abs(autocorr) / (np.std(detrended)**2 * len(detrended) + 1))
            else:
                features['oscillation_score'] = 0.0
        else:
            features['oscillation_score'] = 0.0

        # "Grassy" noise: moderate excursions above the median (wear signature).
        noise_threshold_low = features['main_median'] + 30
        noise_threshold_high = features['main_median'] + 200
        grassy_spikes = np.sum((seg_main > noise_threshold_low) & (seg_main < noise_threshold_high))
        features['uniform_spikes'] = int(grassy_spikes)
        features['spike_density'] = float(grassy_spikes / len(seg_main) if len(seg_main) > 0 else 0)

        # Aggregate noise measures from the step sizes.
        features['avg_noise'] = float(np.mean(abs_diffs))
        features['max_single_jump'] = float(np.max(abs_diffs)) if len(abs_diffs) > 0 else 0
        features['noise_rms'] = float(np.sqrt(np.mean(abs_diffs**2)))

        # Shelf / initial-transient analysis needs enough samples.
        if len(seg_main) > 20:
            # Count well-populated histogram bins as discrete resistance
            # "shelves" (stepped plateaus indicate misalignment).
            hist, edges = np.histogram(seg_main, bins=min(15, len(seg_main)//10))

            significant_bins = np.sum(hist > (len(seg_main) * 0.08))
            features['num_shelves'] = int(significant_bins)

            # Compare variability at the start of the plateau vs the rest:
            # a noisy start that then settles is a misalignment signature.
            split_point = min(25, len(seg_main)//3)
            initial_segment = seg_main[:split_point]
            plateau_segment = seg_main[split_point:]
            features['initial_deviation'] = float(np.std(initial_segment))
            features['plateau_stability'] = float(np.std(plateau_segment))

            if len(initial_segment) > 0 and len(plateau_segment) > 0:
                features['has_initial_jump'] = 1 if features['initial_deviation'] > features['plateau_stability'] * 1.8 else 0
            else:
                features['has_initial_jump'] = 0
        else:
            # Too short for shelf analysis: neutral defaults.
            features['num_shelves'] = 1
            features['initial_deviation'] = 0
            features['plateau_stability'] = features['main_std']
            features['has_initial_jump'] = 0

    else:
        # Empty main segment (collapsed phase 3): sentinel values -- the huge
        # mean pushes the wear/healthy rules toward "not healthy".
        features.update({
            'main_mean': 9999, 'main_median': 9999, 'main_std': 0,
            'main_min': 9999, 'main_max': 9999, 'main_range': 0,
            'telegraph_jumps': 0, 'jump_ratio': 0, 'uniform_spikes': 0,
            'spike_density': 0, 'avg_noise': 0, 'max_single_jump': 0,
            'noise_rms': 0, 'square_wave_duty': 0, 'is_square_wave': 0,
            'oscillation_score': 0, 'initial_deviation': 0,
            'plateau_stability': 0, 'has_initial_jump': 0, 'num_shelves': 0
        })

    # ------------------------------------------------------------------
    # Phase durations and timing asymmetry (sample counts ~ ms)
    # ------------------------------------------------------------------
    features['dur_closing'] = int(max(1, len(seg_closing)))   # clamped to avoid /0
    features['dur_opening'] = int(max(1, len(seg_opening)))
    features['dur_main'] = int(len(seg_main))
    features['asymmetry_ratio'] = float(features['dur_opening'] / features['dur_closing'])

    # Fraction of the nominal main-contact dwell actually observed.
    # NOTE(review): 160 presumably reflects the nominal phase-3 length of the
    # 401-sample record -- confirm against the acquisition spec.
    expected_main_duration = 160
    features['phase3_reduction'] = float(features['dur_main'] / expected_main_duration)

    # ------------------------------------------------------------------
    # Arcing-zone spike features (phases 2 and 4); defaults set first so
    # every key exists even for empty segments.
    # ------------------------------------------------------------------
    features['closing_critical_spikes'] = 0
    features['closing_severe_spikes'] = 0
    features['closing_moderate_spikes'] = 0
    features['opening_critical_spikes'] = 0
    features['opening_severe_spikes'] = 0
    features['opening_moderate_spikes'] = 0
    features['closing_std'] = 0
    features['opening_std'] = 0
    features['closing_peak'] = 0
    features['opening_peak'] = 0

    # --- closing (phase 2) ---
    if len(seg_closing) > 0:
        # Spike counts per amplitude tier (counts are nested: a critical spike
        # is also counted as severe and moderate).
        features['closing_critical_spikes'] = int(np.sum(seg_closing > ARCING_SPIKE_CRITICAL))
        features['closing_severe_spikes'] = int(np.sum(seg_closing > ARCING_SPIKE_SEVERE))
        features['closing_moderate_spikes'] = int(np.sum(seg_closing > ARCING_SPIKE_MODERATE))
        features['closing_std'] = float(np.std(seg_closing))
        features['closing_peak'] = float(np.max(seg_closing))

        # Sustained spikes: runs of >=SPIKE_WIDTH_THRESHOLD near-consecutive
        # samples above the severe threshold (gap > 2 splits a run).
        critical_indices = np.where(seg_closing > ARCING_SPIKE_SEVERE)[0]
        if len(critical_indices) > 0:

            spike_groups = np.split(critical_indices, np.where(np.diff(critical_indices) > 2)[0] + 1)
            sustained_spikes = sum(1 for group in spike_groups if len(group) >= SPIKE_WIDTH_THRESHOLD)
            features['closing_sustained_spikes'] = int(sustained_spikes)
        else:
            features['closing_sustained_spikes'] = 0
    else:
        features['closing_sustained_spikes'] = 0

    # --- opening (phase 4) ---
    if len(seg_opening) > 0:
        features['opening_critical_spikes'] = int(np.sum(seg_opening > ARCING_SPIKE_CRITICAL))
        features['opening_severe_spikes'] = int(np.sum(seg_opening > ARCING_SPIKE_SEVERE))
        features['opening_moderate_spikes'] = int(np.sum(seg_opening > ARCING_SPIKE_MODERATE))
        features['opening_std'] = float(np.std(seg_opening))
        features['opening_peak'] = float(np.max(seg_opening))

        # Same sustained-spike grouping as the closing side.
        critical_indices = np.where(seg_opening > ARCING_SPIKE_SEVERE)[0]
        if len(critical_indices) > 0:
            spike_groups = np.split(critical_indices, np.where(np.diff(critical_indices) > 2)[0] + 1)
            sustained_spikes = sum(1 for group in spike_groups if len(group) >= SPIKE_WIDTH_THRESHOLD)
            features['opening_sustained_spikes'] = int(sustained_spikes)
        else:
            features['opening_sustained_spikes'] = 0

        # Contact bounces: prominent local maxima during opening.
        peaks, properties = find_peaks(seg_opening, prominence=BOUNCE_PROMINENCE, distance=5)
        features['num_bounces'] = int(len(peaks))

        # High-frequency telegraph transitions in the arcing zone.
        opening_diffs = np.abs(np.diff(seg_opening))
        features['arcing_telegraph'] = int(np.sum(opening_diffs > 400))
    else:
        features['opening_sustained_spikes'] = 0
        features['num_bounces'] = 0
        features['arcing_telegraph'] = 0

    # Closing-side telegraph transitions are added onto the opening count
    # (only when the closing phase has enough samples to difference usefully).
    if len(seg_closing) > 10:
        closing_diffs = np.abs(np.diff(seg_closing))
        features['arcing_telegraph'] += int(np.sum(closing_diffs > 400))

    # Combined spike totals across both arcing zones.
    features['total_critical_spikes'] = features['closing_critical_spikes'] + features['opening_critical_spikes']
    features['total_severe_spikes'] = features['closing_severe_spikes'] + features['opening_severe_spikes']
    features['total_moderate_spikes'] = features['closing_moderate_spikes'] + features['opening_moderate_spikes']
    features['total_sustained_spikes'] = features['closing_sustained_spikes'] + features['opening_sustained_spikes']

    # Spike symmetry: ratio of the busier arcing zone to the quieter one
    # (1.0 = perfectly symmetric; only meaningful when both sides spiked).
    if features['closing_severe_spikes'] > 0 and features['opening_severe_spikes'] > 0:
        spike_ratio = max(features['closing_severe_spikes'], features['opening_severe_spikes']) / \
                      max(1, min(features['closing_severe_spikes'], features['opening_severe_spikes']))
        features['spike_symmetry'] = float(spike_ratio)
    else:
        features['spike_symmetry'] = 1.0

    return features
|
|
|
|
|
|
|
|
|
def classify_primary_faults(features, phases, kpis):
    """
    ULTRA-OPTIMIZED Multi-Class PRIMARY Fault Classification with Individual Probability Scores
    ============================================================================================
    Each defect accumulates an independent 0-100% probability from its own
    evidence rules (scores are NOT normalized and do not sum to 100%; several
    defects can score high simultaneously).

    A scored defect is appended to the returned list once its probability
    reaches 50%; a "Healthy" entry (whatever its score) is always appended
    last. The caller filters the final report at >50%.

    Classes covered here: 1-Healthy, 2-Main Wear, 3-Arcing Wear,
    4-Main Misalign, 5-Arcing Misalign, 11-Close Coil, 12-Trip Coil.

    Args:
        features: scalar feature dict from extract_features()
        phases:   phase index dict (currently unused here)
        kpis:     optional KPI dict; missing keys simply skip their rules

    Returns:
        List of fault dicts built by _build_result().
    """
    all_faults = []

    # ------------------------------------------------------------------
    # Class 11: Close Coil Damage (pure KPI rule)
    # ------------------------------------------------------------------
    close_coil_current = kpis.get('Peak Close Coil Current (A)', None)
    if close_coil_current is not None:
        if close_coil_current < CLOSE_COIL_CURRENT_MIN:
            conf = 95.0
            sev = "High"
            desc = f"Close Coil Current critically low ({close_coil_current:.2f} A, normal: 4-7 A). Coil winding damaged or control circuit fault."
            all_faults.append(_build_result("Close Coil Damage", f"{conf:.2f} %", sev, desc))

    # ------------------------------------------------------------------
    # Class 12: Trip Coil Damage (pure KPI rule)
    # NOTE(review): only the both-coils-failed case is reported; a single
    # failed trip coil goes unflagged -- confirm this is intended (redundant
    # coil design).
    # ------------------------------------------------------------------
    trip_coil1 = kpis.get('Peak Trip Coil 1 Current (A)', None)
    trip_coil2 = kpis.get('Peak Trip Coil 2 Current (A)', None)

    if trip_coil1 is not None and trip_coil2 is not None:

        if trip_coil1 < TRIP_COIL_CURRENT_MIN and trip_coil2 < TRIP_COIL_CURRENT_MIN:
            conf = 95.0
            sev = "Critical"
            desc = f"BOTH Trip Coils failed (TC1: {trip_coil1:.2f} A, TC2: {trip_coil2:.2f} A, normal: 4-7 A each). Breaker cannot trip - SAFETY CRITICAL."
            all_faults.append(_build_result("Trip Coil Damage", f"{conf:.2f} %", sev, desc))

    # ------------------------------------------------------------------
    # Class 5: Arcing Contact Misalignment
    # Evidence: timing asymmetry, shortened main-contact dwell, bounces,
    # telegraph noise in the arcing zones.
    # ------------------------------------------------------------------
    arcing_misalign_prob = 0.0
    arcing_misalign_reasons = []

    # Opening vs closing duration asymmetry (tiered).
    if features['asymmetry_ratio'] > ASYMMETRY_RATIO_CRITICAL:
        arcing_misalign_prob += 40.0
        arcing_misalign_reasons.append(f"Critical timing asymmetry: Opening phase {features['asymmetry_ratio']:.2f}x longer than closing (>3.0x indicates severe misalignment)")
    elif features['asymmetry_ratio'] > ASYMMETRY_RATIO_SEVERE:
        arcing_misalign_prob += 32.0
        arcing_misalign_reasons.append(f"Severe timing asymmetry: Opening {features['asymmetry_ratio']:.2f}x longer (>2.2x threshold)")
    elif features['asymmetry_ratio'] > ASYMMETRY_RATIO_MODERATE:
        arcing_misalign_prob += 24.0
        arcing_misalign_reasons.append(f"Moderate asymmetry: Timing ratio {features['asymmetry_ratio']:.2f} (normal <1.5)")

    # Shortened main-contact engagement.
    if features['phase3_reduction'] < PHASE3_REDUCTION_RATIO:
        reduction_pct = (1 - features['phase3_reduction']) * 100
        arcing_misalign_prob += 20.0
        arcing_misalign_reasons.append(f"Main contact duration reduced by {reduction_pct:.0f}% ({features['dur_main']} ms vs expected ~160 ms)")
    elif features['phase3_reduction'] < 0.80:
        arcing_misalign_prob += 12.0
        arcing_misalign_reasons.append(f"Slightly reduced contact engagement ({features['dur_main']} ms)")

    # Bounce count during opening.
    if features['num_bounces'] >= 5:
        arcing_misalign_prob += 25.0
        arcing_misalign_reasons.append(f"Detected {features['num_bounces']} sinusoidal bounces during opening (indicates mechanical oscillation)")
    elif features['num_bounces'] >= 3:
        arcing_misalign_prob += 18.0
        arcing_misalign_reasons.append(f"{features['num_bounces']} rounded bounces detected")
    elif features['num_bounces'] >= 1:
        arcing_misalign_prob += 10.0
        arcing_misalign_reasons.append(f"{features['num_bounces']} bounce peak(s) in arcing phase")

    # Telegraph noise in the arcing zones.
    if features['arcing_telegraph'] > 15:
        arcing_misalign_prob += 15.0
        arcing_misalign_reasons.append(f"High-frequency telegraph noise in arcing zones ({features['arcing_telegraph']} rapid transitions)")
    elif features['arcing_telegraph'] > 8:
        arcing_misalign_prob += 10.0
        arcing_misalign_reasons.append(f"Telegraph noise detected ({features['arcing_telegraph']} events)")

    if arcing_misalign_prob >= 50.0:
        prob_str = f"{min(99.0, arcing_misalign_prob):.2f} %"   # capped at 99%
        sev = _get_severity(arcing_misalign_prob)
        desc = ". ".join(arcing_misalign_reasons)
        all_faults.append(_build_result("Arcing Contact Misalignment", prob_str, sev, desc))

    # ------------------------------------------------------------------
    # Class 4: Main Contact Misalignment
    # Evidence: square-wave/telegraph plateau, initial transient, shelves,
    # high plateau variability.
    # ------------------------------------------------------------------
    main_misalign_prob = 0.0
    main_misalign_reasons = []

    # Full square-wave telegraph signature (all four conditions together).
    if (features['telegraph_jumps'] >= MISALIGNMENT_COUNT_MIN and
            features['jump_ratio'] > MISALIGNMENT_JUMP_RATIO and
            features['main_std'] > MISALIGNMENT_STD_MIN and
            features['is_square_wave'] == 1):
        main_misalign_prob += 45.0
        main_misalign_reasons.append(f"Square-wave telegraph pattern: {features['telegraph_jumps']} sharp jumps (>{MISALIGNMENT_JUMP_MIN} µΩ), duty cycle {features['square_wave_duty']:.2f}")
    elif features['telegraph_jumps'] >= MISALIGNMENT_COUNT_MIN and features['main_std'] > MISALIGNMENT_STD_MIN:
        main_misalign_prob += 35.0
        main_misalign_reasons.append(f"Telegraph pattern detected: {features['telegraph_jumps']} jumps, Std={features['main_std']:.1f} µΩ")
    elif features['telegraph_jumps'] >= 4:
        main_misalign_prob += 20.0
        main_misalign_reasons.append(f"Partial telegraph: {features['telegraph_jumps']} jumps")

    # Noisy start that then settles.
    if features['has_initial_jump'] == 1 and features['initial_deviation'] > 120:
        main_misalign_prob += 20.0
        main_misalign_reasons.append(f"High initial transient (Std={features['initial_deviation']:.1f} µΩ) then stabilizes - classic misalignment signature")
    elif features['has_initial_jump'] == 1:
        main_misalign_prob += 12.0
        main_misalign_reasons.append(f"Initial deviation detected ({features['initial_deviation']:.1f} µΩ)")

    # Discrete resistance shelves.
    if features['num_shelves'] >= 4 and features['main_range'] > SHELF_DETECTION_THRESHOLD:
        main_misalign_prob += 20.0
        main_misalign_reasons.append(f"Stepped transitions: {features['num_shelves']} discrete resistance plateaus (range {features['main_range']:.1f} µΩ)")
    elif features['num_shelves'] >= 3:
        main_misalign_prob += 12.0
        main_misalign_reasons.append(f"{features['num_shelves']} resistance shelves detected")

    # Overall plateau variability.
    if features['main_std'] > MISALIGNMENT_STD_MIN * 2:
        main_misalign_prob += 15.0
        main_misalign_reasons.append(f"Very high variability (Std={features['main_std']:.1f} µΩ, normal <15 µΩ)")
    elif features['main_std'] > MISALIGNMENT_STD_MIN:
        main_misalign_prob += 10.0
        main_misalign_reasons.append(f"Elevated variability (Std={features['main_std']:.1f} µΩ)")

    if main_misalign_prob >= 50.0:
        prob_str = f"{min(99.0, main_misalign_prob):.2f} %"
        sev = _get_severity(main_misalign_prob)
        desc = ". ".join(main_misalign_reasons)
        all_faults.append(_build_result("Main Contact Misalignment", prob_str, sev, desc))

    # ------------------------------------------------------------------
    # Class 2: Main Contact Wear
    # Evidence: elevated plateau mean (optionally confirmed by a DLRO KPI),
    # roughness, grassy spikes, continuous noise.
    # ------------------------------------------------------------------
    main_wear_prob = 0.0
    main_wear_reasons = []

    # DLRO KPI may arrive under several key spellings.
    # NOTE(review): assumes a numeric value; a string from CSV parsing would
    # break the > comparisons below.
    dlro_value = kpis.get('DLRO Value (µΩ)', kpis.get('DLRO_Value_uOhm', kpis.get('dlro_uohm', None)))

    # Tiered plateau-mean ladder; each tier has its own DLRO confirmation bar.
    if features['main_mean'] > R_WEAR_CRITICAL_MIN:
        elevation = ((features['main_mean'] - R_HEALTHY_MEAN_IDEAL) / R_HEALTHY_MEAN_IDEAL) * 100
        main_wear_prob += 50.0
        main_wear_reasons.append(f"CRITICAL wear: Resistance {features['main_mean']:.1f} µΩ (healthy: 20-70 µΩ, {elevation:.0f}% above ideal). Severe erosion/material loss detected")
        if dlro_value is not None and dlro_value > 250:
            main_wear_prob += 8.0
            main_wear_reasons.append(f"Confirmed by DLRO: {dlro_value:.1f} µΩ")
    elif features['main_mean'] > R_WEAR_SEVERE_MIN:
        elevation = ((features['main_mean'] - R_HEALTHY_MEAN_IDEAL) / R_HEALTHY_MEAN_IDEAL) * 100
        main_wear_prob += 42.0
        main_wear_reasons.append(f"SEVERE wear: Resistance {features['main_mean']:.1f} µΩ ({elevation:.0f}% above ideal). Significant contact degradation")
        if dlro_value is not None and dlro_value > 180:
            main_wear_prob += 8.0
            main_wear_reasons.append(f"Confirmed by DLRO: {dlro_value:.1f} µΩ")
    elif features['main_mean'] > R_WEAR_MODERATE_MIN:
        main_wear_prob += 32.0
        main_wear_reasons.append(f"MODERATE wear: Resistance {features['main_mean']:.1f} µΩ (healthy <70 µΩ). Contact wear progressing")
        if dlro_value is not None and dlro_value > 100:
            main_wear_prob += 8.0
            main_wear_reasons.append(f"Confirmed by DLRO: {dlro_value:.1f} µΩ")
    elif features['main_mean'] > R_WEAR_EARLY_MIN:
        main_wear_prob += 20.0
        main_wear_reasons.append(f"EARLY wear signs: Resistance {features['main_mean']:.1f} µΩ (healthy <70 µΩ)")
        if dlro_value is not None and dlro_value > 70:
            main_wear_prob += 8.0
            main_wear_reasons.append(f"DLRO confirms: {dlro_value:.1f} µΩ")

    # Surface roughness (plateau std).
    if features['main_std'] > WEAR_STD_SEVERE:
        main_wear_prob += 25.0
        main_wear_reasons.append(f"Severe surface roughness: Std={features['main_std']:.1f} µΩ (healthy <15 µΩ). Indicates pitting/erosion")
    elif features['main_std'] > WEAR_STD_MODERATE:
        main_wear_prob += 18.0
        main_wear_reasons.append(f"Moderate roughness: Std={features['main_std']:.1f} µΩ (healthy <15 µΩ)")
    elif features['main_std'] > WEAR_STD_EARLY:
        main_wear_prob += 10.0
        main_wear_reasons.append(f"Surface roughness detected: Std={features['main_std']:.1f} µΩ")

    # Grassy spike density in the plateau.
    if features['spike_density'] > 0.35:
        main_wear_prob += 15.0
        main_wear_reasons.append(f"Dense uniform spikes: {features['uniform_spikes']} spikes ({features['spike_density']*100:.1f}% density). Classic wear signature")
    elif features['spike_density'] > 0.20:
        main_wear_prob += 10.0
        main_wear_reasons.append(f"Grassy pattern: {features['uniform_spikes']} spikes detected")
    elif features['uniform_spikes'] > 10:
        main_wear_prob += 5.0
        main_wear_reasons.append(f"{features['uniform_spikes']} noise spikes in plateau")

    # Continuous noise level.
    if features['noise_rms'] > 40:
        main_wear_prob += 10.0
        main_wear_reasons.append(f"High continuous noise: RMS={features['noise_rms']:.1f} µΩ")
    elif features['avg_noise'] > 25:
        main_wear_prob += 6.0
        main_wear_reasons.append(f"Elevated noise level: {features['avg_noise']:.1f} µΩ")

    if main_wear_prob >= 50.0:
        prob_str = f"{min(99.0, main_wear_prob):.2f} %"
        sev = _get_severity(main_wear_prob)
        desc = ". ".join(main_wear_reasons)
        all_faults.append(_build_result("Main Contact Wear", prob_str, sev, desc))

    # ------------------------------------------------------------------
    # Class 3: Arcing Contact Wear
    # Evidence: high-amplitude spikes in the arcing zones, symmetric spike
    # distribution, arcing-zone instability; discounted when the signature
    # looks like main-contact wear or misalignment instead.
    # ------------------------------------------------------------------
    arcing_wear_prob = 0.0
    arcing_wear_reasons = []

    # Tiered spike evidence (critical > severe > moderate).
    if features['total_critical_spikes'] >= ARCING_SPIKE_COUNT_CRITICAL:
        arcing_wear_prob += 50.0
        arcing_wear_reasons.append(f"CRITICAL: {features['total_critical_spikes']} severe arc flashes detected (>8000 µΩ). Arcing contact severely eroded")
        if features['total_sustained_spikes'] >= 2:
            arcing_wear_prob += 8.0
            arcing_wear_reasons.append(f"{features['total_sustained_spikes']} sustained arc events (>3 samples width)")
    elif features['total_severe_spikes'] >= ARCING_SPIKE_COUNT_SEVERE:
        arcing_wear_prob += 40.0
        arcing_wear_reasons.append(f"SEVERE: {features['total_severe_spikes']} high-energy spikes (>5000 µΩ) in arcing zones")
        if features['total_sustained_spikes'] >= 2:
            arcing_wear_prob += 8.0
            arcing_wear_reasons.append(f"{features['total_sustained_spikes']} sustained arcs detected")
    elif features['total_severe_spikes'] >= 2:
        arcing_wear_prob += 28.0
        arcing_wear_reasons.append(f"{features['total_severe_spikes']} arcing spikes detected (>5000 µΩ)")
    elif features['total_moderate_spikes'] >= 5:
        arcing_wear_prob += 20.0
        arcing_wear_reasons.append(f"{features['total_moderate_spikes']} moderate arcing events (>3000 µΩ)")

    # Symmetric spikes on both closing and opening imply genuine contact wear
    # (vs a one-sided mechanical issue).
    if features['spike_symmetry'] < 1.4 and features['total_severe_spikes'] > 0:
        arcing_wear_prob += 20.0
        arcing_wear_reasons.append(f"Symmetric spike distribution (ratio {features['spike_symmetry']:.2f}). Confirms uniform arcing wear on both contacts")
    elif features['spike_symmetry'] < 1.8 and features['total_severe_spikes'] > 0:
        arcing_wear_prob += 12.0
        arcing_wear_reasons.append(f"Relatively symmetric pattern (ratio {features['spike_symmetry']:.2f})")

    # Arcing-zone instability (worst of the two zones).
    max_arcing_std = max(features['closing_std'], features['opening_std'])
    if max_arcing_std > ARCING_INSTABILITY_STD * 1.5:
        arcing_wear_prob += 15.0
        arcing_wear_reasons.append(f"Very high arcing instability: Std={max_arcing_std:.1f} µΩ (normal <500 µΩ)")
    elif max_arcing_std > ARCING_INSTABILITY_STD:
        arcing_wear_prob += 10.0
        arcing_wear_reasons.append(f"Elevated arcing zone variability: Std={max_arcing_std:.1f} µΩ")

    # A healthy main plateau strengthens the "wear is in the arcing contacts"
    # interpretation; an elevated one discounts it.
    if features['main_mean'] < R_HEALTHY_MAX and features['main_std'] < WEAR_STD_MODERATE:
        arcing_wear_prob += 15.0
        arcing_wear_reasons.append(f"Main contact healthy (Mean={features['main_mean']:.1f} µΩ, Std={features['main_std']:.1f} µΩ). Confirms wear isolated to arcing contacts")
    else:

        if features['main_mean'] > R_WEAR_MODERATE_MIN:
            arcing_wear_prob -= 10.0

    # Strong timing asymmetry points at misalignment rather than wear.
    if features['asymmetry_ratio'] > ASYMMETRY_RATIO_MODERATE:
        arcing_wear_prob -= 18.0

    if arcing_wear_prob >= 50.0:
        prob_str = f"{min(99.0, arcing_wear_prob):.2f} %"
        sev = _get_severity(arcing_wear_prob)
        desc = ". ".join(arcing_wear_reasons)
        all_faults.append(_build_result("Arcing Contact Wear", prob_str, sev, desc))

    # ------------------------------------------------------------------
    # Class 1: Healthy -- start from 100% and subtract a penalty for every
    # defect indicator present. Always appended, whatever the final score.
    # ------------------------------------------------------------------
    healthy_prob = 100.0
    healthy_reasons = []

    # Plateau-mean penalties (mirror of the wear ladder).
    if features['main_mean'] > R_WEAR_CRITICAL_MIN:
        healthy_prob -= 55.0
    elif features['main_mean'] > R_WEAR_SEVERE_MIN:
        healthy_prob -= 48.0
    elif features['main_mean'] > R_WEAR_MODERATE_MIN:
        healthy_prob -= 40.0
    elif features['main_mean'] > R_WEAR_EARLY_MIN:
        healthy_prob -= 25.0
    elif features['main_mean'] > R_HEALTHY_MAX:
        healthy_prob -= 12.0

    # Plateau-roughness penalties.
    if features['main_std'] > WEAR_STD_SEVERE:
        healthy_prob -= 30.0
    elif features['main_std'] > WEAR_STD_MODERATE:
        healthy_prob -= 22.0
    elif features['main_std'] > R_HEALTHY_STD_MAX:
        healthy_prob -= 12.0

    # Telegraph-jump penalties.
    if features['telegraph_jumps'] > 8:
        healthy_prob -= 20.0
    elif features['telegraph_jumps'] > 5:
        healthy_prob -= 12.0
    elif features['telegraph_jumps'] > 2:
        healthy_prob -= 6.0

    # Arcing-spike penalties.
    if features['total_critical_spikes'] > 0:
        healthy_prob -= 20.0
    elif features['total_severe_spikes'] > 1:
        healthy_prob -= 15.0
    elif features['total_moderate_spikes'] > 3:
        healthy_prob -= 10.0

    # Timing-asymmetry penalties.
    if features['asymmetry_ratio'] > ASYMMETRY_RATIO_SEVERE:
        healthy_prob -= 15.0
    elif features['asymmetry_ratio'] > ASYMMETRY_RATIO_MODERATE:
        healthy_prob -= 10.0
    elif features['asymmetry_ratio'] > 1.5:
        healthy_prob -= 5.0

    # Bounce penalties.
    if features['num_bounces'] > 4:
        healthy_prob -= 12.0
    elif features['num_bounces'] > 2:
        healthy_prob -= 7.0

    # Grassy-noise penalties.
    if features['spike_density'] > 0.30:
        healthy_prob -= 10.0
    elif features['spike_density'] > 0.15:
        healthy_prob -= 5.0

    # Build a human-readable justification only when the breaker still looks
    # at least half healthy; below 50% the description is left empty.
    if healthy_prob >= 50.0:
        healthy_reasons.append(f"Normal operation. Main Contact: Mean={features['main_mean']:.1f} µΩ (ideal: 20-70 µΩ), Std={features['main_std']:.1f} µΩ (smooth: <15 µΩ)")
        healthy_reasons.append(f"Timing: {features['asymmetry_ratio']:.2f} ratio (balanced: <1.5)")

        if features['total_critical_spikes'] == 0 and features['total_severe_spikes'] == 0:
            healthy_reasons.append("No abnormal arcing detected")

        if features['telegraph_jumps'] <= 2 and features['is_square_wave'] == 0:
            healthy_reasons.append("Smooth transitions, no misalignment patterns")

    prob_str = f"{max(0.0, healthy_prob):.2f} %"   # penalties can push below 0
    sev = "None" if healthy_prob >= 85.0 else "Low"
    desc = ". ".join(healthy_reasons)
    all_faults.append(_build_result("Healthy", prob_str, sev, desc))

    return all_faults
|
|
|
|
|
|
|
|
|
def classify_secondary_faults(features, phases, kpis, primary_faults):
    """
    Detect SECONDARY MECHANICAL/OPERATIONAL DEFECTS (Classes 6-10).
    Uses EXTREME STRICTNESS as per Gemini Agent 2 logic.
    Only reports if confidence >75% AND overwhelming evidence.

    Args:
        features: dict of waveform-derived metrics (keys used here include
            'num_bounces', 'main_std', 'main_mean', 'telegraph_jumps',
            'num_shelves', 'dur_closing', 'dur_main', 'dur_opening').
            Assumed precomputed upstream -- TODO confirm exact producer.
        phases: phase segmentation data (not referenced in this body).
        kpis: flat dict of KPI readings keyed as "Name (unit)"; missing
            sensors simply yield None and degrade confidence.
        primary_faults: list of fault dicts from the primary classifier;
            each entry has 'defect_name' and 'Confidence' like "85.00 %".

    Returns:
        List of fault dicts built by _build_result, one per detected
        secondary defect (possibly empty).
    """
    secondary_faults = []

    # Names of already-detected primary defects, used for cross-checks below.
    primary_names = [f['defect_name'] for f in primary_faults]

    # ------------------------------------------------------------------
    # Class: Operating Mechanism Malfunction -- driven by timing KPIs.
    # ------------------------------------------------------------------
    closing_time = kpis.get('Closing Time (ms)', None)
    opening_time = kpis.get('Opening Time (ms)', None)
    contact_speed = kpis.get('Contact Speed (m/s)', None)

    mechanism_score = 0
    mechanism_reasons = []
    kpi_count = 0  # number of timing KPIs deviating beyond tolerance

    # Closing time outside nominal band widened by TIMING_DEVIATION_THRESHOLD (20%).
    if closing_time is not None:
        if closing_time > CLOSING_TIME_NOM[1] * (1 + TIMING_DEVIATION_THRESHOLD):
            # Deviation reported relative to the nominal upper bound.
            deviation = ((closing_time - CLOSING_TIME_NOM[1]) / CLOSING_TIME_NOM[1]) * 100
            mechanism_score += 35
            mechanism_reasons.append(f"Slow closing: {closing_time:.1f} ms (nominal 80-100 ms, {deviation:.1f}% slower)")
            kpi_count += 1
        elif closing_time < CLOSING_TIME_NOM[0] * (1 - TIMING_DEVIATION_THRESHOLD):
            deviation = ((CLOSING_TIME_NOM[0] - closing_time) / CLOSING_TIME_NOM[0]) * 100
            mechanism_score += 35
            mechanism_reasons.append(f"Fast closing: {closing_time:.1f} ms ({deviation:.1f}% faster)")
            kpi_count += 1

    # Opening time, same tolerance scheme as closing time.
    if opening_time is not None:
        if opening_time > OPENING_TIME_NOM[1] * (1 + TIMING_DEVIATION_THRESHOLD):
            deviation = ((opening_time - OPENING_TIME_NOM[1]) / OPENING_TIME_NOM[1]) * 100
            mechanism_score += 35
            mechanism_reasons.append(f"Slow opening: {opening_time:.1f} ms (nominal 30-40 ms, {deviation:.1f}% slower)")
            kpi_count += 1
        elif opening_time < OPENING_TIME_NOM[0] * (1 - TIMING_DEVIATION_THRESHOLD):
            deviation = ((OPENING_TIME_NOM[0] - opening_time) / OPENING_TIME_NOM[0]) * 100
            mechanism_score += 35
            mechanism_reasons.append(f"Fast opening: {opening_time:.1f} ms ({deviation:.1f}% faster)")
            kpi_count += 1

    # Contact speed: weighted slightly lower (30) than timing deviations (35).
    if contact_speed is not None:
        if contact_speed < CONTACT_SPEED_NOM[0] * (1 - TIMING_DEVIATION_THRESHOLD):
            deviation = ((CONTACT_SPEED_NOM[0] - contact_speed) / CONTACT_SPEED_NOM[0]) * 100
            mechanism_score += 30
            mechanism_reasons.append(f"Low contact speed: {contact_speed:.2f} m/s (nominal 4.5-6.5 m/s, {deviation:.1f}% slower)")
            kpi_count += 1
        elif contact_speed > CONTACT_SPEED_NOM[1] * (1 + TIMING_DEVIATION_THRESHOLD):
            deviation = ((contact_speed - CONTACT_SPEED_NOM[1]) / CONTACT_SPEED_NOM[1]) * 100
            mechanism_score += 30
            mechanism_reasons.append(f"High contact speed: {contact_speed:.2f} m/s ({deviation:.1f}% faster)")
            kpi_count += 1

    # Corroboration bonus: two or more abnormal timing KPIs together.
    if kpi_count >= 2:
        mechanism_score += 15
        mechanism_reasons.append("Multiple timing parameters affected - confirms mechanism malfunction")

    if mechanism_score > 0:
        conf = min(95.0, mechanism_score)  # confidence capped at 95%
        sev = _get_severity(conf)
        desc = ". ".join(mechanism_reasons)
        secondary_faults.append(_build_result("Operating Mechanism Malfunction", f"{conf:.2f} %", sev, desc))

    # ------------------------------------------------------------------
    # Class: Damping System Fault -- excessive contact bouncing.
    # ------------------------------------------------------------------
    damping_score = 0
    damping_reasons = []

    if features['num_bounces'] > BOUNCE_COUNT_THRESHOLD:
        damping_score += 50
        damping_reasons.append(f"Excessive bouncing detected: {features['num_bounces']} distinct bounces in main contact zone (>5 indicates damper failure)")
    elif features['num_bounces'] >= 5:
        # Borderline case: exactly at the threshold count.
        damping_score += 35
        damping_reasons.append(f"{features['num_bounces']} bounces detected")

    # High plateau noise together with repeated bounces strengthens the call.
    if features['main_std'] > 50 and features['num_bounces'] >= 5:
        damping_score += 25
        damping_reasons.append(f"Oscillation pattern in main contact (Std={features['main_std']:.1f} µΩ with structured bounces)")

    if damping_score > 0:
        conf = min(95.0, damping_score)
        sev = _get_severity(conf)
        desc = ". ".join(damping_reasons)
        secondary_faults.append(_build_result("Damping System Fault", f"{conf:.2f} %", sev, desc))

    # ------------------------------------------------------------------
    # Class: Pressure System Leakage (SF6 Gas Chamber).
    # Two evidence paths: direct pressure sensor, or inference from
    # prolonged arc quenching when no sensor data exists.
    # ------------------------------------------------------------------
    sf6_pressure = kpis.get('SF6 Pressure (bar)', None)

    sf6_score = 0
    sf6_reasons = []

    if sf6_pressure is not None:
        if sf6_pressure < SF6_PRESSURE_CRITICAL:
            sf6_score += 60
            sf6_reasons.append(f"SF6 pressure critically low: {sf6_pressure:.2f} bar (normal: 5.5-6.5 bar)")

            # Supporting evidence: slow arc quenching confirms the leak.
            # NOTE(review): nesting of these two checks inside the
            # critical-pressure branch inferred from the reason wording;
            # confirm against original indentation.
            if features['dur_opening'] > ARC_QUENCH_DURATION_MAX:
                sf6_score += 25
                sf6_reasons.append(f"Prolonged arc quenching ({features['dur_opening']} ms) confirms gas leak")

            # Cross-check against primary classifier output.
            if "Arcing Contact Wear" in primary_names:
                sf6_score += 10
                sf6_reasons.append("Arcing wear detected as primary defect - consistent with SF6 leak")
    else:
        # No pressure sensor: infer leak only from strong indirect evidence.
        if features['dur_opening'] > ARC_QUENCH_DURATION_MAX + 10:
            if "Arcing Contact Wear" in primary_names:
                # Pull the primary classifier's confidence for arcing wear;
                # 'Confidence' strings look like "85.00 %".
                arcing_conf = 0
                for pf in primary_faults:
                    if pf['defect_name'] == "Arcing Contact Wear":
                        arcing_conf = float(pf['Confidence'].replace('%', '').strip())

                if arcing_conf > 85:
                    sf6_score += 55
                    sf6_reasons.append(f"Prolonged arc quenching ({features['dur_opening']} ms, normal <25 ms) with severe arcing wear - indicates possible SF6 leak")
                    sf6_reasons.append("WARNING: No SF6 pressure sensor data. Confidence limited to 70%")
                    sf6_score = min(sf6_score, 70)  # cap without sensor data

    if sf6_score > 0:
        conf = min(95.0, sf6_score)
        sev = _get_severity(conf)
        desc = ". ".join(sf6_reasons)
        secondary_faults.append(_build_result("Pressure System Leakage (SF6 Gas Chamber)", f"{conf:.2f} %", sev, desc))

    # ------------------------------------------------------------------
    # Class: Linkage/Connecting Rod Obstruction/Damage -- mechanical
    # stutters plus stepped plateaus plus slowed overall operation.
    # ------------------------------------------------------------------
    linkage_score = 0
    linkage_reasons = []

    if features['telegraph_jumps'] > STUTTER_COUNT_MIN and features['num_shelves'] > 3:
        # Total operation time across all three phases vs. an expected
        # nominal of ~250 ms (hard-coded here).
        total_op_time = features['dur_closing'] + features['dur_main'] + features['dur_opening']
        expected_time = 250

        if total_op_time > expected_time * 1.15:
            linkage_score += 50
            linkage_reasons.append(f"Detected {features['telegraph_jumps']} mechanical stutters with {features['num_shelves']} stepped plateaus")
            linkage_reasons.append(f"Total operation time {total_op_time} ms (expected ~{expected_time} ms, {((total_op_time/expected_time - 1)*100):.1f}% longer)")
    elif features['num_shelves'] >= 5:
        # Fallback path: many plateaus alone (without the stutter+time
        # combination) is weaker evidence. NOTE(review): pairing of this
        # elif with the outer condition inferred -- confirm indentation.
        linkage_score += 35
        linkage_reasons.append(f"Multiple stepped plateaus ({features['num_shelves']}) indicate mechanical impedance")

    if linkage_score > 0:
        conf = min(95.0, linkage_score)
        sev = _get_severity(conf)
        desc = ". ".join(linkage_reasons)
        secondary_faults.append(_build_result("Linkage/Connecting Rod Obstruction/Damage", f"{conf:.2f} %", sev, desc))

    # ------------------------------------------------------------------
    # Class: Fixed Contact Damage/Deformation -- high static resistance
    # (DLRO) with a smooth plateau points to the fixed contact/connection
    # rather than moving-contact wear.
    # ------------------------------------------------------------------
    dlro_value = kpis.get('DLRO Value (µΩ)', None)

    fixed_contact_score = 0
    fixed_contact_reasons = []

    if dlro_value is not None:
        if dlro_value > DLRO_CRITICAL:
            # Smooth plateau => resistance is high but stable: fixed-side issue.
            if features['main_std'] < FIXED_CONTACT_STD_MAX:
                fixed_contact_score += 50
                fixed_contact_reasons.append(f"DLRO critically high: {dlro_value:.1f} µΩ (normal <50 µΩ, critical >100 µΩ)")
                fixed_contact_reasons.append(f"Smooth plateau (Std={features['main_std']:.1f} µΩ) indicates fixed contact/connection issue, not wear")
            else:
                # Noisy plateau: attribute more weight when wear was NOT
                # already flagged as the primary cause.
                if "Main Contact Wear" not in primary_names:
                    fixed_contact_score += 40
                    fixed_contact_reasons.append(f"DLRO high: {dlro_value:.1f} µΩ with noisy plateau")
                else:
                    # Wear already explains part of the resistance rise.
                    fixed_contact_score += 30
                    fixed_contact_reasons.append(f"DLRO high: {dlro_value:.1f} µΩ (secondary to Main Contact Wear)")

        elif dlro_value > DLRO_MODERATE:
            if features['main_std'] < FIXED_CONTACT_STD_MAX:
                fixed_contact_score += 35
                fixed_contact_reasons.append(f"DLRO moderately elevated: {dlro_value:.1f} µΩ (normal <50 µΩ)")
    else:
        # No DLRO measurement: fall back to the DCRM plateau statistics.
        if features['main_mean'] > DLRO_MODERATE and features['main_std'] < FIXED_CONTACT_STD_MAX:
            if "Main Contact Wear" not in primary_names:
                fixed_contact_score += 30
                fixed_contact_reasons.append(f"Elevated but smooth plateau (Mean={features['main_mean']:.1f} µΩ, Std={features['main_std']:.1f} µΩ) suggests fixed contact issue")
                fixed_contact_reasons.append("WARNING: No DLRO sensor data. Confidence limited to 65%")
                fixed_contact_score = min(fixed_contact_score, 65)  # cap without DLRO

    if fixed_contact_score > 0:
        conf = min(90.0, fixed_contact_score)  # lower cap than other classes
        sev = _get_severity(conf)
        desc = ". ".join(fixed_contact_reasons)
        secondary_faults.append(_build_result("Fixed Contact Damage/Deformation", f"{conf:.2f} %", sev, desc))

    return secondary_faults
|
|
|
|
|
|
|
|
|
def _get_severity(probability):
|
|
|
"""
|
|
|
Determine severity based on defect probability score.
|
|
|
|
|
|
Args:
|
|
|
probability: Float 0-100 representing defect probability
|
|
|
|
|
|
Returns:
|
|
|
String: "Critical", "High", "Medium", "Low", or "None"
|
|
|
"""
|
|
|
if probability >= 90:
|
|
|
return "Critical"
|
|
|
elif probability >= 75:
|
|
|
return "High"
|
|
|
elif probability >= 60:
|
|
|
return "Medium"
|
|
|
elif probability >= 50:
|
|
|
return "Low"
|
|
|
else:
|
|
|
return "None"
|
|
|
|
|
|
|
|
|
def _build_result(name, conf, sev, desc):
|
|
|
"""Helper to build fault result dictionary with proper Unicode handling"""
|
|
|
return {
|
|
|
"defect_name": name,
|
|
|
"Confidence": conf,
|
|
|
"Severity": sev,
|
|
|
"description": desc
|
|
|
}
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
def parse_kpis_from_json(kpis_json):
    """
    Convert KPI JSON format to dictionary for internal use.

    Input format:
        {
            "kpis": [
                {"name": "Closing Time", "unit": "ms", "value": 87.8},
                ...
            ]
        }

    Output format:
        {
            "Closing Time (ms)": 87.8,
            ...
        }

    A None input yields an empty dict; an already-flat dict (no "kpis"
    wrapper key) is passed through unchanged.
    """
    if kpis_json is None:
        return {}

    # Already in the flat "Name (unit)" -> value shape: nothing to do.
    if isinstance(kpis_json, dict) and "kpis" not in kpis_json:
        return kpis_json

    flattened = {}
    for entry in kpis_json.get("kpis", []):
        label = entry.get("name", "")
        unit = entry.get("unit", "")
        # Unit-less KPIs keep the bare name as their key.
        if unit:
            label = f"{label} ({unit})"
        flattened[label] = entry.get("value", None)

    return flattened
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
def analyze_dcrm_from_dataframe(df, kpis=None):
    """
    Central pipeline function to analyze DCRM data from DataFrame.

    Args:
        df: DataFrame with Resistance column (401 points)
        kpis: KPI data in JSON format or dict format
            JSON format: {"kpis": [{"name": "...", "unit": "...", "value": ...}, ...]}
            Dict format: {"Name (unit)": value, ...}

    Returns:
        JSON with fault detection results and classifications
    """
    # Normalize the incoming frame first so downstream code always sees
    # the standardized T_* column layout.
    frame = standardize_input(df)

    # The resistance samples live in the T_* columns of the first row.
    sample_columns = [col for col in frame.columns if col.startswith('T_')]
    resistance_curve = frame.iloc[0][sample_columns].values

    # Accept either the raw KPI JSON payload or an already-flat dict.
    kpi_lookup = parse_kpis_from_json(kpis)

    return analyze_dcrm_advanced(resistance_curve, kpis=kpi_lookup)
|
|
|
|
|
|
|
|
|
if __name__ == "__main__":
    # Fix: the CSV location was a hard-coded absolute Windows path, making
    # the script unrunnable anywhere else. Allow an optional command-line
    # override while keeping the original path as the default for
    # backward compatibility.
    default_csv = 'C:\\Users\\rkhanke\\Downloads\\parallel_proccessing\\combined\\data\\df3_final.csv'
    csv_path = sys.argv[1] if len(sys.argv) > 1 else default_csv
    df = pd.read_csv(csv_path)

    # Representative KPI payload in the documented JSON format
    # ({"kpis": [{"name", "unit", "value"}, ...]}) consumed by
    # parse_kpis_from_json via analyze_dcrm_from_dataframe.
    sample_kpis = {
        "kpis": [
            {"name": "Closing Time", "unit": "ms", "value": 90.0},
            {"name": "Opening Time", "unit": "ms", "value": 35.0},
            {"name": "DLRO Value", "unit": "µΩ", "value": 299.93},
            {"name": "Peak Resistance", "unit": "µΩ", "value": 408.0},
            {"name": "Main Wipe", "unit": "mm", "value": 46.0},
            {"name": "Arc Wipe", "unit": "mm", "value": 63.0},
            {"name": "Contact Travel Distance", "unit": "mm", "value": 550.0},
            {"name": "Contact Speed", "unit": "m/s", "value": 5.5},
            {"name": "Peak Close Coil Current", "unit": "A", "value": 5.2},
            {"name": "Peak Trip Coil 1 Current", "unit": "A", "value": 5.0},
            {"name": "Peak Trip Coil 2 Current", "unit": "A", "value": 4.8},
            {"name": "Ambient Temperature", "unit": "°C", "value": 28.4}
        ]
    }

    result = analyze_dcrm_from_dataframe(df, kpis=sample_kpis)
    # ensure_ascii=False keeps µΩ/°C symbols readable in the output.
    print(json.dumps(result, indent=2, ensure_ascii=False))
|
|
|
|
|
|
|