Upload 97 files
Browse filesThis view is limited to 50 files because it contains too many changes.
See raw diff
- .dockerignore +26 -0
- .env +8 -0
- DEPLOYMENT_GUIDE.md +66 -0
- Dockerfile +44 -0
- README_HF.md +51 -0
- apps/__pycache__/flask_server.cpython-313.pyc +0 -0
- apps/__pycache__/streamlit_dashboard.cpython-313.pyc +0 -0
- apps/fastapi_server.py +340 -0
- apps/flask_server.py +436 -0
- apps/resistance_vs_time.png +0 -0
- apps/streamlit_dashboard.py +867 -0
- apps/temp_vit_plot.png +0 -0
- core/agents/__pycache__/advice.cpython-313.pyc +0 -0
- core/agents/__pycache__/advice.cpython-39.pyc +0 -0
- core/agents/__pycache__/diagnosis.cpython-313.pyc +0 -0
- core/agents/__pycache__/diagnosis.cpython-39.pyc +0 -0
- core/agents/__pycache__/plotting.cpython-313.pyc +0 -0
- core/agents/__pycache__/recommendation.cpython-313.pyc +0 -0
- core/agents/__pycache__/recommendation.cpython-39.pyc +0 -0
- core/agents/advice.py +395 -0
- core/agents/diagnosis.py +754 -0
- core/agents/plotting.py +271 -0
- core/agents/recommendation.py +427 -0
- core/calculators/__pycache__/cbhi.cpython-313.pyc +0 -0
- core/calculators/__pycache__/cbhi.cpython-39.pyc +0 -0
- core/calculators/__pycache__/kpi.cpython-313.pyc +0 -0
- core/calculators/__pycache__/kpi.cpython-39.pyc +0 -0
- core/calculators/__pycache__/rul.cpython-313.pyc +0 -0
- core/calculators/__pycache__/rul.cpython-39.pyc +0 -0
- core/calculators/cbhi.py +197 -0
- core/calculators/kpi.py +307 -0
- core/calculators/rul.py +500 -0
- core/engines/__pycache__/advanced_rules.cpython-313.pyc +0 -0
- core/engines/__pycache__/diagnostics.cpython-313.pyc +0 -0
- core/engines/__pycache__/rules.cpython-313.pyc +0 -0
- core/engines/__pycache__/rules.cpython-39.pyc +0 -0
- core/engines/advanced_rules.py +399 -0
- core/engines/diagnostics.py +141 -0
- core/engines/rules.py +1328 -0
- core/models/__pycache__/vit_classifier.cpython-313.pyc +0 -0
- core/models/__pycache__/vit_classifier.cpython-39.pyc +0 -0
- core/models/vit_classifier.py +261 -0
- core/signal/__pycache__/arcing.cpython-313.pyc +0 -0
- core/signal/__pycache__/phases.cpython-313.pyc +0 -0
- core/signal/__pycache__/phases.cpython-39.pyc +0 -0
- core/signal/__pycache__/segmentation.cpython-313.pyc +0 -0
- core/signal/arcing.py +143 -0
- core/signal/phases.py +687 -0
- core/signal/segmentation.py +134 -0
- core/utils/__pycache__/report_generator.cpython-313.pyc +0 -0
.dockerignore
ADDED
|
@@ -0,0 +1,26 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
__pycache__
|
| 2 |
+
*.pyc
|
| 3 |
+
*.pyo
|
| 4 |
+
*.pyd
|
| 5 |
+
.Python
|
| 6 |
+
env/
|
| 7 |
+
venv/
|
| 8 |
+
.env
|
| 9 |
+
.venv
|
| 10 |
+
pip-log.txt
|
| 11 |
+
pip-delete-this-directory.txt
|
| 12 |
+
.tox/
|
| 13 |
+
.coverage
|
| 14 |
+
.coverage.*
|
| 15 |
+
.cache
|
| 16 |
+
nosetests.xml
|
| 17 |
+
coverage.xml
|
| 18 |
+
*.cover
|
| 19 |
+
*.log
|
| 20 |
+
.git
|
| 21 |
+
.gitignore
|
| 22 |
+
.mytg
|
| 23 |
+
.pytest_cache
|
| 24 |
+
docs/
|
| 25 |
+
tests/
|
| 26 |
+
tmp/
|
.env
ADDED
|
@@ -0,0 +1,8 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# GOOGLE_API_KEY=AIzaSyAjTiwQ6Pk__y7VmLStQpyT3ZdHPCL76uk
|
| 2 |
+
# GOOGLE_API_KEY_1=AIzaSyDXBFif6puAw8I7lAOlEv6p24SUWpwUF1k
|
| 3 |
+
# GOOGLE_API_KEY_2=AIzaSyBYU1pjQW_tSSC07FOjUVWoAx-JbGvbQGE
|
| 4 |
+
# GOOGLE_API_KEY_3=AIzaSyAfF-13nsrMdAAe3SFOPSxFya4EtfLBjho
|
| 5 |
+
GOOGLE_API_KEY=AIzaSyDeIdKxOMYB002k5W-vT5IiVUW7KJKoX_I
|
| 6 |
+
GOOGLE_API_KEY_1=AIzaSyDlx9XjJMzkD1oZxstW3xzodSBJqymXlyg
|
| 7 |
+
GOOGLE_API_KEY_2=AIzaSyAnykdmhgqHpEtqglf6KCf_rCvYnhCl__Y
|
| 8 |
+
GOOGLE_API_KEY_3=AIzaSyAkogd4IytURSGx6VYK8VYhlymJMgUfDBI
|
DEPLOYMENT_GUIDE.md
ADDED
|
@@ -0,0 +1,66 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Deploying DCRM Streamlit App to DigitalOcean
|
| 2 |
+
|
| 3 |
+
This guide outlines how to deploy your DCRM Analyzer Pro application to DigitalOcean's App Platform.
|
| 4 |
+
|
| 5 |
+
## Prerequisites
|
| 6 |
+
|
| 7 |
+
1. **DigitalOcean Account**: You need an account on [DigitalOcean](https://www.digitalocean.com/).
|
| 8 |
+
2. **GitHub Repository**: Your code must be pushed to a GitHub repository (private or public).
|
| 9 |
+
3. **Google API Key**: You will need your `GOOGLE_API_KEY` for the application to function.
|
| 10 |
+
|
| 11 |
+
## Step 1: Push Code to GitHub
|
| 12 |
+
|
| 13 |
+
Ensure your latest code (including the new `Dockerfile` and `.dockerignore`) is committed and pushed to your GitHub repository.
|
| 14 |
+
|
| 15 |
+
```bash
|
| 16 |
+
git add .
|
| 17 |
+
git commit -m "Add Docker deployment configuration"
|
| 18 |
+
git push origin main
|
| 19 |
+
```
|
| 20 |
+
|
| 21 |
+
## Step 2: Create App on DigitalOcean
|
| 22 |
+
|
| 23 |
+
1. Log in to the **DigitalOcean Control Panel**.
|
| 24 |
+
2. Click **Create** (green button at top right) -> **Apps**.
|
| 25 |
+
3. **Choose Source**: Select **GitHub**.
|
| 26 |
+
4. **Repository**: Select your repository (e.g., `final_DCRM_3PHASE_REVIEWED`).
|
| 27 |
+
5. **Branch**: Select `main` (or your working branch).
|
| 28 |
+
6. **Source Directory**: `/` (default).
|
| 29 |
+
7. Click **Next**.
|
| 30 |
+
|
| 31 |
+
## Step 3: Configure Resources
|
| 32 |
+
|
| 33 |
+
1. DigitalOcean should auto-detect the `Dockerfile` and select **Dockerfile** as the build strategy.
|
| 34 |
+
2. **Service Name**: You can rename it (e.g., `dcrm-api`).
|
| 35 |
+
3. **HTTP Port**: Ensure this is set to **5000** (Flask default).
|
| 36 |
+
4. **Instance Size**:
|
| 37 |
+
* Basic / Pro plans work well.
|
| 38 |
+
* **Recommendation**: 1 GB RAM minimum (approx $6/mo) is good for the API.
|
| 39 |
+
5. Click **Next**.
|
| 40 |
+
|
| 41 |
+
## Step 4: Environment Variables
|
| 42 |
+
|
| 43 |
+
1. Click **Edit** next to your service (or go to extracting environment variables step).
|
| 44 |
+
2. Add the following **Global Environment Variables**:
|
| 45 |
+
* **Key**: `GOOGLE_API_KEY`
|
| 46 |
+
* **Value**: *[Paste your actual Google API Key here]*
|
| 47 |
+
* **Encrypt**: Checked (Recommended)
|
| 48 |
+
3. Click **Save**.
|
| 49 |
+
4. Click **Next**.
|
| 50 |
+
|
| 51 |
+
## Step 5: Review & Deploy
|
| 52 |
+
|
| 53 |
+
1. Review the details (Region, Plan, Cost).
|
| 54 |
+
2. Click **Create Resources**.
|
| 55 |
+
|
| 56 |
+
DigitalOcean will now build your Docker image and deploy it. This process usually takes 3-5 minutes.
|
| 57 |
+
|
| 58 |
+
## Step 6: Access Your App
|
| 59 |
+
|
| 60 |
+
Once deployment is successful, you will see a **Live URL** (e.g., `https://dcrm-app-xyz.ondigitalocean.app/`). Click it to access your DCRM Analyzer!
|
| 61 |
+
|
| 62 |
+
## Troubleshooting
|
| 63 |
+
|
| 64 |
+
- **Build Failed**: Check the "Activity" tab for build logs. Verify requirements installation.
|
| 65 |
+
- **Health Check Failed**: Ensure port 5000 is exposed in Dockerfile (we handled this) and App Platform settings.
|
| 66 |
+
- **App Crashes**: Check "Runtime Logs". Often due to missing API keys or memory limits. If OOM (Out of Memory), upgrade to a 1GB or 2GB instance.
|
Dockerfile
ADDED
|
@@ -0,0 +1,44 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Use an official Python runtime as a parent image
|
| 2 |
+
FROM python:3.10-slim
|
| 3 |
+
|
| 4 |
+
# Set the working directory in the container
|
| 5 |
+
WORKDIR /app
|
| 6 |
+
|
| 7 |
+
# Install system dependencies required for OpenCV and other common libraries
|
| 8 |
+
# libgl1-mesa-glx and libglib2.0-0 are often needed for cv2
|
| 9 |
+
RUN apt-get update && apt-get install -y \
|
| 10 |
+
build-essential \
|
| 11 |
+
libgl1-mesa-glx \
|
| 12 |
+
libglib2.0-0 \
|
| 13 |
+
curl \
|
| 14 |
+
&& rm -rf /var/lib/apt/lists/*
|
| 15 |
+
|
| 16 |
+
# Copy the requirements file into the container at /app
|
| 17 |
+
COPY requirements.txt .
|
| 18 |
+
|
| 19 |
+
# Install any needed packages specified in requirements.txt
|
| 20 |
+
RUN pip install --no-cache-dir -r requirements.txt
|
| 21 |
+
|
| 22 |
+
# Copy the rest of the application's code
|
| 23 |
+
# specific to this project structure where apps/streamlit_dashboard.py depends on core/
|
| 24 |
+
COPY . .
|
| 25 |
+
|
| 26 |
+
# Create a non-root user to run the app (required by Hugging Face Spaces)
|
| 27 |
+
RUN useradd -m -u 1000 user
|
| 28 |
+
|
| 29 |
+
# Switch to the new user
|
| 30 |
+
USER user
|
| 31 |
+
|
| 32 |
+
# Set home to the user's home directory
|
| 33 |
+
ENV HOME=/home/user \
|
| 34 |
+
PATH=/home/user/.local/bin:$PATH
|
| 35 |
+
|
| 36 |
+
# Expose port 7860 for Hugging Face
|
| 37 |
+
EXPOSE 7860
|
| 38 |
+
|
| 39 |
+
# Healthcheck to ensure the app is running
|
| 40 |
+
HEALTHCHECK CMD curl --fail http://localhost:7860/ || exit 1
|
| 41 |
+
|
| 42 |
+
# Run the application
|
| 43 |
+
# We run from the root directory so Python path finds 'core'
|
| 44 |
+
CMD ["python", "apps/flask_server.py"]
|
README_HF.md
ADDED
|
@@ -0,0 +1,51 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Deploying to Hugging Face Spaces
|
| 2 |
+
|
| 3 |
+
This guide explains how to host your DCRM API on Hugging Face Spaces using Docker.
|
| 4 |
+
|
| 5 |
+
## Prerequisites
|
| 6 |
+
|
| 7 |
+
1. **Hugging Face Account**: [Sign up here](https://huggingface.co/join).
|
| 8 |
+
2. **Google API Key**: Required for the app to run.
|
| 9 |
+
|
| 10 |
+
## Step 1: Create a Space
|
| 11 |
+
|
| 12 |
+
1. Go to [huggingface.co/new-space](https://huggingface.co/new-space).
|
| 13 |
+
2. **Space Name**: e.g., `dcrm-api`.
|
| 14 |
+
3. **License**: MIT or Apache 2.0.
|
| 15 |
+
4. **SDK**: Select **Docker**.
|
| 16 |
+
5. **Clean Template**: Choose "Blank".
|
| 17 |
+
6. Click **Create Space**.
|
| 18 |
+
|
| 19 |
+
## Step 2: Push Your Code
|
| 20 |
+
|
| 21 |
+
You can push your code directly via Git:
|
| 22 |
+
|
| 23 |
+
```bash
|
| 24 |
+
# Initialize git if you haven't
|
| 25 |
+
git init
|
| 26 |
+
|
| 27 |
+
# Add the Hugging Face remote (found in your Space's "Clone repository" button)
|
| 28 |
+
git remote add space https://huggingface.co/spaces/YOUR_USERNAME/dcrm-api
|
| 29 |
+
|
| 30 |
+
# Push
|
| 31 |
+
git add .
|
| 32 |
+
git commit -m "Deploy to HF"
|
| 33 |
+
git push space main
|
| 34 |
+
```
|
| 35 |
+
|
| 36 |
+
## Step 3: Configure Settings
|
| 37 |
+
|
| 38 |
+
1. Go to your Space's **Settings** tab.
|
| 39 |
+
2. Scroll to **Variables and secrets**.
|
| 40 |
+
3. Click **New Secret**.
|
| 41 |
+
* **Name**: `GOOGLE_API_KEY`
|
| 42 |
+
* **Value**: *[Your Google API Key]*
|
| 43 |
+
4. The Space will restart automatically.
|
| 44 |
+
|
| 45 |
+
## API Usage
|
| 46 |
+
|
| 47 |
+
Your API will be available at:
|
| 48 |
+
`https://YOUR_USERNAME-dcrm-api.hf.space`
|
| 49 |
+
|
| 50 |
+
Example Health Check:
|
| 51 |
+
`https://YOUR_USERNAME-dcrm-api.hf.space/api/health`
|
apps/__pycache__/flask_server.cpython-313.pyc
ADDED
|
Binary file (18.1 kB). View file
|
|
|
apps/__pycache__/streamlit_dashboard.cpython-313.pyc
ADDED
|
Binary file (41.7 kB). View file
|
|
|
apps/fastapi_server.py
ADDED
|
@@ -0,0 +1,340 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""
|
| 2 |
+
DCRM Analysis API Wrapper
|
| 3 |
+
==========================
|
| 4 |
+
FastAPI wrapper for the DCRM analysis pipeline.
|
| 5 |
+
Accepts CSV uploads via POST and returns comprehensive JSON analysis.
|
| 6 |
+
|
| 7 |
+
Endpoint: POST /api/circuit-breakers/{breaker_id}/tests/upload
|
| 8 |
+
"""
|
| 9 |
+
|
| 10 |
+
import os
|
| 11 |
+
import json
|
| 12 |
+
import traceback
|
| 13 |
+
from typing import Optional
|
| 14 |
+
import sys
|
| 15 |
+
|
| 16 |
+
# Add project root to sys.path to allow importing from core
|
| 17 |
+
sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__), '..')))
|
| 18 |
+
|
| 19 |
+
# Previous Name: fastapi_app.py
|
| 20 |
+
from fastapi import FastAPI, File, UploadFile, HTTPException, Path
|
| 21 |
+
from fastapi.responses import JSONResponse
|
| 22 |
+
from fastapi.middleware.cors import CORSMiddleware
|
| 23 |
+
import pandas as pd
|
| 24 |
+
from io import StringIO
|
| 25 |
+
|
| 26 |
+
# Load environment variables
|
| 27 |
+
from dotenv import load_dotenv
|
| 28 |
+
load_dotenv()
|
| 29 |
+
|
| 30 |
+
# Ensure API key is set
|
| 31 |
+
if not os.getenv("GOOGLE_API_KEY"):
|
| 32 |
+
print("WARNING: GOOGLE_API_KEY not found in environment variables. Please check your .env file.")
|
| 33 |
+
|
| 34 |
+
from langchain_google_genai import ChatGoogleGenerativeAI
|
| 35 |
+
from core.calculators.kpi import calculate_kpis
|
| 36 |
+
from core.calculators.cbhi import compute_cbhi
|
| 37 |
+
from core.signal.phases import analyze_dcrm_data
|
| 38 |
+
from core.engines.rules import analyze_dcrm_advanced
|
| 39 |
+
from core.agents.diagnosis import detect_fault, standardize_input
|
| 40 |
+
from core.utils.report_generator import generate_dcrm_json
|
| 41 |
+
from core.agents.recommendation import generate_recommendations
|
| 42 |
+
|
| 43 |
+
# Optional ViT Model
|
| 44 |
+
try:
|
| 45 |
+
from core.models.vit_classifier import predict_dcrm_image, plot_resistance_for_vit
|
| 46 |
+
VIT_AVAILABLE = True
|
| 47 |
+
except Exception as e:
|
| 48 |
+
print(f"ViT Model not available: {e}")
|
| 49 |
+
VIT_AVAILABLE = False
|
| 50 |
+
predict_dcrm_image = None
|
| 51 |
+
plot_resistance_for_vit = None
|
| 52 |
+
|
| 53 |
+
# =============================================================================
|
| 54 |
+
# CONFIGURATION - CHANGE THIS URL AFTER DEPLOYMENT
|
| 55 |
+
# =============================================================================
|
| 56 |
+
DEPLOYMENT_URL = "http://localhost:5000" # Change this to your deployed URL
|
| 57 |
+
# Example: DEPLOYMENT_URL = "https://your-domain.com"
|
| 58 |
+
# =============================================================================
|
| 59 |
+
|
| 60 |
+
# Initialize FastAPI app
|
| 61 |
+
app = FastAPI(
|
| 62 |
+
title="DCRM Analysis API",
|
| 63 |
+
description="Circuit Breaker Dynamic Contact Resistance Measurement Analysis",
|
| 64 |
+
version="1.0.0"
|
| 65 |
+
)
|
| 66 |
+
|
| 67 |
+
# Enable CORS for frontend access
|
| 68 |
+
app.add_middleware(
|
| 69 |
+
CORSMiddleware,
|
| 70 |
+
allow_origins=["*"], # Configure this based on your security requirements
|
| 71 |
+
allow_credentials=True,
|
| 72 |
+
allow_methods=["*"],
|
| 73 |
+
allow_headers=["*"],
|
| 74 |
+
)
|
| 75 |
+
|
| 76 |
+
# Initialize LLM (reused across requests)
|
| 77 |
+
llm = ChatGoogleGenerativeAI(model="gemini-2.0-flash", temperature=0)
|
| 78 |
+
|
| 79 |
+
|
| 80 |
+
@app.get("/")
|
| 81 |
+
async def root():
|
| 82 |
+
"""Health check endpoint"""
|
| 83 |
+
return {
|
| 84 |
+
"status": "healthy",
|
| 85 |
+
"service": "DCRM Analysis API",
|
| 86 |
+
"version": "1.0.0",
|
| 87 |
+
"deployment_url": DEPLOYMENT_URL
|
| 88 |
+
}
|
| 89 |
+
|
| 90 |
+
|
| 91 |
+
@app.post("/api/circuit-breakers/{breaker_id}/tests/upload")
|
| 92 |
+
async def analyze_dcrm(
|
| 93 |
+
breaker_id: str = Path(..., description="Circuit breaker ID"),
|
| 94 |
+
file: UploadFile = File(..., description="CSV file with DCRM test data")
|
| 95 |
+
):
|
| 96 |
+
"""
|
| 97 |
+
Analyze DCRM test data from uploaded CSV file.
|
| 98 |
+
|
| 99 |
+
Expected CSV format:
|
| 100 |
+
- Columns: Time_ms, Resistance, Current, Travel, Close_Coil, Trip_Coil_1, Trip_Coil_2
|
| 101 |
+
- ~400 rows of time-series data
|
| 102 |
+
|
| 103 |
+
Returns:
|
| 104 |
+
- Comprehensive JSON analysis report matching dcrm-sample-response.txt structure
|
| 105 |
+
"""
|
| 106 |
+
|
| 107 |
+
# Validate file type
|
| 108 |
+
if not file.filename.endswith('.csv'):
|
| 109 |
+
raise HTTPException(
|
| 110 |
+
status_code=400,
|
| 111 |
+
detail={
|
| 112 |
+
"error": "Invalid file type",
|
| 113 |
+
"message": "Only CSV files are accepted",
|
| 114 |
+
"received": file.filename
|
| 115 |
+
}
|
| 116 |
+
)
|
| 117 |
+
|
| 118 |
+
try:
|
| 119 |
+
# Read CSV file
|
| 120 |
+
contents = await file.read()
|
| 121 |
+
csv_string = contents.decode('utf-8')
|
| 122 |
+
df = pd.read_csv(StringIO(csv_string))
|
| 123 |
+
|
| 124 |
+
# Validate required columns
|
| 125 |
+
required_columns = ['Time_ms', 'Resistance', 'Current', 'Travel',
|
| 126 |
+
'Close_Coil', 'Trip_Coil_1', 'Trip_Coil_2']
|
| 127 |
+
missing_columns = [col for col in required_columns if col not in df.columns]
|
| 128 |
+
|
| 129 |
+
if missing_columns:
|
| 130 |
+
raise HTTPException(
|
| 131 |
+
status_code=400,
|
| 132 |
+
detail={
|
| 133 |
+
"error": "Missing required columns",
|
| 134 |
+
"missing": missing_columns,
|
| 135 |
+
"required": required_columns,
|
| 136 |
+
"found": list(df.columns)
|
| 137 |
+
}
|
| 138 |
+
)
|
| 139 |
+
|
| 140 |
+
# Validate data size
|
| 141 |
+
if len(df) < 100:
|
| 142 |
+
raise HTTPException(
|
| 143 |
+
status_code=400,
|
| 144 |
+
detail={
|
| 145 |
+
"error": "Insufficient data",
|
| 146 |
+
"message": "CSV must contain at least 100 rows of data",
|
| 147 |
+
"received_rows": len(df)
|
| 148 |
+
}
|
| 149 |
+
)
|
| 150 |
+
|
| 151 |
+
# =====================================================================
|
| 152 |
+
# MAIN PROCESSING PIPELINE
|
| 153 |
+
# =====================================================================
|
| 154 |
+
|
| 155 |
+
# 1. Calculate KPIs
|
| 156 |
+
kpi_results = calculate_kpis(df)
|
| 157 |
+
kpis = kpi_results['kpis']
|
| 158 |
+
|
| 159 |
+
# 2. Phase Segmentation (AI-based)
|
| 160 |
+
phase_analysis_result = analyze_dcrm_data(df, llm)
|
| 161 |
+
|
| 162 |
+
# 3. Prepare KPIs for Rule Engine and AI Agent
|
| 163 |
+
raj_kpis = {
|
| 164 |
+
"Closing Time (ms)": kpis.get('closing_time'),
|
| 165 |
+
"Opening Time (ms)": kpis.get('opening_time'),
|
| 166 |
+
"Contact Speed (m/s)": kpis.get('contact_speed'),
|
| 167 |
+
"DLRO Value (µΩ)": kpis.get('dlro'),
|
| 168 |
+
"Peak Resistance (µΩ)": kpis.get('peak_resistance'),
|
| 169 |
+
"Peak Close Coil Current (A)": kpis.get('peak_close_coil'),
|
| 170 |
+
"Peak Trip Coil 1 Current (A)": kpis.get('peak_trip_coil_1'),
|
| 171 |
+
"Peak Trip Coil 2 Current (A)": kpis.get('peak_trip_coil_2'),
|
| 172 |
+
"SF6 Pressure (bar)": kpis.get('sf6_pressure'),
|
| 173 |
+
"Ambient Temperature (°C)": kpis.get('ambient_temp'),
|
| 174 |
+
"Main Wipe (mm)": kpis.get('main_wipe'),
|
| 175 |
+
"Arc Wipe (mm)": kpis.get('arc_wipe'),
|
| 176 |
+
"Contact Travel Distance (mm)": kpis.get('contact_travel')
|
| 177 |
+
}
|
| 178 |
+
|
| 179 |
+
raj_ai_kpis = {
|
| 180 |
+
"kpis": [
|
| 181 |
+
{"name": "Closing Time", "unit": "ms", "value": kpis.get('closing_time')},
|
| 182 |
+
{"name": "Opening Time", "unit": "ms", "value": kpis.get('opening_time')},
|
| 183 |
+
{"name": "DLRO Value", "unit": "µΩ", "value": kpis.get('dlro')},
|
| 184 |
+
{"name": "Peak Resistance", "unit": "µΩ", "value": kpis.get('peak_resistance')},
|
| 185 |
+
{"name": "Contact Speed", "unit": "m/s", "value": kpis.get('contact_speed')},
|
| 186 |
+
{"name": "Peak Close Coil Current", "unit": "A", "value": kpis.get('peak_close_coil')},
|
| 187 |
+
{"name": "Peak Trip Coil 1 Current", "unit": "A", "value": kpis.get('peak_trip_coil_1')},
|
| 188 |
+
{"name": "Peak Trip Coil 2 Current", "unit": "A", "value": kpis.get('peak_trip_coil_2')},
|
| 189 |
+
{"name": "SF6 Pressure", "unit": "bar", "value": kpis.get('sf6_pressure')},
|
| 190 |
+
{"name": "Ambient Temperature", "unit": "°C", "value": kpis.get('ambient_temp')}
|
| 191 |
+
]
|
| 192 |
+
}
|
| 193 |
+
|
| 194 |
+
# 4. Standardize resistance data for Rule Engine
|
| 195 |
+
temp_df = df[['Resistance']].copy()
|
| 196 |
+
if len(temp_df) < 401:
|
| 197 |
+
last_val = temp_df.iloc[-1, 0]
|
| 198 |
+
padding = pd.DataFrame({'Resistance': [last_val] * (401 - len(temp_df))})
|
| 199 |
+
temp_df = pd.concat([temp_df, padding], ignore_index=True)
|
| 200 |
+
|
| 201 |
+
std_df = standardize_input(temp_df)
|
| 202 |
+
row_values = std_df.iloc[0].values.tolist()
|
| 203 |
+
|
| 204 |
+
# 5. Run Rule Engine Analysis
|
| 205 |
+
rule_engine_result = analyze_dcrm_advanced(row_values, raj_kpis)
|
| 206 |
+
|
| 207 |
+
# 6. Run AI Agent Analysis
|
| 208 |
+
ai_agent_result = detect_fault(df, raj_ai_kpis)
|
| 209 |
+
|
| 210 |
+
# 7. Run ViT Model (if available)
|
| 211 |
+
vit_result = None
|
| 212 |
+
vit_plot_path = "temp_vit_plot.png"
|
| 213 |
+
|
| 214 |
+
plot_generated = False
|
| 215 |
+
try:
|
| 216 |
+
if plot_resistance_for_vit and plot_resistance_for_vit(df, vit_plot_path):
|
| 217 |
+
plot_generated = True
|
| 218 |
+
except Exception as e:
|
| 219 |
+
print(f"ViT Plot generation failed: {e}")
|
| 220 |
+
|
| 221 |
+
if plot_generated and VIT_AVAILABLE and predict_dcrm_image:
|
| 222 |
+
try:
|
| 223 |
+
vit_class, vit_conf, vit_details = predict_dcrm_image(vit_plot_path)
|
| 224 |
+
if vit_class:
|
| 225 |
+
vit_result = {
|
| 226 |
+
"class": vit_class,
|
| 227 |
+
"confidence": vit_conf,
|
| 228 |
+
"details": vit_details
|
| 229 |
+
}
|
| 230 |
+
except Exception as e:
|
| 231 |
+
print(f"ViT Prediction failed: {e}")
|
| 232 |
+
|
| 233 |
+
# 8. Calculate CBHI Score
|
| 234 |
+
cbhi_phase_data = {}
|
| 235 |
+
if 'phaseWiseAnalysis' in phase_analysis_result:
|
| 236 |
+
for phase in phase_analysis_result['phaseWiseAnalysis']:
|
| 237 |
+
p_name = f"Phase {phase.get('phaseNumber')}"
|
| 238 |
+
cbhi_phase_data[p_name] = {
|
| 239 |
+
"status": phase.get('status', 'Unknown'),
|
| 240 |
+
"confidence": phase.get('confidence', 0)
|
| 241 |
+
}
|
| 242 |
+
|
| 243 |
+
cbhi_score = compute_cbhi(raj_ai_kpis['kpis'], ai_agent_result, cbhi_phase_data)
|
| 244 |
+
|
| 245 |
+
# 9. Generate Recommendations
|
| 246 |
+
recommendations = generate_recommendations(
|
| 247 |
+
kpis=kpis,
|
| 248 |
+
cbhi_score=cbhi_score,
|
| 249 |
+
rule_faults=rule_engine_result.get("Fault_Detection", []),
|
| 250 |
+
ai_faults=ai_agent_result.get("Fault_Detection", []),
|
| 251 |
+
llm=llm
|
| 252 |
+
)
|
| 253 |
+
|
| 254 |
+
# 10. Generate Final JSON Report
|
| 255 |
+
full_report = generate_dcrm_json(
|
| 256 |
+
df=df,
|
| 257 |
+
kpis=kpis,
|
| 258 |
+
cbhi_score=cbhi_score,
|
| 259 |
+
rule_result=rule_engine_result,
|
| 260 |
+
ai_result=ai_agent_result,
|
| 261 |
+
llm=llm,
|
| 262 |
+
vit_result=vit_result,
|
| 263 |
+
phase_analysis_result=phase_analysis_result,
|
| 264 |
+
recommendations=recommendations
|
| 265 |
+
)
|
| 266 |
+
|
| 267 |
+
# Add breaker_id to response
|
| 268 |
+
full_report['breakerId'] = breaker_id
|
| 269 |
+
|
| 270 |
+
# Return JSON response
|
| 271 |
+
return JSONResponse(content=full_report, status_code=200)
|
| 272 |
+
|
| 273 |
+
except HTTPException:
|
| 274 |
+
# Re-raise HTTP exceptions as-is
|
| 275 |
+
raise
|
| 276 |
+
|
| 277 |
+
except pd.errors.EmptyDataError:
|
| 278 |
+
raise HTTPException(
|
| 279 |
+
status_code=400,
|
| 280 |
+
detail={
|
| 281 |
+
"error": "Empty CSV file",
|
| 282 |
+
"message": "The uploaded CSV file is empty or contains no data"
|
| 283 |
+
}
|
| 284 |
+
)
|
| 285 |
+
|
| 286 |
+
except pd.errors.ParserError as e:
|
| 287 |
+
raise HTTPException(
|
| 288 |
+
status_code=400,
|
| 289 |
+
detail={
|
| 290 |
+
"error": "CSV parsing error",
|
| 291 |
+
"message": "Failed to parse CSV file. Please check the file format.",
|
| 292 |
+
"details": str(e)
|
| 293 |
+
}
|
| 294 |
+
)
|
| 295 |
+
|
| 296 |
+
except Exception as e:
|
| 297 |
+
# Log the full error for debugging
|
| 298 |
+
error_trace = traceback.format_exc()
|
| 299 |
+
print(f"ERROR in DCRM analysis: {error_trace}")
|
| 300 |
+
|
| 301 |
+
# Return clean error to client
|
| 302 |
+
raise HTTPException(
|
| 303 |
+
status_code=500,
|
| 304 |
+
detail={
|
| 305 |
+
"error": "Analysis failed",
|
| 306 |
+
"message": "An error occurred during DCRM analysis",
|
| 307 |
+
"error_type": type(e).__name__,
|
| 308 |
+
"error_details": str(e)
|
| 309 |
+
}
|
| 310 |
+
)
|
| 311 |
+
|
| 312 |
+
|
| 313 |
+
@app.get("/api/health")
|
| 314 |
+
async def health_check():
|
| 315 |
+
"""Detailed health check with component status"""
|
| 316 |
+
return {
|
| 317 |
+
"status": "healthy",
|
| 318 |
+
"components": {
|
| 319 |
+
"llm": "operational",
|
| 320 |
+
"vit_model": "available" if VIT_AVAILABLE else "unavailable",
|
| 321 |
+
"kpi_calculator": "operational",
|
| 322 |
+
"rule_engine": "operational",
|
| 323 |
+
"ai_agent": "operational",
|
| 324 |
+
"phase_analysis": "operational"
|
| 325 |
+
},
|
| 326 |
+
"deployment_url": DEPLOYMENT_URL
|
| 327 |
+
}
|
| 328 |
+
|
| 329 |
+
|
| 330 |
+
if __name__ == "__main__":
|
| 331 |
+
import uvicorn
|
| 332 |
+
|
| 333 |
+
# Run the API server
|
| 334 |
+
# Change host and port as needed
|
| 335 |
+
uvicorn.run(
|
| 336 |
+
app,
|
| 337 |
+
host="0.0.0.0", # Listen on all interfaces
|
| 338 |
+
port=5001, # Change port if needed
|
| 339 |
+
log_level="info"
|
| 340 |
+
)
|
apps/flask_server.py
ADDED
|
@@ -0,0 +1,436 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""
|
| 2 |
+
DCRM Analysis Flask API - Three Phase Support
|
| 3 |
+
==============================================
|
| 4 |
+
Flask API wrapper for the DCRM analysis pipeline.
|
| 5 |
+
Accepts 3 CSV uploads (R, Y, B phases) via POST and returns comprehensive JSON analysis.
|
| 6 |
+
|
| 7 |
+
Endpoint: POST /api/circuit-breakers/{breaker_id}/tests/upload-three-phase
|
| 8 |
+
"""
|
| 9 |
+
|
| 10 |
+
import os
|
| 11 |
+
import json
|
| 12 |
+
import traceback
|
| 13 |
+
import uuid
|
| 14 |
+
from datetime import datetime, timezone
|
| 15 |
+
import sys
|
| 16 |
+
import concurrent.futures
|
| 17 |
+
|
| 18 |
+
# Add project root to sys.path to allow importing from core
|
| 19 |
+
sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__), '..')))
|
| 20 |
+
|
| 21 |
+
# Previous Name: flask_app.py
|
| 22 |
+
from flask import Flask, request, jsonify
|
| 23 |
+
from flask_cors import CORS
|
| 24 |
+
from werkzeug.utils import secure_filename
|
| 25 |
+
import pandas as pd
|
| 26 |
+
from io import StringIO
|
| 27 |
+
|
| 28 |
+
# Load environment variables
|
| 29 |
+
from dotenv import load_dotenv
|
| 30 |
+
load_dotenv()
|
| 31 |
+
|
| 32 |
+
# Ensure API key is set
|
| 33 |
+
if not os.getenv("GOOGLE_API_KEY"):
|
| 34 |
+
print("WARNING: GOOGLE_API_KEY not found in environment variables. Please check your .env file.")
|
| 35 |
+
|
| 36 |
+
from langchain_google_genai import ChatGoogleGenerativeAI
|
| 37 |
+
from core.calculators.kpi import calculate_kpis
|
| 38 |
+
from core.calculators.cbhi import compute_cbhi
|
| 39 |
+
from core.signal.phases import analyze_dcrm_data
|
| 40 |
+
from core.engines.rules import analyze_dcrm_advanced
|
| 41 |
+
from core.agents.diagnosis import detect_fault, standardize_input
|
| 42 |
+
from core.utils.report_generator import generate_dcrm_json
|
| 43 |
+
from core.agents.recommendation import generate_recommendations
|
| 44 |
+
|
| 45 |
+
# Optional ViT Model
|
| 46 |
+
try:
|
| 47 |
+
from core.models.vit_classifier import predict_dcrm_image, plot_resistance_for_vit
|
| 48 |
+
VIT_AVAILABLE = True
|
| 49 |
+
except Exception as e:
|
| 50 |
+
print(f"ViT Model not available: {e}")
|
| 51 |
+
VIT_AVAILABLE = False
|
| 52 |
+
predict_dcrm_image = None
|
| 53 |
+
plot_resistance_for_vit = None
|
| 54 |
+
|
| 55 |
+
# =============================================================================
|
| 56 |
+
# CONFIGURATION - CHANGE THIS URL AFTER DEPLOYMENT
|
| 57 |
+
# =============================================================================
|
| 58 |
+
DEPLOYMENT_URL = "http://localhost:5000" # Change this to your deployed URL
|
| 59 |
+
# Example: DEPLOYMENT_URL = "https://your-domain.com"
|
| 60 |
+
# =============================================================================
|
| 61 |
+
|
| 62 |
+
# Initialize Flask app
|
| 63 |
+
app = Flask(__name__)
|
| 64 |
+
CORS(app) # Enable CORS for frontend access
|
| 65 |
+
|
| 66 |
+
def get_llm(api_key=None):
    """
    Build a ChatGoogleGenerativeAI instance bound to a specific API key.

    Falls back to the GOOGLE_API_KEY environment variable when no key is
    supplied.

    Raises:
        ValueError: if neither the argument nor the environment provides a key.
    """
    effective_key = api_key or os.getenv("GOOGLE_API_KEY")
    if not effective_key:
        raise ValueError("No Google API Key provided and GOOGLE_API_KEY not found in env.")
    return ChatGoogleGenerativeAI(model="gemini-2.0-flash", temperature=0, google_api_key=effective_key)
|
| 78 |
+
|
| 79 |
+
|
| 80 |
+
def process_single_phase_csv(args):
    """
    Process a single phase CSV through the complete DCRM pipeline.
    Designed to be run in a separate thread.

    Args:
        args: Tuple containing (df, breaker_id, api_key, phase_name)

    Returns:
        dict: Complete analysis results for one phase, or a minimal dict with
        an "error" key if the whole pipeline fails for this phase (so the
        combined three-phase request never hard-fails on one phase).
    """
    df, breaker_id, api_key, phase_name = args

    try:
        print(f"[{phase_name.upper()}] Starting processing with key ending in ...{api_key[-4:] if api_key else 'None'}")

        # Thread-local LLM so parallel phases can run on distinct API keys
        llm = get_llm(api_key)

        # 1. Calculate KPIs
        kpi_results = calculate_kpis(df)
        kpis = kpi_results['kpis']

        # 2. Phase Segmentation (AI-based)
        phase_analysis_result = analyze_dcrm_data(df, llm)

        # 3. Prepare KPIs for Rule Engine and AI Agent (two different schemas)
        raj_kpis = {
            "Closing Time (ms)": kpis.get('closing_time'),
            "Opening Time (ms)": kpis.get('opening_time'),
            "Contact Speed (m/s)": kpis.get('contact_speed'),
            "DLRO Value (µΩ)": kpis.get('dlro'),
            "Peak Resistance (µΩ)": kpis.get('peak_resistance'),
            "Peak Close Coil Current (A)": kpis.get('peak_close_coil'),
            "Peak Trip Coil 1 Current (A)": kpis.get('peak_trip_coil_1'),
            "Peak Trip Coil 2 Current (A)": kpis.get('peak_trip_coil_2'),
            "SF6 Pressure (bar)": kpis.get('sf6_pressure'),
            "Ambient Temperature (°C)": kpis.get('ambient_temp'),
            "Main Wipe (mm)": kpis.get('main_wipe'),
            "Arc Wipe (mm)": kpis.get('arc_wipe'),
            "Contact Travel Distance (mm)": kpis.get('contact_travel')
        }

        raj_ai_kpis = {
            "kpis": [
                {"name": "Closing Time", "unit": "ms", "value": kpis.get('closing_time')},
                {"name": "Opening Time", "unit": "ms", "value": kpis.get('opening_time')},
                {"name": "DLRO Value", "unit": "µΩ", "value": kpis.get('dlro')},
                {"name": "Peak Resistance", "unit": "µΩ", "value": kpis.get('peak_resistance')},
                {"name": "Contact Speed", "unit": "m/s", "value": kpis.get('contact_speed')},
                {"name": "Peak Close Coil Current", "unit": "A", "value": kpis.get('peak_close_coil')},
                {"name": "Peak Trip Coil 1 Current", "unit": "A", "value": kpis.get('peak_trip_coil_1')},
                {"name": "Peak Trip Coil 2 Current", "unit": "A", "value": kpis.get('peak_trip_coil_2')},
                {"name": "SF6 Pressure", "unit": "bar", "value": kpis.get('sf6_pressure')},
                {"name": "Ambient Temperature", "unit": "°C", "value": kpis.get('ambient_temp')}
            ]
        }

        # 4. Standardize resistance data for the Rule Engine.
        # Pad to 401 samples (the engine's fixed input width — TODO confirm)
        # by repeating the last resistance value.
        temp_df = df[['Resistance']].copy()
        if len(temp_df) < 401:
            last_val = temp_df.iloc[-1, 0]
            padding = pd.DataFrame({'Resistance': [last_val] * (401 - len(temp_df))})
            temp_df = pd.concat([temp_df, padding], ignore_index=True)

        std_df = standardize_input(temp_df)
        row_values = std_df.iloc[0].values.tolist()

        # 5. Run Rule Engine Analysis
        rule_engine_result = analyze_dcrm_advanced(row_values, raj_kpis)

        # 6. Run AI Agent Analysis; on failure, fall back to the rule engine's
        # output so downstream consumers always get the same shape.
        try:
            ai_agent_result = detect_fault(df, raj_ai_kpis)
            print(f"[{phase_name.upper()}] AI Agent analysis completed successfully")
        except Exception as e:
            print(f"[{phase_name.upper()}] AI Agent failed: {e}. Using fallback.")
            ai_agent_result = {
                "Fault_Detection": rule_engine_result.get("Fault_Detection", []),
                "overall_health_assessment": rule_engine_result.get("overall_health_assessment", {}),
                "classifications": rule_engine_result.get("classifications", [])
            }

        # 7. Run ViT Model (if available)
        vit_result = None
        vit_plot_path = f"temp_vit_plot_{phase_name}_{uuid.uuid4().hex[:8]}.png"  # Unique path for parallel safety

        try:
            plot_generated = False
            try:
                if plot_resistance_for_vit and plot_resistance_for_vit(df, vit_plot_path):
                    plot_generated = True
            except Exception as e:
                print(f"[{phase_name.upper()}] ViT Plot generation failed: {e}")

            if plot_generated and VIT_AVAILABLE and predict_dcrm_image:
                try:
                    # The API key is forwarded for the classifier's Gemini step;
                    # the ViT model itself is served remotely.
                    vit_class, vit_conf, vit_details = predict_dcrm_image(vit_plot_path, api_key=api_key)
                    if vit_class:
                        vit_result = {
                            "class": vit_class,
                            "confidence": vit_conf,
                            "details": vit_details
                        }
                except Exception as e:
                    print(f"[{phase_name.upper()}] ViT Prediction failed: {e}")
        finally:
            # Cleanup temp file. Bug fix: cleanup previously ran only when a
            # prediction was attempted, leaking the plot file whenever the plot
            # was generated but the ViT model was unavailable.
            if os.path.exists(vit_plot_path):
                try:
                    os.remove(vit_plot_path)
                except OSError:
                    pass

        # 8. Calculate CBHI Score from the per-phase segmentation summary
        cbhi_phase_data = {}
        if 'phaseWiseAnalysis' in phase_analysis_result:
            for phase in phase_analysis_result['phaseWiseAnalysis']:
                p_name = f"Phase {phase.get('phaseNumber')}"
                cbhi_phase_data[p_name] = {
                    "status": phase.get('status', 'Unknown'),
                    "confidence": phase.get('confidence', 0)
                }

        cbhi_score = compute_cbhi(raj_ai_kpis['kpis'], ai_agent_result, cbhi_phase_data)

        # 9. Generate Recommendations; on failure, derive basic maintenance
        # actions from high-severity rule faults instead.
        try:
            recommendations = generate_recommendations(
                kpis=kpis,
                cbhi_score=cbhi_score,
                rule_faults=rule_engine_result.get("Fault_Detection", []),
                ai_faults=ai_agent_result.get("Fault_Detection", []),
                llm=llm
            )
            print(f"[{phase_name.upper()}] Recommendations generated successfully")
        except Exception as e:
            print(f"[{phase_name.upper()}] Recommendations failed: {e}. Using fallback.")
            recommendations = {
                "maintenanceActions": [],
                "futureFaultsPdf": []
            }
            for fault in rule_engine_result.get("Fault_Detection", []):
                if fault.get("Severity") in ["High", "Critical"]:
                    recommendations["maintenanceActions"].append({
                        "action": f"Address {fault.get('defect_name')}",
                        "priority": "High",
                        "timeframe": "Immediate"
                    })

        # 10. Generate Final JSON Report; on failure, emit a minimal but valid
        # partial report so the caller still gets usable data.
        try:
            full_report = generate_dcrm_json(
                df=df,
                kpis=kpis,
                cbhi_score=cbhi_score,
                rule_result=rule_engine_result,
                ai_result=ai_agent_result,
                llm=llm,
                vit_result=vit_result,
                phase_analysis_result=phase_analysis_result,
                recommendations=recommendations
            )
            print(f"[{phase_name.upper()}] Final report generated successfully")
        except Exception as e:
            print(f"[{phase_name.upper()}] Report generation failed: {e}. Using fallback.")
            full_report = {
                "_id": f"fallback_{phase_name}_{uuid.uuid4().hex[:8]}",
                "phase": phase_name,
                "status": "partial_success",
                "error": str(e),
                "ruleBased_result": rule_engine_result,
                "vitResult": vit_result,
                "kpis": kpis,
                "cbhi": {"score": cbhi_score},
                "phaseWiseAnalysis": phase_analysis_result.get('phaseWiseAnalysis', [])
            }

        print(f"[{phase_name.upper()}] Processing complete.")
        return full_report

    except Exception as e:
        print(f"[{phase_name.upper()}] Error: {e}")
        traceback.print_exc()
        # Return a partial error result so the whole request doesn't fail
        return {
            "error": str(e),
            "phase": phase_name
        }
|
| 274 |
+
|
| 275 |
+
|
| 276 |
+
@app.route('/')
def root():
    """Health check endpoint at the service root."""
    payload = {
        "status": "healthy",
        "service": "DCRM Analysis Flask API",
        "version": "2.1.0",
        "deployment_url": DEPLOYMENT_URL,
    }
    return jsonify(payload)
|
| 285 |
+
|
| 286 |
+
|
| 287 |
+
@app.route('/api/health')
def health_check():
    """Detailed health check reporting the status of each pipeline component."""
    component_status = {
        "llm": "operational",
        "vit_model": "available" if VIT_AVAILABLE else "unavailable",
        "kpi_calculator": "operational",
        "rule_engine": "operational",
        "ai_agent": "operational",
        "phase_analysis": "operational",
    }
    return jsonify({
        "status": "healthy",
        "components": component_status,
        "deployment_url": DEPLOYMENT_URL,
    })
|
| 302 |
+
|
| 303 |
+
|
| 304 |
+
@app.route('/api/circuit-breakers/<breaker_id>/tests/upload-three-phase', methods=['POST'])
def analyze_three_phase_dcrm(breaker_id):
    """
    Analyze DCRM test data from 3 uploaded CSV files (R, Y, B phases).
    Uses parallel processing with multiple API keys to speed up execution.

    Expected files in request:
        - fileR: Red phase CSV
        - fileY: Yellow phase CSV
        - fileB: Blue phase CSV

    Returns:
        Comprehensive JSON analysis report with combined three-phase results
        (HTTP 200), a validation error (HTTP 400), or a server error (HTTP 500).
    """
    try:
        # Validate files are present
        if 'fileR' not in request.files or 'fileY' not in request.files or 'fileB' not in request.files:
            return jsonify({
                "error": "Missing required files",
                "message": "All three phase files are required: fileR, fileY, fileB",
                "received": list(request.files.keys())
            }), 400

        fileR = request.files['fileR']
        fileY = request.files['fileY']
        fileB = request.files['fileB']

        # Validate file types. Bug fix: the check is now case-insensitive
        # (".CSV" uploads were rejected) and tolerates a missing filename
        # instead of raising AttributeError -> HTTP 500.
        for file in [fileR, fileY, fileB]:
            if not (file.filename or '').lower().endswith('.csv'):
                return jsonify({
                    "error": "Invalid file type",
                    "message": "Only CSV files are accepted",
                    "received": file.filename
                }), 400

        # Parse each upload into a DataFrame. Bug fix: decoding now happens
        # inside the per-file try so a non-UTF-8 upload yields a clean 400
        # instead of falling through to the generic 500 handler.
        dfs = {}
        for phase_name, file in [('r', fileR), ('y', fileY), ('b', fileB)]:
            try:
                file.seek(0)
                csv_string = file.read().decode('utf-8')
                df = pd.read_csv(StringIO(csv_string))

                # Basic validation: too few samples means the trace is unusable
                if len(df) < 100:
                    raise ValueError(f"Insufficient data in {phase_name.upper()} phase")

                dfs[phase_name] = df
            except Exception as e:
                return jsonify({
                    "error": f"Error reading {phase_name.upper()} CSV",
                    "details": str(e)
                }), 400

        # Get API keys; fall back to the main key if per-phase ones aren't set
        main_key = os.getenv("GOOGLE_API_KEY")
        keys = {
            'r': os.getenv("GOOGLE_API_KEY_1", main_key),
            'y': os.getenv("GOOGLE_API_KEY_2", main_key),
            'b': os.getenv("GOOGLE_API_KEY_3", main_key)
        }

        # One task tuple per phase: (df, breaker_id, api_key, phase_name)
        tasks = [(dfs[phase], breaker_id, keys[phase], phase) for phase in ('r', 'y', 'b')]

        # Execute the three phases in parallel
        results = {}
        health_scores = []

        print("Starting parallel processing of 3 phases...")
        with concurrent.futures.ThreadPoolExecutor(max_workers=3) as executor:
            future_to_phase = {
                executor.submit(process_single_phase_csv, task): task[3]
                for task in tasks
            }

            for future in concurrent.futures.as_completed(future_to_phase):
                phase = future_to_phase[future]
                try:
                    result = future.result()
                    results[phase] = result
                    if 'healthScore' in result:
                        health_scores.append(result['healthScore'])
                except Exception as exc:
                    # A worker exception only degrades that one phase
                    print(f'{phase} generated an exception: {exc}')
                    results[phase] = {"error": str(exc)}

        # Combine results into three-phase structure (removed breakerId and operator)
        combined_result = {
            "_id": str(uuid.uuid4()).replace('-', '')[:24],
            "createdAt": datetime.now(timezone.utc).strftime("%a, %d %b %Y %H:%M:%S GMT"),
            "healthScore": round(sum(health_scores) / len(health_scores), 1) if health_scores else 0,
            "r": results.get('r', {}),
            "y": results.get('y', {}),
            "b": results.get('b', {})
        }

        return jsonify(combined_result), 200

    except Exception as e:
        # Log the full error for debugging
        error_trace = traceback.format_exc()
        print(f"ERROR in three-phase DCRM analysis: {error_trace}")

        # Return clean error to client
        return jsonify({
            "error": "Analysis failed",
            "message": "An error occurred during DCRM analysis",
            "error_type": type(e).__name__,
            "error_details": str(e)
        }), 500
|
| 421 |
+
|
| 422 |
+
|
| 423 |
+
if __name__ == "__main__":
    # Print all registered routes for debugging
    print("Registered Routes:")
    print(app.url_map)

    # Run the Flask app. PORT is provided by hosting platforms
    # (7860 is the Hugging Face Spaces default).
    port = int(os.environ.get("PORT", 7860))
    app.run(
        host="0.0.0.0",
        port=port,
        debug=False,  # Keep debug off in production
        use_reloader=False
    )
|
apps/resistance_vs_time.png
ADDED
|
apps/streamlit_dashboard.py
ADDED
|
@@ -0,0 +1,867 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Previous Name: streamlit_app_v2.py
|
| 2 |
+
import streamlit as st
|
| 3 |
+
import pandas as pd
|
| 4 |
+
import numpy as np
|
| 5 |
+
import os
|
| 6 |
+
import json
|
| 7 |
+
import sys
|
| 8 |
+
|
| 9 |
+
# Add project root to sys.path to allow importing from core
|
| 10 |
+
sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__), '..')))
|
| 11 |
+
|
| 12 |
+
# Load environment variables
|
| 13 |
+
from dotenv import load_dotenv
|
| 14 |
+
load_dotenv()
|
| 15 |
+
|
| 16 |
+
# Ensure API key is set
|
| 17 |
+
if not os.getenv("GOOGLE_API_KEY"):
|
| 18 |
+
st.error("GOOGLE_API_KEY not found in environment variables. Please check your .env file.")
|
| 19 |
+
st.stop()
|
| 20 |
+
|
| 21 |
+
from langchain_experimental.agents.agent_toolkits import create_pandas_dataframe_agent
|
| 22 |
+
from langchain_google_genai import ChatGoogleGenerativeAI
|
| 23 |
+
from langchain_core.prompts import PromptTemplate
|
| 24 |
+
from langchain_core.output_parsers import StrOutputParser
|
| 25 |
+
import plotly.graph_objects as go
|
| 26 |
+
from plotly.subplots import make_subplots
|
| 27 |
+
import base64
|
| 28 |
+
import io
|
| 29 |
+
from langchain_core.messages import HumanMessage
|
| 30 |
+
from core.agents.plotting import create_dcrm_plot, create_velocity_plot, create_resistance_zoom_plot, get_dcrm_prompt
|
| 31 |
+
from core.calculators.kpi import calculate_kpis
|
| 32 |
+
from core.calculators.cbhi import compute_cbhi
|
| 33 |
+
from core.signal.phases import analyze_dcrm_data
|
| 34 |
+
from core.engines.rules import analyze_dcrm_advanced
|
| 35 |
+
from core.agents.diagnosis import detect_fault, standardize_input
|
| 36 |
+
from core.utils.report_generator import generate_dcrm_json
|
| 37 |
+
from core.agents.recommendation import generate_recommendations
|
| 38 |
+
|
| 39 |
+
# Optional ViT Model (requires PyTorch compatibility)
|
| 40 |
+
try:
|
| 41 |
+
from core.models.vit_classifier import predict_dcrm_image, plot_resistance_for_vit
|
| 42 |
+
VIT_AVAILABLE = True
|
| 43 |
+
except Exception as e:
|
| 44 |
+
print(f"ViT Model not available: {e}")
|
| 45 |
+
VIT_AVAILABLE = False
|
| 46 |
+
predict_dcrm_image = None
|
| 47 |
+
plot_resistance_for_vit = None
|
| 48 |
+
|
| 49 |
+
# --- Configuration & CSS ---
|
| 50 |
+
st.set_page_config(page_title="DCRM Analyzer Pro", page_icon="⚡", layout="wide")
|
| 51 |
+
|
| 52 |
+
def load_css():
    """Inject the dashboard's custom CSS (fonts, headers, metrics, cards, sidebar, status colors)."""
    custom_css = """
    <style>
    /* Main Font */
    html, body, [class*="css"] {
        font-family: 'Segoe UI', Roboto, Helvetica, Arial, sans-serif;
    }
    /* Headers */
    h1, h2, h3 {
        color: #2c3e50;
        font-weight: 600;
    }
    /* Metrics */
    [data-testid="stMetricValue"] {
        font-size: 2rem;
        color: #2980b9;
    }
    /* Cards/Containers */
    .stExpander {
        border: 1px solid #e0e0e0;
        border-radius: 8px;
        box-shadow: 0 2px 4px rgba(0,0,0,0.05);
    }
    /* Sidebar */
    section[data-testid="stSidebar"] {
        background-color: #f8f9fa;
    }
    /* Status Indicators */
    .status-pass { color: #27ae60; font-weight: bold; }
    .status-warning { color: #f39c12; font-weight: bold; }
    .status-fail { color: #c0392b; font-weight: bold; }
    </style>
    """
    st.markdown(custom_css, unsafe_allow_html=True)
|
| 85 |
+
|
| 86 |
+
def main():
    """Render the DCRM Analyzer Streamlit dashboard.

    Flow:
      1. Inject CSS and build the sidebar module selector.
      2. Accept a DCRM CSV upload.
      3. "General Chat" mode: LangChain pandas-dataframe agent Q&A over the
         upload, with optional zone enrichment and Plotly chart generation.
      4. "DCRM Analysis" mode: deterministic KPIs, AI phase segmentation,
         rule-engine and AI fault detection, optional ViT visual
         classification, CBHI scoring, recommendations, diagnostic plots,
         and a downloadable JSON report.

    Everything is rendered through Streamlit side effects; returns nothing.
    """
    load_css()

    with st.sidebar:
        st.title("⚡ DCRM Analyzer")
        st.markdown("---")
        # index=1 -> "DCRM Analysis" is the default module.
        mode = st.radio("Select Module", ["General Chat", "DCRM Analysis"], index=1)

        # Segmentation method selector removed - using robust signal processing by default

        st.markdown("---")
        # NOTE(review): caption says "Gemini 1.5 Flash" but the LLM below is
        # instantiated as "gemini-2.0-flash" - confirm which is intended.
        st.caption("Powered by Gemini 1.5 Flash")

    st.header(f"{mode}")

    uploaded_file = st.file_uploader("Upload DCRM Data (CSV)", type="csv")

    if uploaded_file is not None:
        try:
            df = pd.read_csv(uploaded_file)
            llm = ChatGoogleGenerativeAI(model="gemini-2.0-flash", temperature=0)

            if mode == "General Chat":
                st.info("💡 **Tip:** You can ask for graphs! Try: 'Plot the resistance in Zone 2'")

                # Zone Enrichment Option
                if st.checkbox("✨ Enrich Data with Zones"):
                    with st.spinner("Detecting Zones..."):
                        df, zones = enrich_data_with_zones(df, llm)
                        st.success("Data enriched with 'Zone' column!")

                with st.expander("View Data"):
                    st.dataframe(df.head())

                # Agent with Plotting Capabilities
                # NOTE(review): allow_dangerous_code=True lets the agent execute
                # arbitrary Python on the server - acceptable only for trusted
                # operators; do not expose this mode to untrusted users.
                agent = create_pandas_dataframe_agent(
                    llm,
                    df,
                    verbose=True,
                    allow_dangerous_code=True,
                    handle_parsing_errors=True,
                    prefix="""
                    You are a data analysis agent.
                    If the user asks for a plot or graph:
                    1. Use `plotly.graph_objects` (import as go) or `plotly.express` (import as px).
                    2. Create the figure object `fig`.
                    3. Display it using `st.plotly_chart(fig)`.
                    4. Do NOT use matplotlib.
                    5. Ensure you import streamlit as st inside the code execution.
                    """
                )

                st.divider()
                user_question = st.chat_input("Ask a question or request a plot...")

                if user_question:
                    st.chat_message("user").write(user_question)
                    with st.spinner("Thinking..."):
                        try:
                            # We use the agent directly for everything now, as it handles both plotting and QA
                            response = agent.run(user_question)

                            with st.chat_message("assistant"):
                                st.write(response)

                        except Exception as e:
                            st.error(f"An error occurred: {str(e)}")

            elif mode == "DCRM Analysis":
                if st.button("🚀 Run Advanced Analysis", type="primary", width='stretch'):
                    with st.spinner("Performing Kinematic Segmentation & Diagnostics..."):
                        try:
                            # 1. Deterministic KPI Calculation
                            kpi_results = calculate_kpis(df)
                            kpis = kpi_results['kpis']
                            # cbhi_score will be calculated later after AI and Phase analysis

                            # 1.5. Phase Segmentation (using AI Agent)
                            with st.spinner("Running AI-Based Phase Segmentation..."):
                                # Use analyze_dcrm_data for robust segmentation and status
                                phase_analysis_result = analyze_dcrm_data(df, llm)

                            # Extract zones for plotting
                            phase_to_zone_map = {
                                1: "zone_1_pre_contact",
                                2: "zone_2_arcing_engagement",
                                3: "zone_3_main_conduction",
                                4: "zone_4_parting",
                                5: "zone_5_final_open"
                            }
                            zones = {}
                            phase_timings = []

                            if 'phaseWiseAnalysis' in phase_analysis_result:
                                st.success("✓ AI Segmentation complete!")
                                for phase in phase_analysis_result['phaseWiseAnalysis']:
                                    p_num = phase.get('phaseNumber')
                                    zone_key = phase_to_zone_map.get(p_num)
                                    start_time = phase.get('startTime', 0)
                                    end_time = phase.get('endTime', 0)
                                    duration = end_time - start_time

                                    if zone_key:
                                        zones[zone_key] = {
                                            'start_ms': start_time,
                                            'end_ms': end_time
                                        }

                                    phase_timings.append({
                                        "Phase": phase.get('name'),
                                        "Start (ms)": start_time,
                                        "End (ms)": end_time,
                                        "Duration (ms)": duration
                                    })
                            else:
                                st.warning("⚠️ AI Segmentation returned no zones.")

                            # 2. Rule Engine & AI Agent Analysis
                            # Prepare KPIs for Raj modules (needs specific format)
                            # kpis dict from calculate_kpis_and_score is flat: {'closing_time': 45.15, ...}
                            # Raj modules expect: {'kpis': [{'name': 'Closing Time', 'value': ...}, ...]} or dict {'Closing Time (ms)': ...}
                            # Let's construct the dict format expected by raj_rule_engine (it handles dicts loosely but prefers specific keys)

                            # Mapping current KPIs to Raj expected keys
                            raj_kpis = {
                                "Closing Time (ms)": kpis.get('closing_time'),
                                "Opening Time (ms)": kpis.get('opening_time'),
                                "Contact Speed (m/s)": kpis.get('contact_speed'),
                                "DLRO Value (µΩ)": kpis.get('dlro'),
                                "Peak Resistance (µΩ)": kpis.get('peak_resistance'),
                                "Peak Close Coil Current (A)": kpis.get('peak_close_coil'),
                                "Peak Trip Coil 1 Current (A)": kpis.get('peak_trip_coil_1'),
                                "Peak Trip Coil 2 Current (A)": kpis.get('peak_trip_coil_2'),
                                "SF6 Pressure (bar)": kpis.get('sf6_pressure'),
                                "Ambient Temperature (°C)": kpis.get('ambient_temp'),
                                "Main Wipe (mm)": kpis.get('main_wipe'),
                                "Arc Wipe (mm)": kpis.get('arc_wipe'),
                                "Contact Travel Distance (mm)": kpis.get('contact_travel')
                            }

                            # Also construct the list format for raj_ai_agent if needed, but detect_fault takes sample_kpis dict
                            # detect_fault expects: {'kpis': [{'name': '...', 'value': ...}]}
                            raj_ai_kpis = {
                                "kpis": [
                                    {"name": "Closing Time", "unit": "ms", "value": kpis.get('closing_time')},
                                    {"name": "Opening Time", "unit": "ms", "value": kpis.get('opening_time')},
                                    {"name": "DLRO Value", "unit": "µΩ", "value": kpis.get('dlro')},
                                    {"name": "Peak Resistance", "unit": "µΩ", "value": kpis.get('peak_resistance')},
                                    {"name": "Contact Speed", "unit": "m/s", "value": kpis.get('contact_speed')},
                                    {"name": "Peak Close Coil Current", "unit": "A", "value": kpis.get('peak_close_coil')},
                                    {"name": "Peak Trip Coil 1 Current", "unit": "A", "value": kpis.get('peak_trip_coil_1')},
                                    {"name": "Peak Trip Coil 2 Current", "unit": "A", "value": kpis.get('peak_trip_coil_2')},
                                    {"name": "SF6 Pressure", "unit": "bar", "value": kpis.get('sf6_pressure')},
                                    {"name": "Ambient Temperature", "unit": "°C", "value": kpis.get('ambient_temp')}
                                ]
                            }

                            # Prepare row values for Rule Engine
                            # We need 401 points. If df has > 401, take first 401. If < 401, pad?
                            # Assuming standard 400ms data at 1ms sample rate.
                            # If Time_ms is present, we can try to resample or just take the Resistance column values.
                            # raj_rule_engine.standardize_input handles this.

                            # Create a temp df for standardize_input
                            temp_df = df[['Resistance']].copy()
                            # Ensure we have enough data or handle it
                            if len(temp_df) < 401:
                                # Pad with last value
                                last_val = temp_df.iloc[-1, 0]
                                padding = pd.DataFrame({'Resistance': [last_val] * (401 - len(temp_df))})
                                temp_df = pd.concat([temp_df, padding], ignore_index=True)

                            std_df = standardize_input(temp_df)
                            row_values = std_df.iloc[0].values.tolist()

                            # --- Run Rule Engine ---
                            rule_engine_result = analyze_dcrm_advanced(row_values, raj_kpis)

                            # --- Run AI Agent ---
                            # detect_fault takes (df, sample_kpis)
                            # We can pass the original df, it calls standardize_input internally
                            ai_agent_result = detect_fault(df, raj_ai_kpis)

                            # --- Run ViT Model (Visual Inspection) ---
                            vit_result = None
                            # --- Run ViT Model (Visual Inspection) ---
                            # NOTE(review): the comment and assignment above are
                            # duplicated - one copy can be deleted harmlessly.
                            vit_result = None
                            vit_plot_path = "temp_vit_plot.png"

                            # Always generate plot (doesn't require torch)
                            # NOTE(review): if the ViT import failed,
                            # plot_resistance_for_vit is None and this call raises
                            # TypeError, which the except below swallows - consider
                            # guarding with VIT_AVAILABLE first.
                            plot_generated = False
                            try:
                                if plot_resistance_for_vit(df, vit_plot_path):
                                    plot_generated = True
                            except Exception as e:
                                print(f"ViT Plot generation failed: {e}")

                            # Try prediction if plot exists and VIT is available
                            if plot_generated and VIT_AVAILABLE:
                                try:
                                    vit_class, vit_conf, vit_details = predict_dcrm_image(vit_plot_path)
                                    if vit_class:  # Ensure we got a result
                                        vit_result = {
                                            "class": vit_class,
                                            "confidence": vit_conf,
                                            "details": vit_details
                                        }
                                except Exception as e:
                                    print(f"ViT Prediction failed: {e}")

                            # ============================================
                            # SECTION 1: CBHI SCORE & KPIs
                            # ============================================

                            # Calculate CBHI using new logic
                            # Prepare phase_data for CBHI
                            cbhi_phase_data = {}
                            if 'phaseWiseAnalysis' in phase_analysis_result:
                                for phase in phase_analysis_result['phaseWiseAnalysis']:
                                    p_name = f"Phase {phase.get('phaseNumber')}"
                                    cbhi_phase_data[p_name] = {
                                        "status": phase.get('status', 'Unknown'),
                                        "confidence": phase.get('confidence', 0)
                                    }

                            # Compute Score
                            cbhi_score = compute_cbhi(raj_ai_kpis['kpis'], ai_agent_result, cbhi_phase_data)

                            # --- Run Recommendation Agent ---
                            with st.spinner("Generating Maintenance Recommendations..."):
                                recommendations = generate_recommendations(
                                    kpis=kpis,
                                    cbhi_score=cbhi_score,
                                    rule_faults=rule_engine_result.get("Fault_Detection", []),
                                    ai_faults=ai_agent_result.get("Fault_Detection", []),
                                    llm=llm
                                )

                            st.markdown("## 🏆 Composite Breaker Health Index (CBHI)")

                            # CBHI Score Display
                            cbhi_col1, cbhi_col2, cbhi_col3 = st.columns([1, 2, 1])

                            with cbhi_col1:
                                st.markdown("")  # Spacer

                            with cbhi_col2:
                                # Large centered score; colour/banner chosen by
                                # thresholds: >=90 green, >=75 orange, else red.
                                if cbhi_score >= 90:
                                    score_color = "#27ae60"  # Green
                                    status_text = "✅ Excellent Condition"
                                    status_color = "success"
                                    gradient = "linear-gradient(135deg, #11998e 0%, #38ef7d 100%)"
                                elif cbhi_score >= 75:
                                    score_color = "#f39c12"  # Orange
                                    status_text = "⚠️ Good - Minor Review Needed"
                                    status_color = "warning"
                                    gradient = "linear-gradient(135deg, #f093fb 0%, #f5576c 100%)"
                                else:
                                    score_color = "#e74c3c"  # Red
                                    status_text = "🚨 Critical Attention Required"
                                    status_color = "error"
                                    gradient = "linear-gradient(135deg, #fa709a 0%, #fee140 100%)"

                                st.markdown(f"""
                                <div style='text-align: center; padding: 30px; background: {gradient}; border-radius: 20px; box-shadow: 0 8px 16px rgba(0,0,0,0.2);'>
                                    <h1 style='color: white; font-size: 5rem; margin: 0; text-shadow: 2px 2px 4px rgba(0,0,0,0.3);'>{cbhi_score}</h1>
                                    <p style='color: white; font-size: 1.4rem; margin: 10px 0; font-weight: 500;'>out of 100</p>
                                </div>
                                """, unsafe_allow_html=True)

                                st.markdown("<br>", unsafe_allow_html=True)

                                if status_color == "success":
                                    st.success(status_text)
                                elif status_color == "warning":
                                    st.warning(status_text)
                                else:
                                    st.error(status_text)

                            with cbhi_col3:
                                st.markdown("")  # Spacer

                            st.markdown("---")

                            # KPIs Section
                            st.markdown("### 📊 Key Performance Indicators")
                            st.caption("Measured parameters from DCRM test data")

                            # Timing & Motion
                            with st.container():
                                st.markdown("**⏱️ Timing & Motion**")
                                k1, k2, k3, k4 = st.columns(4)
                                k1.metric("Closing Time", f"{kpis['closing_time']} ms", help="Time taken for contacts to close")
                                k2.metric("Opening Time", f"{kpis['opening_time']} ms", help="Time taken for contacts to open")
                                k3.metric("Contact Speed", f"{kpis['contact_speed']} m/s", help="Average contact movement speed")
                                k4.metric("Contact Travel", f"{kpis['contact_travel']} mm", help="Total mechanical travel distance")

                            st.markdown("")

                            # Contact Health
                            with st.container():
                                st.markdown("**🔌 Contact Health**")
                                k5, k6, k7, k8 = st.columns(4)
                                k5.metric("DLRO", f"{kpis['dlro']} µΩ", help="Dynamic Low Resistance - contact quality indicator")
                                k6.metric("Peak Resistance", f"{kpis['peak_resistance']} µΩ", help="Maximum resistance during operation")
                                k7.metric("Main Wipe", f"{kpis['main_wipe']} mm", help="Main contact wipe distance")
                                k8.metric("Arc Wipe", f"{kpis['arc_wipe']} mm", help="Arcing contact wipe distance")

                            st.markdown("")

                            # Electrical & Environment
                            with st.container():
                                st.markdown("**⚡ Electrical & Environment**")
                                k9, k10, k11, k12, k13 = st.columns(5)
                                k9.metric("Close Coil", f"{kpis['peak_close_coil']} A", help="Peak closing coil current")
                                k10.metric("Trip Coil 1", f"{kpis['peak_trip_coil_1']} A", help="Peak trip coil 1 current")
                                k11.metric("Trip Coil 2", f"{kpis['peak_trip_coil_2']} A", help="Peak trip coil 2 current")
                                k12.metric("Temperature", f"{kpis['ambient_temp']} °C", help="Ambient temperature during test")
                                k13.metric("SF6 Pressure", f"{kpis['sf6_pressure']} bar", help="SF6 gas pressure")

                            st.markdown("---")

                            # ============================================
                            # SECTION 2: SEGMENTED GRAPH WITH ZONES
                            # ============================================

                            st.markdown("## 📈 DCRM Waveforms with Phase Segmentation")
                            st.caption("Programmatic phase detection using resistance, current, and travel thresholds")

                            # Generate the full JSON report first to get phase boundaries
                            with st.spinner("Analyzing phases and generating report..."):
                                full_report = generate_dcrm_json(
                                    df=df,
                                    kpis=kpis,
                                    cbhi_score=cbhi_score,
                                    rule_result=rule_engine_result,
                                    ai_result=ai_agent_result,
                                    llm=llm,
                                    vit_result=vit_result,
                                    phase_analysis_result=phase_analysis_result,
                                    recommendations=recommendations
                                )

                            # Get phase boundaries and create zone timing table
                            # NOTE(review): this rebinds phase_to_zone_map / zones /
                            # phase_timings, discarding the versions built from
                            # phase_analysis_result above - presumably intentional
                            # (the report's boundaries win); confirm.
                            phase_to_zone_map = {
                                "Pre-Contact Travel": "zone_1_pre_contact",
                                "Arcing Contact Engagement & Arc Initiation": "zone_2_arcing_engagement",
                                "Main Contact Conduction": "zone_3_main_conduction",
                                "Main Contact Parting & Arc Elongation": "zone_4_parting",
                                "Final Open State": "zone_5_final_open"
                            }

                            zones = {}
                            phase_timings = []

                            if 'phaseWiseAnalysis' in full_report:
                                for phase in full_report['phaseWiseAnalysis']:
                                    phase_name = phase.get('name', '')
                                    zone_key = phase_to_zone_map.get(phase_name)
                                    start_time = phase.get('startTime', 0)
                                    end_time = phase.get('endTime', 0)
                                    duration = end_time - start_time

                                    if zone_key:
                                        zones[zone_key] = {
                                            'start_ms': start_time,
                                            'end_ms': end_time
                                        }

                                    phase_timings.append({
                                        "Phase": phase_name,
                                        "Start (ms)": start_time,
                                        "End (ms)": end_time,
                                        "Duration (ms)": duration
                                    })

                            # Display graph
                            fig_main = create_dcrm_plot(df, zones)
                            st.plotly_chart(fig_main, width='stretch')

                            # Display phase timing table
                            if phase_timings:
                                st.markdown("#### ⏱️ Phase Timing Breakdown")
                                timing_df = pd.DataFrame(phase_timings)
                                st.dataframe(timing_df, width='stretch', hide_index=True)

                            st.markdown("---")

                            # ============================================
                            # SECTION 3: PHASE-WISE DETAILED ANALYSIS
                            # ============================================

                            st.markdown("## 🔍 Phase-Wise Detailed Analysis")
                            st.caption("AI-enhanced analysis of each operational phase with diagnostic verdicts")

                            if 'phaseWiseAnalysis' in full_report:
                                for phase in full_report['phaseWiseAnalysis']:
                                    phase_num = phase.get('phaseNumber', 0)
                                    phase_name = phase.get('name', 'Unknown Phase')
                                    phase_title = phase.get('phaseTitle', phase_name)
                                    phase_desc = phase.get('description', '')
                                    confidence = phase.get('confidence', 0)

                                    # Color code by phase
                                    phase_colors = {
                                        1: "#ff9800",  # Orange
                                        2: "#ff26bd",  # Pink
                                        3: "#4caf50",  # Green
                                        4: "#2196f3",  # Blue
                                        5: "#a629ff"   # Purple
                                    }
                                    phase_color = phase_colors.get(phase_num, "#cccccc")

                                    with st.expander(f"**Phase {phase_num}: {phase_name}** | Confidence: {confidence}%", expanded=False):
                                        # Phase header
                                        st.markdown(f"""
                                        <div style='padding: 15px; background-color: {phase_color}22; border-left: 5px solid {phase_color}; border-radius: 5px; margin-bottom: 15px;'>
                                            <h4 style='margin: 0; color: {phase_color};'>{phase_title}</h4>
                                            <p style='margin: 5px 0 0 0; color: #666;'>{phase_desc}</p>
                                        </div>
                                        """, unsafe_allow_html=True)

                                        # Event Synopsis
                                        event_synopsis = phase.get('eventSynopsis', '')
                                        if event_synopsis:
                                            st.markdown("**📋 Event Synopsis**")
                                            st.info(event_synopsis)

                                        # Key Characteristics
                                        characteristics = phase.get('details', {}).get('characteristics', [])
                                        if characteristics:
                                            st.markdown("**🔑 Key Characteristics**")
                                            for char in characteristics:
                                                st.markdown(f"- {char}")

                                        # Waveform Analysis
                                        waveform = phase.get('waveformAnalysis', {})
                                        if waveform:
                                            st.markdown("**📊 Waveform Analysis**")

                                            wave_col1, wave_col2, wave_col3 = st.columns(3)

                                            with wave_col1:
                                                st.markdown("**Resistance**")
                                                st.caption(waveform.get('resistance', 'N/A'))

                                            with wave_col2:
                                                st.markdown("**Current**")
                                                st.caption(waveform.get('current', 'N/A'))

                                            with wave_col3:
                                                st.markdown("**Travel**")
                                                st.caption(waveform.get('travel', 'N/A'))

                                        # Diagnostic Verdict
                                        verdict = phase.get('diagnosticVerdict', '')
                                        if verdict:
                                            st.markdown("**🩺 Diagnostic Verdict & Justification**")
                                            st.success(verdict)

                            st.markdown("---")

                            # ============================================
                            # SECTION 4: AI AGENT ANALYSIS
                            # ============================================

                            st.markdown("## 🤖 AI Agent Analysis (Generative)")
                            st.caption("Deep learning-based fault detection using physics-informed signatures")

                            ai_faults = ai_agent_result.get("Fault_Detection", [])
                            if not ai_faults:
                                st.success("✅ **No AI-detected faults found.** System appears healthy based on generative AI analysis.")
                            else:
                                for idx, fault in enumerate(ai_faults):
                                    name = fault.get("defect_name", "Unknown")
                                    conf = fault.get("Confidence", "0%")
                                    sev = fault.get("Severity", "Low")
                                    desc = fault.get("description", "")

                                    # Check if this is "No Secondary Defect Detected"
                                    if "no secondary defect" in name.lower() or "no defect" in name.lower():
                                        st.success(f"✅ **{name}**")
                                    else:
                                        # Color code by severity
                                        if sev == "Critical":
                                            icon = "🔴"
                                            border_color = "#e74c3c"
                                        elif sev == "High":
                                            icon = "🟠"
                                            border_color = "#f39c12"
                                        else:
                                            icon = "🟡"
                                            border_color = "#f1c40f"

                                        with st.expander(f"{icon} **{name}** | Confidence: {conf} | Severity: {sev}", expanded=(sev in ["Critical", "High"])):
                                            st.markdown(f"""
                                            <div style='padding: 10px; background-color: {border_color}11; border-left: 4px solid {border_color}; border-radius: 5px;'>
                                                <p style='margin: 0;'><strong>AI Reasoning:</strong> {desc}</p>
                                            </div>
                                            """, unsafe_allow_html=True)

                            st.markdown("---")

                            # ============================================
                            # SECTION 5: RULE ENGINE ANALYSIS
                            # ============================================

                            st.markdown("## ⚙️ Rule-Based Analysis (Deterministic)")
                            st.caption("Threshold-based fault detection using industry standards and expert knowledge")

                            re_faults = rule_engine_result.get("Fault_Detection", [])
                            if not re_faults:
                                st.success("✅ **No rule-based faults detected.** All parameters within acceptable ranges per industry standards.")
                            else:
                                for idx, fault in enumerate(re_faults):
                                    name = fault.get("defect_name", "Unknown")
                                    conf = fault.get("Confidence", "0%")
                                    sev = fault.get("Severity", "Low")
                                    desc = fault.get("description", "")

                                    # Color code by severity
                                    if sev == "Critical":
                                        icon = "🔴"
                                        border_color = "#e74c3c"
                                    elif sev == "High":
                                        icon = "🟠"
                                        border_color = "#f39c12"
                                    else:
                                        icon = "🟡"
                                        border_color = "#f1c40f"

                                    with st.expander(f"{icon} **{name}** | Confidence: {conf} | Severity: {sev}", expanded=(sev in ["Critical", "High"])):
                                        st.markdown(f"""
                                        <div style='padding: 10px; background-color: {border_color}11; border-left: 4px solid {border_color}; border-radius: 5px;'>
                                            <p style='margin: 0;'><strong>Rule Engine Reasoning:</strong> {desc}</p>
                                        </div>
                                        """, unsafe_allow_html=True)

                            st.markdown("---")

                            # ============================================
                            # SECTION 6: ViT MODEL ANALYSIS
                            # ============================================

                            st.markdown("## 📸 Visual Pattern Recognition (ViT Model)")
                            st.caption("Computer vision-based classification using Vision Transformer neural network")

                            vit_col1, vit_col2 = st.columns([1, 2])

                            with vit_col1:
                                if plot_generated:
                                    st.image(vit_plot_path, caption="Resistance Curve Input", width='stretch')
                                else:
                                    st.warning("⚠️ Could not generate visualization.")

                            with vit_col2:
                                if vit_result:
                                    pred_class = vit_result['class']
                                    pred_conf = vit_result['confidence'] * 100

                                    st.markdown("### 🎯 Prediction Results (ViT + Gemini Ensemble)")

                                    metric_col1, metric_col2 = st.columns(2)
                                    with metric_col1:
                                        st.metric("Predicted Condition", pred_class)
                                    with metric_col2:
                                        st.metric("Ensemble Confidence", f"{pred_conf:.1f}%")

                                    st.markdown("---")

                                    # Show Breakdown if available
                                    details = vit_result.get("details", {})
                                    if details and details.get("gemini_probs"):
                                        with st.expander("📊 Detailed Ensemble Breakdown", expanded=True):
                                            st.caption("Comparison of ViT Model and Gemini Expert Analysis")

                                            # Prepare data for chart
                                            vit_probs = details.get("vit_probs", {})
                                            gemini_probs = details.get("gemini_probs", {})
                                            ensemble_scores = details.get("ensemble_scores", {})

                                            chart_data = []
                                            for cls, score in ensemble_scores.items():
                                                chart_data.append({
                                                    "Class": cls,
                                                    "ViT": vit_probs.get(cls, 0),
                                                    "Gemini": gemini_probs.get(cls, 0),
                                                    "Ensemble": score
                                                })

                                            if chart_data:
                                                st.bar_chart(
                                                    pd.DataFrame(chart_data).set_index("Class")[["ViT", "Gemini", "Ensemble"]]
                                                )

                                    if pred_class == "Healthy":
                                        st.success("✅ **Visual pattern matches healthy signature.**")
                                    else:
                                        st.warning(f"⚠️ **Visual pattern suggests: {pred_class}**")

                                elif not VIT_AVAILABLE:
                                    st.info("ℹ️ **ViT Model Unavailable**\n\nThe Vision Transformer model requires PyTorch.")
                                else:
                                    st.warning("⚠️ **Prediction failed.** Model may not be loaded correctly or input image is invalid.")

                            st.markdown("---")

                            # ============================================
                            # SECTION 7: ADDITIONAL DIAGNOSTICS
                            # ============================================

                            st.markdown("## 🔬 Additional Diagnostic Plots")
                            st.caption("Supplementary visualizations for detailed analysis")

                            diag_col1, diag_col2 = st.columns(2)

                            with diag_col1:
                                st.markdown("### Contact Velocity Profile")
                                st.caption("Derivative of travel - indicates mechanical performance")
                                fig_vel = create_velocity_plot(df)
                                st.plotly_chart(fig_vel, width='stretch')

                            with diag_col2:
                                st.markdown("### Resistance Detail (Log Scale)")
                                st.caption("Logarithmic view reveals subtle resistance variations")
                                fig_res = create_resistance_zoom_plot(df)
                                st.plotly_chart(fig_res, width='stretch')

                            st.markdown("---")
                            # NOTE(review): duplicated divider below - renders two
                            # horizontal rules; one can be removed.
                            st.markdown("---")

                            # ============================================
                            # SECTION 8: RECOMMENDATIONS & PREDICTIONS
                            # ============================================

                            st.markdown("## 🛠️ Recommendations & Future Fault Predictions")
                            st.caption("AI-generated maintenance actions and predictive failure analysis")

                            # Maintenance Actions
                            if recommendations.get("maintenanceActions"):
                                st.markdown("### 🔧 Recommended Maintenance Actions")
                                for group in recommendations["maintenanceActions"]:
                                    priority = group.get("priority", "Priority")
                                    color = group.get("color", "#333")
                                    bg_color = group.get("bgColor", "#eee")

                                    st.markdown(f"""
                                    <div style='background-color: {bg_color}; padding: 10px; border-radius: 5px; border-left: 5px solid {color}; margin-bottom: 10px;'>
                                        <h4 style='color: {color}; margin: 0;'>{priority}</h4>
                                    </div>
                                    """, unsafe_allow_html=True)

                                    for action in group.get("actions", []):
                                        with st.expander(f"**{action.get('title')}**"):
                                            st.markdown(f"**Justification:** {action.get('justification')}")
                                            st.markdown(f"**Timeline:** {action.get('timeline')}")
                                            if action.get("whatToLookFor"):
                                                st.markdown("**What to Look For:**")
                                                for item in action["whatToLookFor"]:
                                                    st.markdown(f"- {item}")

                                st.markdown("---")

                            # Future Faults
                            if recommendations.get("futureFaultsPdf"):
                                st.markdown("### 🔮 Future Fault Predictions")

                                cols = st.columns(len(recommendations["futureFaultsPdf"]))
                                for idx, fault in enumerate(recommendations["futureFaultsPdf"]):
                                    with cols[idx % 4]:  # Wrap around if many
                                        prob = fault.get("probability", 0)
                                        color = fault.get("color", "#333")

                                        st.markdown(f"""
                                        <div style='text-align: center; border: 1px solid #ddd; border-radius: 10px; padding: 15px; height: 100%;'>
                                            <h3 style='color: {color};'>{prob}%</h3>
                                            <p style='font-weight: bold;'>{fault.get('fault')}</p>
                                            <p style='font-size: 0.9em; color: #666;'>{fault.get('timeline')}</p>
                                            <hr>
                                            <p style='font-size: 0.8em; text-align: left;'>{fault.get('evidence')}</p>
                                        </div>
                                        """, unsafe_allow_html=True)

                                st.markdown("---")

                            # RUL Estimation
                            ai_verdict = full_report.get("aiVerdict", {})
                            rul_est = ai_verdict.get("rulEstimate")
                            uncertainty = ai_verdict.get("uncertainty")

                            if rul_est:
                                st.markdown("### ⏳ Remaining Useful Life (RUL) Estimation")
                                st.caption("Estimated remaining operations based on current degradation")

                                rul_col1, rul_col2 = st.columns(2)
                                with rul_col1:
                                    st.metric("RUL Estimate", rul_est, delta=uncertainty, delta_color="off")
                                with rul_col2:
                                    st.info(f"**Uncertainty:** {uncertainty}\n\nThis estimate considers contact wear, timing deviations, and coil health.")

                                st.markdown("---")

                            # AI Strategic Advice
                            ai_advice = ai_verdict.get("aiAdvice", [])
                            if ai_advice:
                                st.markdown("## 🧠 AI Strategic Advisory")
                                st.caption("Expert recommendations based on engineering analysis")

                                for advice in ai_advice:
                                    color = advice.get("color", "#333")
                                    title = advice.get("title", "Recommendation")
                                    desc = advice.get("description", "")
                                    impact = advice.get("expectedImpact", "")
                                    priority = advice.get("priority", "Medium")

                                    with st.expander(f"**{priority}**: {title}", expanded=(priority=="Critical")):
                                        st.markdown(f"""
                                        <div style='border-left: 5px solid {color}; padding-left: 15px;'>
                                            <p><strong>Rationale:</strong> {desc}</p>
                                            <p><strong>Expected Impact:</strong> {impact}</p>
                                        </div>
                                        """, unsafe_allow_html=True)

                                        effects = advice.get("effectAnalysis", {})
                                        if effects:
                                            e_col1, e_col2 = st.columns(2)
                                            with e_col1:
                                                if effects.get("shortTerm"):
                                                    st.markdown("**Short Term Benefits:**")
                                                    for item in effects["shortTerm"]:
                                                        st.markdown(f"- {item}")
                                            with e_col2:
                                                if effects.get("longTerm"):
                                                    st.markdown("**Long Term Benefits:**")
                                                    for item in effects["longTerm"]:
                                                        st.markdown(f"- {item}")

                                st.markdown("---")

                            # ============================================
                            # SECTION 9: DOWNLOAD REPORT
                            # ============================================

                            st.markdown("## 📥 Export Analysis Report")
                            st.caption("Download complete JSON report with all metrics, phase data, and AI insights")

                            col_download1, col_download2, col_download3 = st.columns([1, 2, 1])

                            with col_download1:
                                st.markdown("")  # Spacer

                            with col_download2:
                                st.download_button(
                                    label="📄 Download Full Report (JSON)",
                                    data=json.dumps(full_report, indent=2),
                                    file_name=f"dcrm_analysis_report_{pd.Timestamp.now().strftime('%Y%m%d_%H%M%S')}.json",
                                    mime="application/json",
                                    width='stretch'
                                )
                                st.caption("✅ Includes: KPIs, CBHI score, phase analysis, AI verdicts, rule engine results, and ViT predictions")

                            with col_download3:
                                st.markdown("")  # Spacer

                        except Exception as e:
                            st.error(f"Analysis failed: {str(e)}")
                            # NOTE(review): 'content' is never assigned in this
                            # function, so this always prints "No content" -
                            # likely a leftover from an earlier implementation.
                            st.expander("Debug Info").write(content if 'content' in locals() else "No content")
                else:
                    st.info("Click 'Run Advanced Analysis' to start.")
                    with st.expander("Preview Raw Data"):
                        st.dataframe(df.head())

        except Exception as e:
            st.error(f"Error reading CSV file: {str(e)}")
|
| 865 |
+
|
| 866 |
+
# Script entry point: launch the Streamlit dashboard when run directly
# (streamlit executes this module as __main__).
if __name__ == "__main__":
    main()
|
apps/temp_vit_plot.png
ADDED
|
core/agents/__pycache__/advice.cpython-313.pyc
ADDED
|
Binary file (22.9 kB). View file
|
|
|
core/agents/__pycache__/advice.cpython-39.pyc
ADDED
|
Binary file (21.3 kB). View file
|
|
|
core/agents/__pycache__/diagnosis.cpython-313.pyc
ADDED
|
Binary file (30.6 kB). View file
|
|
|
core/agents/__pycache__/diagnosis.cpython-39.pyc
ADDED
|
Binary file (24.9 kB). View file
|
|
|
core/agents/__pycache__/plotting.cpython-313.pyc
ADDED
|
Binary file (15.8 kB). View file
|
|
|
core/agents/__pycache__/recommendation.cpython-313.pyc
ADDED
|
Binary file (18.5 kB). View file
|
|
|
core/agents/__pycache__/recommendation.cpython-39.pyc
ADDED
|
Binary file (16.5 kB). View file
|
|
|
core/agents/advice.py
ADDED
|
@@ -0,0 +1,395 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Previous Name: analysis/agents/advice_agent.py
import os
import sys
import json
import google.generativeai as genai

# Set UTF-8 encoding for console output
if sys.stdout.encoding != 'utf-8':
    sys.stdout.reconfigure(encoding='utf-8')

# =========================
# CONFIGURATION
# =========================
# SECURITY: never embed API keys in source control. The previous fallback
# literal ("AIzaSy...") was a leaked credential — it must be revoked and
# the key supplied exclusively via the GOOGLE_API_KEY environment variable.
API_KEY = os.environ.get("GOOGLE_API_KEY")
if API_KEY:
    genai.configure(api_key=API_KEY)
else:
    # Surface the misconfiguration at import time instead of failing with an
    # opaque auth error on the first Gemini call.
    print("⚠️ GOOGLE_API_KEY is not set; Gemini API calls will fail.")

MODEL_NAME = "gemini-2.0-flash"
|
| 18 |
+
|
| 19 |
+
# =========================
|
| 20 |
+
# AI ADVICE GENERATION PROMPT
|
| 21 |
+
# =========================
|
| 22 |
+
AI_ADVICE_PROMPT = """
|
| 23 |
+
Role: You are an elite Circuit Breaker Condition Monitoring & Diagnostics Expert with deep domain expertise in DCRM (Dynamic Contact Resistance Measurement), operating mechanism health, SF6 gas systems, contact metallurgy, and circuit breaker failure modes. You understand the nuances of contact bounce, contact wear progression, trip coil degradation, insulation breakdown, and mechanical timing issues.
|
| 24 |
+
|
| 25 |
+
Your Task: Based on comprehensive diagnostic data including RUL analysis, maintenance recommendations, and future fault predictions, generate 3-5 **Strategic Advisory Recommendations** that provide actionable insights grounded in circuit breaker engineering principles and field maintenance best practices.
|
| 26 |
+
|
| 27 |
+
===== INPUT DATA =====
|
| 28 |
+
|
| 29 |
+
**RUL Analysis (Remaining Useful Life):**
|
| 30 |
+
{rul_json}
|
| 31 |
+
|
| 32 |
+
**Maintenance Actions:**
|
| 33 |
+
{maintenance_json}
|
| 34 |
+
|
| 35 |
+
**Future Fault Predictions:**
|
| 36 |
+
{future_faults_json}
|
| 37 |
+
|
| 38 |
+
**KPIs (Key Performance Indicators):**
|
| 39 |
+
{kpis_json}
|
| 40 |
+
|
| 41 |
+
**CBHI Score:**
|
| 42 |
+
{cbhi_score} / 100
|
| 43 |
+
|
| 44 |
+
**Phase-Wise Analysis:**
|
| 45 |
+
{phase_analysis_json}
|
| 46 |
+
|
| 47 |
+
**AI Fault Detection:**
|
| 48 |
+
{ai_verdict_json}
|
| 49 |
+
|
| 50 |
+
===== AI ADVICE GUIDELINES =====
|
| 51 |
+
|
| 52 |
+
**Purpose**: Provide **domain-rich, engineering-focused** recommendations that demonstrate deep understanding of circuit breaker systems. Focus on:
|
| 53 |
+
- Contact resistance mitigation strategies and bounce reduction techniques
|
| 54 |
+
- Operating mechanism calibration and spring tension optimization
|
| 55 |
+
- SF6 gas management protocols and leak detection
|
| 56 |
+
- Contact refurbishment timing based on resistance trends
|
| 57 |
+
- Trip coil redundancy monitoring and protection system integrity
|
| 58 |
+
- Mechanical timing adjustments and damping system optimization
|
| 59 |
+
|
| 60 |
+
**Advice Categories to Consider (Based on Maintenance Actions & Future Faults):**
|
| 61 |
+
1. **Critical Interventions** (Priority: Critical, Color: #B71C1C)
|
| 62 |
+
- Address Priority 1 maintenance actions and high-probability future faults
|
| 63 |
+
- Focus on trip coil failures, severe contact wear (>250µΩ DLRO), SF6 leaks
|
| 64 |
+
- Examples: "Optimize Contact Bounce Mitigation Strategy", "Enhance SF6 Gas Management Protocol"
|
| 65 |
+
|
| 66 |
+
2. **High-Priority Optimizations** (Priority: High, Color: #F57F17)
|
| 67 |
+
- Address Priority 2 maintenance actions and medium-probability future faults
|
| 68 |
+
- Focus on operating mechanism issues, contact refurbishment, insulation degradation
|
| 69 |
+
- Examples: "Recalibrate Operating Mechanism Timing", "Implement Contact Wear Tracking Program"
|
| 70 |
+
|
| 71 |
+
3. **Preventive Enhancements** (Priority: High/Medium, Color: #F57F17 or #0D47A1)
|
| 72 |
+
- Address Priority 3+ maintenance actions and low-probability future faults
|
| 73 |
+
- Focus on long-term reliability improvements and preventive measures
|
| 74 |
+
- Examples: "Optimize Lubrication & Damping Systems", "Enhance Phase-Wise Diagnostic Monitoring"
|
| 75 |
+
|
| 76 |
+
**CRITICAL JSON FORMATTING RULES:**
|
| 77 |
+
- All strings must use double quotes (")
|
| 78 |
+
- Escape any double quotes within strings using backslash (\")
|
| 79 |
+
- Do NOT use smart quotes, apostrophes, or special Unicode quotes
|
| 80 |
+
- Ensure all brackets and braces are properly matched
|
| 81 |
+
- Use standard ASCII characters only in field values
|
| 82 |
+
|
| 83 |
+
**Output Structure (EXACT format required):**
|
| 84 |
+
|
| 85 |
+
{{
|
| 86 |
+
"aiAdvice": [
|
| 87 |
+
{{
|
| 88 |
+
"color": "#B71C1C",
|
| 89 |
+
"confidence": 94,
|
| 90 |
+
"description": "Clear, domain-specific description focusing on circuit breaker systems. Explain the engineering rationale and field maintenance approach. Reference specific components like contact springs, damping dashpots, SF6 purity, contact metallurgy, or mechanism linkages. Keep it concise but technically rich (MAX 250 characters).",
|
| 91 |
+
"effectAnalysis": {{
|
| 92 |
+
"longTerm": [
|
| 93 |
+
"Long-term benefit 1 (e.g., 'Extended circuit breaker service life by 25-30%')",
|
| 94 |
+
"Long-term benefit 2 (e.g., 'Reduced maintenance costs by $50,000-75,000 annually')",
|
| 95 |
+
"Long-term benefit 3 (e.g., 'Improved grid reliability and reduced downtime')",
|
| 96 |
+
"Long-term benefit 4 (e.g., 'Predictive maintenance scheduling optimization')"
|
| 97 |
+
],
|
| 98 |
+
"performanceGains": [
|
| 99 |
+
"Performance gain 1 (e.g., 'Improved contact resistance stability')",
|
| 100 |
+
"Performance gain 2 (e.g., 'Enhanced operational reliability')"
|
| 101 |
+
],
|
| 102 |
+
"riskMitigation": [
|
| 103 |
+
"Risk mitigation 1 (e.g., 'Prevent catastrophic failures through early detection')",
|
| 104 |
+
"Risk mitigation 2 (e.g., 'Reduce unplanned outages by 75%')"
|
| 105 |
+
],
|
| 106 |
+
"shortTerm": [
|
| 107 |
+
"Short-term benefit 1 (e.g., 'Immediate improvement in contact resistance stability')",
|
| 108 |
+
"Short-term benefit 2 (e.g., 'Reduced contact bounce amplitude by 60-70%')",
|
| 109 |
+
"Short-term benefit 3 (e.g., 'Enhanced SF6 pressure monitoring accuracy')",
|
| 110 |
+
"Short-term benefit 4 (e.g., 'Real-time fault detection capabilities')"
|
| 111 |
+
]
|
| 112 |
+
}},
|
| 113 |
+
"expectedImpact": "Primary quantifiable or qualitative benefit (e.g., 'Reduce contact wear rate by 40%', 'Prevent contact welding risk', 'Extend asset life by 2-3 years'). Be specific and measurable (MAX 100 characters).",
|
| 114 |
+
"id": "1",
|
| 115 |
+
"priority": "Critical",
|
| 116 |
+
"title": "Domain-specific, action-oriented title (e.g., 'Optimize Contact Bounce Mitigation Strategy', 'Enhance SF6 Gas Management Protocol'). MAX 80 characters."
|
| 117 |
+
}},
|
| 118 |
+
{{
|
| 119 |
+
"color": "#F57F17",
|
| 120 |
+
"confidence": 89,
|
| 121 |
+
"description": "Second most critical advice with engineering depth.",
|
| 122 |
+
"effectAnalysis": {{
|
| 123 |
+
"longTerm": ["benefit 1", "benefit 2", "benefit 3", "benefit 4"],
|
| 124 |
+
"performanceGains": ["gain 1", "gain 2"],
|
| 125 |
+
"riskMitigation": ["mitigation 1", "mitigation 2"],
|
| 126 |
+
"shortTerm": ["benefit 1", "benefit 2", "benefit 3", "benefit 4"]
|
| 127 |
+
}},
|
| 128 |
+
"expectedImpact": "Expected benefit or risk reduction.",
|
| 129 |
+
"id": "2",
|
| 130 |
+
"priority": "High",
|
| 131 |
+
"title": "Second priority engineering recommendation"
|
| 132 |
+
}},
|
| 133 |
+
{{
|
| 134 |
+
"color": "#F57F17",
|
| 135 |
+
"confidence": 92,
|
| 136 |
+
"description": "Third strategic recommendation with domain expertise.",
|
| 137 |
+
"effectAnalysis": {{
|
| 138 |
+
"longTerm": ["benefit 1", "benefit 2", "benefit 3", "benefit 4"],
|
| 139 |
+
"performanceGains": ["gain 1", "gain 2"],
|
| 140 |
+
"riskMitigation": ["mitigation 1", "mitigation 2"],
|
| 141 |
+
"shortTerm": ["benefit 1", "benefit 2", "benefit 3", "benefit 4"]
|
| 142 |
+
}},
|
| 143 |
+
"expectedImpact": "Quantifiable impact or performance improvement.",
|
| 144 |
+
"id": "3",
|
| 145 |
+
"priority": "High",
|
| 146 |
+
"title": "Third priority optimization strategy"
|
| 147 |
+
}}
|
| 148 |
+
]
|
| 149 |
+
}}
|
| 150 |
+
|
| 151 |
+
===== ANALYSIS INSTRUCTIONS =====
|
| 152 |
+
|
| 153 |
+
**Step 1: Synthesize All Input Data**
|
| 154 |
+
- Analyze RUL estimate and uncertainty to assess urgency
|
| 155 |
+
- Review maintenance priorities to identify critical failure modes
|
| 156 |
+
- Examine future fault predictions to understand progression risks
|
| 157 |
+
- Cross-reference KPIs, phase analysis, and AI verdict for root cause insights
|
| 158 |
+
|
| 159 |
+
**Step 2: Generate Strategic AI Advice (3-5 items)**
|
| 160 |
+
- **CRITICAL: Generate exactly 3-5 advice items** (minimum 3, maximum 5)
|
| 161 |
+
- **CRITICAL: id field is sequential** ("1", "2", "3", "4", "5")
|
| 162 |
+
- **CRITICAL JSON FIELD ORDER**: color → confidence → description → effectAnalysis → expectedImpact → id → priority → title
|
| 163 |
+
- **effectAnalysis Structure**: MUST contain all 4 arrays: "longTerm" (4 items), "performanceGains" (2 items), "riskMitigation" (2 items), "shortTerm" (4 items)
|
| 164 |
+
- **HARDCODED Color scheme**:
|
| 165 |
+
* 1st advice: "#B71C1C" (Critical priority)
|
| 166 |
+
* 2nd advice: "#F57F17" (High priority)
|
| 167 |
+
* 3rd advice: "#F57F17" (High priority)
|
| 168 |
+
* 4th advice (if any): "#0D47A1" (Medium priority)
|
| 169 |
+
* 5th advice (if any): "#2E7D32" (Low priority)
|
| 170 |
+
|
| 171 |
+
**Confidence Scoring:**
|
| 172 |
+
- 90-100: High confidence, strong data support, clear evidence chain
|
| 173 |
+
- 80-89: Good confidence, supported by multiple indicators
|
| 174 |
+
- 70-79: Moderate confidence, some uncertainty in data
|
| 175 |
+
- 60-69: Lower confidence, limited data or conflicting signals
|
| 176 |
+
|
| 177 |
+
**Priority Assignment:**
|
| 178 |
+
- **Critical**: RUL <500 cycles, imminent safety risk, coil failure, SF6 leak
|
| 179 |
+
- **High**: RUL <1500 cycles, significant degradation, contact wear >200µΩ
|
| 180 |
+
- **High**: RUL <3000 cycles, preventive optimization, performance improvement
|
| 181 |
+
- **Medium**: RUL >3000 cycles, long-term enhancements, technology upgrades
|
| 182 |
+
- **Low**: RUL >5000 cycles, continuous improvement, best practices
|
| 183 |
+
|
| 184 |
+
**Strategic Advice Themes to Consider (Domain-Rich, Engineering-Focused):**
|
| 185 |
+
|
| 186 |
+
**IMPORTANT: Base your recommendations on the SPECIFIC maintenance actions and future faults provided in the input data.**
|
| 187 |
+
|
| 188 |
+
1. **Contact Bounce Mitigation** (if contact bounce detected in 55-65ms region):
|
| 189 |
+
- "Optimize Contact Bounce Mitigation Strategy"
|
| 190 |
+
- Description: "Address the detected contact bounce (55-65ms region) through operating mechanism calibration and spring tension adjustment. Calibrate damping dashpot to reduce impact velocity and minimize bounce amplitude, preventing accelerated contact erosion."
|
| 191 |
+
- Short-term effects: "Immediate reduction in contact bounce amplitude by 60-70%", "Stabilized contact resistance during closing", "Enhanced closing operation smoothness", "Real-time bounce monitoring activation"
|
| 192 |
+
- Long-term effects: "Extended contact life by 25-30%", "Reduced risk of contact welding by 40%", "Lower maintenance frequency and costs", "Improved breaker reliability over 5+ years"
|
| 193 |
+
- Performance gains: "Improved contact resistance stability", "Enhanced operational reliability"
|
| 194 |
+
- Risk mitigation: "Prevent accelerated contact erosion", "Reduce risk of contact failure"
|
| 195 |
+
|
| 196 |
+
2. **SF6 Gas Management** (if SF6 issues, insulation anomalies, or Phase 5 low confidence):
|
| 197 |
+
- "Enhanced SF6 Gas Management Protocol"
|
| 198 |
+
- Description: "Implement predictive SF6 pressure monitoring with automated alerts for gas quality degradation and leakage detection. Monitor moisture content, purity, and decomposition products to prevent insulation breakdown."
|
| 199 |
+
- Short-term effects: "Real-time SF6 leak detection", "Enhanced gas purity monitoring accuracy", "Immediate identification of moisture ingress", "Automated alert system activation"
|
| 200 |
+
- Long-term effects: "Prevented insulation failures saving $75,000-100,000", "Extended gas replacement intervals", "Reduced environmental SF6 emissions", "Improved arc quenching performance consistency"
|
| 201 |
+
- Performance gains: "Maintained optimal arc quenching capability", "Enhanced dielectric strength stability"
|
| 202 |
+
- Risk mitigation: "Prevent insulation breakdown and flashover events", "Reduce environmental compliance risks"
|
| 203 |
+
|
| 204 |
+
3. **Trip Coil Redundancy** (if Trip Coil 2 failed or coil current = 0.0A):
|
| 205 |
+
- "Trip Coil Redundancy & Protection System Enhancement"
|
| 206 |
+
- Description: "With Trip Coil 2 failed (0.0A current), establish continuous monitoring of Trip Coil 1 operational status. Implement backup trip mechanisms and redundancy verification protocols to ensure critical fault interruption capability."
|
| 207 |
+
- Short-term effects: "Immediate trip coil status verification", "Backup protection activation", "Enhanced fault interruption assurance", "Real-time coil health monitoring"
|
| 208 |
+
- Long-term effects: "Prevented protection system failures by 95%", "Enhanced grid reliability and safety", "Reduced risk of catastrophic grid faults", "Extended protection system lifespan"
|
| 209 |
+
- Performance gains: "Guaranteed fault clearing capability", "Improved protection system reliability"
|
| 210 |
+
- Risk mitigation: "Eliminate single-point-of-failure in trip system", "Prevent grid instability due to breaker trip failure"
|
| 211 |
+
|
| 212 |
+
4. **Contact Refurbishment Planning** (if DLRO >200µΩ or Main Contact Wear detected):
|
| 213 |
+
- "Strategic Contact Refurbishment & Wear Tracking Program"
|
| 214 |
+
- Description: "With DLRO at 300µΩ indicating severe contact wear, plan proactive contact refurbishment within 3 months. Implement contact wear tracking to optimize refurbishment timing and prevent contact welding or overheating failures."
|
| 215 |
+
- Short-term effects: "Immediate contact resistance stability improvement", "Thermal hotspot elimination", "Reduced I²R losses and heating", "Enhanced current carrying capacity"
|
| 216 |
+
- Long-term effects: "Extended circuit breaker service life by 2-3 years", "Reduced emergency outage costs by $50,000+", "Prevented contact welding incidents", "Optimized maintenance scheduling"
|
| 217 |
+
- Performance gains: "Restored nominal contact resistance levels", "Improved current distribution across contacts"
|
| 218 |
+
- Risk mitigation: "Prevent contact welding and breaker failure-to-open", "Eliminate overheating-induced insulation damage"
|
| 219 |
+
|
| 220 |
+
5. **Operating Mechanism Calibration** (if excessive travel, wipe, speed, or timing deviations):
|
| 221 |
+
- "Operating Mechanism Calibration & Timing Optimization"
|
| 222 |
+
- Description: "Recalibrate operating mechanism to address excessive contact travel, wipe, or speed anomalies. Optimize spring tension, lubrication, and linkage alignment to reduce mechanical stress and improve timing accuracy."
|
| 223 |
+
- Short-term effects: "Normalized contact motion parameters", "Reduced mechanical vibration by 40%", "Improved timing consistency", "Enhanced mechanism smoothness"
|
| 224 |
+
- Long-term effects: "Extended mechanism component life by 30%", "Improved operational reliability", "Reduced wear on linkages and bearings", "Lower maintenance intervention frequency"
|
| 225 |
+
- Performance gains: "Optimized contact closing/opening velocities", "Enhanced mechanical timing precision"
|
| 226 |
+
- Risk mitigation: "Prevent mechanism seizure or binding", "Reduce risk of contact damage due to excessive impact forces"
|
| 227 |
+
|
| 228 |
+
6. **Phase Anomaly Investigation** (if Phase 5 low confidence or electrical anomalies):
|
| 229 |
+
- "Phase-Wise Diagnostic Enhancement & Anomaly Resolution"
|
| 230 |
+
- Description: "Investigate Final Open State anomaly showing 15% confidence with abnormal resistance profile. Conduct insulation resistance tests (Megger, PI, DAR), SF6 analysis, and internal inspection to identify root cause of electrical anomaly."
|
| 231 |
+
- Short-term effects: "Root cause identification of phase anomaly", "Enhanced diagnostic confidence", "Immediate safety hazard assessment", "Isolation capability verification"
|
| 232 |
+
- Long-term effects: "Prevented insulation breakdown failures", "Optimized phase-wise health monitoring", "Improved diagnostic accuracy for future assessments", "Reduced uncertainty in condition assessments"
|
| 233 |
+
- Performance gains: "Confirmed dielectric strength integrity", "Enhanced phase-wise analysis reliability"
|
| 234 |
+
- Risk mitigation: "Prevent internal flashover or tracking failures", "Eliminate incomplete isolation hazards"
|
| 235 |
+
|
| 236 |
+
**Step 3: Tailor Advice to Current Situation (Analyze Maintenance Actions & Future Faults)**
|
| 237 |
+
- **CRITICAL**: Review the Priority 1, 2, 3+ maintenance actions in detail
|
| 238 |
+
- **CRITICAL**: Review the high, medium, low risk future faults in detail
|
| 239 |
+
- **CRITICAL**: Combine insights from both to create cohesive recommendations
|
| 240 |
+
|
| 241 |
+
**Mapping Guidelines:**
|
| 242 |
+
- If Priority 1 includes Trip Coil repair → Focus on trip coil redundancy and protection system
|
| 243 |
+
- If Priority 1 includes contact refurbishment → Focus on contact wear tracking and DLRO monitoring
|
| 244 |
+
- If Priority 2 includes mechanism overhaul → Focus on operating mechanism calibration and timing
|
| 245 |
+
- If Priority 3 includes insulation/SF6 investigation → Focus on SF6 gas management and phase diagnostics
|
| 246 |
+
- If future faults include "Main Contact Wear" (high probability) → Emphasize contact refurbishment urgency
|
| 247 |
+
- If future faults include "Operating Mechanism Malfunction" → Emphasize mechanism calibration
|
| 248 |
+
- If future faults include "Trip Coil Damage" → Emphasize coil monitoring and redundancy
|
| 249 |
+
- If RUL is low (<500 cycles) → Add Critical priority advice on immediate monitoring
|
| 250 |
+
- If DLRO >250µΩ → Add High priority advice on contact resistance mitigation
|
| 251 |
+
- If Phase 5 confidence <20% → Add advice on phase-wise diagnostic enhancement
|
| 252 |
+
|
| 253 |
+
**FINAL CHECKLIST BEFORE GENERATING OUTPUT:**
|
| 254 |
+
1. ✓ Total AI advice items: 3-5 (minimum 3, maximum 5)
|
| 255 |
+
2. ✓ Each advice item has all required fields: color, confidence, description, effectAnalysis, expectedImpact, id, priority, title
|
| 256 |
+
3. ✓ **CRITICAL JSON FIELD ORDER**: color → confidence → description → effectAnalysis → expectedImpact → id → priority → title
|
| 257 |
+
4. ✓ **effectAnalysis has all 4 required arrays**: longTerm (4 items), performanceGains (2 items), riskMitigation (2 items), shortTerm (4 items)
|
| 258 |
+
5. ✓ **HARDCODED Colors**: 1st=#B71C1C, 2nd=#F57F17, 3rd=#F57F17, 4th=#0D47A1, 5th=#2E7D32
|
| 259 |
+
6. ✓ Confidence values are realistic (60-100 range)
|
| 260 |
+
7. ✓ Descriptions are domain-rich, engineering-focused, and specific (under 250 chars)
|
| 261 |
+
8. ✓ Expected impacts are quantifiable benefits from circuit breaker domain (under 100 chars)
|
| 262 |
+
9. ✓ Titles are domain-specific and clear (under 80 chars)
|
| 263 |
+
10. ✓ All JSON properly formatted with double quotes, no smart quotes
|
| 264 |
+
11. ✓ Advice is based on SPECIFIC maintenance actions and future faults from input data
|
| 265 |
+
12. ✓ Recommendations reference actual KPI values (DLRO, coil current, timing, etc.)
|
| 266 |
+
13. ✓ Each advice item provides unique value focused on circuit breaker engineering
|
| 267 |
+
14. ✓ Short-term effects are immediate operational improvements (weeks to months)
|
| 268 |
+
15. ✓ Long-term effects are strategic benefits (years, cost savings, reliability)
|
| 269 |
+
16. ✓ Performance gains focus on operational improvements and capability enhancements
|
| 270 |
+
17. ✓ Risk mitigation items identify specific hazards being prevented
|
| 271 |
+
18. ✓ In any response dont mention like phase 1,2,3,4,5 etc..instead replace with their formal names like:
|
| 272 |
+
phase 1: Pre Contact Travel
|
| 273 |
+
phase 2: Arc Initiation
|
| 274 |
+
phase 3: Main Contact Conduction
|
| 275 |
+
phase 4: Main Contact Separation & Arc elongation
|
| 276 |
+
phase 5: Final Open State
|
| 277 |
+
**Remember**: Your advice should demonstrate deep expertise in DCRM circuit breaker systems. Reference specific components, failure modes, and field maintenance practices. Avoid generic "AI" or "IoT" buzzwords - instead use domain-specific terms like "contact bounce mitigation", "SF6 purity monitoring", "trip coil redundancy", "damping dashpot calibration", "DLRO trending", etc.
|
| 278 |
+
|
| 279 |
+
Generate value-rich, engineering-grounded insights that maintenance teams can immediately understand and implement.
|
| 280 |
+
"""
|
| 281 |
+
|
| 282 |
+
# =========================
|
| 283 |
+
# MAIN FUNCTION
|
| 284 |
+
# =========================
|
| 285 |
+
def generate_ai_advice(rul_data, recommendations_data, kpis_data, cbhi_score, phase_analysis, ai_verdict):
|
| 286 |
+
"""
|
| 287 |
+
Generate AI-driven strategic advice using Gemini AI.
|
| 288 |
+
|
| 289 |
+
Args:
|
| 290 |
+
rul_data (dict): RUL analysis with rulEstimate and uncertainty
|
| 291 |
+
recommendations_data (dict): Combined dict with "maintenanceActions" and "futureFaultsPdf"
|
| 292 |
+
kpis_data (dict): KPI data with structure {"kpis": [...]}
|
| 293 |
+
cbhi_score (int): Circuit Breaker Health Index (0-100)
|
| 294 |
+
phase_analysis (dict): Phase-wise analysis JSON
|
| 295 |
+
ai_verdict (dict): AI fault detection verdict
|
| 296 |
+
|
| 297 |
+
Returns:
|
| 298 |
+
dict: JSON with aiAdvice array
|
| 299 |
+
"""
|
| 300 |
+
|
| 301 |
+
# Extract maintenance and future faults from combined data
|
| 302 |
+
maintenance_data = recommendations_data.get("maintenanceActions", [])
|
| 303 |
+
future_faults_data = recommendations_data.get("futureFaultsPdf", [])
|
| 304 |
+
|
| 305 |
+
# Format the prompt with actual data
|
| 306 |
+
prompt = AI_ADVICE_PROMPT.format(
|
| 307 |
+
rul_json=json.dumps(rul_data, indent=2, ensure_ascii=False),
|
| 308 |
+
maintenance_json=json.dumps(maintenance_data, indent=2, ensure_ascii=False),
|
| 309 |
+
future_faults_json=json.dumps(future_faults_data, indent=2, ensure_ascii=False),
|
| 310 |
+
kpis_json=json.dumps(kpis_data, indent=2, ensure_ascii=False),
|
| 311 |
+
cbhi_score=cbhi_score,
|
| 312 |
+
phase_analysis_json=json.dumps(phase_analysis, indent=2, ensure_ascii=False),
|
| 313 |
+
ai_verdict_json=json.dumps(ai_verdict, indent=2, ensure_ascii=False)
|
| 314 |
+
)
|
| 315 |
+
|
| 316 |
+
# Configure generation
|
| 317 |
+
generation_config = {
|
| 318 |
+
"temperature": 0.3, # Lower temperature for more focused, consistent output
|
| 319 |
+
"top_p": 0.95,
|
| 320 |
+
"top_k": 40,
|
| 321 |
+
"max_output_tokens": 4096,
|
| 322 |
+
}
|
| 323 |
+
|
| 324 |
+
# Initialize model
|
| 325 |
+
model = genai.GenerativeModel(
|
| 326 |
+
model_name=MODEL_NAME,
|
| 327 |
+
generation_config=generation_config
|
| 328 |
+
)
|
| 329 |
+
|
| 330 |
+
max_retries = 3
|
| 331 |
+
for attempt in range(max_retries):
|
| 332 |
+
try:
|
| 333 |
+
if attempt == 0:
|
| 334 |
+
print("🤖 Generating AI strategic advice...")
|
| 335 |
+
else:
|
| 336 |
+
print(f"🔄 Retry attempt {attempt}/{max_retries-1}...")
|
| 337 |
+
|
| 338 |
+
# Add JSON validation instruction on retry
|
| 339 |
+
if attempt > 0:
|
| 340 |
+
prompt_with_retry = prompt + "\n\nIMPORTANT: Ensure all text in description, expectedImpact, and title fields has properly escaped quotes. Use single quotes within text or escape double quotes with backslash."
|
| 341 |
+
else:
|
| 342 |
+
prompt_with_retry = prompt
|
| 343 |
+
|
| 344 |
+
# Generate response
|
| 345 |
+
response = model.generate_content(prompt_with_retry)
|
| 346 |
+
|
| 347 |
+
# Extract JSON from response
|
| 348 |
+
response_text = response.text.strip()
|
| 349 |
+
|
| 350 |
+
# Clean up markdown code fences if present
|
| 351 |
+
if response_text.startswith("```json"):
|
| 352 |
+
response_text = response_text[7:]
|
| 353 |
+
if response_text.startswith("```"):
|
| 354 |
+
response_text = response_text[3:]
|
| 355 |
+
if response_text.endswith("```"):
|
| 356 |
+
response_text = response_text[:-3]
|
| 357 |
+
|
| 358 |
+
response_text = response_text.strip()
|
| 359 |
+
|
| 360 |
+
# Additional cleanup: fix common JSON issues
|
| 361 |
+
# Replace smart quotes with regular quotes
|
| 362 |
+
response_text = response_text.replace('"', '"').replace('"', '"')
|
| 363 |
+
response_text = response_text.replace("'", "'").replace("'", "'")
|
| 364 |
+
|
| 365 |
+
# Parse JSON
|
| 366 |
+
result = json.loads(response_text)
|
| 367 |
+
|
| 368 |
+
print("✅ Successfully generated AI strategic advice")
|
| 369 |
+
return result
|
| 370 |
+
|
| 371 |
+
except json.JSONDecodeError as e:
|
| 372 |
+
print(f"❌ JSON parsing error (attempt {attempt+1}/{max_retries}): {e}")
|
| 373 |
+
if attempt == max_retries - 1:
|
| 374 |
+
print(f"Raw response excerpt:\n{response_text[:1000]}...")
|
| 375 |
+
# Try to salvage what we can
|
| 376 |
+
return {
|
| 377 |
+
"error": f"Failed to parse AI response after {max_retries} attempts",
|
| 378 |
+
"raw_response": response_text[:2000],
|
| 379 |
+
"aiAdvice": []
|
| 380 |
+
}
|
| 381 |
+
# Wait a bit before retry
|
| 382 |
+
import time
|
| 383 |
+
time.sleep(1)
|
| 384 |
+
|
| 385 |
+
except Exception as e:
|
| 386 |
+
print(f"❌ Error generating AI advice: {e}")
|
| 387 |
+
return {
|
| 388 |
+
"error": str(e),
|
| 389 |
+
"aiAdvice": []
|
| 390 |
+
}
|
| 391 |
+
|
| 392 |
+
return {
|
| 393 |
+
"error": "Max retries exceeded",
|
| 394 |
+
"aiAdvice": []
|
| 395 |
+
}
|
core/agents/diagnosis.py
ADDED
|
@@ -0,0 +1,754 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Previous Name: analysis/agents/ai_agent.py
|
| 2 |
+
import os
|
| 3 |
+
import sys
|
| 4 |
+
import time
|
| 5 |
+
import json
|
| 6 |
+
import pandas as pd
|
| 7 |
+
import matplotlib.pyplot as plt
|
| 8 |
+
from PIL import Image
|
| 9 |
+
import google.generativeai as genai
|
| 10 |
+
import numpy as np
|
| 11 |
+
|
| 12 |
+
# Set UTF-8 encoding for console output (handles µ, Ω, etc.)
# Case-insensitive compare: some platforms report 'UTF-8'; also guard against
# a None encoding (e.g. when stdout is redirected to a pipe).
if (sys.stdout.encoding or '').lower() != 'utf-8':
    sys.stdout.reconfigure(encoding='utf-8')
|
| 15 |
+
|
| 16 |
+
# =========================
# CONFIGURATION
# =========================
# Gemini API key; must be provided via the environment (may be None otherwise).
API_KEY = os.getenv("GOOGLE_API_KEY")
genai.configure(api_key=API_KEY)

# Gemini model used by both diagnostic agents.
MODEL_NAME = "gemini-2.5-flash"

# Optional baseline resistance correction (uOhm) surfaced to the prompts; defaults to 0.
BASELINE_OFFSET_UOHM = float(os.getenv("DCRM_BASELINE_UOHM", "0"))
# Path where the Resistance-vs-Time plot is rendered before upload to Gemini.
PLOT_PATH = "resistance_vs_time.png"
|
| 26 |
+
|
| 27 |
+
# =========================
|
| 28 |
+
# AGENT 1: PRIMARY CONTACT & COIL DEFECT DETECTOR (Classes 1-5, 11-12)
|
| 29 |
+
# =========================
|
| 30 |
+
AGENT1_SYSTEM_PROMPT = """
|
| 31 |
+
Role: You are a High Voltage Circuit Breaker Contact & Coil Diagnostic AI specialized in Dynamic Contact Resistance Measurement (DCRM).
|
| 32 |
+
You must analyze ONLY the Resistance-vs-Time waveform (uOhm) from T_0 to T_400 ms. Green line = Resistance.
|
| 33 |
+
|
| 34 |
+
Physics Regions (nominal):
|
| 35 |
+
- Region 1 (Open): Resistance > 10,000,000 uOhm (effectively open).
|
| 36 |
+
- Region 2 (Arcing Make / Left Shoulder): ~3500 uOhm nominal shoulder before main contact engages.
|
| 37 |
+
- Region 3 (Main Contact Floor): ~20–40 uOhm nominal, smooth, low noise.
|
| 38 |
+
- Region 4 (Arcing Break / Right Shoulder): ~3500 uOhm nominal shoulder after main contact disengages.
|
| 39 |
+
- Region 5 (Open): Return to infinite resistance.
|
| 40 |
+
|
| 41 |
+
in any response dont address regions 1,2,3,4,5 directly as newcomers will not understand these terminologies. instead for them address through below mapping:
|
| 42 |
+
|
| 43 |
+
- "Pre contact Travel": Region 1
|
| 44 |
+
- "Arcing Contact Engagement & Arc Initiation": Region 2
|
| 45 |
+
- "Main Contact Conduction": Region 3
|
| 46 |
+
- "Main Contact Parting & Arc Elongation": Region 4
|
| 47 |
+
- "Final Open Phase": Region 5
|
| 48 |
+
|
| 49 |
+
Defect classes & strict signatures:
|
| 50 |
+
1) Healthy:
|
| 51 |
+
- Region 3 mean ~20–50 uOhm, std low; transitions sharp (sigmoid-like) into and out of Region 3.
|
| 52 |
+
- Regions 2 & 4 stable, without severe spikes.
|
| 53 |
+
2) Main Contact Wear:
|
| 54 |
+
- Region 3 is ELEVATED (>100 uOhm after baseline correction) and NOISY (std > 25 uOhm or visible grassy drift).
|
| 55 |
+
3) Arcing Contact Wear:
|
| 56 |
+
- Region 3 healthy, but Region 2 and/or 4 exhibit severe upward spikes (>6000 uOhm) or heavy instability.
|
| 57 |
+
4) Main Contact Misalignment:
|
| 58 |
+
- Region 3 may show telegraph noise (square-wave-like jumping).
|
| 59 |
+
- Transition 2→3 or 3→4 has a mid-slope step/shelf (non-smooth pause).
|
| 60 |
+
5) Arcing Contact Misalignment:
|
| 61 |
+
- Timing asymmetry: Opening shoulder (Region 4) significantly wider/longer than Closing shoulder (Region 2).
|
| 62 |
+
- Rounded bounces on Opening shoulder.
|
| 63 |
+
11) Close Coil Damage:
|
| 64 |
+
- Peak Close Coil Current (KPI) is very low (<2A) or zero during a detected closing operation.
|
| 65 |
+
- Normal close coil current: 4-7A for EHV breakers.
|
| 66 |
+
12) Trip Coil Damage:
|
| 67 |
+
- BOTH Peak Trip Coil 1 AND Peak Trip Coil 2 currents are very low (<2A) or zero.
|
| 68 |
+
- CRITICAL: At least ONE trip coil must work (>=2A). If Trip Coil 1 OR Trip Coil 2 is working (>=2A), then NO FAULT.
|
| 69 |
+
- Only report fault if BOTH coils fail simultaneously.
|
| 70 |
+
- Normal trip coil current: 4-7A per coil for EHV breakers.
|
| 71 |
+
|
| 72 |
+
Mandatory analysis steps:
|
| 73 |
+
- Compute features from the provided time-series:
|
| 74 |
+
* Region 3 mean, median, std, min, max (approx. nominal window 120–320 ms).
|
| 75 |
+
* Presence and magnitude of spikes in Regions 2 & 4 (e.g., >6000 uOhm).
|
| 76 |
+
* Transition smoothness: detect shelves/steps around 100–120 ms (closing) and 320–340 ms (opening).
|
| 77 |
+
- If region boundaries are unclear, state that and lower confidence.
|
| 78 |
+
- DO NOT declare a defect unless thresholds are clearly met. Prefer "Healthy" with rationale if ambiguous.
|
| 79 |
+
- Consider baseline offset explicitly.
|
| 80 |
+
- Severity depends on confidence: above 85% then High, 50-85% then Medium, else Low.
|
| 81 |
+
- Confidence depends on how much the signature deviates from healthy and how many feature of a particular defect it satisfies.
|
| 82 |
+
|
| 83 |
+
Verification Notes (apply before finalizing):
|
| 84 |
+
- For Main Contact Wear: Look for consistent, uniform low-high medium to high spikes across entire Region 3 plateau.
|
| 85 |
+
- For Main Contact Misalignment: Look for square-shaped (telegraphic) spikes in Region 3, AND a two-or-more-step transition from Region 2 to 3.
|
| 86 |
+
- For Arcing Contact Wear: Look for very high up/down spikes in Regions 2 and 4. Spikes are 95% similar on both sides.
|
| 87 |
+
- For Arcing Contact Misalignment: Look for square-shaped (telegraphic) or sinusoidal high-frequency noise spikes in Regions 2 and 4. Curve MUST be asymmetric (Region 4 wider/longer than Region 2). Region 3 duration might be reduced.
|
| 88 |
+
|
| 89 |
+
Output: Return ONLY valid JSON (no markdown code fences). Structure:
|
| 90 |
+
{
|
| 91 |
+
"Fault_Detection": [
|
| 92 |
+
{
|
| 93 |
+
"defect_name": "Exact Class Name or 'Healthy'",
|
| 94 |
+
"Confidence": "XX.XX %",
|
| 95 |
+
"Severity": "Low/Medium/High",
|
| 96 |
+
"description": "1–2 short sentences citing quantified features and KPIs.",
|
| 97 |
+
}
|
| 98 |
+
],
|
| 99 |
+
"primary_defect_class": "1-5 or 11-12 or Healthy"
|
| 100 |
+
}
|
| 101 |
+
List ONLY the most likely primary defect. If healthy, return only Healthy.
|
| 102 |
+
"""
|
| 103 |
+
|
| 104 |
+
# =========================
|
| 105 |
+
# AGENT 2: SECONDARY MECHANICAL/OPERATIONAL DEFECT DETECTOR (Classes 6-10)
|
| 106 |
+
# =========================
|
| 107 |
+
AGENT2_SYSTEM_PROMPT = """
|
| 108 |
+
Role: You are a Circuit Breaker Mechanical & Operational Diagnostic AI. You analyze DCRM waveforms and KPIs for secondary mechanical/operational issues with EXTREME STRICTNESS.
|
| 109 |
+
|
| 110 |
+
You will receive:
|
| 111 |
+
1. DCRM waveform data (Resistance vs Time, T_0 to T_400 ms)
|
| 112 |
+
2. KPI values with industry-standard nominal ranges
|
| 113 |
+
3. PRIMARY DEFECT from Agent 1 (Classes 1-5, 11-12)
|
| 114 |
+
|
| 115 |
+
Your task: Detect ONLY Classes 6-10 defects with OVERWHELMING EVIDENCE. Confidence must be >75%.
|
| 116 |
+
|
| 117 |
+
=== DEFECT CLASSES WITH PHYSICS-BASED SIGNATURES ===
|
| 118 |
+
|
| 119 |
+
6) Operating Mechanism Malfunction (Slow/Fast Operation)
|
| 120 |
+
Physical Basis:
|
| 121 |
+
- Operating mechanism (spring, hydraulic, pneumatic) drives contacts. Weak springs, hydraulic leaks, sticky linkages affect speed.
|
| 122 |
+
|
| 123 |
+
Industry Standard KPI Ranges (Ministry of Power / POWERGRID norms for EHV breakers):
|
| 124 |
+
- Closing Time: 80-100 ms (nominal)
|
| 125 |
+
- Opening Time: 30-40 ms (nominal)
|
| 126 |
+
- Contact Speed: 4.5-6.5 m/s (nominal)
|
| 127 |
+
|
| 128 |
+
DCRM Curve Manifestation:
|
| 129 |
+
- **Horizontal Shift**: ENTIRE curve shifts left (fast) or right (slow) compared to nominal timeline.
|
| 130 |
+
- **Slope Changes**: Resistance transitions (Region 2->3, Region 3->4) have altered steepness across ENTIRE operation.
|
| 131 |
+
- **Delayed Closure/Opening**: Time from first contact movement to full closure/opening is >20% off nominal.
|
| 132 |
+
|
| 133 |
+
Detection Criteria (STRICT):
|
| 134 |
+
- Closing Time >120 ms OR <64 ms (>20% deviation from 80-100 ms range)
|
| 135 |
+
- Opening Time >48 ms OR <24 ms (>20% deviation from 30-40 ms range)
|
| 136 |
+
- Contact Speed <3.6 m/s OR >7.8 m/s (>20% deviation from 4.5-6.5 m/s)
|
| 137 |
+
- AND waveform shows consistent timing shift across ALL phases
|
| 138 |
+
|
| 139 |
+
Confidence Requirements:
|
| 140 |
+
- 90-95%: If 2+ KPIs exceed threshold by >25%
|
| 141 |
+
- 80-89%: If 1 KPI exceeds threshold by >20%
|
| 142 |
+
- <80%: Insufficient evidence, DO NOT report
|
| 143 |
+
also most notable point in it see region 1 and 5, if there are spikes in this region lines are not straight at high resistance then 100% its operating mechanism malfunction
|
| 144 |
+
|
| 145 |
+
7) Damping System Fault (Excessive Bouncing/Oscillation)
|
| 146 |
+
Physical Basis:
|
| 147 |
+
- Dampers (dashpots, hydraulic dampers) absorb kinetic energy, prevent contact rebound. Failure causes persistent oscillation.
|
| 148 |
+
|
| 149 |
+
DCRM Curve Manifestation:
|
| 150 |
+
- **Excessive Bounces at Main Contact Closure**: Multiple rapid spikes (>5 distinct bounces) back towards arcing resistance AFTER initial closure.
|
| 151 |
+
- **Prolonged Oscillation in Region 3**: Main contact plateau shows persistent fluctuations (decaying sinusoidal pattern, NOT random noise).
|
| 152 |
+
- **Secondary Bounces at Opening**: Clean separation disrupted by re-closure spikes.
|
| 153 |
+
|
| 154 |
+
Detection Criteria (VERY STRICT):
|
| 155 |
+
- >5 distinct bounces with amplitude >100 µΩ in Region 3 (120-320 ms window)
|
| 156 |
+
- Bounce pattern shows decaying oscillation (each bounce smaller than previous)
|
| 157 |
+
- NOT simple noise or wear (std in Region 3 must show structured oscillation, not random)
|
| 158 |
+
|
| 159 |
+
Confidence Requirements:
|
| 160 |
+
- 85-95%: Clear decaying oscillation pattern with >7 bounces
|
| 161 |
+
- 75-84%: 5-6 distinct bounces with structured pattern
|
| 162 |
+
- <75%: Could be noise/wear, DO NOT report
|
| 163 |
+
|
| 164 |
+
8) Pressure System Leakage (SF6 Gas Chamber)
|
| 165 |
+
Physical Basis:
|
| 166 |
+
- SF6 gas provides insulation and arc quenching. Leak reduces pressure, weakens dielectric strength, prolongs arc.
|
| 167 |
+
|
| 168 |
+
Industry Standard:
|
| 169 |
+
- SF6 Pressure: 5.5-6.5 bar (at 20°C) for typical EHV breakers
|
| 170 |
+
|
| 171 |
+
DCRM Curve Manifestation:
|
| 172 |
+
- **Prolonged Arc-Quenching**: Region 4->5 transition (arcing break to open) takes >20 ms (nominal: 10-15 ms).
|
| 173 |
+
- **Less Sharp Opening**: Opening slope less steep due to extended arc duration.
|
| 174 |
+
- **Higher Peak Arcing Resistance**: Arc resistance >5000 µΩ sustained for longer duration.
|
| 175 |
+
|
| 176 |
+
Detection Criteria (EXTREMELY STRICT):
|
| 177 |
+
- Requires external SF6 Pressure KPI <5.0 bar
|
| 178 |
+
- OR (if no pressure KPI): Arc-quenching duration >25 ms AND primary defect is Arcing Contact Wear with confidence >85%
|
| 179 |
+
- WITHOUT pressure KPI, confidence CANNOT exceed 70%
|
| 180 |
+
|
| 181 |
+
Confidence Requirements:
|
| 182 |
+
- 85-95%: SF6 Pressure KPI <4.5 bar AND prolonged arc
|
| 183 |
+
- 70-84%: No pressure KPI but severe arc prolongation (>30 ms) AND severe Arcing Wear
|
| 184 |
+
- <70%: Insufficient evidence, DO NOT report
|
| 185 |
+
|
| 186 |
+
9) Linkage/Connecting Rod Obstruction/Damage
|
| 187 |
+
Physical Basis:
|
| 188 |
+
- Operating rod and linkages transmit force from mechanism to contacts. Obstruction, bending, friction, or looseness impede smooth movement.
|
| 189 |
+
|
| 190 |
+
DCRM Curve Manifestation:
|
| 191 |
+
- **"Stutter" or "Hesitation"**: Momentary pauses, jerks, or sudden slope changes within smooth transitions (Region 2->3, Region 3->4).
|
| 192 |
+
- **Square Plateaus Within Slopes**: Small flat sections (20-50 ms duration) interrupting the sigmoid curve, indicating momentary stuck movement.
|
| 193 |
+
- **Sudden Speed Changes**: Abrupt changes in transition rate (detected as derivative discontinuities).
|
| 194 |
+
- **High-Frequency Mechanical Noise**: Increased jitter/vibration during transitions (NOT smooth noise).
|
| 195 |
+
|
| 196 |
+
Detection Criteria (VERY STRICT):
|
| 197 |
+
- >3 distinct "stutters" (flat plateaus >10 ms within transition slopes)
|
| 198 |
+
- AND increased operating time (>10% longer than nominal)
|
| 199 |
+
- Stutter signature must be VERY distinct (resistance plateaus for >10 ms, then sudden drops/rises)
|
| 200 |
+
|
| 201 |
+
Confidence Requirements:
|
| 202 |
+
- 85-95%: >5 distinct stutters with clear mechanical impedance pattern
|
| 203 |
+
- 75-84%: 3-4 stutters AND operating time >15% off nominal
|
| 204 |
+
- <75%: Could be normal transition, DO NOT report
|
| 205 |
+
|
| 206 |
+
10) Fixed Contact Damage/Deformation
|
| 207 |
+
Physical Basis:
|
| 208 |
+
- Fixed contacts can be damaged (scoring, pitting, bending, high connection resistance). Distinct from moving contact wear.
|
| 209 |
+
|
| 210 |
+
Industry Standard:
|
| 211 |
+
- DLRO Value (Dynamic Low Resistance Ohmmeter): <50 µΩ for healthy EHV breaker
|
| 212 |
+
- Acceptable Range: 50-80 µΩ (moderate concern)
|
| 213 |
+
- Critical: >80 µΩ (indicates fixed contact or connection issues)
|
| 214 |
+
|
| 215 |
+
DCRM Curve Manifestation:
|
| 216 |
+
- **Elevated Baseline Resistance**: Main Contact Plateau (Region 3) consistently higher (>80 µΩ) even when curve shape is smooth.
|
| 217 |
+
- **Low Variability**: Region 3 std <15 µΩ (smooth plateau, NOT noisy like Main Contact Wear).
|
| 218 |
+
- **Asymmetric Wear Pattern**: Closing and opening transitions show different resistance levels (fixed contact deformation).
|
| 219 |
+
- **Localized Spikes in Plateau**: Specific recurring spikes at same time points (damaged spot on fixed contact).
|
| 220 |
+
|
| 221 |
+
Detection Criteria (STRICT):
|
| 222 |
+
- DLRO Value >80 µΩ (from KPI)
|
| 223 |
+
- AND Region 3 mean >80 µΩ with std <15 µΩ (smooth but elevated)
|
| 224 |
+
- AND Primary defect is NOT Main Contact Wear (avoid duplication)
|
| 225 |
+
- IF Primary is Main Contact Wear: Can list as SECONDARY but with confidence <70%
|
| 226 |
+
|
| 227 |
+
Confidence Requirements:
|
| 228 |
+
- 80-90%: DLRO >100 µΩ AND smooth curve (std <10 µΩ)
|
| 229 |
+
- 70-79%: DLRO 80-100 µΩ AND smooth curve
|
| 230 |
+
- 50-69%: Listed as secondary to Main Contact Wear
|
| 231 |
+
- <50%: Primary defect already explains, DO NOT report
|
| 232 |
+
|
| 233 |
+
=== CRITICAL RULES ===
|
| 234 |
+
1. BE EXTREMELY STRICT. Only report if confidence >75% AND evidence is OVERWHELMING.
|
| 235 |
+
2. If Primary defect (from Agent 1) already explains the waveform, DO NOT add redundant secondary defects.
|
| 236 |
+
3. Check ALL thresholds and criteria. If ANY criterion is not met, DO NOT report that defect.
|
| 237 |
+
4. For Class 10 (Fixed Contact): Only if DLRO >80 µΩ AND std <15 µΩ. If Main Contact Wear is primary, list as secondary with reduced confidence.
|
| 238 |
+
5. If no secondary defect meets strict criteria, return: "No Secondary Defect Detected"
|
| 239 |
+
6. Confidence for Classes 6-10 should be 75-90% range unless extreme deviations (>95% rare).
|
| 240 |
+
7. Compare waveform timing against nominal windows: Region 2 (85-105 ms), Region 3 (120-300 ms), Region 4 (315-335 ms).
|
| 241 |
+
|
| 242 |
+
Output: Return ONLY valid JSON (no markdown):
|
| 243 |
+
{
|
| 244 |
+
"Secondary_Fault_Detection": [
|
| 245 |
+
{
|
| 246 |
+
"defect_name": "Exact Class Name or 'No Secondary Defect Detected'",
|
| 247 |
+
"Confidence": "XX.XX %",
|
| 248 |
+
"Severity": "Low/Medium/High",
|
| 249 |
+
"description": "1–2 short sentences citing quantified features and KPIs.",
|
| 250 |
+
}
|
| 251 |
+
]
|
| 252 |
+
}
|
| 253 |
+
List at most 1-2 secondary defects. If uncertain or evidence weak, return "No Secondary Defect Detected".
|
| 254 |
+
"""
|
| 255 |
+
|
| 256 |
+
def parse_kpis_from_json(kpis_json):
    """
    Normalize a KPI payload into a flat {"Name (unit)": value} dictionary.

    Accepts either the structured form {"kpis": [{"name", "unit", "value"}, ...]}
    or an already-flattened dict, which is passed through unchanged.
    None yields an empty dict.
    """
    if kpis_json is None:
        return {}

    # Already flattened (no "kpis" wrapper) -> pass through untouched.
    if isinstance(kpis_json, dict) and "kpis" not in kpis_json:
        return kpis_json

    flattened = {}
    for entry in kpis_json.get("kpis", []):
        label = entry.get("name", "")
        unit_suffix = entry.get("unit", "")

        # Keys are formatted "Name (unit)"; unitless KPIs keep the bare name.
        if unit_suffix:
            label = f"{label} ({unit_suffix})"
        flattened[label] = entry.get("value", None)

    return flattened
|
| 295 |
+
|
| 296 |
+
|
| 297 |
+
def standardize_input(df: pd.DataFrame) -> pd.DataFrame:
    """
    Returns a DataFrame with one row and columns T_0...T_400 containing Resistance values (uOhm).
    """
    if 'Resistance' not in df.columns:
        # Fall back to the first column that is not a T_<ms> time column.
        potential_resistance_cols = [c for c in df.columns if not c.startswith('T_')]
        if not potential_resistance_cols:
            raise KeyError("CSV must contain a 'Resistance' column.")

        if len(potential_resistance_cols) > 1:
            print(f"Warning: Multiple non-T_ columns found. Using first one as 'Resistance'.")

        df = df.rename(columns={potential_resistance_cols[0]: 'Resistance'})

    # Keep only the resistance series (long format: one sample per row).
    df = df[['Resistance']]

    if df.shape[0] >= 401 and df.shape[1] == 1:
        # Long format: transpose the first 401 rows into one wide row T_0..T_400.
        values = df.iloc[:401, 0].values.reshape(1, -1)
        cols = [f"T_{i}" for i in range(401)]
        return pd.DataFrame(values, columns=cols)

    elif df.shape[1] >= 401:
        # Wide format: keep the first 401 columns and relabel them T_0..T_400.
        # NOTE(review): if the [['Resistance']] selection above runs
        # unconditionally, df has a single column here and this branch looks
        # unreachable — confirm the intended indentation of that selection.
        df = df.iloc[:, :401]
        df.columns = [f"T_{i}" for i in range(401)]
        return df

    else:
        raise ValueError(f"Input shape {df.shape} invalid. Expected 401 Resistance points.")
|
| 325 |
+
|
| 326 |
+
def plot_resistance(row_values, save_path=PLOT_PATH):
    """Saves a line plot Resistance (uOhm) vs Time (ms).

    Args:
        row_values: 401 resistance samples (uOhm) covering T_0..T_400 ms.
        save_path: Output PNG path; defaults to the module-level PLOT_PATH
            that the agent callers re-upload to Gemini.
    """
    # Fixed 0-400 ms time axis, one sample per millisecond.
    t = list(range(401))
    plt.figure(figsize=(10, 4.2), dpi=150)
    # Green trace: the agent prompts tell the model "Green line = Resistance".
    plt.plot(t, row_values, color="green", linewidth=1.6, label="Resistance (uΩ)")
    plt.title("Dynamic Resistance vs Time (DCRM)")
    plt.xlabel("Time (ms)")
    plt.ylabel("Resistance (μΩ)")
    plt.grid(True, alpha=0.3)
    plt.xlim(0, 400)
    plt.legend(loc="upper right")
    plt.tight_layout()
    plt.savefig(save_path)
    # Close the figure so matplotlib state is not leaked across calls.
    plt.close()
|
| 340 |
+
|
| 341 |
+
def validate_trip_coil_logic(kpis):
    """
    Validates Trip Coil logic: At least ONE trip coil must work.

    A coil counts as functional when its peak current is >= 2 A. Missing or
    None KPI values are treated as 0 A so that partially populated KPI dicts
    (parse_kpis_from_json defaults absent values to None) cannot raise a
    TypeError on the comparison.

    Args:
        kpis: Flat KPI dict, e.g. {"Peak Trip Coil 1 Current (A)": 5.5, ...}.

    Returns:
        (is_fault, description): is_fault is True only when BOTH coils read
        below the 2 A threshold.
    """
    trip_coil_1 = kpis.get("Peak Trip Coil 1 Current (A)", 0)
    trip_coil_2 = kpis.get("Peak Trip Coil 2 Current (A)", 0)
    # Coerce None readings to 0 A instead of crashing on `None >= 2.0`.
    trip_coil_1 = 0 if trip_coil_1 is None else trip_coil_1
    trip_coil_2 = 0 if trip_coil_2 is None else trip_coil_2

    # At least ONE must be >= 2A (working)
    if trip_coil_1 >= 2.0 or trip_coil_2 >= 2.0:
        return False, f"Trip coils functional (Coil 1: {trip_coil_1}A, Coil 2: {trip_coil_2}A)"

    # BOTH are below 2A = FAULT
    return True, f"BOTH trip coils failed (Coil 1: {trip_coil_1}A, Coil 2: {trip_coil_2}A - both below 2A threshold)"
|
| 355 |
+
|
| 356 |
+
def build_agent1_prompt(row_values, baseline_offset_uohm, kpis):
    """Build prompt for Agent 1 (Primary Contact & Coil Defects).

    Args:
        row_values: 401 resistance samples (uOhm), T_0..T_400 ms.
        baseline_offset_uohm: Baseline correction value surfaced to the model.
        kpis: KPI payload in structured-JSON or flat-dict form.

    Returns:
        The fully formatted prompt string for the Gemini call.
    """
    data_str = ",".join(map(str, row_values))

    # Parse KPIs if in JSON format
    kpis_dict = parse_kpis_from_json(kpis)

    kpi_str = "\n".join([f"- {name}: {value}" for name, value in kpis_dict.items()])

    # Validate Trip Coil logic in code so the LLM cannot hallucinate a
    # trip-coil fault when at least one coil is functional.
    trip_fault, trip_msg = validate_trip_coil_logic(kpis_dict)

    prompt = f"""
Analyze this DCRM waveform for PRIMARY CONTACT & COIL DEFECTS (Classes 1-5, 11-12).

DCRM Data:
- Array length: {len(row_values)} (T_0 to T_400 ms)
- Baseline offset: {baseline_offset_uohm} uOhm
- Attached image: Resistance vs Time plot (green line)

KPIs:
{kpi_str}

TRIP COIL PRE-VALIDATION:
{trip_msg}
{"⚠️ REPORT TRIP COIL DAMAGE (Class 12)" if trip_fault else "✓ Trip coils are functional - DO NOT report Trip Coil Damage"}

Tasks:
1) Compute Region 3 features (mean, std, spikes).
2) Analyze Regions 2 & 4 for arcing issues.
3) Check Close Coil Current (normal: 4-7A, fault if <2A).
4) For Trip Coils: {"Report Class 12 (Trip Coil Damage)" if trip_fault else "DO NOT report Class 12 - at least one coil is working"}.
5) Classify as one of: Healthy, Main Contact Wear, Arcing Contact Wear, Main Contact Misalignment, Arcing Contact Misalignment, Close Coil Damage, {"Trip Coil Damage" if trip_fault else "(NOT Trip Coil Damage)"}.
6) Return ONLY the PRIMARY defect with highest confidence.

Data array (uOhm):
[{data_str}]
"""
    return prompt
|
| 395 |
+
|
| 396 |
+
def build_agent2_prompt(row_values, baseline_offset_uohm, kpis, agent1_result):
    """Build prompt for Agent 2 (Secondary Mechanical Defects).

    Args:
        row_values: 401 resistance samples (uOhm), T_0..T_400 ms.
        baseline_offset_uohm: Baseline correction value surfaced to the model.
        kpis: KPI payload in structured-JSON or flat-dict form.
        agent1_result: Parsed Agent 1 output; its "Fault_Detection" list and
            "primary_defect_class" are embedded so Agent 2 avoids duplicates.

    Returns:
        The fully formatted prompt string for the Gemini call.
    """
    data_str = ",".join(map(str, row_values))

    # Parse KPIs if in JSON format
    kpis_dict = parse_kpis_from_json(kpis)

    kpi_str = "\n".join([f"- {name}: {value}" for name, value in kpis_dict.items()])

    # Embed Agent 1's verdict so Agent 2 does not re-report the primary defect.
    agent1_summary = json.dumps(agent1_result.get("Fault_Detection", []), indent=2)
    primary_class = agent1_result.get("primary_defect_class", "Unknown")

    prompt = f"""
Analyze this DCRM waveform for SECONDARY MECHANICAL/OPERATIONAL DEFECTS (Classes 6-10).

PRIMARY DEFECT (from Agent 1):
Class: {primary_class}
Details:
{agent1_summary}

DCRM Data:
- Array length: {len(row_values)} (T_0 to T_400 ms)
- Baseline offset: {baseline_offset_uohm} uOhm
- Attached image: Resistance vs Time plot (green line)

KPIs with Industry Standards:
{kpi_str}

Industry Standard Nominal Ranges (Ministry of Power / POWERGRID norms for EHV breakers):
- Closing Time: 80-100 ms (acceptable range)
- Opening Time: 30-40 ms (acceptable range)
- Contact Speed: 4.5-6.5 m/s (acceptable range)
- DLRO Value: <50 µΩ (healthy), 50-80 µΩ (moderate), >80 µΩ (critical)
- Close Coil Current: 4-7A (nominal)
- Trip Coil Current: 4-7A per coil (nominal)
- SF6 Pressure: 5.5-6.5 bar at 20°C (if applicable)

Tasks:
1) BE EXTREMELY STRICT. Only report if confidence >75% AND evidence is OVERWHELMING.
2) For each Class 6-10, check ALL detection criteria and thresholds listed in system instructions.
3) Calculate % deviation from nominal ranges for timing KPIs.
4) Analyze waveform for physics-based signatures:
- Class 6: Horizontal shift of ENTIRE curve, altered transition slopes
- Class 7: >5 distinct decaying bounces in Region 3
- Class 8: Prolonged arc-quenching (Region 4->5 >25 ms)
- Class 9: >3 distinct stutters (flat plateaus within slopes)
- Class 10: DLRO >80 µΩ AND Region 3 std <15 µΩ
5) Do NOT duplicate primary defect explanations.
6) If ALL criteria are not met for a defect, DO NOT report it.
7) If uncertain or evidence weak, return "No Secondary Defect Detected".

Expected Region Timing (nominal):
- Region 1 (Open before): 0-85 ms
- Region 2 (Arcing Make): 85-105 ms
- Region 3 (Main Contact): 120-300 ms
- Region 4 (Arcing Break): 315-335 ms
- Region 5 (Open after): 335-400 ms

Data array (uOhm):
[{data_str}]
"""
    return prompt
|
| 458 |
+
|
| 459 |
+
def call_agent1(row_values, kpis_data):
    """Agent 1: Primary Contact & Coil Defect Detection.

    Renders the waveform plot, uploads it, and asks the Gemini model to
    classify the primary defect (Classes 1-5, 11-12).

    Returns:
        Parsed JSON dict from the model, or a dict containing an "Error" key
        on failure (mirrors call_agent2's error contract so callers can
        uniformly check for "Error").
    """
    plot_resistance(row_values, PLOT_PATH)

    # Upload with error handling, consistent with call_agent2.
    try:
        file = genai.upload_file(path=PLOT_PATH)
    except Exception as upload_error:
        print(f"⚠️ Agent 1: File upload failed: {upload_error}.")
        return {"Error": f"File upload failed: {upload_error}", "agent1_failed": True}

    prompt = build_agent1_prompt(row_values, BASELINE_OFFSET_UOHM, kpis_data)

    try:
        model = genai.GenerativeModel(MODEL_NAME, system_instruction=AGENT1_SYSTEM_PROMPT)
        response = model.generate_content([prompt, file])
        # response.text can raise when generation is blocked or empty.
        text = (response.text or "").strip()
    except Exception as e:
        print(f"⚠️ Agent 1: Generation failed: {e}")
        return {"Error": f"Agent 1 generation failed: {e}", "agent1_failed": True}

    # Strip optional markdown code fences before JSON parsing.
    text = text.replace("```json", "").replace("```", "").strip()

    try:
        obj = json.loads(text)
        return obj
    except Exception as e:
        return {"Error": f"Agent 1 failed. {e}", "raw_response": text}
|
| 477 |
+
|
| 478 |
+
def call_agent2(row_values, kpis_data, agent1_result):
    """Agent 2: Secondary Mechanical Defect Detection (STRICT).

    Re-uploads the plot produced for Agent 1, queries the Gemini model with
    Agent 1's verdict embedded, and parses the JSON reply. Returns a dict
    with an "Error" key on any failure (upload, generation, empty reply, or
    unparseable JSON).
    """
    agent2_prompt = build_agent2_prompt(row_values, BASELINE_OFFSET_UOHM, kpis_data, agent1_result)

    # Reuse same image with error handling
    try:
        plot_file = genai.upload_file(path=PLOT_PATH)
    except Exception as upload_error:
        print(f"⚠️ Agent 2: File upload failed: {upload_error}. Skipping Agent 2.")
        return {"Error": f"File upload failed: {upload_error}", "agent2_skipped": True}

    try:
        gemini = genai.GenerativeModel(MODEL_NAME, system_instruction=AGENT2_SYSTEM_PROMPT)
        response = gemini.generate_content([agent2_prompt, plot_file])

        # Guard against blocked/empty replies before touching the text.
        if not response.text or response.text.strip() == "":
            print(f"⚠️ Agent 2: Empty response. Finish reason: {response.candidates[0].finish_reason if response.candidates else 'Unknown'}")
            return {"Error": "Agent 2 returned empty response", "finish_reason": response.candidates[0].finish_reason if response.candidates else None}

        # Strip optional markdown code fences before JSON parsing.
        cleaned = response.text.strip().replace("```json", "").replace("```", "").strip()

        try:
            return json.loads(cleaned)
        except Exception as e:
            return {"Error": f"Agent 2 JSON parse failed. {e}", "raw_response": cleaned}

    except Exception as e:
        print(f"⚠️ Agent 2: Generation failed: {e}")
        return {"Error": f"Agent 2 generation failed: {e}", "agent2_failed": True}
|
| 510 |
+
|
| 511 |
+
def merge_results(agent1_result, agent2_result):
    """Merge Agent 1 and Agent 2 results into final comprehensive report.

    Combines both agents' defect lists (dropping "No Secondary Defect
    Detected" placeholders) and derives a per-category health assessment.

    Fixes over the prior revision:
    - "Confidence" is parsed defensively: the model may return "87.50 %" or a
      bare number; previously a numeric value crashed on str.replace.
    - Category risk is only ever escalated, never downgraded: previously a
      later low-confidence defect could overwrite "Moderate Risk" with
      "Normal" because only "High Risk" was guarded.

    Args:
        agent1_result: Dict with "Fault_Detection" list (primary defects).
        agent2_result: Dict with "Secondary_Fault_Detection" list, or an
            "Error" key if Agent 2 failed (then only Agent 1 results are used).

    Returns:
        {"Fault_Detection": [...], "overall_health_assessment": {...}}
    """
    final_report = {
        "Fault_Detection": [],
        "overall_health_assessment": {
            "Contacts (moving & arcing)": "Normal",
            "SF6 Gas Chamber": "Normal",
            "Operating Mechanism": "Normal",
            "Coil": "Normal"
        }
    }

    # Add Agent 1 primary defects
    if "Fault_Detection" in agent1_result:
        final_report["Fault_Detection"].extend(agent1_result["Fault_Detection"])

    # Add Agent 2 secondary defects (only if Agent 2 succeeded and not the
    # "No Secondary Defect Detected" placeholder)
    if "Error" not in agent2_result and "Secondary_Fault_Detection" in agent2_result:
        for defect in agent2_result["Secondary_Fault_Detection"]:
            if defect.get("defect_name", "").lower() != "no secondary defect detected":
                final_report["Fault_Detection"].append(defect)
    elif "Error" in agent2_result:
        print(f"⚠️ Agent 2 failed, using only Agent 1 results: {agent2_result.get('Error')}")

    # Ordering used so categories can only be escalated to a worse risk.
    risk_rank = {"Normal": 0, "Moderate Risk": 1, "High Risk": 2}
    health = final_report["overall_health_assessment"]

    def _escalate(category, risk):
        # Keep the worst risk observed for the category.
        if risk_rank[risk] > risk_rank[health[category]]:
            health[category] = risk

    # Assess overall health based on detected defects
    for defect in final_report["Fault_Detection"]:
        name = defect.get("defect_name", "").lower()
        severity = defect.get("Severity", "").lower()
        try:
            confidence = float(str(defect.get("Confidence", "0")).replace("%", "").strip())
        except ValueError:
            confidence = 0.0

        # Determine risk level
        if confidence >= 85 and severity == "high":
            risk = "High Risk"
        elif confidence >= 50:
            risk = "Moderate Risk"
        else:
            risk = "Normal"

        # Map the defect name to the health categories it affects
        if any(x in name for x in ["main contact", "arcing contact", "contact wear", "contact misalignment"]):
            _escalate("Contacts (moving & arcing)", risk)

        if "sf6" in name or "pressure" in name:
            _escalate("SF6 Gas Chamber", risk)

        if any(x in name for x in ["operating mechanism", "damping", "linkage", "rod"]):
            _escalate("Operating Mechanism", risk)

        if "coil" in name:
            _escalate("Coil", risk)

    return final_report
|
| 567 |
+
|
| 568 |
+
# NOTE: The commented-out sample `main()` driver that previously lived here has
# been removed as dead code; `detect_fault()` below supersedes it, and the
# `__main__` block provides the same sample KPI payload.
|
| 657 |
+
|
| 658 |
+
|
| 659 |
+
|
| 660 |
+
|
| 661 |
+
|
| 662 |
+
def detect_fault(df,sample_kpis):
    """Run the two-stage defect-detection agent pipeline over a waveform DataFrame.

    Args:
        df: Input DataFrame; normalized via standardize_input(). Waveform
            samples are expected in columns whose names start with 'T_'.
        sample_kpis: KPI dict (see the "kpis" list format used in __main__)
            forwarded verbatim to both agents.

    Returns:
        The merged JSON-style report (dict) produced by merge_results().

    NOTE(review): only the report for the LAST analyzed row is returned;
    reports for earlier sampled rows are computed (agents are invoked for
    every row, incurring their cost/side effects) and then discarded.
    Confirm whether a per-row list of reports was intended.
    """
    df = standardize_input(df)

    # Analyze all rows if small DF; otherwise sample 3 rows.
    # random_state=42 keeps the sampled subset deterministic across runs.
    indices = df.index if len(df) <= 3 else df.sample(3, random_state=42).index

    # Waveform columns follow the 'T_<ms>' naming convention.
    time_cols = [c for c in df.columns if c.startswith('T_')]
    for idx in indices:
        row_values_raw = df.loc[idx, time_cols].values.tolist()

        # STEP 1: Call Agent 1 (Primary Defects)
        agent1_result = call_agent1(row_values_raw, sample_kpis)

        # STEP 2: Call Agent 2 (Secondary Defects) - runs after Agent 1,
        # receiving Agent 1's output as additional context.
        agent2_result = call_agent2(row_values_raw, sample_kpis, agent1_result)

        # STEP 3: Merge both agents' findings into a single report.
        final_report = merge_results(agent1_result, agent2_result)

    return final_report
|
| 682 |
+
|
| 683 |
+
|
| 684 |
+
|
| 685 |
+
if __name__ == "__main__":
    # Smoke-test entry point: run the agent pipeline on a local CSV.
    # NOTE(review): assumes "df3_final.csv" exists in the current working
    # directory; pd.read_csv raises FileNotFoundError otherwise.
    df = pd.read_csv("df3_final.csv")
    # Sample KPIs in new JSON format (based on industry standards and
    # Ministry of Power / POWERGRID norms). Each entry is a
    # {"name", "unit", "value"} record consumed by the agents.
    sample_kpis = {
        "kpis": [
            {
                "name": "Closing Time",
                "unit": "ms",
                "value": 103.5
            },
            {
                "name": "Opening Time",
                "unit": "ms",
                "value": 37.0
            },
            {
                "name": "DLRO Value",
                "unit": "µΩ",
                "value": 299.93
            },
            {
                "name": "Peak Resistance",
                "unit": "µΩ",
                "value": 408.0
            },
            {
                "name": "Contact Travel Distance",
                "unit": "mm",
                "value": 550.0
            },
            {
                "name": "Main Wipe",
                "unit": "mm",
                "value": 46.0
            },
            {
                "name": "Arc Wipe",
                "unit": "mm",
                "value": 63.0
            },
            {
                "name": "Contact Speed",
                "unit": "m/s",
                "value": 5.2
            },
            {
                "name": "Peak Close Coil Current",
                "unit": "A",
                "value": 5.5
            },
            {
                "name": "Peak Trip Coil 1 Current",
                "unit": "A",
                "value": 5.5
            },
            {
                "name": "Peak Trip Coil 2 Current",
                "unit": "A",
                "value": 0.0
            },
            {
                "name": "Ambient Temperature",
                "unit": "°C",
                "value": 28.4
            }
        ]
    }

    result = detect_fault(df,sample_kpis)
    # ensure_ascii=False keeps µ/Ω/° readable instead of \uXXXX escapes.
    print(json.dumps(result, indent=2, ensure_ascii=False))
|
core/agents/plotting.py
ADDED
|
@@ -0,0 +1,271 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Previous Name: analysis/agents/dcrm_analysis.py
|
| 2 |
+
import pandas as pd
|
| 3 |
+
import plotly.graph_objects as go
|
| 4 |
+
from plotly.subplots import make_subplots
|
| 5 |
+
import base64
|
| 6 |
+
import json
|
| 7 |
+
from langchain_core.messages import HumanMessage
|
| 8 |
+
import streamlit as st
|
| 9 |
+
|
| 10 |
+
def get_dcrm_prompt(data_str):
    """Build the multimodal LLM prompt for DCRM zone segmentation.

    Args:
        data_str: Stringified waveform table (expected columns: 'time',
            'curr', 'res', 'travel'), interpolated into the prompt.

    Returns:
        The full prompt text instructing the model to segment the DCRM
        trace into 5 kinematic zones and answer in a strict JSON schema
        (keys: "zones", "report_card", "detailed_analysis"). The doubled
        braces ({{ }}) are f-string escapes for literal JSON braces.
    """
    return f"""
I have extracted data from a DCRM (Dynamic Contact Resistance Measurement) graph.
Data (Sampled): {data_str}

The columns are:
- 'time': Time in milliseconds.
- 'curr': Current signal amplitude (Blue curve) - represents the test current flowing through the contacts.
- 'res': Dynamic Resistance amplitude (Green curve) - represents the contact resistance in micro-ohms (µΩ).
- 'travel': Travel signal amplitude (Red curve) - represents the mechanical position/displacement of the moving contact.

IMPORTANT: Higher values mean the signal is HIGHER on the graph.

I have also provided the image of the graph. Use the visual information from the image to cross-reference with the data.

=== HEALTHY DCRM SIGNATURE REFERENCE ===

**Resistance (Green) - Healthy Characteristics:**
- Pre-contact: Infinite/Very High (off-scale or flat at top)
- Arcing engagement: Drops sharply with moderate spikes (arcing activity), typically 100-500 µΩ
- Main conduction: LOW and STABLE (30-80 µΩ for healthy contacts), minimal oscillation (<10 µΩ variance)
- Parting: Sharp rise with spikes (arcing during separation)
- Final open: Returns to infinite/very high (off-scale)

**Current (Blue) - Healthy Characteristics:**
- Pre-contact: Near zero baseline
- Arcing engagement: Begins rising as circuit closes
- Main conduction: Stable at test current level (plateau)
- Parting: Maintained until final separation
- Final open: Drops to zero

**Travel (Red) - Healthy Characteristics:**
- Pre-contact: Increasing linearly (contacts approaching)
- Arcing engagement: Continues increasing
- Main conduction: Reaches MAXIMUM and plateaus (fully closed position)
- Parting: Decreases linearly (contacts separating)
- Final open: Stabilizes at minimum (fully open position)

=== TASK: SEGMENT INTO 5 KINEMATIC ZONES ===

Use ALL THREE curves together for accurate boundary detection. Each zone represents a distinct physical state of the circuit breaker.

**Zone 1: Pre-Contact Travel (Initial Closing Motion)**
* **Physical Meaning**: The moving contact is traveling toward the stationary contact but has NOT yet made electrical contact. This is pure mechanical motion with no current flow.
* **Start**: time = 0 ms
* **End Boundary**: Detect when CURRENT (blue) FIRST starts rising significantly from baseline.
    * Cross-reference: Resistance (green) should still be very high/infinite
    * Cross-reference: Travel (red) should be steadily increasing
* **Typical Duration**: 80-120 ms
* **Detection Logic**: Find the point where 'curr' rises above baseline noise (e.g., >5% of max current)

**Zone 2: Arcing Contact Engagement (Initial Electrical Contact)**
* **Physical Meaning**: The arcing contacts (W-Cu tips) make first contact and establish an electrical path. Current begins flowing through a small contact area, causing arcing and resistance fluctuations. This is the "make" transition.
* **Start**: End of Zone 1
* **End Boundary**: Detect when resistance SETTLES after initial spike activity.
    * Primary indicator: Resistance (green) drops from high values, exhibits spikes, then STABILIZES to low plateau
    * Cross-reference: Current (blue) should be rising/stabilizing
    * Cross-reference: Travel (red) continues increasing toward maximum
* **Typical Duration**: 20-40 ms (Zone 2 typically ends around 110-150 ms total time)
* **Detection Logic**: Find where 'res' completes its descent and spike activity, settling into a stable low range

**Zone 3: Main Contact Conduction (Fully Closed State)**
* **Physical Meaning**: The main contacts (Ag-plated) are fully engaged, providing a large, stable contact area. This is the "healthy contact" signature zone - resistance should be at its MINIMUM and STABLE. The breaker is in its fully closed, current-carrying state.
* **Start**: End of Zone 2
* **End Boundary**: Detect when the breaker begins OPENING (travel reverses direction).
    * Primary indicator: Travel (red) reaches MAXIMUM and starts to DESCEND
    * Cross-reference: Resistance (green) should remain low and stable throughout this zone
    * Cross-reference: Current (blue) should be stable at test level
* **Typical Duration**: 100-200 ms (this is the longest zone, representing the dwell time)
* **Detection Logic**: Find the peak of 'travel' curve and the point where it starts decreasing

**Zone 4: Main Contact Parting (Breaking/Opening Transition)**
* **Physical Meaning**: The main contacts are separating. As the contact area decreases, resistance rises sharply. Arcing occurs during the final separation of the arcing contacts. This is the "break" transition - the most critical phase for fault detection.
* **Start**: End of Zone 3
* **End Boundary**: Detect when resistance STABILIZES at high value after parting spikes.
    * Primary indicator: Resistance (green) shoots UP, exhibits parting spikes, then STABILIZES at high/infinite value
    * Cross-reference: Travel (red) should be decreasing (opening motion)
    * Cross-reference: Current (blue) may drop or fluctuate during final arc extinction
* **Typical Duration**: 40-80 ms (Zone 4 typically ends around 280-340 ms total time)
* **Detection Logic**: Find where 'res' completes its rise and spike activity, becoming constant at high value
* **CRITICAL**: Do NOT extend this zone too long - end AS SOON AS resistance stabilizes

**Zone 5: Final Open State (Fully Open)**
* **Physical Meaning**: The contacts are fully separated with an air gap. No current flows, resistance is infinite. The breaker is in its fully open, non-conducting state.
* **Start**: End of Zone 4
* **End**: The last time point in the dataset
* **Characteristics**:
    * Resistance (green): Very high/infinite (flat line at top)
    * Current (blue): Zero or near-zero
    * Travel (red): Stable at minimum (fully open position)

**MULTI-CURVE ANALYSIS STRATEGY:**
1. Use Current (blue) to identify Zone 1 → Zone 2 transition (first current rise)
2. Use Resistance (green) to identify Zone 2 → Zone 3 transition (resistance settles to low plateau)
3. Use Travel (red) to identify Zone 3 → Zone 4 transition (travel peak and reversal)
4. Use Resistance (green) to identify Zone 4 → Zone 5 transition (resistance stabilizes at high value)
5. Always cross-validate boundaries using all three curves for consistency

**OUTPUT FORMAT (Strict JSON)**
Return ONLY this JSON object:
{{
    "zones": {{
        "zone_1_pre_contact": {{ "start_ms": float, "end_ms": float, "justification": "string (explain which curve indicators were used)" }},
        "zone_2_arcing_engagement": {{ "start_ms": float, "end_ms": float, "justification": "string (explain which curve indicators were used)" }},
        "zone_3_main_conduction": {{ "start_ms": float, "end_ms": float, "justification": "string (explain which curve indicators were used)" }},
        "zone_4_parting": {{ "start_ms": float, "end_ms": float, "justification": "string (explain which curve indicators were used)" }},
        "zone_5_final_open": {{ "start_ms": float, "end_ms": float, "justification": "string (explain which curve indicators were used)" }}
    }},
    "report_card": {{
        "opening_speed": {{ "status": "Pass"|"Warning"|"Fail", "comment": "Assessment of travel curve steepness" }},
        "contact_wear": {{ "status": "Pass"|"Warning"|"Fail", "comment": "Based on resistance fluctuations in Zone 2/4" }},
        "timing_consistency": {{ "status": "Pass"|"Warning"|"Fail", "comment": "Are phases within expected ranges?" }},
        "overall_health": {{ "status": "Healthy"|"Needs Review"|"Critical", "comment": "Overall summary" }}
    }},
    "detailed_analysis": "Provide a comprehensive technical analysis (in Markdown)..."
}}
"""
|
| 127 |
+
|
| 128 |
+
def create_dcrm_plot(df, zones):
    """Render the main DCRM chart with zone overlays.

    Current and Resistance share the primary y-axis; Travel is plotted on
    the secondary y-axis. Each entry in *zones* (keyed by the zone_* names
    from the LLM response) is shaded as a vertical rectangle labelled with
    its zone number.
    """
    # Background tint for each kinematic zone; unknown names fall back to
    # a fully transparent fill.
    zone_fills = {
        "zone_1_pre_contact": "rgba(52, 152, 219, 0.1)",
        "zone_2_arcing_engagement": "rgba(231, 76, 60, 0.1)",
        "zone_3_main_conduction": "rgba(46, 204, 113, 0.1)",
        "zone_4_parting": "rgba(155, 89, 182, 0.1)",
        "zone_5_final_open": "rgba(149, 165, 166, 0.1)"
    }

    dcrm_fig = make_subplots(specs=[[{"secondary_y": True}]])

    # Signal traces (order preserved: Current, Resistance, then Travel).
    dcrm_fig.add_trace(
        go.Scatter(x=df['Time_ms'], y=df['Current'], name="Current (A)",
                   line=dict(color='#2980b9', width=2)),
        secondary_y=False,
    )
    dcrm_fig.add_trace(
        go.Scatter(x=df['Time_ms'], y=df['Resistance'], name="Resistance (uOhm)",
                   line=dict(color='#27ae60', width=2)),
        secondary_y=False,
    )
    dcrm_fig.add_trace(
        go.Scatter(x=df['Time_ms'], y=df['Travel'], name="Travel (mm)",
                   line=dict(color='#c0392b', width=2)),
        secondary_y=True,
    )

    # Shaded rectangles for every zone with both boundaries present.
    for zone_key, bounds in zones.items():
        left = bounds.get("start_ms")
        right = bounds.get("end_ms")
        if left is None or right is None:
            continue
        dcrm_fig.add_vrect(
            x0=left, x1=right,
            fillcolor=zone_fills.get(zone_key, "rgba(0,0,0,0)"), opacity=1,
            layer="below", line_width=0,
            annotation_text=zone_key.split('_')[1].upper(),
            annotation_position="top left",
            annotation_font_color="#7f8c8d"
        )

    dcrm_fig.update_layout(
        title_text="<b>Main Signals & Zones</b>",
        height=500,
        hovermode="x unified",
        plot_bgcolor="white",
        paper_bgcolor="white",
        font=dict(family="Segoe UI, sans-serif"),
        legend=dict(orientation="h", yanchor="bottom", y=1.02, xanchor="right", x=1),
        margin=dict(l=20, r=20, t=60, b=20)
    )
    dcrm_fig.update_xaxes(showgrid=True, gridwidth=1, gridcolor='#f0f0f0')
    dcrm_fig.update_yaxes(title_text="Current / Resistance", secondary_y=False,
                          showgrid=True, gridwidth=1, gridcolor='#f0f0f0')
    dcrm_fig.update_yaxes(title_text="Travel", secondary_y=True, showgrid=False)

    return dcrm_fig
|
| 177 |
+
|
| 178 |
+
def create_velocity_plot(df):
    """Plot the contact velocity profile derived from the travel curve.

    Writes a 'Velocity' column into *df* (in place) as the discrete
    derivative d(Travel)/d(Time); with travel in mm and time in ms the
    ratio mm/ms equals m/s. Returns a filled-area Plotly figure.
    """
    # Discrete derivative; the first row is NaN by construction of diff().
    df['Velocity'] = df['Travel'].diff() / df['Time_ms'].diff()

    velocity_fig = go.Figure()
    velocity_fig.add_trace(
        go.Scatter(
            x=df['Time_ms'],
            y=df['Velocity'],
            name="Velocity (m/s)",
            line=dict(color='#e67e22', width=2),
            fill='tozeroy',
        )
    )

    velocity_fig.update_layout(
        title_text="<b>Contact Velocity Profile</b>",
        height=300,
        hovermode="x unified",
        plot_bgcolor="white",
        paper_bgcolor="white",
        font=dict(family="Segoe UI, sans-serif"),
        margin=dict(l=20, r=20, t=40, b=20)
    )
    velocity_fig.update_xaxes(showgrid=True, gridwidth=1, gridcolor='#f0f0f0')
    velocity_fig.update_yaxes(title_text="Velocity (m/s)", showgrid=True,
                              gridwidth=1, gridcolor='#f0f0f0')
    return velocity_fig
|
| 199 |
+
|
| 200 |
+
def create_resistance_zoom_plot(df):
    """Return a log-scale Plotly figure of the resistance trace.

    The logarithmic y-axis makes the low-µΩ conduction plateau and the
    high-resistance open states visible on the same chart.
    """
    zoom_fig = go.Figure()
    zoom_fig.add_trace(
        go.Scatter(
            x=df['Time_ms'],
            y=df['Resistance'],
            name="Resistance",
            line=dict(color='#27ae60', width=2),
        )
    )

    zoom_fig.update_layout(
        title_text="<b>Detailed Resistance (Log Scale)</b>",
        height=300,
        hovermode="x unified",
        plot_bgcolor="white",
        paper_bgcolor="white",
        font=dict(family="Segoe UI, sans-serif"),
        yaxis_type="log",  # log scale exposes detail across magnitudes
        margin=dict(l=20, r=20, t=40, b=20)
    )
    zoom_fig.update_xaxes(showgrid=True, gridwidth=1, gridcolor='#f0f0f0')
    zoom_fig.update_yaxes(title_text="Resistance (uOhm)", showgrid=True,
                          gridwidth=1, gridcolor='#f0f0f0')
    return zoom_fig
|
| 217 |
+
|
| 218 |
+
def enrich_data_with_zones(df, llm):
    """
    Uses the LLM to identify zones and adds a 'Zone' column to the DataFrame.

    Args:
        df: DataFrame with 'Time_ms', 'Current', 'Resistance' and 'Travel'
            columns. NOTE(review): mutated in place -- the 'Zone' column is
            written directly into the caller's frame.
        llm: LangChain chat model accepting multimodal HumanMessage content;
            expected to answer with the JSON schema from get_dcrm_prompt().

    Returns:
        (df, zones): the enriched frame and the "zones" dict from the LLM.
        On any failure the error is surfaced via st.error and (df, {}) is
        returned (df may already be partially modified at that point).
    """
    try:
        # 1. Prepare Text Data -- the whole frame is serialized into the prompt.
        data_str = df.to_string(index=False)
        prompt_text = get_dcrm_prompt(data_str)

        # 2. Prepare Image Data
        # Create a simplified plot for the LLM to "see"
        fig = make_subplots(specs=[[{"secondary_y": True}]])
        fig.add_trace(go.Scatter(x=df['Time_ms'], y=df['Current'], name="Current", line=dict(color='blue')), secondary_y=False)
        fig.add_trace(go.Scatter(x=df['Time_ms'], y=df['Resistance'], name="Resistance", line=dict(color='green')), secondary_y=False)
        fig.add_trace(go.Scatter(x=df['Time_ms'], y=df['Travel'], name="Travel", line=dict(color='red')), secondary_y=True)
        fig.update_layout(title="DCRM Graph", showlegend=True)

        # Convert plot to image bytes (requires a plotly image backend,
        # e.g. kaleido, to be installed).
        img_bytes = fig.to_image(format="png", width=1024, height=600)
        base64_image = base64.b64encode(img_bytes).decode('utf-8')

        # 3. Construct Multimodal Message (text prompt + inline PNG data URL)
        message = HumanMessage(
            content=[
                {"type": "text", "text": prompt_text},
                {
                    "type": "image_url",
                    "image_url": {"url": f"data:image/png;base64,{base64_image}"}
                }
            ]
        )

        # 4. Invoke LLM and parse the reply.
        # NOTE(review): stripping of markdown fences is naive -- assumes the
        # model returns a single ```json ...``` block or bare JSON; anything
        # else raises json.JSONDecodeError and lands in the except branch.
        response = llm.invoke([message])
        content = response.content.replace("```json", "").replace("```", "").strip()
        result = json.loads(content)
        zones = result.get("zones", {})

        # Initialize Zone column; rows outside every zone stay "Unknown".
        df['Zone'] = "Unknown"

        for zone_name, details in zones.items():
            start = details.get("start_ms")
            end = details.get("end_ms")
            if start is not None and end is not None:
                # Map zone name to a simpler label (e.g., "Zone 1").
                # Zone windows are applied in dict order, so overlapping
                # ranges are won by the later zone.
                short_name = zone_name.split('_')[1]  # "1", "2", etc.
                mask = (df['Time_ms'] >= start) & (df['Time_ms'] <= end)
                df.loc[mask, 'Zone'] = f"Zone {short_name}"

        return df, zones
    except Exception as e:
        # Boundary handler: report in the Streamlit UI and degrade gracefully.
        st.error(f"Enrichment failed: {str(e)}")
        return df, {}
|
core/agents/recommendation.py
ADDED
|
@@ -0,0 +1,427 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Previous Name: analysis/agents/recommendation_agent.py
|
| 2 |
+
import json
|
| 3 |
+
import google.generativeai as genai
|
| 4 |
+
|
| 5 |
+
import os

# Configure Gemini API.
# SECURITY FIX: the API key was previously hard-coded in source control.
# It is now read from the environment (see the project's .env file); the
# formerly committed literal key must be revoked on the provider side.
API_KEY = os.environ.get("GEMINI_API_KEY") or os.environ.get("GOOGLE_API_KEY", "")
genai.configure(api_key=API_KEY)
MODEL_NAME = "gemini-2.5-flash"
|
| 9 |
+
|
| 10 |
+
# =========================
|
| 11 |
+
# DEEP DOMAIN KNOWLEDGE PROMPT
|
| 12 |
+
# =========================
|
| 13 |
+
RECOMMENDATIONS_PROMPT = """
|
| 14 |
+
Role: You are an expert High Voltage Circuit Breaker Maintenance Engineer with 25+ years of experience in DCRM (Dynamic Contact Resistance Measurement) diagnostics, predictive maintenance, and failure analysis for SF6 circuit breakers in EHV (220 kV and onwards) substations.
|
| 15 |
+
|
| 16 |
+
Your Task: Based on comprehensive circuit breaker diagnostic data, provide:
|
| 17 |
+
1. **Maintenance Recommendations** (Priority 1, 2, 3)
|
| 18 |
+
2. **Future Fault Predictions** (minimum 3, with probabilities and timelines)
|
| 19 |
+
|
| 20 |
+
===== DOMAIN KNOWLEDGE BASE =====
|
| 21 |
+
|
| 22 |
+
**Circuit Breaker Defects (12 Classes) - Deep Knowledge Base:**
|
| 23 |
+
|
| 24 |
+
1. **Healthy**: All parameters within spec, DLRO 20-50µΩ, smooth phase transitions, stable contact pressure, no timing deviations
|
| 25 |
+
|
| 26 |
+
2. **Main Contact Wear**:
|
| 27 |
+
- DLRO >100µΩ (healthy: 20-70µΩ), elevated plateau resistance during Phase 3
|
| 28 |
+
- Surface erosion, material loss, rough contact surfaces with "grassy noise" signature
|
| 29 |
+
- Causes: Thermal stress, mechanical friction, oxidation, improper contact pressure
|
| 30 |
+
- Progression: Early (70-100µΩ) → Moderate (100-180µΩ) → Severe (180-280µΩ) → Critical (>280µΩ)
|
| 31 |
+
- Risks: Overheating → Contact welding → Failed interruption → Explosion hazard
|
| 32 |
+
|
| 33 |
+
3. **Arcing Contact Wear**:
|
| 34 |
+
- High-amplitude spikes during Phase 2/4 arcing zones (>5000µΩ), sustained arc duration
|
| 35 |
+
- Contact pitting, erosion, craters from arc flash, prolonged arcing time
|
| 36 |
+
- Causes: Excessive fault interruptions, poor SF6 quality, contact misalignment
|
| 37 |
+
- Risks: Arc flash → Main contact damage → Failed interruption → Catastrophic failure
|
| 38 |
+
|
| 39 |
+
4. **Main Contact Misalignment**:
|
| 40 |
+
- "Telegraph pattern" with square-wave jumps (>120µΩ steps), uneven contact engagement
|
| 41 |
+
- Stepped resistance plateaus, asymmetric contact pressure distribution
|
| 42 |
+
- Causes: Mechanical wear, linkage looseness, contact holder deformation, improper assembly
|
| 43 |
+
- Risks: Localized heating → Uneven wear → Contact welding → Mechanical seizure
|
| 44 |
+
|
| 45 |
+
5. **Arcing Contact Misalignment**:
|
| 46 |
+
- Asymmetric Phase 2 vs Phase 4 durations (ratio >1.6), sinusoidal bounce patterns
|
| 47 |
+
- One contact engages before the other, timing mismatch between poles
|
| 48 |
+
- Causes: Linkage rod bending, pole asynchrony, damping system failure
|
| 49 |
+
- Risks: Unbalanced arcing → Accelerated wear → Phase-to-phase timing errors
|
| 50 |
+
|
| 51 |
+
6. **Operating Mechanism Malfunction**:
|
| 52 |
+
- Abnormal closing/opening times (>20% deviation), reduced contact speed (<4.5 m/s)
|
| 53 |
+
- Sluggish operation, stutter patterns, inconsistent travel curves
|
| 54 |
+
- Causes: Lubrication degradation, bearing wear, spring fatigue, control linkage issues
|
| 55 |
+
- Risks: Timing drift → Failed synchronization → Protection coordination loss
|
| 56 |
+
|
| 57 |
+
7. **Damping System Fault**:
|
| 58 |
+
- Contact bounce (>5 oscillations, >100µΩ amplitude), sinusoidal resistance patterns
|
| 59 |
+
- Excessive mechanical vibration during closing, oil leakage from dampers
|
| 60 |
+
- Causes: Hydraulic oil degradation, seal failure, piston wear, gas spring leakage
|
| 61 |
+
- Risks: Contact welding → Mechanical damage → Spring breakage → Stuck breaker
|
| 62 |
+
|
| 63 |
+
8. **SF6 Pressure Leakage**:
|
| 64 |
+
- Prolonged arcing duration (>25ms), poor arc quenching, elevated arc resistance
|
| 65 |
+
- Pressure below spec (<5.5 bar), gas purity degradation, moisture ingress
|
| 66 |
+
- Causes: Gasket deterioration, seal failure, manufacturing defects, thermal cycling
|
| 67 |
+
- Risks: Reduced dielectric strength → Internal flashover → Arc quenching failure → Explosion
|
| 68 |
+
|
| 69 |
+
9. **Linkage/Rod Obstruction**:
|
| 70 |
+
- Stutter patterns during travel (>3 distinct flat plateaus >10ms), mechanical binding
|
| 71 |
+
- Irregular travel curve, sudden speed changes, torque fluctuations
|
| 72 |
+
- Causes: Foreign object ingress, ice formation, corrosion, misaligned rods
|
| 73 |
+
- Risks: Incomplete stroke → Failed operation → Mechanical jam → Safety hazard
|
| 74 |
+
|
| 75 |
+
10. **Fixed Contact Damage**:
|
| 76 |
+
- Elevated DLRO (>80µΩ) with smooth stable Phase 3 (no grassy noise = stationary contact issue)
|
| 77 |
+
- DC offset shift, baseline resistance elevation, contact deformation
|
| 78 |
+
- Causes: Thermal damage, arc erosion of stationary contact, improper installation
|
| 79 |
+
- Risks: Increased heating → Insulation damage → Tracking → Ground fault
|
| 80 |
+
|
| 81 |
+
11. **Close Coil Damage**:
|
| 82 |
+
- Close coil current <2A (healthy: 4-7A), failed closing operation, coil overheating
|
| 83 |
+
- Open/short circuit in coil winding, control circuit failure
|
| 84 |
+
- Causes: Electrical overstress, insulation breakdown, mechanical damage, moisture ingress
|
| 85 |
+
- Risks: Failed closing → Breaker stuck open → System de-energization
|
| 86 |
+
|
| 87 |
+
12. **Trip Coil Damage**:
|
| 88 |
+
- Trip coil 1 or 2 current <2A, redundancy loss, failed opening operation
|
| 89 |
+
- Both coils failed = catastrophic (breaker cannot trip during fault)
|
| 90 |
+
- Causes: Coil burnout, circuit failure, auxiliary contact malfunction
|
| 91 |
+
- Risks: Failed interruption → Sustained fault current → Equipment damage → Fire hazard
|
| 92 |
+
|
| 93 |
+
**KPI Thresholds (Healthy Ranges):**
|
| 94 |
+
- Closing Time: 70-110 ms
|
| 95 |
+
- Opening Time: 20-40 ms
|
| 96 |
+
- DLRO Value: 20-100 µΩ (ideal: 20-50 µΩ)
|
| 97 |
+
- Peak Resistance: 500-1000000000000 µΩ (during conduction excluding open baseline)
|
| 98 |
+
- Main Wipe: 10-20 mm
|
| 99 |
+
- Arc Wipe: 15-25 mm
|
| 100 |
+
- Contact Travel Distance: 150-200 mm
|
| 101 |
+
- Contact Speed: 2.0-6.0 m/s
|
| 102 |
+
- Coil Currents: 1-7 A (nominal: 4-7A)
|
| 103 |
+
- Ambient Temperature: 10-40°C
|
| 104 |
+
|
| 105 |
+
**CBHI Score Interpretation:**
|
| 106 |
+
- 90-100: Excellent - Routine monitoring only
|
| 107 |
+
- 75-89: Good - Minor preventive actions
|
| 108 |
+
- 60-74: Fair - Scheduled maintenance needed
|
| 109 |
+
- 40-59: Poor - Urgent attention required
|
| 110 |
+
- 0-39: Critical - Immediate intervention
|
| 111 |
+
|
| 112 |
+
**Common Failure Progressions:**
|
| 113 |
+
- Contact Wear → Increased Resistance → Overheating → Welding/Failure
|
| 114 |
+
- Gas Leak → Poor Arc Quenching → Contact Damage → Catastrophic Failure
|
| 115 |
+
- Mechanism Issues → Timing Drift → Coordination Loss → System Fault
|
| 116 |
+
- Coil Degradation → Incomplete Operation → Stuck Breaker → Protection Failure
|
| 117 |
+
|
| 118 |
+
**Maintenance Action Guidelines:**
|
| 119 |
+
- **Priority 1 (Critical)**: 0-1 month, safety/reliability risk, Red (#B71C1C bg:#FFCDD2)
|
| 120 |
+
- **Priority 2 (Important)**: 1-3 months, performance degradation, Amber (#F57F17 bg:#FFF9C4)
|
| 121 |
+
- **Priority 3 (Preventive)**: 3-6 months, optimization/prevention, Blue (#0D47A1 bg:#BBDEFB)
|
| 122 |
+
- **Priority 4 (Additional)**: If needed, Red (#B71C1C bg:#FFCDD2)
|
| 123 |
+
- **Priority 5 (Additional)**: If needed, Amber (#F57F17 bg:#FFF9C4)
|
| 124 |
+
- **CRITICAL: Each priority level must have EXACTLY ONE action** (create Priority 4, 5, etc. if needed)
|
| 125 |
+
- **Total actions: Minimum 3, Maximum 5** (distribute across priorities as needed)
|
| 126 |
+
|
| 127 |
+
**Future Fault Risk Levels & STRICT Color Mapping:**
|
| 128 |
+
- **High Risk**: Probability >60%, Timeline <12 months
|
| 129 |
+
- **Medium Risk**: Probability 30-60%, Timeline 12-24 months
|
| 130 |
+
- **Low Risk**: Probability <30%, Timeline >24 months
|
| 131 |
+
- **HARDCODED Colors (by position, NOT risk_level)**: 1st=#2E7D32, 2nd=#F57F17, 3rd=#0D47A1, 4th=#2E7D32, 5th=#F57F17
|
| 132 |
+
- **Total predictions: Minimum 3, Maximum 5** (ensure variety of risk levels)
|
| 133 |
+
|
| 134 |
+
===== INPUT DATA =====
|
| 135 |
+
|
| 136 |
+
**KPIs (Key Performance Indicators):**
|
| 137 |
+
{kpis_json}
|
| 138 |
+
|
| 139 |
+
**CBHI Score (Circuit Breaker Health Index):**
|
| 140 |
+
{cbhi_score} / 100
|
| 141 |
+
|
| 142 |
+
**Detected Faults:**
|
| 143 |
+
{faults_summary}
|
| 144 |
+
|
| 145 |
+
===== OUTPUT REQUIREMENTS =====
|
| 146 |
+
|
| 147 |
+
Return ONLY valid JSON (no markdown fences, no extra text).
|
| 148 |
+
|
| 149 |
+
**CRITICAL JSON FORMATTING RULES:**
|
| 150 |
+
- All strings must use double quotes (")
|
| 151 |
+
- Escape any double quotes within strings using backslash (\")
|
| 152 |
+
- Do NOT use smart quotes, apostrophes, or special Unicode quotes
|
| 153 |
+
- Ensure all brackets and braces are properly matched
|
| 154 |
+
- Use standard ASCII characters only in field values
|
| 155 |
+
- Multi-line text should be a single string with spaces, not actual line breaks
|
| 156 |
+
|
| 157 |
+
**LANGUAGE STYLE - CRITICAL:**
|
| 158 |
+
- **justification**: Write as NATURAL EXPLANATION (not bullet points). Explain the CAUSE and EFFECT relationship clearly.
|
| 159 |
+
- Example GOOD: "SF6 pressure declining from 7.0 to 6.8 bar over recent tests. Gas quality directly affects arc quenching and contact performance."
|
| 160 |
+
- Example BAD: "KPI: SF6 6.8 bar; Phase 2: arc; AI: leak 45%"
|
| 161 |
+
- MAX 150 characters total
|
| 162 |
+
|
| 163 |
+
- **evidence** (for future faults): Write as EXPLANATORY SENTENCE describing the trend or pattern observed.
|
| 164 |
+
- Example GOOD: "Contact resistance is significantly elevated, indicating material degradation. This will worsen over time, leading to overheating."
|
| 165 |
+
- Example BAD: "DLRO +15%/mo; Phase 3: plateau; Wear: 68% conf"
|
| 166 |
+
- MAX 120 characters total
|
| 167 |
+
|
| 168 |
+
- **Use proper technical narratives, NOT statistical shorthand**
|
| 169 |
+
|
| 170 |
+
Structure (EXACT format and ordering required):
|
| 171 |
+
|
| 172 |
+
{{
|
| 173 |
+
"maintenanceActions": [
|
| 174 |
+
{{
|
| 175 |
+
"actions": [
|
| 176 |
+
{{
|
| 177 |
+
"id": "1",
|
| 178 |
+
"justification": "Natural language explanation citing key evidence from KPIs/Faults",
|
| 179 |
+
"timeline": "Within 1 month",
|
| 180 |
+
"title": "Clear, actionable maintenance task",
|
| 181 |
+
"whatToLookFor": [
|
| 182 |
+
"Specific inspection point 1",
|
| 183 |
+
"Specific measurement/check 2",
|
| 184 |
+
"Expected finding/threshold 3",
|
| 185 |
+
"Corrective action trigger 4"
|
| 186 |
+
]
|
| 187 |
+
}}
|
| 188 |
+
],
|
| 189 |
+
"bgColor": "#FFCDD2",
|
| 190 |
+
"color": "#B71C1C",
|
| 191 |
+
"priority": "Priority 1"
|
| 192 |
+
}},
|
| 193 |
+
{{
|
| 194 |
+
"actions": [
|
| 195 |
+
{{
|
| 196 |
+
"id": "2",
|
| 197 |
+
"justification": "Clear explanation with evidence",
|
| 198 |
+
"timeline": "Within 3 months",
|
| 199 |
+
"title": "Second most critical task",
|
| 200 |
+
"whatToLookFor": ["item1", "item2", "item3", "item4"]
|
| 201 |
+
}}
|
| 202 |
+
],
|
| 203 |
+
"bgColor": "#FFF9C4",
|
| 204 |
+
"color": "#F57F17",
|
| 205 |
+
"priority": "Priority 2"
|
| 206 |
+
}},
|
| 207 |
+
{{
|
| 208 |
+
"actions": [
|
| 209 |
+
{{
|
| 210 |
+
"id": "3",
|
| 211 |
+
"justification": "Brief evidence-based reasoning",
|
| 212 |
+
"timeline": "Within 6 months",
|
| 213 |
+
"title": "Preventive maintenance task",
|
| 214 |
+
"whatToLookFor": ["item1", "item2", "item3", "item4"]
|
| 215 |
+
}}
|
| 216 |
+
],
|
| 217 |
+
"bgColor": "#BBDEFB",
|
| 218 |
+
"color": "#0D47A1",
|
| 219 |
+
"priority": "Priority 3"
|
| 220 |
+
}}
|
| 221 |
+
],
|
| 222 |
+
"futureFaultsPdf": [
|
| 223 |
+
{{
|
| 224 |
+
"color": "#2E7D32",
|
| 225 |
+
"evidence": "Natural language explanation of trends and patterns observed.",
|
| 226 |
+
"fault": "Specific fault name from 12 defect classes",
|
| 227 |
+
"id": "1",
|
| 228 |
+
"probability": 68,
|
| 229 |
+
"risk_level": "high",
|
| 230 |
+
"timeline": "6 - 12 Months"
|
| 231 |
+
}},
|
| 232 |
+
{{
|
| 233 |
+
"color": "#F57F17",
|
| 234 |
+
"evidence": "Descriptive sentence about observed patterns and correlations.",
|
| 235 |
+
"fault": "Another potential fault",
|
| 236 |
+
"id": "2",
|
| 237 |
+
"probability": 42,
|
| 238 |
+
"risk_level": "medium",
|
| 239 |
+
"timeline": "12 - 18 Months"
|
| 240 |
+
}},
|
| 241 |
+
{{
|
| 242 |
+
"color": "#0D47A1",
|
| 243 |
+
"evidence": "Long-term risk explanation based on industry experience.",
|
| 244 |
+
"fault": "Third potential fault",
|
| 245 |
+
"id": "3",
|
| 246 |
+
"probability": 25,
|
| 247 |
+
"risk_level": "low",
|
| 248 |
+
"timeline": "> 24 Months"
|
| 249 |
+
}}
|
| 250 |
+
]
|
| 251 |
+
}}
|
| 252 |
+
|
| 253 |
+
===== ANALYSIS INSTRUCTIONS =====
|
| 254 |
+
|
| 255 |
+
**Step 1: Deep Multi-Source Analysis**
|
| 256 |
+
- Cross-reference KPIs, CBHI, and Detected Faults to identify CORRELATIONS and PATTERNS
|
| 257 |
+
- Look for evidence chains: e.g., High DLRO → Elevated resistance → Main Wear verdict → Overheating risk
|
| 258 |
+
- Consider ALL 12 defect classes and identify which ones apply (even if probability is lower)
|
| 259 |
+
|
| 260 |
+
**Step 2: Maintenance Action Prioritization (3-5 actions total)**
|
| 261 |
+
- **CRITICAL RULE: Each priority level has EXACTLY ONE action** (no multiple actions per priority)
|
| 262 |
+
- **CRITICAL RULE: Provide 3-5 priority levels** (create Priority 4, 5 if needed)
|
| 263 |
+
- **CRITICAL RULE: The "id" field MUST match priority number** (Priority 1 → id="1", Priority 2 → id="2", etc.)
|
| 264 |
+
- **HARDCODED Color scheme (DO NOT CHANGE)**:
|
| 265 |
+
* Priority 1: color="#B71C1C", bgColor="#FFCDD2"
|
| 266 |
+
* Priority 2: color="#F57F17", bgColor="#FFF9C4"
|
| 267 |
+
* Priority 3: color="#0D47A1", bgColor="#BBDEFB"
|
| 268 |
+
* Priority 4 (if any): color="#B71C1C", bgColor="#FFCDD2"
|
| 269 |
+
* Priority 5 (if any): color="#F57F17", bgColor="#FFF9C4"
|
| 270 |
+
|
| 271 |
+
**Step 3: Future Fault Predictions (3-5 predictions)**
|
| 272 |
+
- **CRITICAL RULE: Provide 3-5 predictions with DIVERSE risk levels and timelines**
|
| 273 |
+
- **CRITICAL RULE: The "id" field is sequential** (1, 2, 3, 4, 5)
|
| 274 |
+
- **HARDCODED Color scheme (DO NOT CHANGE)**:
|
| 275 |
+
* 1st fault: color="#2E7D32"
|
| 276 |
+
* 2nd fault: color="#F57F17"
|
| 277 |
+
* 3rd fault: color="#0D47A1"
|
| 278 |
+
* 4th fault (if any): color="#2E7D32"
|
| 279 |
+
* 5th fault (if any): color="#F57F17"
|
| 280 |
+
- **Color assignment is FIXED by position, NOT by risk_level**
|
| 281 |
+
- **Progression logic to apply:**
|
| 282 |
+
- Contact Wear → Overheating → Welding → Failed interruption (6-12 months high risk)
|
| 283 |
+
- Gas Leak → Poor arc quenching → Contact damage → Catastrophic failure (12-18 months medium)
|
| 284 |
+
- Mechanism wear → Timing drift → Coordination loss → Protection failure (18-24 months medium)
|
| 285 |
+
- Coil degradation (one failed) → Second coil stress → Redundancy loss (>24 months low)
|
| 286 |
+
- Arcing wear → Main contact damage → System fault (12-18 months medium)
|
| 287 |
+
|
| 288 |
+
**Remember**: Your output will be used by field maintenance engineers. Be PRECISE, ACTIONABLE, and INSIGHTFUL.
|
| 289 |
+
Generate thoughtful, data-driven, practical insights that can immediately guide maintenance decisions.
|
| 290 |
+
"""
|
| 291 |
+
|
| 292 |
+
|
| 293 |
+
def generate_recommendations(kpis, cbhi_score, rule_faults, ai_faults, llm):
    """
    Generate maintenance actions and future fault predictions via Gemini.

    Args:
        kpis (dict): Calculated KPIs.
        cbhi_score (float): Overall Circuit Breaker Health Index (0-100).
        rule_faults (list): Faults detected by the deterministic rule engine.
        ai_faults (list): Faults detected by the AI agent.
        llm: Unused; kept for interface compatibility (Gemini is called directly).

    Returns:
        dict: {'maintenanceActions': [...], 'futureFaultsPdf': [...]} on success;
        on failure the same keys (empty lists) plus an 'error' description.
    """
    import time  # local import: only needed for the retry back-off below

    def _fault_lines(faults):
        # One bullet per fault; note the capital-S 'Severity' key is what the
        # upstream engines emit — do not "fix" the casing here.
        return "".join(
            f"- {f.get('defect_name')}: {f.get('description')} (Severity: {f.get('Severity')})\n"
            for f in faults
        )

    # 1. Prepare the fault context fed into the prompt.
    faults_summary = "Deterministic Faults:\n"
    faults_summary += _fault_lines(rule_faults) if rule_faults else "- None (Healthy)\n"
    faults_summary += "\nAI Agent Insights:\n"
    faults_summary += _fault_lines(ai_faults) if ai_faults else "- None\n"

    kpi_summary = json.dumps(kpis, indent=2, ensure_ascii=False)

    # 2. Fill the prompt template with the measured data.
    prompt = RECOMMENDATIONS_PROMPT.format(
        kpis_json=kpi_summary,
        cbhi_score=cbhi_score,
        faults_summary=faults_summary
    )

    # 3. Generation settings: low temperature for stable, parseable JSON.
    generation_config = {
        "temperature": 0.4,
        "top_p": 0.95,
        "top_k": 40,
        "max_output_tokens": 8192,
    }

    # 4. Initialize model.
    model = genai.GenerativeModel(
        model_name=MODEL_NAME,
        generation_config=generation_config
    )

    # 5. Generate with retry logic (the model occasionally emits invalid JSON).
    max_retries = 3
    response_text = ""
    for attempt in range(max_retries):
        try:
            if attempt == 0:
                print("🔮 Generating recommendations and future fault predictions...")
            else:
                print(f"🔄 Retry attempt {attempt}/{max_retries-1}...")

            # On retry, remind the model about quote escaping — the most
            # common cause of the previous parse failure.
            if attempt > 0:
                prompt_with_retry = prompt + "\n\nIMPORTANT: Ensure all text in justification, evidence, and whatToLookFor fields has properly escaped quotes. Use single quotes within text or escape double quotes with backslash."
            else:
                prompt_with_retry = prompt

            response = model.generate_content(prompt_with_retry)

            # Empty responses usually mean a quota or safety block.
            if not response or not response.text or response.text.strip() == "":
                print(f"⚠️ Empty response from API. Finish reason: {response.candidates[0].finish_reason if response and response.candidates else 'Unknown'}")
                if attempt == max_retries - 1:
                    return {
                        "error": "Empty response from API (quota/safety block)",
                        "finish_reason": response.candidates[0].finish_reason if response and response.candidates else None,
                        "maintenanceActions": [],
                        "futureFaultsPdf": []
                    }
                continue  # Try again

            response_text = response.text.strip()

            # Strip markdown code fences if the model wrapped its output.
            if response_text.startswith("```json"):
                response_text = response_text[7:]
            if response_text.startswith("```"):
                response_text = response_text[3:]
            if response_text.endswith("```"):
                response_text = response_text[:-3]
            response_text = response_text.strip()

            # BUGFIX: normalize Unicode "smart" quotes to ASCII so json.loads
            # succeeds. The previous replace() calls compared identical
            # characters (the smart-quote literals had been mangled), making
            # them no-ops; explicit escapes are unambiguous.
            response_text = (
                response_text
                .replace("\u201c", '"').replace("\u201d", '"')
                .replace("\u2018", "'").replace("\u2019", "'")
            )

            # Parse JSON
            result = json.loads(response_text)

            print("✅ Successfully generated recommendations and predictions")
            return result

        except json.JSONDecodeError as e:
            print(f"❌ JSON parsing error (attempt {attempt+1}/{max_retries}): {e}")
            if attempt == max_retries - 1:
                print(f"Raw response excerpt:\n{response_text[:1000]}...")
                # Return what we can so callers still get the expected keys.
                return {
                    "error": f"Failed to parse AI response after {max_retries} attempts",
                    "raw_response": response_text[:2000],
                    "maintenanceActions": [],
                    "futureFaultsPdf": []
                }
            time.sleep(1)  # brief back-off before retrying

        except Exception as e:
            # Network/API failures: surface the message but keep the shape
            # callers expect.
            print(f"❌ Error generating recommendations: {e}")
            return {
                "error": str(e),
                "maintenanceActions": [],
                "futureFaultsPdf": []
            }

    return {
        "error": "Max retries exceeded",
        "maintenanceActions": [],
        "futureFaultsPdf": []
    }
|
core/calculators/__pycache__/cbhi.cpython-313.pyc
ADDED
|
Binary file (6.2 kB). View file
|
|
|
core/calculators/__pycache__/cbhi.cpython-39.pyc
ADDED
|
Binary file (4.2 kB). View file
|
|
|
core/calculators/__pycache__/kpi.cpython-313.pyc
ADDED
|
Binary file (17.3 kB). View file
|
|
|
core/calculators/__pycache__/kpi.cpython-39.pyc
ADDED
|
Binary file (8.04 kB). View file
|
|
|
core/calculators/__pycache__/rul.cpython-313.pyc
ADDED
|
Binary file (16.2 kB). View file
|
|
|
core/calculators/__pycache__/rul.cpython-39.pyc
ADDED
|
Binary file (9.31 kB). View file
|
|
|
core/calculators/cbhi.py
ADDED
|
@@ -0,0 +1,197 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Previous Name: analysis/cbhi_calculator.py
|
| 2 |
+
import json
|
| 3 |
+
|
| 4 |
+
def calculate_score(val, min_good, max_good, buffer):
    """
    Score a KPI value against its healthy band.

    Values inside [min_good, max_good] score 90-100: exactly 100 at the
    band's midpoint, tapering linearly down to 90 at either edge.
    Values outside the band decay linearly from 90 to 0 across `buffer`
    units of distance; anything at or beyond the buffer scores 0.
    Unorderable input (e.g. NaN) also scores 0.
    """
    center = (min_good + max_good) / 2.0
    half_span = (max_good - min_good) / 2.0

    # In-band: 90-100, peaking at the center of the healthy range.
    if min_good <= val <= max_good:
        offset = abs(val - center)
        if offset == 0:
            return 100.0
        return max(90.0, 100.0 - (10.0 * (offset / half_span)))

    # Below the band: linear decay over the buffer zone.
    if val < min_good:
        shortfall = min_good - val
        if shortfall >= buffer:
            return 0.0
        return 90.0 * (1.0 - (shortfall / buffer))

    # Above the band: same decay on the high side.
    if val > max_good:
        overshoot = val - max_good
        if overshoot >= buffer:
            return 0.0
        return 90.0 * (1.0 - (overshoot / buffer))

    # NaN fails every comparison above — worst score.
    return 0.0
|
| 37 |
+
|
| 38 |
+
# Relative weight of each KPI score in the base CBHI aggregation.
# The values sum to 1.0, so the weighted sum of 0-100 scores stays on
# the 0-100 scale. Keys must match the `scores` dict in compute_cbhi.
WEIGHTS = {
    "peak_resistance": 0.15,
    "dlro": 0.10,
    "travel": 0.10,
    "speed": 0.10,
    "open_time": 0.15,
    "close_time": 0.15,
    "main_wipe": 0.05,
    "arc_wipe": 0.05,
    "coil_current": 0.10,
    "temp": 0.05
}
|
| 50 |
+
|
| 51 |
+
# ==========================================
|
| 52 |
+
# NEW LOGIC: AI & PHASE ADJUSTMENTS
|
| 53 |
+
# ==========================================
|
| 54 |
+
|
| 55 |
+
def calculate_ai_penalty(ai_verdict_list):
    """
    Total CBHI deduction for AI-detected defects.

    Each non-healthy defect deducts a severity-based base amount
    (High: 15, Medium: 12, Low: 10) scaled by the verdict's confidence
    (0-100). Accepts either a single verdict dict or a list of them.

    Returns:
        float: Sum of all deductions (0.0 when everything is healthy).
    """
    # Base penalty values per severity.
    severity_map = {
        "High": 15.0,
        "Medium": 12.0,
        "Low": 10.0
    }

    # Normalize a lone verdict object into a one-element list.
    if isinstance(ai_verdict_list, dict):
        ai_verdict_list = [ai_verdict_list]

    total_penalty = 0.0
    for defect in ai_verdict_list:
        label = defect.get("faultLabel", defect.get("defect_name", "Healthy"))

        # BUGFIX: coerce to str before lowering so non-string labels
        # (e.g. None) cannot raise AttributeError.
        if str(label).lower() == "healthy":
            continue  # explicitly healthy => no penalty

        severity = defect.get("severity", "Low")
        confidence = float(defect.get("confidence", 0)) / 100.0

        # Dynamic penalty: base deduction scaled by confidence,
        # e.g. High (15) * 0.90 confidence = 13.5 points off.
        total_penalty += severity_map.get(severity, 10.0) * confidence

    return total_penalty
|
| 91 |
+
|
| 92 |
+
def calculate_phase_adjustment(phase_data):
    """
    Net CBHI adjustment from per-phase health verdicts.

    Healthy phases add a reward between +2.0 (0% confidence) and +3.0
    (100% confidence); any other status subtracts a penalty between
    -1.5 and -2.0 on the same confidence scale.
    """
    total = 0.0

    for verdict in phase_data.values():
        conf = float(verdict.get("confidence", 0)) / 100.0

        if verdict.get("status", "Not Healthy").lower() == "healthy":
            # Reward scales 2.0 -> 3.0 with confidence.
            total += 2.0 + (1.0 * conf)
        else:
            # Penalty scales 1.5 -> 2.0 with confidence.
            total -= 1.5 + (0.5 * conf)

    return total
|
| 116 |
+
|
| 117 |
+
# ==========================================
|
| 118 |
+
# MAIN COMPUTE FUNCTION (UPDATED)
|
| 119 |
+
# ==========================================
|
| 120 |
+
|
| 121 |
+
def compute_cbhi(kpis_list, ai_data=None, phase_data=None):
    """
    Compute the Circuit Breaker Health Index (CBHI).

    Combines weighted per-KPI band scores with a penalty for AI-detected
    defects and a reward/penalty per analyzed phase, then clamps the
    result so the displayed score never reaches an absolute 0 or 100.

    Args:
        kpis_list (list[dict]): KPI items, each with 'name' and 'value'.
        ai_data: AI verdicts — accepts {'aiVerdict': dict-or-list}, a bare
            verdict dict, or a list of verdict dicts.
        phase_data (dict | None): Per-phase {'status', 'confidence'} map.

    Returns:
        int: Final CBHI (fractional part truncated toward zero).
    """
    # 1. Parse KPIs into a name -> value dictionary
    kpi_dict = {item['name']: item['value'] for item in kpis_list}

    # 2. Compute individual KPI scores
    def get_val(name):
        # Missing/None KPIs default to 0.0, which scores poorly for most bands.
        v = kpi_dict.get(name)
        return v if v is not None else 0.0

    # calculate_score(value, band_min, band_max, out_of_band_buffer)
    s_dlro = calculate_score(get_val("DLRO Value"), 20, 100, 50)
    s_peak = calculate_score(get_val("Peak Resistance"), 80, 150, 200)
    s_travel = calculate_score(get_val("Contact Travel Distance"), 150, 200, 30)
    s_speed = calculate_score(get_val("Contact Speed"), 2.0, 6.0, 1.5)
    s_open = calculate_score(get_val("Opening Time"), 20, 40, 20)
    s_close = calculate_score(get_val("Closing Time"), 70, 110, 20)
    s_mw = calculate_score(get_val("Main Wipe"), 10, 20, 5)
    s_aw = calculate_score(get_val("Arc Wipe"), 15, 25, 5)

    # Coil health: close coil plus the stronger of the two trip coils.
    c_close = get_val("Peak Close Coil Current")
    c_trip = max(get_val("Peak Trip Coil 1 Current"), get_val("Peak Trip Coil 2 Current"))
    s_coil_c = calculate_score(c_close, 1.0, 7.0, 5.0)
    s_coil_t = calculate_score(c_trip, 1.0, 7.0, 5.0)
    s_coil = (s_coil_c + s_coil_t) / 2.0

    s_temp = calculate_score(get_val("Ambient Temperature"), 10, 40, 30)

    # 3. Store scores under the same keys as WEIGHTS
    scores = {
        "dlro": s_dlro,
        "peak_resistance": s_peak,
        "travel": s_travel,
        "speed": s_speed,
        "open_time": s_open,
        "close_time": s_close,
        "main_wipe": s_mw,
        "arc_wipe": s_aw,
        "coil_current": s_coil,
        "temp": s_temp
    }

    # 4. Base weighted score (WEIGHTS sums to 1.0, so this stays 0-100)
    base_cbhi = sum(scores[key] * WEIGHTS[key] for key in scores)

    # 5. AI verdict penalty
    ai_penalty = 0.0
    if ai_data:
        # Extract verdicts from ai_data robustly (several shapes accepted)
        defects = []
        if isinstance(ai_data, dict):
            if "aiVerdict" in ai_data:
                verdicts = ai_data["aiVerdict"]
                if isinstance(verdicts, dict):
                    defects = [verdicts]
                elif isinstance(verdicts, list):
                    defects = verdicts
            else:
                defects = [ai_data]
        elif isinstance(ai_data, list):
            defects = ai_data
        # Only penalize non-healthy verdicts.
        # BUGFIX: str() must wrap the raw label *before* .lower() — the old
        # code called .lower() first, which raised AttributeError for any
        # non-string label (e.g. None) and made the str() wrapper useless.
        non_healthy_defects = [
            d for d in defects
            if str(d.get("faultLabel", d.get("defect_name", "Healthy"))).lower() != "healthy"
        ]
        if non_healthy_defects:
            ai_penalty = calculate_ai_penalty(non_healthy_defects)

    # 6. Phase adjustments
    phase_adj = 0.0
    if phase_data:
        phase_adj = calculate_phase_adjustment(phase_data)

    # 7. Final aggregation
    final_score = base_cbhi - ai_penalty + phase_adj

    # 8. Clamp to [3.33, 98.67] so the UI never shows exactly 0 or 100
    #    (the old comment claimed 0-100, which contradicted the code).
    final_score = max(3.33, min(98.67, final_score))

    # round(x, 2) keeps two decimals, then int() truncates toward zero —
    # preserved from the original behavior.
    return int(round(final_score, 2))
|
core/calculators/kpi.py
ADDED
|
@@ -0,0 +1,307 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Previous Name: analysis/kpi_calculator.py
|
| 2 |
+
import json
|
| 3 |
+
import math
|
| 4 |
+
import numpy as np
|
| 5 |
+
import pandas as pd
|
| 6 |
+
import requests
|
| 7 |
+
|
| 8 |
+
# =========================
|
| 9 |
+
# 1) Weather (optional)
|
| 10 |
+
# =========================
|
| 11 |
+
def get_ambient_temperature_pune(timeout: int = 3, fallback_c: float = 28.0) -> float:
    """Fetch the current ambient temperature for Pune (°C) from Open-Meteo.

    Best-effort: any network, HTTP, or payload failure returns ``fallback_c``
    so KPI computation never blocks on weather availability.

    Args:
        timeout: HTTP request timeout in seconds.
        fallback_c: Temperature (°C) returned when the API call fails.

    Returns:
        Current ambient temperature in °C.
    """
    try:
        lat, lon = 18.52, 73.86
        # BUGFIX: the query parameter had been corrupted to "¤t_weather=true"
        # ("&curren" collapsed into the ¤ HTML entity). Without the correct
        # "current_weather=true" parameter the API response has no
        # "current_weather" key and this function always hit the fallback.
        url = (
            "https://api.open-meteo.com/v1/forecast"
            f"?latitude={lat}&longitude={lon}&current_weather=true"
        )
        resp = requests.get(url, timeout=timeout)
        resp.raise_for_status()  # treat 4xx/5xx as failure -> fallback
        return float(resp.json()["current_weather"]["temperature"])
    except (requests.RequestException, KeyError, TypeError, ValueError):
        # Narrowed from bare `except Exception`: network/HTTP errors,
        # missing keys, and unparsable payloads all yield the fallback.
        return float(fallback_c)
|
| 19 |
+
|
| 20 |
+
# =========================
|
| 21 |
+
# 2) Helpers
|
| 22 |
+
# =========================
|
| 23 |
+
def rolling_smooth(x: np.ndarray, window: int = 7) -> np.ndarray:
    """Smooth *x* with a centered rolling median.

    The median is robust to isolated spikes. Edge positions where the full
    window does not fit (NaN in the rolling result) are back-filled then
    forward-filled from the nearest valid value; any residue falls back to
    the raw sample.
    """
    raw = pd.Series(x)
    smoothed = (
        raw.rolling(window=window, center=True)
        .median()
        .bfill()
        .ffill()
        .fillna(raw)
    )
    return smoothed.values
|
| 29 |
+
|
| 30 |
+
def first_time_above(t: np.ndarray, y: np.ndarray, thr: float) -> float:
    """Return the first time at which y rises across thr (linear interpolation).

    If y already starts at or above thr, returns t[0]; if no crossing exists
    (or the series is empty), returns NaN.
    """
    for idx, (y_prev, y_cur) in enumerate(zip(y[:-1], y[1:]), start=1):
        if not (y_prev < thr <= y_cur):
            continue
        rise = y_cur - y_prev
        span = t[idx] - t[idx - 1]
        # Degenerate segment: no slope/time to interpolate over.
        if rise == 0 or span == 0:
            return float(t[idx])
        return float(t[idx - 1] + (thr - y_prev) / rise * span)
    if len(y) and y[0] >= thr:
        return float(t[0])
    return math.nan
|
| 43 |
+
|
| 44 |
+
def crossing_time(t: np.ndarray, y: np.ndarray, thr: float, direction: str) -> float:
    """Return the time y crosses thr in the given direction, linearly interpolated.

    direction is "below" (falling across thr) or "above" (rising across thr);
    any other value, or no crossing, yields NaN.
    """
    for i in range(1, len(y)):
        y_prev, y_cur = y[i - 1], y[i]
        if direction == "below":
            crossed = y_prev > thr >= y_cur
        elif direction == "above":
            crossed = y_prev < thr <= y_cur
        else:
            crossed = False
        if not crossed:
            continue
        span = t[i] - t[i - 1]
        rise = y_cur - y_prev
        # Degenerate segment: fall back to the right-hand sample time.
        if rise == 0 or span == 0:
            return float(t[i])
        return float(t[i - 1] + (thr - y_prev) / rise * span)
    return math.nan
|
| 62 |
+
|
| 63 |
+
def interp_series_at(t: np.ndarray, y: np.ndarray, t_query: float) -> float:
    """Linearly interpolate y(t) at t_query.

    Queries outside the time range clamp to the first/last sample; a NaN
    query returns NaN. Assumes t is sorted ascending (searchsorted contract).
    """
    if np.isnan(t_query):
        return math.nan
    pos = np.searchsorted(t, t_query)
    if pos <= 0:
        return float(y[0])
    if pos >= len(t):
        return float(y[-1])
    t_lo, t_hi = t[pos - 1], t[pos]
    # Duplicate timestamps: no span to interpolate across.
    if t_hi == t_lo:
        return float(y[pos - 1])
    y_lo, y_hi = y[pos - 1], y[pos]
    frac = (t_query - t_lo) / (t_hi - t_lo)
    return float(y_lo + frac * (y_hi - y_lo))
|
| 76 |
+
|
| 77 |
+
# =========================
|
| 78 |
+
# 3) KPI computation
|
| 79 |
+
# =========================
|
| 80 |
+
def analyze_breaker_data(
    df: "pd.DataFrame",
    coil_threshold: float = 0.5,
    travel_edge_mm: float = 2.0,
    arc_end_pct_open: float = 0.95,
    dlro_margin_ms: float = 10.0,
    ambient_fallback_c: float = 28.0,
):
    """Compute circuit-breaker KPIs from one timing-test time series.

    Expects `df` with columns Time_ms, Resistance, Travel, Close_Coil,
    Trip_Coil_1, Trip_Coil_2 (column names are whitespace-stripped first).
    Units are inferred from the emitted KPI units: time in ms, resistance in
    µΩ, travel in mm, coil currents in A — TODO confirm against the data
    acquisition source.

    Args:
        df: Input test record (one operation cycle).
        coil_threshold: Current level treated as "coil energized" (same units
            as the coil columns).
        travel_edge_mm: Offset above minimum travel used to detect the start
            of closing motion.
        arc_end_pct_open: Fraction of the open-state resistance baseline that
            marks the end of arcing after contact parting.
        dlro_margin_ms: Margin trimmed from both ends of the closed interval
            before taking the median resistance (DLRO).
        ambient_fallback_c: Fallback ambient temperature (°C) if the weather
            API is unavailable.

    Returns:
        ``{"kpis": [...]}`` where each entry is ``{"name", "value", "unit"}``;
        values that could not be determined are ``None``.
    """

    df.columns = [c.strip() for c in df.columns]

    # Extract series as floats
    t = df["Time_ms"].values.astype(float)
    R = df["Resistance"].values.astype(float)
    travel = df["Travel"].values.astype(float)
    close_coil = df["Close_Coil"].values.astype(float)
    trip1 = df["Trip_Coil_1"].values.astype(float)
    trip2 = df["Trip_Coil_2"].values.astype(float)

    # Basic travel stats
    travel_min = float(np.min(travel))
    travel_max = float(np.max(travel))

    # Define open/closed masks (within 5 mm of the travel extremes)
    open_mask = travel <= (travel_min + 5.0)  # near minimum (open)
    closed_mask = travel >= (travel_max - 5.0)  # near maximum (closed)

    # Resistance baselines; fall back to fixed slices of the record if a mask
    # is somehow empty (first 20% ~ open, middle 40% ~ closed).
    R_open_baseline = float(np.median(R[open_mask])) if np.any(open_mask) else float(np.median(R[:max(1, int(len(R)*0.2))]))
    R_closed_baseline = float(np.median(R[closed_mask])) if np.any(closed_mask) else float(np.median(R[int(len(R)*0.3):int(len(R)*0.7)]))

    # Threshold for state transition (midpoint between open/closed baselines)
    R_mid = (R_open_baseline + R_closed_baseline) / 2.0

    # Coil energization times: a 3-sample centered rolling max bridges brief
    # dropouts before threshold detection.
    close_sm = pd.Series(close_coil).rolling(window=3, center=True).max().bfill().ffill().values
    trip_sm = pd.Series(trip1).rolling(window=3, center=True).max().bfill().ffill().values

    T_close_cmd = first_time_above(t, close_sm, coil_threshold)
    T_trip1_cmd = first_time_above(t, trip_sm, coil_threshold)

    # Contact make / part: look for the resistance crossing R_mid only after
    # the corresponding coil command (whole record if command not found).
    post_close_mask = t >= T_close_cmd if not math.isnan(T_close_cmd) else np.ones_like(t, dtype=bool)
    T_make = crossing_time(t[post_close_mask], R[post_close_mask], R_mid, direction="below")

    post_trip_mask = t >= T_trip1_cmd if not math.isnan(T_trip1_cmd) else np.ones_like(t, dtype=bool)
    T_part = crossing_time(t[post_trip_mask], R[post_trip_mask], R_mid, direction="above")

    # Travel at make/part
    travel_at_make = interp_series_at(t, travel, T_make)
    travel_at_part = interp_series_at(t, travel, T_part)

    # Main wipe: over-travel beyond the contact-make position
    main_wipe_mm = travel_max - travel_at_make if not math.isnan(travel_at_make) else math.nan

    # Arc wipe: travel between contact part and the end of arcing (resistance
    # reaching arc_end_pct_open of the open baseline).
    R_arc_end_thr = R_open_baseline * arc_end_pct_open
    after_part_mask = t >= T_part if not math.isnan(T_part) else np.ones_like(t, dtype=bool)
    T_arc_end = crossing_time(t[after_part_mask], R[after_part_mask], R_arc_end_thr, direction="above")
    travel_at_arc_end = interp_series_at(t, travel, T_arc_end)
    arc_wipe_mm = (travel_at_part - travel_at_arc_end) if (not math.isnan(travel_at_part) and not math.isnan(travel_at_arc_end)) else math.nan
    # NOTE: `nan < 0` is False in Python, so the isnan guard is redundant but
    # harmless; sign is dropped because only the wipe magnitude is reported.
    if arc_wipe_mm < 0 and not math.isnan(arc_wipe_mm):
        arc_wipe_mm = abs(arc_wipe_mm)

    # Contact travel distance (full stroke)
    contact_travel_distance_mm = travel_max - travel_min

    # Motion segments and speeds; mm/ms is numerically equal to m/s.
    travel_sm = rolling_smooth(travel, window=7)
    dt = np.gradient(t)
    dy = np.gradient(travel_sm)
    velocity_ms = np.where(dt != 0, dy / dt, 0.0)

    # Closing segment: from first motion past travel_edge_mm (after the close
    # command when available) to within 1 mm of fully closed.
    if not math.isnan(T_close_cmd):
        start_idx = np.argmax((t >= T_close_cmd) & (travel > (travel_min + travel_edge_mm)))
    else:
        start_idx = np.argmax(travel > (travel_min + travel_edge_mm))
    end_candidates = np.where(travel >= (travel_max - 1.0))[0]
    end_idx = end_candidates[0] if len(end_candidates) else len(t)-1
    closing_time_window_ms = t[end_idx] - t[start_idx] if end_idx > start_idx else math.nan
    closing_distance_mm = travel[end_idx] - travel[start_idx] if end_idx > start_idx else math.nan
    closing_speed_avg_ms = (closing_distance_mm / closing_time_window_ms) if (not math.isnan(closing_time_window_ms) and closing_time_window_ms > 0) else math.nan
    closing_speed_peak_ms = float(np.max(velocity_ms[start_idx:end_idx+1])) if end_idx > start_idx else math.nan

    # Opening segment: from the first decreasing-travel sample (after the trip
    # command when available) to within 1 mm of fully open.
    if not math.isnan(T_trip1_cmd):
        dec = np.r_[False, np.diff(travel) < 0]
        opening_start_idx = np.argmax((t >= T_trip1_cmd) & dec)
    else:
        opening_start_idx = np.argmax(np.r_[False, np.diff(travel) < 0])
    open_end_candidates = np.where(travel <= (travel_min + 1.0))[0]
    opening_end_idx = open_end_candidates[-1] if len(open_end_candidates) else len(t)-1
    opening_time_window_ms = t[opening_end_idx] - t[opening_start_idx] if opening_end_idx > opening_start_idx else math.nan
    opening_distance_mm = travel[opening_start_idx] - travel[opening_end_idx] if opening_end_idx > opening_start_idx else math.nan
    opening_speed_avg_ms = (opening_distance_mm / opening_time_window_ms) if (not math.isnan(opening_time_window_ms) and opening_time_window_ms > 0) else math.nan
    # NOTE(review): travel decreases during opening, so velocity_ms is
    # negative here and np.max picks the least-negative sample, not the
    # fastest opening speed — confirm intent (value is currently unused).
    opening_speed_peak_ms = float(np.max(velocity_ms[opening_start_idx:opening_end_idx+1])) if opening_end_idx > opening_start_idx else math.nan

    # DLRO: median resistance over the closed interval, trimmed by
    # dlro_margin_ms on both sides to avoid make/part transients.
    start_closed = (T_make + dlro_margin_ms) if not math.isnan(T_make) else t[0]
    end_closed = (T_part - dlro_margin_ms) if not math.isnan(T_part) else t[-1]
    closed_interval_mask = (t >= start_closed) & (t <= end_closed)
    if np.any(closed_interval_mask):
        DLRO_uohm = float(np.median(R[closed_interval_mask]))
    else:
        DLRO_uohm = float(np.median(R[closed_mask])) if np.any(closed_mask) else float(np.median(R))

    # --- ROBUST PEAK RESISTANCE LOGIC ---
    # Peak Resistance should be the maximum value during actual conduction (when contacts are touching).
    # The "open baseline" (850 in this case) represents infinite/saturated resistance and should be excluded.
    # Strategy:
    # 1. Identify the open baseline as the most frequent high value (mode of upper percentile)
    # 2. Find peak in the conduction range (values significantly below open baseline)

    # Find the open baseline (saturation value) - use upper 10% of data
    R_sorted = np.sort(R)
    upper_10pct_idx = int(len(R_sorted) * 0.90)
    upper_values = R_sorted[upper_10pct_idx:]

    # If upper values are relatively constant (std < 5% of mean), treat as saturation
    if len(upper_values) > 0:
        upper_mean = np.mean(upper_values)
        upper_std = np.std(upper_values)

        if upper_std < (upper_mean * 0.05):  # Very stable upper region = saturation
            saturation_value = upper_mean
        else:
            saturation_value = R_open_baseline
    else:
        saturation_value = R_open_baseline

    # Define conduction range: values at least 10% below saturation
    conduction_threshold = saturation_value * 0.90

    # Find all resistance values in the conduction range
    conduction_mask = R < conduction_threshold

    if np.any(conduction_mask):
        peak_resistance_uohm = float(np.max(R[conduction_mask]))
    else:
        # Fallback: use median of lower 50% of data
        lower_half_idx = int(len(R_sorted) * 0.50)
        peak_resistance_uohm = float(np.max(R_sorted[:lower_half_idx])) if lower_half_idx > 0 else float(np.median(R))
    # -------------------------------------

    # Peak coil currents
    peak_close_coil_A = float(np.max(close_coil))
    peak_trip1_coil_A = float(np.max(trip1))
    peak_trip2_coil_A = float(np.max(trip2))

    # Opening/Closing times: command-to-make and command-to-part delays
    closing_time_ms = (T_make - T_close_cmd) if (not math.isnan(T_make) and not math.isnan(T_close_cmd)) else math.nan
    opening_time_ms = (T_part - T_trip1_cmd) if (not math.isnan(T_part) and not math.isnan(T_trip1_cmd)) else math.nan

    # Ambient temperature (live lookup with fallback)
    ambient_temp_c = get_ambient_temperature_pune(fallback_c=ambient_fallback_c)

    # Contact Speed: average of the available closing/opening average speeds
    contact_speed_ms = None
    if not math.isnan(closing_speed_avg_ms) and not math.isnan(opening_speed_avg_ms):
        contact_speed_ms = (closing_speed_avg_ms + opening_speed_avg_ms) / 2.0
    elif not math.isnan(closing_speed_avg_ms):
        contact_speed_ms = closing_speed_avg_ms
    elif not math.isnan(opening_speed_avg_ms):
        contact_speed_ms = opening_speed_avg_ms

    # Assemble KPI payload; NaN measurements are exported as None.
    kpis = [
        {"name": "Closing Time", "value": float(round(closing_time_ms, 2)) if not math.isnan(closing_time_ms) else None, "unit": "ms"},
        {"name": "Opening Time", "value": float(round(opening_time_ms, 2)) if not math.isnan(opening_time_ms) else None, "unit": "ms"},
        {"name": "DLRO Value", "value": float(round(DLRO_uohm, 2)), "unit": "µΩ"},
        {"name": "Peak Resistance", "value": float(round(peak_resistance_uohm, 2)), "unit": "µΩ"},
        {"name": "Main Wipe", "value": float(round(main_wipe_mm, 2)) if not math.isnan(main_wipe_mm) else None, "unit": "mm"},
        {"name": "Arc Wipe", "value": float(round(arc_wipe_mm, 2)) if not math.isnan(arc_wipe_mm) else None, "unit": "mm"},
        {"name": "Contact Travel Distance", "value": float(round(contact_travel_distance_mm, 2)), "unit": "mm"},
        {"name": "Contact Speed", "value": float(round(contact_speed_ms, 2)) if contact_speed_ms is not None else None, "unit": "m/s"},
        {"name": "Peak Close Coil Current", "value": float(round(peak_close_coil_A, 2)), "unit": "A"},
        {"name": "Peak Trip Coil 1 Current", "value": float(round(peak_trip1_coil_A, 2)), "unit": "A"},
        {"name": "Peak Trip Coil 2 Current", "value": float(round(peak_trip2_coil_A, 2)), "unit": "A"},
        {"name": "Ambient Temperature", "value": float(round(ambient_temp_c, 1)), "unit": "°C"},
    ]

    return {"kpis": kpis}
|
| 262 |
+
|
| 263 |
+
# ==========================================
|
| 264 |
+
# 3. MAIN INTERFACE
|
| 265 |
+
# ==========================================
|
| 266 |
+
|
| 267 |
+
def calculate_kpis(df):
    """
    Main entry point. Calculates KPIs.
    Returns a dictionary containing KPIs in the flat format expected by app.py.
    """
    # Display-name -> flat-key mapping expected by app.py.
    key_map = {
        "Closing Time": "closing_time",
        "Opening Time": "opening_time",
        "DLRO Value": "dlro",
        "Peak Resistance": "peak_resistance",
        "Main Wipe": "main_wipe",
        "Arc Wipe": "arc_wipe",
        "Contact Travel Distance": "contact_travel",
        "Contact Speed": "contact_speed",
        "Peak Close Coil Current": "peak_close_coil",
        "Peak Trip Coil 1 Current": "peak_trip_coil_1",
        "Peak Trip Coil 2 Current": "peak_trip_coil_2",
        "Ambient Temperature": "ambient_temp"
    }

    analysis = analyze_breaker_data(df)

    # Flatten the KPI list; missing (None) measurements become 0.0.
    flat_kpis = {
        key_map[entry['name']]: (entry['value'] if entry['value'] is not None else 0.0)
        for entry in analysis['kpis']
        if entry['name'] in key_map
    }

    # Add default SF6 pressure as it's not in the new logic but used in app
    flat_kpis["sf6_pressure"] = 7.0

    return {
        "kpis": flat_kpis
    }
|
core/calculators/rul.py
ADDED
|
@@ -0,0 +1,500 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Previous Name: analysis/rul_calculator.py
|
| 2 |
+
import math
|
| 3 |
+
from typing import Dict, Any
|
| 4 |
+
|
| 5 |
+
def calculate_rul_and_uncertainty(kpis: Dict, cbhi_score: float, ai_verdict: Dict, phase_data: Dict) -> Dict:
|
| 6 |
+
"""
|
| 7 |
+
Optimized RUL Calculator with tight bounds and defect-specific degradation.
|
| 8 |
+
|
| 9 |
+
Returns:
|
| 10 |
+
Dictionary with only 'rulEstimate' and 'uncertainty'
|
| 11 |
+
"""
|
| 12 |
+
|
| 13 |
+
# =========================================================================
|
| 14 |
+
# 1. INDUSTRIAL CONSTANTS (IEEE C37.10 / IEC 62271-100)
|
| 15 |
+
# =========================================================================
|
| 16 |
+
MAX_CYCLES = 10000 # Class M2 rated life
|
| 17 |
+
HEALTHY_BASELINE = 8500 # Healthy breaker typical RUL
|
| 18 |
+
MIN_CYCLES = 100 # Absolute floor
|
| 19 |
+
|
| 20 |
+
# Defect Class Degradation Multipliers (1.0 = no impact, lower = worse)
|
| 21 |
+
# These directly reduce RUL based on detected fault type
|
| 22 |
+
DEFECT_MULTIPLIERS = {
|
| 23 |
+
"healthy": 1.0,
|
| 24 |
+
"main contact wear": 0.35,
|
| 25 |
+
"arcing contact wear": 0.40,
|
| 26 |
+
"main contact misalignment": 0.45,
|
| 27 |
+
"arcing contact misalignment": 0.50,
|
| 28 |
+
"operating mechanism malfunction": 0.55,
|
| 29 |
+
"damping system fault": 0.60,
|
| 30 |
+
"sf6 pressure leakage": 0.30,
|
| 31 |
+
"linkage obstruction": 0.50,
|
| 32 |
+
"fixed contact damage": 0.40,
|
| 33 |
+
"close coil damage": 0.25,
|
| 34 |
+
"trip coil damage": 0.20,
|
| 35 |
+
"unknown": 0.70
|
| 36 |
+
}
|
| 37 |
+
|
| 38 |
+
# Severity-based additional reduction
|
| 39 |
+
SEVERITY_REDUCTION = {
|
| 40 |
+
"Critical": 0.50,
|
| 41 |
+
"High": 0.70,
|
| 42 |
+
"Medium": 0.85,
|
| 43 |
+
"Low": 0.95,
|
| 44 |
+
"None": 1.0
|
| 45 |
+
}
|
| 46 |
+
|
| 47 |
+
# KPI Critical Thresholds (beyond these = severe degradation)
|
| 48 |
+
KPI_CRITICAL = {
|
| 49 |
+
"DLRO Value": {"healthy_max": 70, "warning": 150, "critical": 250, "severe": 350},
|
| 50 |
+
"Peak Resistance": {"healthy_max": 400, "warning": 600, "critical": 800, "severe": 1000},
|
| 51 |
+
"Closing Time": {"healthy_max": 100, "warning": 120, "critical": 140, "severe": 160},
|
| 52 |
+
"Opening Time": {"healthy_max": 45, "warning": 55, "critical": 70, "severe": 90},
|
| 53 |
+
"Contact Speed": {"healthy_min": 4.0, "healthy_max": 6.5, "warning_dev": 1.0, "critical_dev": 2.0},
|
| 54 |
+
"Peak Trip Coil 1 Current": {"healthy_min": 3.0, "critical_min": 1.5, "zero": 0.5},
|
| 55 |
+
"Peak Trip Coil 2 Current": {"healthy_min": 3.0, "critical_min": 1.5, "zero": 0.5},
|
| 56 |
+
"Peak Close Coil Current": {"healthy_min": 3.0, "critical_min": 1.5, "zero": 0.5},
|
| 57 |
+
}
|
| 58 |
+
|
| 59 |
+
# =========================================================================
|
| 60 |
+
# 2. HELPER FUNCTIONS
|
| 61 |
+
# =========================================================================
|
| 62 |
+
|
| 63 |
+
def clamp(val: float, min_val: float = 0.0, max_val: float = 1.0) -> float:
|
| 64 |
+
return max(min_val, min(max_val, val))
|
| 65 |
+
|
| 66 |
+
def safe_get(d: Any, *keys, default=None):
|
| 67 |
+
for key in keys:
|
| 68 |
+
if isinstance(d, dict):
|
| 69 |
+
d = d.get(key, default)
|
| 70 |
+
elif isinstance(d, list) and isinstance(key, int) and 0 <= key < len(d):
|
| 71 |
+
d = d[key]
|
| 72 |
+
else:
|
| 73 |
+
return default
|
| 74 |
+
return d if d is not None else default
|
| 75 |
+
|
| 76 |
+
def get_defect_multiplier(fault_label: str) -> float:
|
| 77 |
+
"""Get degradation multiplier based on fault label."""
|
| 78 |
+
label_lower = fault_label.lower().strip()
|
| 79 |
+
for defect, multiplier in DEFECT_MULTIPLIERS.items():
|
| 80 |
+
if defect in label_lower or label_lower in defect:
|
| 81 |
+
return multiplier
|
| 82 |
+
# Partial matches
|
| 83 |
+
if "wear" in label_lower:
|
| 84 |
+
return 0.38
|
| 85 |
+
if "misalignment" in label_lower:
|
| 86 |
+
return 0.48
|
| 87 |
+
if "coil" in label_lower:
|
| 88 |
+
return 0.22
|
| 89 |
+
if "mechanism" in label_lower or "damping" in label_lower:
|
| 90 |
+
return 0.55
|
| 91 |
+
if "sf6" in label_lower or "pressure" in label_lower or "leak" in label_lower:
|
| 92 |
+
return 0.30
|
| 93 |
+
return DEFECT_MULTIPLIERS.get("unknown", 0.70)
|
| 94 |
+
|
| 95 |
+
# =========================================================================
|
| 96 |
+
# 3. EXTRACT DATA
|
| 97 |
+
# =========================================================================
|
| 98 |
+
|
| 99 |
+
# Extract KPIs
|
| 100 |
+
kpis_list = kpis.get("kpis", kpis) if isinstance(kpis, dict) else kpis
|
| 101 |
+
if isinstance(kpis_list, dict):
|
| 102 |
+
kpis_list = [{"name": k, "value": v} for k, v in kpis_list.items()]
|
| 103 |
+
kpi_map = {k.get('name', ''): float(k.get('value', 0)) for k in kpis_list if isinstance(k, dict)}
|
| 104 |
+
|
| 105 |
+
# Extract AI Verdict
|
| 106 |
+
ai_verdict_data = safe_get(ai_verdict, "aiVerdict", default={})
|
| 107 |
+
fault_label = safe_get(ai_verdict_data, "faultLabel", default="Unknown")
|
| 108 |
+
ai_confidence = float(safe_get(ai_verdict_data, "confidence", default=50)) / 100.0
|
| 109 |
+
ai_severity = safe_get(ai_verdict_data, "severity", default="Low")
|
| 110 |
+
severity_reason = safe_get(ai_verdict_data, "severityReason", default="")
|
| 111 |
+
|
| 112 |
+
# Extract CBHI components
|
| 113 |
+
cbhi_components = safe_get(ai_verdict, "cbhi", "overall_health_assessment", default={})
|
| 114 |
+
|
| 115 |
+
# Extract Phase Analysis
|
| 116 |
+
phases_list = safe_get(phase_data, "phaseWiseAnalysis", default=[])
|
| 117 |
+
if not isinstance(phases_list, list):
|
| 118 |
+
phases_list = []
|
| 119 |
+
|
| 120 |
+
# Normalize CBHI
|
| 121 |
+
cbhi_normalized = clamp(float(cbhi_score) / 100.0)
|
| 122 |
+
|
| 123 |
+
# =========================================================================
|
| 124 |
+
# 4. DETERMINE IF HEALTHY OR FAULTY
|
| 125 |
+
# =========================================================================
|
| 126 |
+
|
| 127 |
+
is_healthy = fault_label.lower().strip() == "healthy"
|
| 128 |
+
|
| 129 |
+
# =========================================================================
|
| 130 |
+
# 5. CALCULATE ELECTRICAL WEAR FACTOR (Contact & Resistance Issues)
|
| 131 |
+
# =========================================================================
|
| 132 |
+
|
| 133 |
+
electrical_penalties = []
|
| 134 |
+
|
| 135 |
+
# DLRO Analysis (most critical for electrical wear)
|
| 136 |
+
dlro = kpi_map.get("DLRO Value", 50)
|
| 137 |
+
if dlro <= 70:
|
| 138 |
+
electrical_penalties.append(0.0) # Healthy
|
| 139 |
+
elif dlro <= 150:
|
| 140 |
+
electrical_penalties.append(0.15 + 0.15 * (dlro - 70) / 80)
|
| 141 |
+
elif dlro <= 250:
|
| 142 |
+
electrical_penalties.append(0.30 + 0.25 * (dlro - 150) / 100)
|
| 143 |
+
elif dlro <= 350:
|
| 144 |
+
electrical_penalties.append(0.55 + 0.25 * (dlro - 250) / 100)
|
| 145 |
+
else:
|
| 146 |
+
electrical_penalties.append(0.80 + 0.15 * min((dlro - 350) / 150, 1.0))
|
| 147 |
+
|
| 148 |
+
# Peak Resistance Analysis
|
| 149 |
+
peak_r = kpi_map.get("Peak Resistance", 300)
|
| 150 |
+
if peak_r <= 400:
|
| 151 |
+
electrical_penalties.append(0.0)
|
| 152 |
+
elif peak_r <= 600:
|
| 153 |
+
electrical_penalties.append(0.10 + 0.15 * (peak_r - 400) / 200)
|
| 154 |
+
elif peak_r <= 800:
|
| 155 |
+
electrical_penalties.append(0.25 + 0.20 * (peak_r - 600) / 200)
|
| 156 |
+
else:
|
| 157 |
+
electrical_penalties.append(0.45 + 0.25 * min((peak_r - 800) / 400, 1.0))
|
| 158 |
+
|
| 159 |
+
# Coil Current Analysis (Trip Coil failures are CRITICAL)
|
| 160 |
+
trip1 = kpi_map.get("Peak Trip Coil 1 Current", 5.0)
|
| 161 |
+
trip2 = kpi_map.get("Peak Trip Coil 2 Current", 5.0)
|
| 162 |
+
close_coil = kpi_map.get("Peak Close Coil Current", 5.0)
|
| 163 |
+
|
| 164 |
+
# Trip Coil 1 penalty
|
| 165 |
+
if trip1 < 0.5:
|
| 166 |
+
electrical_penalties.append(0.70) # Critical - coil failed
|
| 167 |
+
elif trip1 < 1.5:
|
| 168 |
+
electrical_penalties.append(0.50)
|
| 169 |
+
elif trip1 < 3.0:
|
| 170 |
+
electrical_penalties.append(0.25)
|
| 171 |
+
else:
|
| 172 |
+
electrical_penalties.append(0.0)
|
| 173 |
+
|
| 174 |
+
# Trip Coil 2 penalty
|
| 175 |
+
if trip2 < 0.5:
|
| 176 |
+
electrical_penalties.append(0.60) # Critical - redundancy lost
|
| 177 |
+
elif trip2 < 1.5:
|
| 178 |
+
electrical_penalties.append(0.40)
|
| 179 |
+
elif trip2 < 3.0:
|
| 180 |
+
electrical_penalties.append(0.20)
|
| 181 |
+
else:
|
| 182 |
+
electrical_penalties.append(0.0)
|
| 183 |
+
|
| 184 |
+
# Close Coil penalty
|
| 185 |
+
if close_coil < 0.5:
|
| 186 |
+
electrical_penalties.append(0.50)
|
| 187 |
+
elif close_coil < 1.5:
|
| 188 |
+
electrical_penalties.append(0.30)
|
| 189 |
+
elif close_coil < 3.0:
|
| 190 |
+
electrical_penalties.append(0.15)
|
| 191 |
+
else:
|
| 192 |
+
electrical_penalties.append(0.0)
|
| 193 |
+
|
| 194 |
+
# Weighted electrical wear (DLRO has highest weight)
|
| 195 |
+
electrical_weights = [0.35, 0.20, 0.20, 0.15, 0.10] # DLRO, Peak_R, Trip1, Trip2, Close
|
| 196 |
+
electrical_wear = sum(p * w for p, w in zip(electrical_penalties, electrical_weights))
|
| 197 |
+
|
| 198 |
+
# =========================================================================
|
| 199 |
+
# 6. CALCULATE MECHANICAL WEAR FACTOR (Timing & Mechanism Issues)
|
| 200 |
+
# =========================================================================
|
| 201 |
+
|
| 202 |
+
mechanical_penalties = []
|
| 203 |
+
|
| 204 |
+
# Closing Time Analysis
|
| 205 |
+
close_time = kpi_map.get("Closing Time", 85)
|
| 206 |
+
if close_time <= 100:
|
| 207 |
+
mechanical_penalties.append(0.0)
|
| 208 |
+
elif close_time <= 120:
|
| 209 |
+
mechanical_penalties.append(0.15 + 0.15 * (close_time - 100) / 20)
|
| 210 |
+
elif close_time <= 140:
|
| 211 |
+
mechanical_penalties.append(0.30 + 0.25 * (close_time - 120) / 20)
|
| 212 |
+
else:
|
| 213 |
+
mechanical_penalties.append(0.55 + 0.25 * min((close_time - 140) / 40, 1.0))
|
| 214 |
+
|
| 215 |
+
# Opening Time Analysis
|
| 216 |
+
open_time = kpi_map.get("Opening Time", 35)
|
| 217 |
+
if open_time <= 45:
|
| 218 |
+
mechanical_penalties.append(0.0)
|
| 219 |
+
elif open_time <= 55:
|
| 220 |
+
mechanical_penalties.append(0.15 + 0.15 * (open_time - 45) / 10)
|
| 221 |
+
elif open_time <= 70:
|
| 222 |
+
mechanical_penalties.append(0.30 + 0.25 * (open_time - 55) / 15)
|
| 223 |
+
else:
|
| 224 |
+
mechanical_penalties.append(0.55 + 0.25 * min((open_time - 70) / 30, 1.0))
|
| 225 |
+
|
| 226 |
+
# Contact Speed Analysis
|
| 227 |
+
speed = kpi_map.get("Contact Speed", 5.0)
|
| 228 |
+
if 4.0 <= speed <= 6.5:
|
| 229 |
+
mechanical_penalties.append(0.0)
|
| 230 |
+
elif 3.0 <= speed <= 7.5:
|
| 231 |
+
deviation = max(abs(speed - 4.0), abs(speed - 6.5)) if speed < 4.0 or speed > 6.5 else 0
|
| 232 |
+
mechanical_penalties.append(0.15 + 0.15 * deviation)
|
| 233 |
+
else:
|
| 234 |
+
mechanical_penalties.append(0.50)
|
| 235 |
+
|
| 236 |
+
# Contact Travel Distance Analysis
|
| 237 |
+
travel = kpi_map.get("Contact Travel Distance", 550)
|
| 238 |
+
if 500 <= travel <= 600:
|
| 239 |
+
mechanical_penalties.append(0.0)
|
| 240 |
+
elif 450 <= travel <= 650:
|
| 241 |
+
mechanical_penalties.append(0.15)
|
| 242 |
+
else:
|
| 243 |
+
mechanical_penalties.append(0.35)
|
| 244 |
+
|
| 245 |
+
# Main/Arc Wipe Analysis
|
| 246 |
+
main_wipe = kpi_map.get("Main Wipe", 150)
|
| 247 |
+
arc_wipe = kpi_map.get("Arc Wipe", 15)
|
| 248 |
+
|
| 249 |
+
if main_wipe < 100 or main_wipe > 200:
|
| 250 |
+
mechanical_penalties.append(0.25)
|
| 251 |
+
else:
|
| 252 |
+
mechanical_penalties.append(0.0)
|
| 253 |
+
|
| 254 |
+
if arc_wipe < 10 or arc_wipe > 25:
|
| 255 |
+
mechanical_penalties.append(0.20)
|
| 256 |
+
else:
|
| 257 |
+
mechanical_penalties.append(0.0)
|
| 258 |
+
|
| 259 |
+
# Weighted mechanical wear
|
| 260 |
+
mech_weights = [0.25, 0.25, 0.20, 0.10, 0.10, 0.10]
|
| 261 |
+
mechanical_wear = sum(p * w for p, w in zip(mechanical_penalties, mech_weights))
|
| 262 |
+
|
| 263 |
+
# =========================================================================
|
| 264 |
+
# 7. CALCULATE CBHI COMPONENT IMPACT
|
| 265 |
+
# =========================================================================
|
| 266 |
+
|
| 267 |
+
component_multipliers = {
|
| 268 |
+
"Contacts (moving & arcing)": {"weight": 0.40, "High Risk": 0.35, "Medium Risk": 0.60, "Low Risk": 0.85, "Normal": 1.0},
|
| 269 |
+
"SF6 Gas Chamber": {"weight": 0.25, "High Risk": 0.30, "Medium Risk": 0.55, "Low Risk": 0.80, "Normal": 1.0},
|
| 270 |
+
"Operating Mechanism": {"weight": 0.20, "High Risk": 0.40, "Medium Risk": 0.65, "Low Risk": 0.85, "Normal": 1.0},
|
| 271 |
+
"Coil": {"weight": 0.15, "High Risk": 0.25, "Medium Risk": 0.50, "Low Risk": 0.80, "Normal": 1.0}
|
| 272 |
+
}
|
| 273 |
+
|
| 274 |
+
cbhi_factor = 0.0
|
| 275 |
+
total_weight = 0.0
|
| 276 |
+
|
| 277 |
+
for component, status in cbhi_components.items():
|
| 278 |
+
if component in component_multipliers:
|
| 279 |
+
config = component_multipliers[component]
|
| 280 |
+
weight = config["weight"]
|
| 281 |
+
multiplier = config.get(status, 0.70)
|
| 282 |
+
cbhi_factor += weight * multiplier
|
| 283 |
+
total_weight += weight
|
| 284 |
+
|
| 285 |
+
if total_weight > 0:
|
| 286 |
+
cbhi_factor = cbhi_factor / total_weight
|
| 287 |
+
else:
|
| 288 |
+
cbhi_factor = cbhi_normalized
|
| 289 |
+
|
| 290 |
+
# =========================================================================
|
| 291 |
+
# 8. CALCULATE PHASE CONFIDENCE IMPACT
|
| 292 |
+
# =========================================================================
|
| 293 |
+
|
| 294 |
+
phase_weights = {
|
| 295 |
+
"Pre-Contact Travel": 0.10,
|
| 296 |
+
"Arcing Contact Engagement & Arc Initiation": 0.25,
|
| 297 |
+
"Main Contact Conduction": 0.35,
|
| 298 |
+
"Main Contact Parting & Arc Elongation": 0.20,
|
| 299 |
+
"Final Open State": 0.10
|
| 300 |
+
}
|
| 301 |
+
|
| 302 |
+
phase_factor = 0.0
|
| 303 |
+
phase_total_weight = 0.0
|
| 304 |
+
critical_phase_failure = False
|
| 305 |
+
|
| 306 |
+
for phase in phases_list:
|
| 307 |
+
conf = float(safe_get(phase, "confidence", default=75))
|
| 308 |
+
name = safe_get(phase, "name", default="")
|
| 309 |
+
|
| 310 |
+
weight = 0.15 # Default weight
|
| 311 |
+
for pname, pw in phase_weights.items():
|
| 312 |
+
if pname.lower() in name.lower() or name.lower() in pname.lower():
|
| 313 |
+
weight = pw
|
| 314 |
+
break
|
| 315 |
+
|
| 316 |
+
# Normalize confidence to factor (0-1)
|
| 317 |
+
conf_factor = conf / 100.0
|
| 318 |
+
|
| 319 |
+
# Critical phase failure detection
|
| 320 |
+
if conf < 30:
|
| 321 |
+
critical_phase_failure = True
|
| 322 |
+
conf_factor *= 0.5 # Double penalty for critical failure
|
| 323 |
+
elif conf < 50:
|
| 324 |
+
conf_factor *= 0.75
|
| 325 |
+
|
| 326 |
+
phase_factor += weight * conf_factor
|
| 327 |
+
phase_total_weight += weight
|
| 328 |
+
|
| 329 |
+
if phase_total_weight > 0:
|
| 330 |
+
phase_factor = phase_factor / phase_total_weight
|
| 331 |
+
else:
|
| 332 |
+
phase_factor = 0.75
|
| 333 |
+
|
| 334 |
+
# =========================================================================
|
| 335 |
+
# 9. CALCULATE DEFECT-SPECIFIC DEGRADATION
|
| 336 |
+
# =========================================================================
|
| 337 |
+
|
| 338 |
+
# Get defect multiplier based on AI verdict
|
| 339 |
+
defect_multiplier = get_defect_multiplier(fault_label)
|
| 340 |
+
|
| 341 |
+
# Apply severity reduction
|
| 342 |
+
severity_factor = SEVERITY_REDUCTION.get(ai_severity, 0.85)
|
| 343 |
+
|
| 344 |
+
# Confidence-weighted defect impact
|
| 345 |
+
# Higher confidence = more trust in the defect diagnosis
|
| 346 |
+
confidence_weight = 0.3 + 0.7 * ai_confidence # Range: 0.3-1.0
|
| 347 |
+
|
| 348 |
+
if is_healthy:
|
| 349 |
+
# Healthy: Use baseline with minor adjustments
|
| 350 |
+
defect_impact = 1.0 - (1.0 - defect_multiplier) * (1.0 - ai_confidence) * 0.3
|
| 351 |
+
else:
|
| 352 |
+
# Faulty: Full defect impact weighted by confidence
|
| 353 |
+
defect_impact = defect_multiplier * confidence_weight + (1.0 - confidence_weight) * 0.6
|
| 354 |
+
|
| 355 |
+
defect_impact *= severity_factor
|
| 356 |
+
|
| 357 |
+
# =========================================================================
|
| 358 |
+
# 10. EXTRACT DEVIATION PERCENTAGE FROM SEVERITY REASON
|
| 359 |
+
# =========================================================================
|
| 360 |
+
|
| 361 |
+
import re
|
| 362 |
+
deviation_penalty = 0.0
|
| 363 |
+
if severity_reason:
|
| 364 |
+
percentages = re.findall(r'(\d+)%', severity_reason)
|
| 365 |
+
if percentages:
|
| 366 |
+
max_dev = max(int(p) for p in percentages)
|
| 367 |
+
if max_dev > 500:
|
| 368 |
+
deviation_penalty = 0.40
|
| 369 |
+
elif max_dev > 300:
|
| 370 |
+
deviation_penalty = 0.30
|
| 371 |
+
elif max_dev > 150:
|
| 372 |
+
deviation_penalty = 0.20
|
| 373 |
+
elif max_dev > 50:
|
| 374 |
+
deviation_penalty = 0.10
|
| 375 |
+
|
| 376 |
+
# =========================================================================
|
| 377 |
+
# 11. COMBINE ALL FACTORS INTO FINAL RUL
|
| 378 |
+
# =========================================================================
|
| 379 |
+
|
| 380 |
+
# Combined wear factor (electrical has higher weight for contact issues)
|
| 381 |
+
if "contact" in fault_label.lower() or "wear" in fault_label.lower():
|
| 382 |
+
combined_wear = 0.65 * electrical_wear + 0.35 * mechanical_wear
|
| 383 |
+
elif "mechanism" in fault_label.lower() or "damping" in fault_label.lower() or "linkage" in fault_label.lower():
|
| 384 |
+
combined_wear = 0.35 * electrical_wear + 0.65 * mechanical_wear
|
| 385 |
+
else:
|
| 386 |
+
combined_wear = 0.50 * electrical_wear + 0.50 * mechanical_wear
|
| 387 |
+
|
| 388 |
+
# Health factor combining all components
|
| 389 |
+
# Weights: Defect (35%), CBHI (25%), Wear (25%), Phase (15%)
|
| 390 |
+
health_factor = (
|
| 391 |
+
0.35 * defect_impact +
|
| 392 |
+
0.25 * cbhi_factor +
|
| 393 |
+
0.25 * (1.0 - combined_wear) +
|
| 394 |
+
0.15 * phase_factor
|
| 395 |
+
)
|
| 396 |
+
|
| 397 |
+
# Apply deviation penalty
|
| 398 |
+
health_factor *= (1.0 - deviation_penalty)
|
| 399 |
+
|
| 400 |
+
# Apply critical phase penalty
|
| 401 |
+
if critical_phase_failure:
|
| 402 |
+
health_factor *= 0.70
|
| 403 |
+
|
| 404 |
+
# Clamp health factor
|
| 405 |
+
health_factor = clamp(health_factor, 0.05, 1.0)
|
| 406 |
+
|
| 407 |
+
# Calculate base RUL
|
| 408 |
+
if is_healthy:
|
| 409 |
+
base_rul = HEALTHY_BASELINE * health_factor
|
| 410 |
+
else:
|
| 411 |
+
# Non-healthy: Start from reduced baseline
|
| 412 |
+
base_rul = (MAX_CYCLES * 0.6) * health_factor
|
| 413 |
+
|
| 414 |
+
# Apply Weibull-like degradation curve for acceleration
|
| 415 |
+
beta = 1.3 if is_healthy else 1.6 # Steeper curve for faulty
|
| 416 |
+
final_rul = base_rul * math.pow(health_factor, beta - 1)
|
| 417 |
+
|
| 418 |
+
# Ensure minimum floor
|
| 419 |
+
final_rul = max(MIN_CYCLES, min(MAX_CYCLES, final_rul))
|
| 420 |
+
|
| 421 |
+
# =========================================================================
|
| 422 |
+
# 12. CALCULATE TIGHT UNCERTAINTY BOUNDS
|
| 423 |
+
# =========================================================================
|
| 424 |
+
|
| 425 |
+
# Base uncertainty factors
|
| 426 |
+
uncertainty_components = []
|
| 427 |
+
|
| 428 |
+
# Factor 1: AI Confidence uncertainty (inversely proportional)
|
| 429 |
+
ai_uncertainty = (1.0 - ai_confidence) * 0.08
|
| 430 |
+
uncertainty_components.append(ai_uncertainty)
|
| 431 |
+
|
| 432 |
+
# Factor 2: Phase confidence variance
|
| 433 |
+
phase_confs = [float(safe_get(p, "confidence", default=75)) for p in phases_list]
|
| 434 |
+
if len(phase_confs) >= 2:
|
| 435 |
+
phase_variance = (max(phase_confs) - min(phase_confs)) / 100.0
|
| 436 |
+
uncertainty_components.append(phase_variance * 0.06)
|
| 437 |
+
|
| 438 |
+
# Factor 3: CBHI vs AI discrepancy
|
| 439 |
+
cbhi_health = cbhi_factor
|
| 440 |
+
ai_health = defect_impact
|
| 441 |
+
discrepancy = abs(cbhi_health - ai_health)
|
| 442 |
+
uncertainty_components.append(discrepancy * 0.05)
|
| 443 |
+
|
| 444 |
+
# Factor 4: KPI spread (how many KPIs are in warning/critical range)
|
| 445 |
+
warning_count = 0
|
| 446 |
+
for name, val in kpi_map.items():
|
| 447 |
+
if name == "DLRO Value" and val > 100:
|
| 448 |
+
warning_count += 1
|
| 449 |
+
if name == "Closing Time" and val > 110:
|
| 450 |
+
warning_count += 1
|
| 451 |
+
if name == "Opening Time" and val > 50:
|
| 452 |
+
warning_count += 1
|
| 453 |
+
if name in ["Peak Trip Coil 1 Current", "Peak Trip Coil 2 Current"] and val < 2.0:
|
| 454 |
+
warning_count += 1
|
| 455 |
+
uncertainty_components.append(warning_count * 0.025)
|
| 456 |
+
|
| 457 |
+
# Base irreducible uncertainty (5%)
|
| 458 |
+
base_uncertainty_ratio = 0.05
|
| 459 |
+
|
| 460 |
+
# Total uncertainty ratio
|
| 461 |
+
total_uncertainty_ratio = base_uncertainty_ratio + sum(uncertainty_components)
|
| 462 |
+
total_uncertainty_ratio = clamp(total_uncertainty_ratio, 0.08, 0.25) # Tight bounds: 8-25%
|
| 463 |
+
|
| 464 |
+
# Calculate uncertainty value
|
| 465 |
+
uncertainty_value = final_rul * total_uncertainty_ratio
|
| 466 |
+
|
| 467 |
+
# Round to sensible values
|
| 468 |
+
uncertainty_value = int(round(uncertainty_value / 25) * 25)
|
| 469 |
+
uncertainty_value = max(50, min(uncertainty_value, int(final_rul * 0.30)))
|
| 470 |
+
|
| 471 |
+
# =========================================================================
|
| 472 |
+
# 13. CALCULATE FINAL RANGE
|
| 473 |
+
# =========================================================================
|
| 474 |
+
|
| 475 |
+
estimated_cycles = int(round(final_rul / 10) * 10)
|
| 476 |
+
low_range = max(MIN_CYCLES, estimated_cycles - uncertainty_value)
|
| 477 |
+
high_range = min(MAX_CYCLES, estimated_cycles + uncertainty_value)
|
| 478 |
+
|
| 479 |
+
# Ensure reasonable gap (minimum 10% of estimate or 100 cycles)
|
| 480 |
+
min_gap = max(100, int(estimated_cycles * 0.10))
|
| 481 |
+
if high_range - low_range < min_gap:
|
| 482 |
+
mid = (low_range + high_range) // 2
|
| 483 |
+
low_range = max(MIN_CYCLES, mid - min_gap // 2)
|
| 484 |
+
high_range = min(MAX_CYCLES, mid + min_gap // 2)
|
| 485 |
+
|
| 486 |
+
# Safety checks
|
| 487 |
+
if low_range >= high_range:
|
| 488 |
+
low_range = max(MIN_CYCLES, high_range - min_gap)
|
| 489 |
+
|
| 490 |
+
# Ensure estimate is within range
|
| 491 |
+
estimated_cycles = clamp(estimated_cycles, low_range, high_range)
|
| 492 |
+
|
| 493 |
+
# =========================================================================
|
| 494 |
+
# 14. RETURN SIMPLIFIED OUTPUT
|
| 495 |
+
# =========================================================================
|
| 496 |
+
|
| 497 |
+
return {
|
| 498 |
+
"rulEstimate": f"{low_range}-{high_range} cycles",
|
| 499 |
+
"uncertainty": f"±{uncertainty_value} cycles"
|
| 500 |
+
}
|
core/engines/__pycache__/advanced_rules.cpython-313.pyc
ADDED
|
Binary file (20 kB). View file
|
|
|
core/engines/__pycache__/diagnostics.cpython-313.pyc
ADDED
|
Binary file (8.55 kB). View file
|
|
|
core/engines/__pycache__/rules.cpython-313.pyc
ADDED
|
Binary file (49.6 kB). View file
|
|
|
core/engines/__pycache__/rules.cpython-39.pyc
ADDED
|
Binary file (29.6 kB). View file
|
|
|
core/engines/advanced_rules.py
ADDED
|
@@ -0,0 +1,399 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Previous Name: analysis/engines/advanced_rule_engine.py
|
| 2 |
+
import numpy as np
|
| 3 |
+
import pandas as pd
|
| 4 |
+
import json
|
| 5 |
+
import sys
|
| 6 |
+
from scipy.signal import savgol_filter, find_peaks, welch
|
| 7 |
+
from scipy.stats import skew, kurtosis
|
| 8 |
+
from scipy.integrate import simpson
|
| 9 |
+
|
| 10 |
+
# Set UTF-8 encoding for console output
|
| 11 |
+
if sys.stdout.encoding != 'utf-8':
|
| 12 |
+
sys.stdout.reconfigure(encoding='utf-8')
|
| 13 |
+
|
| 14 |
+
class AdvancedDCRMEngine:
    """
    Top-Notch Advanced Rule-Based DCRM Engine
    =========================================
    Combines Physics-Based Signal Processing (Scipy) with Expert Heuristic Logic.

    Features:
    - 12-Class Defect Detection (Primary + Secondary)
    - Evidence-Based Scoring (0-100% Confidence)
    - Advanced Signal Processing:
        * Savitzky-Golay Filtering (Noise reduction without edge blurring)
        * FFT (Mechanical Chatter detection)
        * Peak Finding (Spike counting, Bounce detection)
        * Derivative Analysis (Jerk/Stutter detection)
        * Energy Integration (Arcing Ablation)
    """

    def __init__(self):
        # --- CONFIGURATION & THRESHOLDS ---

        # 1. Signal Processing Config
        self.SAVGOL_WINDOW = 11        # Window length for smoothing (must be odd)
        self.SAVGOL_POLYORDER = 2      # Polynomial order

        # 2. Physics Thresholds (Strict - Industry Standard)
        self.R_OPEN_THRESHOLD = 1_000_000   # µΩ
        self.R_MAIN_MAX_HEALTHY = 50.0      # µΩ
        self.R_MAIN_WARNING = 80.0          # µΩ
        self.R_MAIN_CRITICAL = 150.0        # µΩ

        # 3. Wear & Arcing Thresholds
        self.ARCING_SPIKE_CRITICAL = 8000       # µΩ
        self.ARCING_SPIKE_SEVERE = 5000         # µΩ
        self.ARCING_ENERGY_CRITICAL = 2000.0    # Joules

        # 4. Mechanical Thresholds
        self.BOUNCE_PROMINENCE = 500            # µΩ
        self.JERK_THRESHOLD = 500.0             # µΩ/ms^2
        self.FFT_CHATTER_POWER_THRESHOLD = 100.0

    def analyze(self, df: pd.DataFrame, segments: dict, kpis: dict = None) -> dict:
        """
        Main entry point for analysis.

        Args:
            df: DataFrame with 'Resistance' column.
            segments: Dictionary containing phase start/end indices.
                      Expected keys: 'valid' (bool), 'phase2_start', 'phase2_end',
                      'phase3_start', 'phase3_end', 'phase4_start', 'phase4_end'.
            kpis: Optional dictionary of Key Performance Indicators.

        Returns:
            Report dict with 'Fault_Detection' (list of defects) and
            'advanced_analysis' (health score, status, physics metrics).
        """
        if kpis is None:
            kpis = {}

        # 1. Standardize Input
        df_std = self._standardize_input(df)
        resistance = df_std['Resistance'].values

        # 2. Signal Preprocessing (Scipy). Fall back to the raw trace if the
        # signal is too short for the configured Savitzky-Golay window.
        try:
            resistance_smooth = savgol_filter(resistance, self.SAVGOL_WINDOW, self.SAVGOL_POLYORDER)
        except Exception:
            resistance_smooth = resistance

        # Derivatives (velocity / acceleration of the resistance trace)
        res_velocity = np.gradient(resistance_smooth)
        res_acceleration = np.gradient(res_velocity)

        # 3. Advanced Feature Extraction (Using provided segments)
        features = self._extract_advanced_features(
            resistance, resistance_smooth, res_velocity, res_acceleration, segments, kpis
        )

        # 4. Heuristic Defect Detection (12 Classes)
        defects = self._detect_defects(features, kpis)

        # 5. Construct Report
        report = self._build_report(features, defects)

        return report

    def _standardize_input(self, df: pd.DataFrame) -> pd.DataFrame:
        """Return a single-column DataFrame named 'Resistance', guessing the
        resistance column from the input when it is not labelled explicitly."""
        if 'Resistance' not in df.columns:
            cols = [c for c in df.columns if 'res' in c.lower() or 'uohm' in c.lower()]
            if cols:
                df = df.rename(columns={cols[0]: 'Resistance'})
            else:
                # Fallback: first column that does not look like a time axis.
                non_time = [c for c in df.columns if not c.lower().startswith('t')]
                if non_time:
                    df = df.rename(columns={non_time[0]: 'Resistance'})
                else:
                    raise ValueError("Could not identify Resistance column")

        # Handle wide-format input (one row, many columns) by transposing it
        # into a single column of samples.
        if df.shape[1] > 100 and df.shape[0] < 10:
            vals = df.iloc[0].values
            return pd.DataFrame({'Resistance': vals})

        return df[['Resistance']].reset_index(drop=True)

    def _extract_advanced_features(self, r_raw, r_smooth, r_vel, r_acc, seg, kpis):
        """
        Extracts comprehensive features for heuristic scoring.

        Args:
            r_raw: Raw resistance samples (µΩ).
            r_smooth: Savitzky-Golay smoothed samples.
            r_vel: First derivative of the smoothed trace.
            r_acc: Second derivative of the smoothed trace.
            seg: Segment dict (see analyze()).
            kpis: KPI dict; 'Test Current (A)' is read for arc energy (default 100 A).
        """
        features = {}
        # BUG FIX: use .get so a segments dict missing the 'valid' flag is
        # treated as invalid data instead of raising KeyError.
        if not seg.get('valid', False):
            features['valid_data'] = False
            return features
        features['valid_data'] = True

        # --- A. MAIN CONTACT (Phase 3) ---
        p3_slice = slice(seg['phase3_start'], seg['phase3_end'])
        r_p3 = r_raw[p3_slice]

        if len(r_p3) > 0:
            features['main_mean'] = float(np.mean(r_p3))
            features['main_std'] = float(np.std(r_p3))
            features['main_min'] = float(np.min(r_p3))
            features['main_max'] = float(np.max(r_p3))
            features['main_range'] = float(features['main_max'] - features['main_min'])

            # Detrend for roughness analysis (remove the linear drift so the
            # RMS reflects surface texture, not slope).
            x = np.arange(len(r_p3))
            p = np.polyfit(x, r_p3, 1)
            detrended = r_p3 - np.polyval(p, x)

            features['roughness_rms'] = float(np.sqrt(np.mean(detrended**2)))
            features['roughness_skew'] = float(skew(detrended)) if len(detrended) > 2 else 0
            features['roughness_kurtosis'] = float(kurtosis(detrended)) if len(detrended) > 2 else 0

            # FFT for Chatter (50-300Hz band of the detrended plateau)
            if len(detrended) > 32:
                freqs, psd = welch(detrended, fs=1000)
                chatter_band = (freqs >= 50) & (freqs <= 300)
                # BUG FIX: pass x by keyword (positional x is deprecated in
                # SciPy >= 1.11) and guard against an empty band.
                if np.any(chatter_band):
                    features['chatter_power'] = float(simpson(psd[chatter_band], x=freqs[chatter_band]))
                else:
                    features['chatter_power'] = 0.0
            else:
                features['chatter_power'] = 0.0

            # Telegraph Noise (Square Jumps)
            diffs = np.abs(np.diff(r_p3))
            features['telegraph_jumps'] = int(np.sum(diffs > 120))  # Jump > 120uOhm

            # Shelf Detection (Histogram)
            hist, _ = np.histogram(r_p3, bins=10)
            features['num_shelves'] = int(np.sum(hist > len(r_p3)*0.1))  # Bins with >10% data

        else:
            # BUG FIX: supply defaults for EVERY key read by _detect_defects
            # (the original fallback omitted main_std / num_shelves /
            # roughness_skew etc., causing KeyError on an empty phase 3).
            features.update({
                'main_mean': 9999, 'main_std': 0, 'main_min': 9999,
                'main_max': 9999, 'main_range': 0,
                'roughness_rms': 0, 'roughness_skew': 0, 'roughness_kurtosis': 0,
                'chatter_power': 0, 'telegraph_jumps': 0, 'num_shelves': 0,
            })

        # --- B. ARCING ZONES (Phase 2 & 4) ---
        test_current = kpis.get('Test Current (A)', 100.0)

        # Closing Arc
        p2_slice = slice(seg['phase2_start'], seg['phase2_end'])
        r_p2 = r_raw[p2_slice]
        features['closing_arc_duration'] = len(r_p2)
        if len(r_p2) > 0:
            # P = I^2 * R; resistance is in µΩ hence the 1e-6 factor, and each
            # sample is assumed to span 1 ms (0.001 s) for the energy integral.
            power_p2 = (test_current ** 2) * r_p2 * 1e-6
            features['closing_arc_energy_joules'] = float(np.sum(power_p2) * 0.001)
            features['closing_critical_spikes'] = int(np.sum(r_p2 > self.ARCING_SPIKE_CRITICAL))
            features['closing_severe_spikes'] = int(np.sum(r_p2 > self.ARCING_SPIKE_SEVERE))
        else:
            features.update({'closing_arc_energy_joules': 0, 'closing_critical_spikes': 0, 'closing_severe_spikes': 0})

        # Opening Arc
        p4_slice = slice(seg['phase4_start'], seg['phase4_end'])
        r_p4 = r_raw[p4_slice]
        features['opening_arc_duration'] = len(r_p4)
        if len(r_p4) > 0:
            power_p4 = (test_current ** 2) * r_p4 * 1e-6
            features['opening_arc_energy_joules'] = float(np.sum(power_p4) * 0.001)
            features['opening_critical_spikes'] = int(np.sum(r_p4 > self.ARCING_SPIKE_CRITICAL))
            features['opening_severe_spikes'] = int(np.sum(r_p4 > self.ARCING_SPIKE_SEVERE))

            # Bounce Detection (Find Peaks)
            peaks, _ = find_peaks(r_p4, prominence=self.BOUNCE_PROMINENCE, distance=5)
            features['num_bounces'] = len(peaks)

            # Telegraph in Arcing
            features['arcing_telegraph'] = int(np.sum(np.abs(np.diff(r_p4)) > 400))
        else:
            features.update({'opening_arc_energy_joules': 0, 'opening_critical_spikes': 0, 'opening_severe_spikes': 0, 'num_bounces': 0, 'arcing_telegraph': 0})

        # --- C. KINEMATICS ---
        features['dur_closing'] = len(r_p2)
        features['dur_opening'] = len(r_p4)
        features['asymmetry_ratio'] = float(features['dur_opening'] / max(1, features['dur_closing']))

        acc_p3 = r_acc[p3_slice] if len(r_p3) > 0 else []
        features['max_micro_jerk'] = float(np.max(np.abs(acc_p3))) if len(acc_p3) > 0 else 0.0

        return features

    def _detect_defects(self, f, kpis):
        """
        Applies Heuristic Scoring Logic for 12 Defect Classes.
        Returns list of defects with 'Confidence' and 'Evidence'.
        """
        defects = []
        if not f['valid_data']:
            return defects

        # --- CLASS 1: HEALTHY (Implicit - if no defects found) ---

        # --- CLASS 2: MAIN CONTACT WEAR ---
        score = 0
        evidence = []
        if f['main_mean'] > self.R_MAIN_CRITICAL:
            score += 50; evidence.append(f"Critical Resistance ({f['main_mean']:.1f} µΩ)")
        elif f['main_mean'] > self.R_MAIN_WARNING:
            score += 30; evidence.append(f"Elevated Resistance ({f['main_mean']:.1f} µΩ)")

        if f['roughness_rms'] > 25:
            score += 25; evidence.append(f"Severe Surface Roughness (RMS {f['roughness_rms']:.1f})")
        elif f['roughness_rms'] > 15:
            score += 15; evidence.append(f"Moderate Roughness (RMS {f['roughness_rms']:.1f})")

        if f['roughness_skew'] > 1.5:
            score += 10; evidence.append("Positive Skew indicates pitting")

        if score > 40:
            defects.append(self._make_defect("Main Contact Wear", score, evidence))

        # --- CLASS 3: ARCING CONTACT WEAR ---
        score = 0
        evidence = []
        total_spikes = f['closing_critical_spikes'] + f['opening_critical_spikes']
        total_energy = f['closing_arc_energy_joules'] + f['opening_arc_energy_joules']

        if total_spikes >= 4:
            score += 50; evidence.append(f"{total_spikes} Critical Arc Flashes (>8000µΩ)")
        elif f['closing_severe_spikes'] + f['opening_severe_spikes'] >= 3:
            score += 40; evidence.append("Multiple Severe Spikes (>5000µΩ)")

        if total_energy > self.ARCING_ENERGY_CRITICAL:
            score += 30; evidence.append(f"Critical Arc Energy ({total_energy:.1f} J)")

        if score > 40:
            defects.append(self._make_defect("Arcing Contact Wear", score, evidence))

        # --- CLASS 4: MAIN CONTACT MISALIGNMENT ---
        score = 0
        evidence = []
        if f['telegraph_jumps'] >= 6:
            score += 45; evidence.append(f"Telegraph Pattern: {f['telegraph_jumps']} square jumps")
        elif f['telegraph_jumps'] >= 3:
            score += 25; evidence.append(f"Partial Telegraph: {f['telegraph_jumps']} jumps")

        if f['num_shelves'] >= 3:
            score += 20; evidence.append(f"Stepped Shelves: {f['num_shelves']} plateaus")

        if f['main_std'] > 70:
            score += 15; evidence.append(f"High Instability (Std {f['main_std']:.1f})")

        if score > 40:
            defects.append(self._make_defect("Main Contact Misalignment", score, evidence))

        # --- CLASS 5: ARCING CONTACT MISALIGNMENT ---
        score = 0
        evidence = []
        if f['asymmetry_ratio'] > 2.2:
            score += 35; evidence.append(f"Severe Asymmetry (Opening {f['asymmetry_ratio']:.1f}x Closing)")
        elif f['asymmetry_ratio'] > 1.6:
            score += 20; evidence.append(f"Moderate Asymmetry ({f['asymmetry_ratio']:.1f}x)")

        if f['num_bounces'] >= 5:
            score += 30; evidence.append(f"Mechanical Oscillation: {f['num_bounces']} bounces")
        elif f['num_bounces'] >= 3:
            score += 15; evidence.append(f"{f['num_bounces']} bounces detected")

        if f['arcing_telegraph'] > 10:
            score += 15; evidence.append("High-freq telegraph in arcing zone")

        if score > 40:
            defects.append(self._make_defect("Arcing Contact Misalignment", score, evidence))

        # --- CLASS 6: OPERATING MECHANISM (Timing) ---
        # Requires KPIs.
        # BUG FIX: explicit None checks so a reading of 0 is evaluated instead
        # of being silently skipped by truthiness.
        score = 0
        evidence = []
        c_time = kpis.get('Closing Time (ms)')
        o_time = kpis.get('Opening Time (ms)')

        if c_time is not None and (c_time > 120 or c_time < 64):
            score += 40; evidence.append(f"Closing Time Deviation ({c_time}ms)")
        if o_time is not None and (o_time > 48 or o_time < 24):
            score += 40; evidence.append(f"Opening Time Deviation ({o_time}ms)")

        if score > 40:
            defects.append(self._make_defect("Operating Mechanism Malfunction", score, evidence))

        # --- CLASS 7: DAMPING SYSTEM FAULT ---
        score = 0
        evidence = []
        if f['num_bounces'] > 7:
            score += 80; evidence.append(f"Excessive Bouncing ({f['num_bounces']} peaks) - Damper failure")
        elif f['num_bounces'] > 5:
            score += 50; evidence.append(f"High Bouncing ({f['num_bounces']} peaks)")

        if score > 40:
            defects.append(self._make_defect("Damping System Fault", score, evidence))

        # --- CLASS 9: LINKAGE/ROD OBSTRUCTION ---
        score = 0
        evidence = []
        if f['max_micro_jerk'] > self.JERK_THRESHOLD:
            score += 60; evidence.append(f"High Kinematic Jerk ({f['max_micro_jerk']:.1f}) - Stick-slip friction")

        if score > 40:
            defects.append(self._make_defect("Linkage/Rod Obstruction", score, evidence))

        # --- CLASS 10: FIXED CONTACT DAMAGE ---
        score = 0
        evidence = []
        dlro = kpis.get('DLRO Value (µΩ)')
        if dlro is not None and dlro > 80 and f['roughness_rms'] < 15:
            # High resistance but smooth curve = Fixed contact issue
            score += 85; evidence.append(f"High DLRO ({dlro}µΩ) with Smooth Curve (Fixed Contact)")

        if score > 40:
            defects.append(self._make_defect("Fixed Contact Damage", score, evidence))

        # --- CLASS 11/12: COIL DAMAGE ---
        # Simple threshold checks.
        # BUG FIX: a measured coil current of 0.0 A (dead coil) must flag the
        # defect — the original truthiness test skipped exactly that case.
        cc = kpis.get('Peak Close Coil Current (A)')
        if cc is not None and cc < 2.0:
            defects.append(self._make_defect("Close Coil Damage", 95, [f"Current {cc}A < 2A"]))

        tc1 = kpis.get('Peak Trip Coil 1 Current (A)')
        tc2 = kpis.get('Peak Trip Coil 2 Current (A)')
        if tc1 is not None and tc2 is not None and tc1 < 2.0 and tc2 < 2.0:
            defects.append(self._make_defect("Trip Coil Damage", 95, [f"Both Coils Failed (TC1:{tc1}A, TC2:{tc2}A)"]))

        return defects

    def _make_defect(self, name, score, evidence):
        """Build one defect record; confidence is capped at 99.9 %."""
        return {
            "defect_name": name,
            "Confidence": f"{min(99.9, score):.1f} %",
            "Severity": "High" if score > 70 else "Medium",
            "description": "; ".join(evidence)
        }

    def _build_report(self, features, defects):
        """Constructs the final JSON report."""

        # Calculate Overall Health Score (start at 100, subtract per defect)
        health_score = 100
        for d in defects:
            sev = d['Severity']
            if sev == 'High':
                health_score -= 30
            elif sev == 'Medium':
                health_score -= 15

        health_score = max(0, health_score)
        status = "Healthy"
        if health_score < 50:
            status = "Critical"
        elif health_score < 80:
            status = "Warning"

        # Sort defects by confidence (highest first)
        defects.sort(key=lambda x: float(x['Confidence'].replace('%', '')), reverse=True)

        return {
            "Fault_Detection": defects,
            "advanced_analysis": {
                "health_score": health_score,
                "status": status,
                "physics_metrics": {
                    "main_contact_resistance_uohm": round(features.get('main_mean', 0), 2),
                    "surface_roughness_rms": round(features.get('roughness_rms', 0), 2),
                    "arc_energy_joules": round(features.get('closing_arc_energy_joules', 0) + features.get('opening_arc_energy_joules', 0), 2),
                    "mechanical_chatter_power": round(features.get('chatter_power', 0), 2),
                    "kinematic_jerk_index": round(features.get('max_micro_jerk', 0), 2),
                    "telegraph_jumps": features.get('telegraph_jumps', 0),
                    "bounces": features.get('num_bounces', 0)
                }
            }
        }
|
| 385 |
+
|
| 386 |
+
if __name__ == "__main__":
    # Smoke test with a synthetic DCRM trace: open baseline, a closed
    # plateau (~40 µΩ) with injected chatter, and a bounce after opening.
    t = np.linspace(0, 400, 401)
    r = np.ones_like(t) * 100000
    r[100:300] = 40 + np.random.normal(0, 2, 200)
    # Add synthetic defects
    r[150:200] += 10 * np.sin(2 * np.pi * 60 * t[150:200] / 1000)  # Chatter
    r[305:315] = 4000  # Bounce

    df = pd.DataFrame({'T_ms': t, 'Resistance': r})

    # BUG FIX: analyze() requires a segments dict; the original call
    # engine.analyze(df) raised TypeError (missing 'segments').  Provide
    # phase boundaries that match the synthetic trace above.
    segments = {
        'valid': True,
        'phase2_start': 90, 'phase2_end': 100,    # closing arc window
        'phase3_start': 100, 'phase3_end': 300,   # fully closed plateau
        'phase4_start': 300, 'phase4_end': 330,   # opening arc (incl. bounce)
    }

    engine = AdvancedDCRMEngine()
    report = engine.analyze(df, segments)
    print(json.dumps(report, indent=2))
|
core/engines/diagnostics.py
ADDED
|
@@ -0,0 +1,141 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Previous Name: analysis/advanced_diagnostics.py
|
| 2 |
+
import pandas as pd
|
| 3 |
+
import plotly.graph_objects as go
|
| 4 |
+
from plotly.subplots import make_subplots
|
| 5 |
+
import base64
|
| 6 |
+
import json
|
| 7 |
+
from langchain_core.messages import HumanMessage
|
| 8 |
+
import streamlit as st
|
| 9 |
+
|
| 10 |
+
def get_defect_prompt(data_str: str) -> str:
    """Build the text half of the multimodal DCRM defect-analysis prompt.

    Args:
        data_str: Sampled DCRM data rendered as plain text (the caller uses
            ``df.to_string(index=False)``); embedded verbatim into the prompt.

    Returns:
        The complete prompt string. Doubled braces ``{{ }}`` keep the JSON
        schema literal inside this f-string; only ``{data_str}`` interpolates.
    """
    return f"""
System Role: Principal DCRM & Kinematic Analyst
Role:
You are an expert High-Voltage Circuit Breaker Diagnostician. Your task is to interpret Dynamic Contact Resistance (DCRM) traces to detect specific electrical and mechanical faults.

Critical "Anti-Overfitting" Directive:
You must distinguish between Systematic Defects and Artifacts.
True Degradation: Flag issues only when the visual signature is statistically significant and exceeds the "noise floor."

Capability:
Identify Multiple Concurrent Issues if present. (e.g., A breaker can have both misalignment and contact wear).
there will mostly be 3 line charts in the input
green resistance profile
blue current profile
red travel profile

Data (Sampled): {data_str}

1. Diagnostic Heuristics & Defect Taxonomy
Map the visual DCRM trace to ONLY the following defect types. Use the specific Visual Heuristics to confirm detection.

Defect Type | Visual Heuristic (The "Hint") | Mechanical Significance (Root Cause)
--- | --- | ---
Main Contact Issue (Corrosion/Oxidation) | "The Significant Grass"<br>In the fully closed plateau, look for pronounced, erratic instability. <br>• Ignore: Uniform, low-amplitude fuzz (sensor noise).<br>• Flag: Jagged, irregular peaks/valleys with significant amplitude (e.g., > 15–20 μΩ variance). The trace looks like a "rough rocky road," not just a "gravel path." | Surface Pathology: The Silver (Ag) plating is compromised (fretting corrosion) or heavy oxidation has occurred. The current path is constantly shifting through microscopic non-conductive spots.
Arcing Contact Wear | "Big Spikes & Short Wipe"<br>Resistance spikes are frequent and significantly large (high amplitude). Crucially, the duration of the arcing zone (the time between first touch and main contact touch) is noticeably shorter than expected. | Ablation: The Tungsten-Copper (W-Cu) tips are heavily eroded. The contact length has physically diminished, risking failure to commutate current during opening.
Misalignment (Main) | "The Struggle to Settle"<br>There are significant, high-amplitude peaks just before the trace tries to settle into the stable plateau. These are not bounces; they are "struggles" to mate that persist longer than 3-5ms. | Mechanical Centering: The moving contact pin is hitting the side or edge of the stationary rosette fingers before forcing its way in. Caused by loose nuts, kinematic play, or guide ring failure.
Misalignment (Arcing) | "Rough Entry"<br>Erratic resistance spikes occurring specifically during the initial entry (commutation), well before the main contacts engage. | Tip Eccentricity: The arcing pin is not entering the nozzle concentrically. It is scraping the nozzle throat or hitting the side, indicating a bent rod or skewed interrupter.
Slow Mechanism | "Stretched Time"<br>The entire resistance profile is elongated along the X-axis. Events happen later than normal. | Energy Starvation: Low spring charge, hydraulic pressure loss, or high friction due to hardened grease in the linkage.

2. Analysis Logic (The "Signal-to-Noise" Filter)
Before declaring a defect, run these logic checks:
The "Noise Floor" Test (For Main Contacts):
Is the plateau variance uniform and small (< 10 μΩ)? -> Classify as Healthy (Sensor/Manufacturing artifact).
Is the variance erratic, jagged, and large (> 15 μΩ)? -> Classify as Corrosion/Oxidation.
The "Duration" Test (For Misalignment):
Are the pre-plateau peaks < 2ms? -> Ignore (Benign Bounce).
Do the peaks persist > 3-5ms before settling? -> Classify as Misalignment.
The "Combination" Check:
Does the trace show both "Rough Entry" AND "Stretched Time"? -> Report Both (Misalignment + Slow Mechanism).

3. Output Structure
Provide a concise Executive Lead followed by the JSON.

Executive Lead (3-4 Lines)
Status: Healthy | Warning | Critical.
Key Findings: Summary of valid defects found (ignoring sensor noise).
Action: "Return to service" or specific repair instruction.

JSON Schema
Return ONLY this JSON object:
{{
"image_url": "string",
"overall_condition": "Healthy|Warning|Critical",
"executive_lead": "string (The 3-4 line summary)",
"detected_issues": [
{{
"issue_type": "Main Contact Issue (Corrosion/Oxidation)|Arcing Contact Wear|Misalignment (Main)|Misalignment (Arcing)|Slow Mechanism",
"confidence": "High|Medium|Low",
"visual_evidence": "string (e.g., 'Plateau instability >20 micro-ohms detected, exceeding sensor noise threshold.')",
"mechanical_significance": "string (Root cause from table)",
"severity": "Low|Medium|High"
}}
],
"analysis_metrics": {{
"static_resistance_Rp_uOhm": "float",
"signal_noise_level": "Low (Sensor/Mfg)|High (Defect)",
"wipe_quality": "Normal|Short|Erratic"
}},
"maintenance_recommendation": "string"
}}
"""
|
| 82 |
+
|
| 83 |
+
def perform_defect_analysis(df, llm):
    """
    Performs advanced defect analysis using the specialized DCRM prompt.

    Args:
        df: DataFrame with 'Time_ms', 'Current', 'Resistance' and 'Travel' columns.
        llm: LangChain chat model that accepts multimodal (text + image_url) messages.

    Returns:
        dict parsed from the model's JSON reply; on any failure a fallback
        report with overall_condition "Unknown" (the error is also surfaced
        to the UI via st.error).
    """
    try:
        # 1. Prepare Text Data
        data_str = df.to_string(index=False)
        prompt_text = get_defect_prompt(data_str)

        # 2. Prepare Image Data (Simplified for LLM Vision)
        fig = make_subplots(specs=[[{"secondary_y": True}]])
        fig.add_trace(go.Scatter(x=df['Time_ms'], y=df['Current'], name="Current", line=dict(color='blue')), secondary_y=False)
        fig.add_trace(go.Scatter(x=df['Time_ms'], y=df['Resistance'], name="Resistance", line=dict(color='green')), secondary_y=False)
        fig.add_trace(go.Scatter(x=df['Time_ms'], y=df['Travel'], name="Travel", line=dict(color='red')), secondary_y=True)
        fig.update_layout(title="DCRM Graph for Defect Analysis", showlegend=True)

        # Convert plot to image bytes (requires the kaleido backend)
        img_bytes = fig.to_image(format="png", width=1024, height=600)
        base64_image = base64.b64encode(img_bytes).decode('utf-8')

        # 3. Construct Multimodal Message
        message = HumanMessage(
            content=[
                {"type": "text", "text": prompt_text},
                {
                    "type": "image_url",
                    "image_url": {"url": f"data:image/png;base64,{base64_image}"}
                }
            ]
        )

        # 4. Invoke LLM and strip Markdown code fences from the reply
        response = llm.invoke([message])
        content = response.content.replace("```json", "").replace("```", "").strip()

        # Extract the JSON object if the model wrapped it in extra text.
        try:
            start_idx = content.find("{")
            end_idx = content.rfind("}") + 1
            # BUGFIX: rfind() returns -1 on failure, so end_idx was 0 and the
            # old `end_idx != -1` test could never fail. Require a closing
            # brace strictly after the opening one instead.
            if start_idx != -1 and end_idx > start_idx:
                result = json.loads(content[start_idx:end_idx])
            else:
                raise ValueError("No JSON found in response")
        except json.JSONDecodeError:
            # Fallback: try to parse the whole content as-is
            result = json.loads(content)

        return result

    except Exception as e:
        # Boundary handler: report to the UI and return a safe fallback payload.
        st.error(f"Defect Analysis failed: {str(e)}")
        return {
            "overall_condition": "Unknown",
            "executive_lead": f"Analysis failed due to error: {str(e)}",
            "detected_issues": [],
            "analysis_metrics": {},
            "maintenance_recommendation": "Check system logs."
        }
|
core/engines/rules.py
ADDED
|
@@ -0,0 +1,1328 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Previous Name: analysis/engines/rule_engine.py
|
| 2 |
+
import pandas as pd
|
| 3 |
+
import numpy as np
|
| 4 |
+
import json
|
| 5 |
+
import sys
|
| 6 |
+
from scipy.signal import find_peaks
|
| 7 |
+
|
| 8 |
+
# Set UTF-8 encoding for console output (handles µ, Ω, etc.)
# BUGFIX: the encoding compare must be case-insensitive ('UTF-8' vs 'utf-8'),
# and stdout may be replaced by a capture/redirect object (IDE, pytest) that
# has no encoding attribute or no reconfigure() method — tolerate both.
if (getattr(sys.stdout, 'encoding', None) or '').lower() != 'utf-8':
    try:
        sys.stdout.reconfigure(encoding='utf-8')
    except AttributeError:
        pass  # Non-reconfigurable stream; leave it unchanged.
|
| 11 |
+
|
| 12 |
+
# ============================================================================
# PHYSICS-BASED CONSTANTS (Ultra-Optimized for Real-Life DCRM Analysis)
# ============================================================================
# NOTE: resistance values below are in micro-ohms (µΩ) and times in ms
# unless a comment states otherwise.

# Phase Detection Thresholds (Industry-Calibrated)
R_OPEN_THRESHOLD = 1_000_000  # Above this = Open Circuit (Phase 1 & 5)
R_ARCING_THRESHOLD = 1000  # Intermediate resistance = Arcing Phase (Phase 2 & 4)
R_MAIN_THRESHOLD = 600  # Below this = Main Contact Zone (Phase 3)

# Nominal Healthy Values (Real-World Calibrated)
R_HEALTHY_MEAN_IDEAL = 35  # Ideal healthy: ~20-50 µΩ
R_HEALTHY_MAX = 70  # STRICT: >70 µΩ = early wear
R_HEALTHY_STD_MAX = 15  # Healthy std deviation < 15 µΩ (very smooth)

# Main Contact Wear Thresholds (Physics-Based Progressive Scale)
R_WEAR_EARLY_MIN = 70  # Mean > 70 µΩ = early wear (preventive)
R_WEAR_MODERATE_MIN = 100  # Mean > 100 µΩ = moderate wear
R_WEAR_SEVERE_MIN = 180  # Mean > 180 µΩ = severe wear
R_WEAR_CRITICAL_MIN = 280  # Mean > 280 µΩ = critical wear (imminent failure)
WEAR_STD_EARLY = 15  # Std > 15 µΩ = surface roughness
WEAR_STD_MODERATE = 25  # Std > 25 µΩ = noisy/grassy wear
WEAR_STD_SEVERE = 45  # Std > 45 µΩ = severe pitting/erosion

# Main Contact Misalignment (Square-Wave/Telegraph Pattern)
MISALIGNMENT_JUMP_MIN = 120  # Telegraph jump > 120 µΩ (square wave edge)
MISALIGNMENT_COUNT_MIN = 6  # Must have >= 6 distinct square-wave jumps
MISALIGNMENT_JUMP_RATIO = 0.15  # Affects >= 15% of main contact duration
MISALIGNMENT_STD_MIN = 70  # Std deviation > 70 µΩ for telegraph
SHELF_DETECTION_THRESHOLD = 80  # Mid-transition shelf > 80 µΩ
SQUARE_WAVE_DUTY_CYCLE = 0.3  # Square wave duty cycle check

# Arcing Contact Wear (Spike/Impulse Detection)
ARCING_SPIKE_CRITICAL = 8000  # Critical spike > 8000 µΩ (arc flash)
ARCING_SPIKE_SEVERE = 5000  # Severe spike > 5000 µΩ
ARCING_SPIKE_MODERATE = 3000  # Moderate spike > 3000 µΩ
ARCING_SPIKE_COUNT_CRITICAL = 4  # >= 4 critical spikes
ARCING_SPIKE_COUNT_SEVERE = 3  # >= 3 severe spikes
ARCING_INSTABILITY_STD = 700  # High std in arcing zones
SPIKE_WIDTH_THRESHOLD = 3  # Spike width > 3 samples = sustained arc

# Arcing Contact Misalignment (Asymmetry + Sinusoidal Bounce)
ASYMMETRY_RATIO_MODERATE = 1.6  # Opening/Closing ratio > 1.6
ASYMMETRY_RATIO_SEVERE = 2.2  # Ratio > 2.2 = severe asymmetry
ASYMMETRY_RATIO_CRITICAL = 3.0  # Ratio > 3.0 = critical misalignment
BOUNCE_PROMINENCE = 500  # Bounce/rounded peak > 500 µΩ
BOUNCE_SINUSOIDAL_FREQ = 10  # Sinusoidal frequency (samples per cycle)
PHASE3_REDUCTION_RATIO = 0.65  # Phase 3 < 65% of expected = reduced contact

# ============================================================================
# KPI THRESHOLDS FOR CLASSES 6-12 (Secondary Mechanical & Coil Defects)
# ============================================================================

# Class 6: Operating Mechanism Malfunction (Timing/Speed)
CLOSING_TIME_NOM = (80, 100)  # ms
OPENING_TIME_NOM = (30, 40)  # ms
CONTACT_SPEED_NOM = (4.5, 6.5)  # m/s
TIMING_DEVIATION_THRESHOLD = 0.20  # >20% off nominal

# Class 7: Damping System Fault (Bouncing)
BOUNCE_COUNT_THRESHOLD = 5  # >5 distinct bounces
BOUNCE_AMPLITUDE = 100  # >100 µΩ amplitude

# Class 8: SF6 Pressure Leakage
SF6_PRESSURE_NOM = (5.5, 6.5)  # bar
SF6_PRESSURE_CRITICAL = 5.0  # <5.0 bar = leak
ARC_QUENCH_DURATION_MAX = 25  # >25 ms = prolonged arc

# Class 9: Linkage/Rod Obstruction
STUTTER_COUNT_MIN = 3  # >3 distinct stutters
STUTTER_DURATION_MIN = 10  # >10 ms flat plateau

# Class 10: Fixed Contact Damage
DLRO_HEALTHY_MAX = 50  # <50 µΩ healthy
DLRO_MODERATE = 80  # 50-80 µΩ moderate
DLRO_CRITICAL = 100  # >100 µΩ critical
FIXED_CONTACT_STD_MAX = 15  # <15 µΩ = smooth (not wear)

# Class 11/12: Coil Damage
CLOSE_COIL_CURRENT_MIN = 2.0  # <2A = failure
TRIP_COIL_CURRENT_MIN = 2.0  # <2A = failure (both coils)
COIL_CURRENT_NOM = (4.0, 7.0)  # Normal: 4-7A
|
| 93 |
+
|
| 94 |
+
|
| 95 |
+
|
| 96 |
+
def standardize_input(df: pd.DataFrame) -> pd.DataFrame:
    """
    Returns a DataFrame with one row and columns T_0...T_400 containing Resistance values (uOhm).

    Supports two layouts:
      - Vertical: >= 401 rows with a 'Resistance' column (one sample per row).
      - Horizontal: a wide frame with >= 401 columns (samples already laid out
        across columns; no 'Resistance' column required).

    Raises:
        ValueError: 'Resistance' column present but fewer than 401 rows.
        KeyError: neither layout applies ('Resistance' missing and < 401 columns).
    """
    cols = [f"T_{i}" for i in range(401)]

    # Vertical data (e.g., 401+ rows, single 'Resistance' column)
    if 'Resistance' in df.columns:
        if df.shape[0] >= 401:
            values = df['Resistance'].iloc[:401].to_numpy().reshape(1, -1)
            return pd.DataFrame(values, columns=cols)
        raise ValueError(f"Input shape {df.shape} invalid. Expected 401 Resistance points.")

    # Horizontal data (already rows with many columns).
    # BUGFIX: the old code executed df = df[['Resistance']] unconditionally,
    # which (a) raised KeyError for wide files that have no 'Resistance'
    # column and (b) reduced shape[1] to 1, making this branch unreachable.
    if df.shape[1] >= 401:
        wide = df.iloc[:, :401].copy()  # copy() avoids assigning columns on a view
        wide.columns = cols
        return wide

    raise KeyError("CSV must contain a 'Resistance' column.")
|
| 120 |
+
|
| 121 |
+
|
| 122 |
+
def analyze_dcrm_advanced(row_values, kpis=None):
    """
    Production-Grade DCRM Analysis Engine with Full KPI Support
    ============================================================
    Detects ALL 12 defect classes:
    1. Healthy
    2. Main Contact Wear
    3. Arcing Contact Wear
    4. Main Contact Misalignment
    5. Arcing Contact Misalignment
    6. Operating Mechanism Malfunction
    7. Damping System Fault
    8. SF6 Pressure Leakage
    9. Linkage/Rod Obstruction
    10. Fixed Contact Damage
    11. Close Coil Damage
    12. Trip Coil Damage

    Args:
        row_values: DCRM waveform (401 time points, resistance in µΩ)
        kpis: Optional dictionary with KPIs (Closing Time, Opening Time, Contact Speed,
              SF6 Pressure, DLRO, Close Coil Current, Trip Coil 1/2 Currents, etc.)

    Returns:
        dict with:
          - "Fault_Detection": all defects with probability >50% (sorted
            highest first; a single "Inconclusive" entry if none qualify)
          - "overall_health_assessment": per-subsystem risk summary
          - "classifications": confidence (0-1 scale) for every defect class
    """
    arr = np.array(row_values, dtype=float)

    # Default KPIs if not provided
    if kpis is None:
        kpis = {}

    def _confidence_pct(fault):
        # Parse the 'XX.XX %' confidence string into a float percentage.
        return float(fault['Confidence'].replace('%', '').strip())

    # === PHASE 1: AUTOMATIC PHASE DETECTION ===
    phases = detect_five_phases(arr)

    if phases is None:
        return {
            "Fault_Detection": [_build_result(
                "Open Circuit or Invalid Data",
                "100.00 %",
                "Critical",
                "Breaker did not close properly or data is corrupted"
            )],
            "overall_health_assessment": {
                "Contacts (moving & arcing)": "High Risk",
                "SF6 Gas Chamber": "Normal",
                "Operating Mechanism": "High Risk",
                "Coil": "Normal"
            }
        }

    # Extract phase segments
    phase1_open = arr[phases['phase1_start']:phases['phase1_end']]
    phase2_closing = arr[phases['phase2_start']:phases['phase2_end']]
    phase3_main = arr[phases['phase3_start']:phases['phase3_end']]
    phase4_opening = arr[phases['phase4_start']:phases['phase4_end']]
    phase5_open = arr[phases['phase5_start']:phases['phase5_end']]

    # === PHASE 2: FEATURE EXTRACTION ===
    features = extract_features(phase3_main, phase2_closing, phase4_opening, phases)

    # === PHASE 3: PRIMARY FAULT CLASSIFICATION (Classes 1-5, 11-12) ===
    primary_faults = classify_primary_faults(features, phases, kpis)

    # === PHASE 4: SECONDARY MECHANICAL FAULT CLASSIFICATION (Classes 6-10) ===
    secondary_faults = classify_secondary_faults(features, phases, kpis, primary_faults)

    # === PHASE 5: MERGE AND FILTER (Return ALL defects with probability >50%) ===
    all_faults = primary_faults + secondary_faults
    high_prob_faults = [f for f in all_faults if _confidence_pct(f) > 50.0]

    # Sort by probability (highest first)
    high_prob_faults.sort(key=_confidence_pct, reverse=True)

    # If no high-probability defects, return an Inconclusive entry with low score
    if not high_prob_faults:
        healthy_desc = f"Insufficient evidence for any specific defect. Main Contact: Mean={features['main_mean']:.1f} µΩ, Std={features['main_std']:.1f} µΩ. All defect probabilities <50%."
        high_prob_faults.append(_build_result(
            "Inconclusive",
            "45.00 %",
            "Low",
            healthy_desc
        ))

    # === PHASE 6: BUILD OVERALL HEALTH ASSESSMENT (Gemini-style) ===
    overall_health = {
        "Contacts (moving & arcing)": "Normal",
        "SF6 Gas Chamber": "Normal",
        "Operating Mechanism": "Normal",
        "Coil": "Normal"
    }

    risk_rank = {"Normal": 0, "Low Risk": 1, "Moderate Risk": 2, "High Risk": 3}

    def _escalate(category, risk):
        # BUGFIX: only ever *raise* a category's risk. The old guard only
        # protected "High Risk", so a later low-probability fault could
        # silently downgrade e.g. "Moderate Risk" to "Low Risk".
        if risk_rank[risk] > risk_rank[overall_health[category]]:
            overall_health[category] = risk

    for fault in high_prob_faults:
        name = fault['defect_name'].lower()
        severity = fault['Severity'].lower()
        probability = _confidence_pct(fault)

        # Determine risk level based on probability
        if probability >= 85 and severity in ["high", "critical"]:
            risk = "High Risk"
        elif probability >= 70:
            risk = "Moderate Risk"
        elif probability >= 50:
            risk = "Low Risk"
        else:
            risk = "Normal"

        # Map defects to health categories
        if any(x in name for x in ["main contact", "arcing contact", "contact wear", "contact misalignment", "fixed contact"]):
            _escalate("Contacts (moving & arcing)", risk)

        if "sf6" in name or "pressure" in name:
            _escalate("SF6 Gas Chamber", risk)

        if any(x in name for x in ["operating mechanism", "damping", "linkage", "rod"]):
            _escalate("Operating Mechanism", risk)

        if "coil" in name:
            _escalate("Coil", risk)

    # === PHASE 7: BUILD CLASSIFICATIONS ARRAY FOR ALL 12 CLASSES ===
    class_probabilities = {
        "Healthy": 0.0,
        "Main Contact Wear": 0.0,
        "Arcing Contact Wear": 0.0,
        "Main Contact Misalignment": 0.0,
        "Arcing Contact Misalignment": 0.0,
        "Operating Mechanism Malfunction": 0.0,
        "Damping System Fault": 0.0,
        "Pressure System Leakage (SF6 Gas Chamber)": 0.0,
        "Linkage/Connecting Rod Obstruction/Damage": 0.0,
        "Fixed Contact Damage/Deformation": 0.0,
        "Close Coil Damage": 0.0,
        "Trip Coil Damage": 0.0
    }

    # Fill in probabilities from all_faults (including those <50%)
    for fault in all_faults:
        class_probabilities[fault['defect_name']] = _confidence_pct(fault) / 100.0

    # Build classifications array (all 12 classes)
    classifications = [
        {"Class": class_name, "Confidence": round(confidence, 4)}
        for class_name, confidence in class_probabilities.items()
    ]

    # === RETURN OPTIMIZED JSON (Probability-Based Scoring) ===
    return {
        "Fault_Detection": high_prob_faults,
        "overall_health_assessment": overall_health,
        "classifications": classifications
    }
|
| 294 |
+
|
| 295 |
+
|
| 296 |
+
def detect_five_phases(arr):
    """
    Automatically detects all 5 DCRM phases using adaptive thresholding.
    Returns dict with start/end indices for each phase, or None if detection fails.
    """
    # Samples where either arcing or main contacts are conducting.
    contact_idx = np.flatnonzero(arr < R_ARCING_THRESHOLD)
    if len(contact_idx) < 20:
        # No valid contact detected
        return None

    # Samples inside the fully-closed (main contact) zone.
    main_idx = np.flatnonzero(arr < R_MAIN_THRESHOLD)

    first_contact = contact_idx[0]
    last_contact = contact_idx[-1]

    if len(main_idx) < 5:
        # No main contact = severe fault.
        # Best effort: treat the entire contact window as arcing.
        after_contact = last_contact + 1
        return {
            'phase1_start': 0,
            'phase1_end': first_contact,
            'phase2_start': first_contact,
            'phase2_end': last_contact,
            'phase3_start': last_contact,
            'phase3_end': last_contact,  # Empty main phase
            'phase4_start': last_contact,
            'phase4_end': after_contact,
            'phase5_start': after_contact,
            'phase5_end': len(arr)
        }

    # Normal case: main contact exists.
    first_main = main_idx[0]
    last_main = main_idx[-1]

    return {
        'phase1_start': 0,
        'phase1_end': first_contact,
        'phase2_start': first_contact,
        'phase2_end': first_main,
        'phase3_start': first_main,
        'phase3_end': last_main,
        'phase4_start': last_main,
        'phase4_end': last_contact + 1,
        'phase5_start': last_contact + 1,
        'phase5_end': len(arr)
    }
|
| 349 |
+
|
| 350 |
+
|
| 351 |
+
def extract_features(seg_main, seg_closing, seg_opening, phases):
    """
    ULTRA-OPTIMIZED Feature Extraction with Micro-Level Waveform Analysis
    =====================================================================
    Detects:
    - Square wave patterns (misalignment)
    - Sinusoidal oscillations (damping/bounce)
    - Impulse spikes (arcing wear)
    - Grassy noise (contact wear)
    - Telegraph jumps (mechanical defects)
    - DC offset shifts (fixed contact issues)

    Parameters
    ----------
    seg_main : numpy array
        Resistance samples of the main-contact plateau (Phase 3), in µΩ.
    seg_closing : numpy array
        Resistance samples of the closing arcing zone (Phase 2).
    seg_opening : numpy array
        Resistance samples of the opening arcing zone (Phase 4).
    phases : dict
        Phase boundary indices (not read here; kept for API parity with
        the classifiers).

    Returns
    -------
    dict
        Feature name -> scalar. Every key is always present (sentinel
        values are emitted for empty segments) so downstream classifiers
        can index without .get().
    """

    def _count_sustained(segment):
        # Count runs of consecutive samples above ARCING_SPIKE_SEVERE that
        # are at least SPIKE_WIDTH_THRESHOLD samples wide (sustained arcs,
        # as opposed to single-sample transients).
        severe_idx = np.where(segment > ARCING_SPIKE_SEVERE)[0]
        if len(severe_idx) == 0:
            return 0
        # Split indices into groups wherever the gap exceeds 2 samples.
        spike_groups = np.split(severe_idx, np.where(np.diff(severe_idx) > 2)[0] + 1)
        return sum(1 for group in spike_groups if len(group) >= SPIKE_WIDTH_THRESHOLD)

    features = {}

    # === MAIN CONTACT FEATURES (Phase 3) - MICRO-LEVEL ANALYSIS ===
    if len(seg_main) > 0:
        # Basic plateau statistics
        features['main_mean'] = float(np.mean(seg_main))
        features['main_median'] = float(np.median(seg_main))
        features['main_std'] = float(np.std(seg_main))
        features['main_min'] = float(np.min(seg_main))
        features['main_max'] = float(np.max(seg_main))
        features['main_range'] = float(features['main_max'] - features['main_min'])

        # === SQUARE WAVE PATTERN DETECTION (Misalignment) ===
        diffs = np.diff(seg_main)
        abs_diffs = np.abs(diffs)

        # Count sharp edges (square wave transitions)
        sharp_edges = np.sum(abs_diffs > MISALIGNMENT_JUMP_MIN)
        features['telegraph_jumps'] = int(sharp_edges)
        # len(seg_main) > 0 is guaranteed in this branch, so divide directly.
        features['jump_ratio'] = float(sharp_edges / len(seg_main))

        # Detect duty cycle of square wave (time spent at high vs low levels)
        if features['main_range'] > 100:
            threshold = features['main_median']
            high_time = np.sum(seg_main > threshold)
            duty_cycle = high_time / len(seg_main)
            features['square_wave_duty'] = float(duty_cycle)
            # True square wave has duty cycle ~0.3-0.7 (not pinned at 0 or 1)
            features['is_square_wave'] = 1 if 0.2 < duty_cycle < 0.8 else 0
        else:
            features['square_wave_duty'] = 0.5
            features['is_square_wave'] = 0

        # === SINUSOIDAL/OSCILLATION DETECTION (Damping Fault) ===
        # Autocorrelation at lag=10 (typical bounce frequency) over the first
        # 100 samples. FIX: the previous code used
        # np.correlate(x, x, mode='valid')[0], which is the ZERO-lag dot
        # product (sum of squares) and cannot detect periodicity; replaced
        # with a genuine lag-10 autocorrelation, same normalization.
        if len(seg_main) > 20:
            # Detrend signal
            detrended = seg_main - np.mean(seg_main)
            if len(detrended) > BOUNCE_SINUSOIDAL_FREQ:
                lag = 10
                window = detrended[:min(100, len(detrended))]
                if len(window) > lag:
                    autocorr = float(np.dot(window[:-lag], window[lag:]))
                else:
                    autocorr = 0.0
                features['oscillation_score'] = float(
                    abs(autocorr) / (np.std(detrended) ** 2 * len(detrended) + 1))
            else:
                features['oscillation_score'] = 0.0
        else:
            features['oscillation_score'] = 0.0

        # === GRASSY NOISE PATTERN (Wear Signature) ===
        # Count uniform medium-amplitude spikes throughout plateau
        noise_threshold_low = features['main_median'] + 30
        noise_threshold_high = features['main_median'] + 200
        grassy_spikes = np.sum((seg_main > noise_threshold_low) & (seg_main < noise_threshold_high))
        features['uniform_spikes'] = int(grassy_spikes)
        features['spike_density'] = float(grassy_spikes / len(seg_main))

        # Measure continuous noise level (RMS of derivative).
        # FIX: a single-sample plateau yields an empty diff array, which made
        # np.mean()/np.sqrt(np.mean()) return NaN; guard and emit 0 instead.
        if len(abs_diffs) > 0:
            features['avg_noise'] = float(np.mean(abs_diffs))
            features['max_single_jump'] = float(np.max(abs_diffs))
            features['noise_rms'] = float(np.sqrt(np.mean(abs_diffs**2)))
        else:
            features['avg_noise'] = 0.0
            features['max_single_jump'] = 0
            features['noise_rms'] = 0.0

        # === STEPPED SHELF PATTERN (Misalignment Signature) ===
        if len(seg_main) > 20:
            # Histogram analysis to find discrete levels
            hist, edges = np.histogram(seg_main, bins=min(15, len(seg_main)//10))
            # Count bins with >8% of data (significant plateaus)
            significant_bins = np.sum(hist > (len(seg_main) * 0.08))
            features['num_shelves'] = int(significant_bins)

            # Detect initial transient vs steady-state
            split_point = min(25, len(seg_main)//3)
            initial_segment = seg_main[:split_point]
            plateau_segment = seg_main[split_point:]
            features['initial_deviation'] = float(np.std(initial_segment))
            features['plateau_stability'] = float(np.std(plateau_segment))

            # Initial jump indicator: start is much noisier than the settled plateau
            if len(initial_segment) > 0 and len(plateau_segment) > 0:
                features['has_initial_jump'] = 1 if features['initial_deviation'] > features['plateau_stability'] * 1.8 else 0
            else:
                features['has_initial_jump'] = 0
        else:
            features['num_shelves'] = 1
            features['initial_deviation'] = 0
            features['plateau_stability'] = features['main_std']
            features['has_initial_jump'] = 0

    else:
        # Empty main contact = severe fault; sentinel values make the
        # classifiers treat resistance as pathologically high.
        features.update({
            'main_mean': 9999, 'main_median': 9999, 'main_std': 0,
            'main_min': 9999, 'main_max': 9999, 'main_range': 0,
            'telegraph_jumps': 0, 'jump_ratio': 0, 'uniform_spikes': 0,
            'spike_density': 0, 'avg_noise': 0, 'max_single_jump': 0,
            'noise_rms': 0, 'square_wave_duty': 0, 'is_square_wave': 0,
            'oscillation_score': 0, 'initial_deviation': 0,
            'plateau_stability': 0, 'has_initial_jump': 0, 'num_shelves': 0
        })

    # === TIMING FEATURES ===
    # Durations are clamped to >=1 so asymmetry_ratio cannot divide by zero.
    features['dur_closing'] = int(max(1, len(seg_closing)))
    features['dur_opening'] = int(max(1, len(seg_opening)))
    features['dur_main'] = int(len(seg_main))
    features['asymmetry_ratio'] = float(features['dur_opening'] / features['dur_closing'])

    # Detect reduced Phase 3 (arcing misalignment signature)
    expected_main_duration = 160  # Typical ~160-180 ms
    features['phase3_reduction'] = float(features['dur_main'] / expected_main_duration)

    # === ARCING CONTACT FEATURES - IMPULSE SPIKE ANALYSIS ===
    # Defaults so every key exists even when a segment is empty.
    features['closing_critical_spikes'] = 0
    features['closing_severe_spikes'] = 0
    features['closing_moderate_spikes'] = 0
    features['opening_critical_spikes'] = 0
    features['opening_severe_spikes'] = 0
    features['opening_moderate_spikes'] = 0
    features['closing_std'] = 0
    features['opening_std'] = 0
    features['closing_peak'] = 0
    features['opening_peak'] = 0

    # === CLOSING ARCING ZONE ANALYSIS ===
    if len(seg_closing) > 0:
        features['closing_critical_spikes'] = int(np.sum(seg_closing > ARCING_SPIKE_CRITICAL))
        features['closing_severe_spikes'] = int(np.sum(seg_closing > ARCING_SPIKE_SEVERE))
        features['closing_moderate_spikes'] = int(np.sum(seg_closing > ARCING_SPIKE_MODERATE))
        features['closing_std'] = float(np.std(seg_closing))
        features['closing_peak'] = float(np.max(seg_closing))

        # Analyze spike width (sustained arc vs transient)
        features['closing_sustained_spikes'] = int(_count_sustained(seg_closing))
    else:
        features['closing_sustained_spikes'] = 0

    # === OPENING ARCING ZONE ANALYSIS ===
    if len(seg_opening) > 0:
        features['opening_critical_spikes'] = int(np.sum(seg_opening > ARCING_SPIKE_CRITICAL))
        features['opening_severe_spikes'] = int(np.sum(seg_opening > ARCING_SPIKE_SEVERE))
        features['opening_moderate_spikes'] = int(np.sum(seg_opening > ARCING_SPIKE_MODERATE))
        features['opening_std'] = float(np.std(seg_opening))
        features['opening_peak'] = float(np.max(seg_opening))

        # Spike width analysis
        features['opening_sustained_spikes'] = int(_count_sustained(seg_opening))

        # === SINUSOIDAL BOUNCE DETECTION (Arcing Misalignment) ===
        # Detect rounded/oscillating peaks
        peaks, properties = find_peaks(seg_opening, prominence=BOUNCE_PROMINENCE, distance=5)
        features['num_bounces'] = int(len(peaks))

        # Detect high-frequency telegraph in arcing zone
        opening_diffs = np.abs(np.diff(seg_opening))
        features['arcing_telegraph'] = int(np.sum(opening_diffs > 400))
    else:
        features['opening_sustained_spikes'] = 0
        features['num_bounces'] = 0
        features['arcing_telegraph'] = 0

    # Add closing telegraph (only when enough samples for a meaningful
    # derivative — mirrors the original >10 gate).
    if len(seg_closing) > 10:
        closing_diffs = np.abs(np.diff(seg_closing))
        features['arcing_telegraph'] += int(np.sum(closing_diffs > 400))

    # === TOTAL SPIKE COUNTS (All Severity Levels) ===
    features['total_critical_spikes'] = features['closing_critical_spikes'] + features['opening_critical_spikes']
    features['total_severe_spikes'] = features['closing_severe_spikes'] + features['opening_severe_spikes']
    features['total_moderate_spikes'] = features['closing_moderate_spikes'] + features['opening_moderate_spikes']
    features['total_sustained_spikes'] = features['closing_sustained_spikes'] + features['opening_sustained_spikes']

    # Symmetry check (wear = similar spikes; misalignment = asymmetric)
    if features['closing_severe_spikes'] > 0 and features['opening_severe_spikes'] > 0:
        spike_ratio = max(features['closing_severe_spikes'], features['opening_severe_spikes']) / \
                      max(1, min(features['closing_severe_spikes'], features['opening_severe_spikes']))
        features['spike_symmetry'] = float(spike_ratio)  # ~1.0 = symmetric (wear)
    else:
        features['spike_symmetry'] = 1.0

    return features
|
| 555 |
+
|
| 556 |
+
|
| 557 |
+
def classify_primary_faults(features, phases, kpis):
    """
    ULTRA-OPTIMIZED Multi-Class PRIMARY Fault Classification with Individual Probability Scores
    ============================================================================================
    Returns ALL defects with probability > 50% (independent scores, not cumulative).

    Scoring Logic:
    - Each defect gets 0-100% probability based on signature strength
    - Multiple defects can coexist (e.g., Wear + Misalignment both at 85%)
    - Scores are NOT normalized (don't sum to 100%)
    - Only defects >50% probability are returned

    Classes: 1-Healthy, 2-Main Wear, 3-Arcing Wear, 4-Main Misalign,
             5-Arcing Misalign, 11-Close Coil, 12-Trip Coil

    Parameters
    ----------
    features : dict
        Scalar waveform features produced by extract_features().
    phases : dict
        Phase boundary indices (not read in this function body; kept for
        API parity with the other classifiers).
    kpis : dict
        Operation KPIs. Coil currents and DLRO are read with kpis.get(...)
        and are all optional.

    Returns
    -------
    list of dict
        One _build_result(...) entry per fault whose score reaches 50%
        (including a "Healthy" entry when its score stays >= 50%).
    """
    all_faults = []

    # ========================================================================
    # CLASS 11: CLOSE COIL DAMAGE
    # ========================================================================
    close_coil_current = kpis.get('Peak Close Coil Current (A)', None)
    if close_coil_current is not None:
        if close_coil_current < CLOSE_COIL_CURRENT_MIN:
            conf = 95.0
            sev = "High"
            desc = f"Close Coil Current critically low ({close_coil_current:.2f} A, normal: 4-7 A). Coil winding damaged or control circuit fault."
            all_faults.append(_build_result("Close Coil Damage", f"{conf:.2f} %", sev, desc))

    # ========================================================================
    # CLASS 12: TRIP COIL DAMAGE
    # CRITICAL: BOTH trip coils must fail. If at least ONE works, NO FAULT.
    # ========================================================================
    trip_coil1 = kpis.get('Peak Trip Coil 1 Current (A)', None)
    trip_coil2 = kpis.get('Peak Trip Coil 2 Current (A)', None)

    if trip_coil1 is not None and trip_coil2 is not None:
        # Check if BOTH coils failed
        if trip_coil1 < TRIP_COIL_CURRENT_MIN and trip_coil2 < TRIP_COIL_CURRENT_MIN:
            conf = 95.0
            sev = "Critical"
            desc = f"BOTH Trip Coils failed (TC1: {trip_coil1:.2f} A, TC2: {trip_coil2:.2f} A, normal: 4-7 A each). Breaker cannot trip - SAFETY CRITICAL."
            all_faults.append(_build_result("Trip Coil Damage", f"{conf:.2f} %", sev, desc))
        # If at least one coil works, NO fault (this is normal redundancy)

    # ========================================================================
    # CLASS 5: ARCING CONTACT MISALIGNMENT
    # Signature: Asymmetric curve + Reduced Phase 3 + Sinusoidal Bounces
    # Probability Score: 0-100% based on signature strength
    # ========================================================================
    arcing_misalign_prob = 0.0
    arcing_misalign_reasons = []

    # Factor 1: Timing asymmetry (CRITICAL SIGNATURE - 40 points max)
    if features['asymmetry_ratio'] > ASYMMETRY_RATIO_CRITICAL:
        arcing_misalign_prob += 40.0
        arcing_misalign_reasons.append(f"Critical timing asymmetry: Opening phase {features['asymmetry_ratio']:.2f}x longer than closing (>3.0x indicates severe misalignment)")
    elif features['asymmetry_ratio'] > ASYMMETRY_RATIO_SEVERE:
        arcing_misalign_prob += 32.0
        arcing_misalign_reasons.append(f"Severe timing asymmetry: Opening {features['asymmetry_ratio']:.2f}x longer (>2.2x threshold)")
    elif features['asymmetry_ratio'] > ASYMMETRY_RATIO_MODERATE:
        arcing_misalign_prob += 24.0
        arcing_misalign_reasons.append(f"Moderate asymmetry: Timing ratio {features['asymmetry_ratio']:.2f} (normal <1.5)")

    # Factor 2: Reduced Phase 3 duration (20 points max)
    if features['phase3_reduction'] < PHASE3_REDUCTION_RATIO:
        reduction_pct = (1 - features['phase3_reduction']) * 100
        arcing_misalign_prob += 20.0
        arcing_misalign_reasons.append(f"Main contact duration reduced by {reduction_pct:.0f}% ({features['dur_main']} ms vs expected ~160 ms)")
    elif features['phase3_reduction'] < 0.80:
        arcing_misalign_prob += 12.0
        arcing_misalign_reasons.append(f"Slightly reduced contact engagement ({features['dur_main']} ms)")

    # Factor 3: Sinusoidal bounces (25 points max)
    if features['num_bounces'] >= 5:
        arcing_misalign_prob += 25.0
        arcing_misalign_reasons.append(f"Detected {features['num_bounces']} sinusoidal bounces during opening (indicates mechanical oscillation)")
    elif features['num_bounces'] >= 3:
        arcing_misalign_prob += 18.0
        arcing_misalign_reasons.append(f"{features['num_bounces']} rounded bounces detected")
    elif features['num_bounces'] >= 1:
        arcing_misalign_prob += 10.0
        arcing_misalign_reasons.append(f"{features['num_bounces']} bounce peak(s) in arcing phase")

    # Factor 4: Telegraph noise in arcing zones (15 points max)
    if features['arcing_telegraph'] > 15:
        arcing_misalign_prob += 15.0
        arcing_misalign_reasons.append(f"High-frequency telegraph noise in arcing zones ({features['arcing_telegraph']} rapid transitions)")
    elif features['arcing_telegraph'] > 8:
        arcing_misalign_prob += 10.0
        arcing_misalign_reasons.append(f"Telegraph noise detected ({features['arcing_telegraph']} events)")

    # Report only when the accumulated score clears the 50% reporting gate;
    # displayed probability is capped at 99%.
    if arcing_misalign_prob >= 50.0:
        prob_str = f"{min(99.0, arcing_misalign_prob):.2f} %"
        sev = _get_severity(arcing_misalign_prob)
        desc = ". ".join(arcing_misalign_reasons)
        all_faults.append(_build_result("Arcing Contact Misalignment", prob_str, sev, desc))

    # ========================================================================
    # CLASS 4: MAIN CONTACT MISALIGNMENT
    # Signature: Square-wave telegraph + Stepped shelves + Initial jump
    # Probability Score: 0-100% based on signature strength
    # ========================================================================
    main_misalign_prob = 0.0
    main_misalign_reasons = []

    # Factor 1: Square-wave telegraph pattern (45 points max)
    if (features['telegraph_jumps'] >= MISALIGNMENT_COUNT_MIN and
        features['jump_ratio'] > MISALIGNMENT_JUMP_RATIO and
        features['main_std'] > MISALIGNMENT_STD_MIN and
        features['is_square_wave'] == 1):
        main_misalign_prob += 45.0
        main_misalign_reasons.append(f"Square-wave telegraph pattern: {features['telegraph_jumps']} sharp jumps (>{MISALIGNMENT_JUMP_MIN} µΩ), duty cycle {features['square_wave_duty']:.2f}")
    elif features['telegraph_jumps'] >= MISALIGNMENT_COUNT_MIN and features['main_std'] > MISALIGNMENT_STD_MIN:
        main_misalign_prob += 35.0
        main_misalign_reasons.append(f"Telegraph pattern detected: {features['telegraph_jumps']} jumps, Std={features['main_std']:.1f} µΩ")
    elif features['telegraph_jumps'] >= 4:
        main_misalign_prob += 20.0
        main_misalign_reasons.append(f"Partial telegraph: {features['telegraph_jumps']} jumps")

    # Factor 2: Initial deviation/transient jump (20 points max)
    if features['has_initial_jump'] == 1 and features['initial_deviation'] > 120:
        main_misalign_prob += 20.0
        main_misalign_reasons.append(f"High initial transient (Std={features['initial_deviation']:.1f} µΩ) then stabilizes - classic misalignment signature")
    elif features['has_initial_jump'] == 1:
        main_misalign_prob += 12.0
        main_misalign_reasons.append(f"Initial deviation detected ({features['initial_deviation']:.1f} µΩ)")

    # Factor 3: Stepped shelf transitions (20 points max)
    if features['num_shelves'] >= 4 and features['main_range'] > SHELF_DETECTION_THRESHOLD:
        main_misalign_prob += 20.0
        main_misalign_reasons.append(f"Stepped transitions: {features['num_shelves']} discrete resistance plateaus (range {features['main_range']:.1f} µΩ)")
    elif features['num_shelves'] >= 3:
        main_misalign_prob += 12.0
        main_misalign_reasons.append(f"{features['num_shelves']} resistance shelves detected")

    # Factor 4: High variability (15 points max)
    if features['main_std'] > MISALIGNMENT_STD_MIN * 2:
        main_misalign_prob += 15.0
        main_misalign_reasons.append(f"Very high variability (Std={features['main_std']:.1f} µΩ, normal <15 µΩ)")
    elif features['main_std'] > MISALIGNMENT_STD_MIN:
        main_misalign_prob += 10.0
        main_misalign_reasons.append(f"Elevated variability (Std={features['main_std']:.1f} µΩ)")

    if main_misalign_prob >= 50.0:
        prob_str = f"{min(99.0, main_misalign_prob):.2f} %"
        sev = _get_severity(main_misalign_prob)
        desc = ". ".join(main_misalign_reasons)
        all_faults.append(_build_result("Main Contact Misalignment", prob_str, sev, desc))

    # ========================================================================
    # CLASS 2: MAIN CONTACT WEAR
    # Signature: Elevated resistance + Grassy noise + Uniform spikes
    # Probability Score: 0-100% based on wear severity
    # ========================================================================
    main_wear_prob = 0.0
    main_wear_reasons = []

    # Get DLRO value if available (key name varies by data source, so three
    # spellings are tried in order).
    dlro_value = kpis.get('DLRO Value (µΩ)', kpis.get('DLRO_Value_uOhm', kpis.get('dlro_uohm', None)))

    # Factor 1: Elevated resistance (50 points max - MOST CRITICAL)
    if features['main_mean'] > R_WEAR_CRITICAL_MIN:
        elevation = ((features['main_mean'] - R_HEALTHY_MEAN_IDEAL) / R_HEALTHY_MEAN_IDEAL) * 100
        main_wear_prob += 50.0
        main_wear_reasons.append(f"CRITICAL wear: Resistance {features['main_mean']:.1f} µΩ (healthy: 20-70 µΩ, {elevation:.0f}% above ideal). Severe erosion/material loss detected")
        # DLRO cross-confirmation adds a small bonus at each wear tier.
        if dlro_value is not None and dlro_value > 250:
            main_wear_prob += 8.0
            main_wear_reasons.append(f"Confirmed by DLRO: {dlro_value:.1f} µΩ")
    elif features['main_mean'] > R_WEAR_SEVERE_MIN:
        elevation = ((features['main_mean'] - R_HEALTHY_MEAN_IDEAL) / R_HEALTHY_MEAN_IDEAL) * 100
        main_wear_prob += 42.0
        main_wear_reasons.append(f"SEVERE wear: Resistance {features['main_mean']:.1f} µΩ ({elevation:.0f}% above ideal). Significant contact degradation")
        if dlro_value is not None and dlro_value > 180:
            main_wear_prob += 8.0
            main_wear_reasons.append(f"Confirmed by DLRO: {dlro_value:.1f} µΩ")
    elif features['main_mean'] > R_WEAR_MODERATE_MIN:
        main_wear_prob += 32.0
        main_wear_reasons.append(f"MODERATE wear: Resistance {features['main_mean']:.1f} µΩ (healthy <70 µΩ). Contact wear progressing")
        if dlro_value is not None and dlro_value > 100:
            main_wear_prob += 8.0
            main_wear_reasons.append(f"Confirmed by DLRO: {dlro_value:.1f} µΩ")
    elif features['main_mean'] > R_WEAR_EARLY_MIN:
        main_wear_prob += 20.0
        main_wear_reasons.append(f"EARLY wear signs: Resistance {features['main_mean']:.1f} µΩ (healthy <70 µΩ)")
        if dlro_value is not None and dlro_value > 70:
            main_wear_prob += 8.0
            main_wear_reasons.append(f"DLRO confirms: {dlro_value:.1f} µΩ")

    # Factor 2: Grassy/noisy pattern - Surface roughness (25 points max)
    if features['main_std'] > WEAR_STD_SEVERE:
        main_wear_prob += 25.0
        main_wear_reasons.append(f"Severe surface roughness: Std={features['main_std']:.1f} µΩ (healthy <15 µΩ). Indicates pitting/erosion")
    elif features['main_std'] > WEAR_STD_MODERATE:
        main_wear_prob += 18.0
        main_wear_reasons.append(f"Moderate roughness: Std={features['main_std']:.1f} µΩ (healthy <15 µΩ)")
    elif features['main_std'] > WEAR_STD_EARLY:
        main_wear_prob += 10.0
        main_wear_reasons.append(f"Surface roughness detected: Std={features['main_std']:.1f} µΩ")

    # Factor 3: Uniform grassy spikes throughout plateau (15 points max)
    if features['spike_density'] > 0.35:
        main_wear_prob += 15.0
        main_wear_reasons.append(f"Dense uniform spikes: {features['uniform_spikes']} spikes ({features['spike_density']*100:.1f}% density). Classic wear signature")
    elif features['spike_density'] > 0.20:
        main_wear_prob += 10.0
        main_wear_reasons.append(f"Grassy pattern: {features['uniform_spikes']} spikes detected")
    elif features['uniform_spikes'] > 10:
        main_wear_prob += 5.0
        main_wear_reasons.append(f"{features['uniform_spikes']} noise spikes in plateau")

    # Factor 4: Continuous noise level (10 points max)
    if features['noise_rms'] > 40:
        main_wear_prob += 10.0
        main_wear_reasons.append(f"High continuous noise: RMS={features['noise_rms']:.1f} µΩ")
    elif features['avg_noise'] > 25:
        main_wear_prob += 6.0
        main_wear_reasons.append(f"Elevated noise level: {features['avg_noise']:.1f} µΩ")

    if main_wear_prob >= 50.0:
        prob_str = f"{min(99.0, main_wear_prob):.2f} %"
        sev = _get_severity(main_wear_prob)
        desc = ". ".join(main_wear_reasons)
        all_faults.append(_build_result("Main Contact Wear", prob_str, sev, desc))

    # ========================================================================
    # CLASS 3: ARCING CONTACT WEAR
    # Signature: Sustained impulse spikes + Symmetric pattern + Healthy Phase 3
    # Probability Score: 0-100% based on arc severity
    # ========================================================================
    arcing_wear_prob = 0.0
    arcing_wear_reasons = []

    # Factor 1: Critical/Severe spikes in arcing zones (50 points max)
    if features['total_critical_spikes'] >= ARCING_SPIKE_COUNT_CRITICAL:
        arcing_wear_prob += 50.0
        arcing_wear_reasons.append(f"CRITICAL: {features['total_critical_spikes']} severe arc flashes detected (>8000 µΩ). Arcing contact severely eroded")
        if features['total_sustained_spikes'] >= 2:
            arcing_wear_prob += 8.0
            arcing_wear_reasons.append(f"{features['total_sustained_spikes']} sustained arc events (>3 samples width)")
    elif features['total_severe_spikes'] >= ARCING_SPIKE_COUNT_SEVERE:
        arcing_wear_prob += 40.0
        arcing_wear_reasons.append(f"SEVERE: {features['total_severe_spikes']} high-energy spikes (>5000 µΩ) in arcing zones")
        if features['total_sustained_spikes'] >= 2:
            arcing_wear_prob += 8.0
            arcing_wear_reasons.append(f"{features['total_sustained_spikes']} sustained arcs detected")
    elif features['total_severe_spikes'] >= 2:
        arcing_wear_prob += 28.0
        arcing_wear_reasons.append(f"{features['total_severe_spikes']} arcing spikes detected (>5000 µΩ)")
    elif features['total_moderate_spikes'] >= 5:
        arcing_wear_prob += 20.0
        arcing_wear_reasons.append(f"{features['total_moderate_spikes']} moderate arcing events (>3000 µΩ)")

    # Factor 2: Spike symmetry check (wear = symmetric; misalignment = asymmetric) (20 points max)
    if features['spike_symmetry'] < 1.4 and features['total_severe_spikes'] > 0:
        arcing_wear_prob += 20.0
        arcing_wear_reasons.append(f"Symmetric spike distribution (ratio {features['spike_symmetry']:.2f}). Confirms uniform arcing wear on both contacts")
    elif features['spike_symmetry'] < 1.8 and features['total_severe_spikes'] > 0:
        arcing_wear_prob += 12.0
        arcing_wear_reasons.append(f"Relatively symmetric pattern (ratio {features['spike_symmetry']:.2f})")

    # Factor 3: High arcing zone instability (15 points max)
    max_arcing_std = max(features['closing_std'], features['opening_std'])
    if max_arcing_std > ARCING_INSTABILITY_STD * 1.5:
        arcing_wear_prob += 15.0
        arcing_wear_reasons.append(f"Very high arcing instability: Std={max_arcing_std:.1f} µΩ (normal <500 µΩ)")
    elif max_arcing_std > ARCING_INSTABILITY_STD:
        arcing_wear_prob += 10.0
        arcing_wear_reasons.append(f"Elevated arcing zone variability: Std={max_arcing_std:.1f} µΩ")

    # Factor 4: Phase 3 health check (15 points max - confirms isolation to arcing contacts)
    if features['main_mean'] < R_HEALTHY_MAX and features['main_std'] < WEAR_STD_MODERATE:
        arcing_wear_prob += 15.0
        arcing_wear_reasons.append(f"Main contact healthy (Mean={features['main_mean']:.1f} µΩ, Std={features['main_std']:.1f} µΩ). Confirms wear isolated to arcing contacts")
    else:
        # If main contact also worn, arcing wear is secondary/co-existing
        if features['main_mean'] > R_WEAR_MODERATE_MIN:
            arcing_wear_prob -= 10.0  # Penalty: main wear likely primary

    # Disqualify if strong misalignment signature (asymmetric timing)
    if features['asymmetry_ratio'] > ASYMMETRY_RATIO_MODERATE:
        arcing_wear_prob -= 18.0
        # Don't add reason - just reduce probability

    if arcing_wear_prob >= 50.0:
        prob_str = f"{min(99.0, arcing_wear_prob):.2f} %"
        sev = _get_severity(arcing_wear_prob)
        desc = ". ".join(arcing_wear_reasons)
        all_faults.append(_build_result("Arcing Contact Wear", prob_str, sev, desc))

    # ========================================================================
    # CLASS 1: HEALTHY
    # Signature: Low resistance + Low variability + Smooth transitions
    # Probability Score: 100% minus penalties for any abnormalities
    # ULTRA-STRICT: Only high score if ALL parameters ideal
    # ========================================================================
    healthy_prob = 100.0
    healthy_reasons = []

    # Penalty 1: Elevated resistance (MOST CRITICAL - up to 55 points penalty)
    if features['main_mean'] > R_WEAR_CRITICAL_MIN:
        healthy_prob -= 55.0
    elif features['main_mean'] > R_WEAR_SEVERE_MIN:
        healthy_prob -= 48.0
    elif features['main_mean'] > R_WEAR_MODERATE_MIN:
        healthy_prob -= 40.0
    elif features['main_mean'] > R_WEAR_EARLY_MIN:
        healthy_prob -= 25.0
    elif features['main_mean'] > R_HEALTHY_MAX:
        healthy_prob -= 12.0

    # Penalty 2: High variability/noise (up to 30 points)
    if features['main_std'] > WEAR_STD_SEVERE:
        healthy_prob -= 30.0
    elif features['main_std'] > WEAR_STD_MODERATE:
        healthy_prob -= 22.0
    elif features['main_std'] > R_HEALTHY_STD_MAX:
        healthy_prob -= 12.0

    # Penalty 3: Telegraph/square wave pattern (up to 20 points)
    if features['telegraph_jumps'] > 8:
        healthy_prob -= 20.0
    elif features['telegraph_jumps'] > 5:
        healthy_prob -= 12.0
    elif features['telegraph_jumps'] > 2:
        healthy_prob -= 6.0

    # Penalty 4: Arcing spikes (up to 20 points)
    if features['total_critical_spikes'] > 0:
        healthy_prob -= 20.0
    elif features['total_severe_spikes'] > 1:
        healthy_prob -= 15.0
    elif features['total_moderate_spikes'] > 3:
        healthy_prob -= 10.0

    # Penalty 5: Timing asymmetry (up to 15 points)
    if features['asymmetry_ratio'] > ASYMMETRY_RATIO_SEVERE:
        healthy_prob -= 15.0
    elif features['asymmetry_ratio'] > ASYMMETRY_RATIO_MODERATE:
        healthy_prob -= 10.0
    elif features['asymmetry_ratio'] > 1.5:
        healthy_prob -= 5.0

    # Penalty 6: Bounces/oscillations (up to 12 points)
    if features['num_bounces'] > 4:
        healthy_prob -= 12.0
    elif features['num_bounces'] > 2:
        healthy_prob -= 7.0

    # Penalty 7: Grassy noise pattern (up to 10 points)
    if features['spike_density'] > 0.30:
        healthy_prob -= 10.0
    elif features['spike_density'] > 0.15:
        healthy_prob -= 5.0

    # Build healthy description if score is high
    if healthy_prob >= 50.0:
        healthy_reasons.append(f"Normal operation. Main Contact: Mean={features['main_mean']:.1f} µΩ (ideal: 20-70 µΩ), Std={features['main_std']:.1f} µΩ (smooth: <15 µΩ)")
        healthy_reasons.append(f"Timing: {features['asymmetry_ratio']:.2f} ratio (balanced: <1.5)")

        if features['total_critical_spikes'] == 0 and features['total_severe_spikes'] == 0:
            healthy_reasons.append("No abnormal arcing detected")

        if features['telegraph_jumps'] <= 2 and features['is_square_wave'] == 0:
            healthy_reasons.append("Smooth transitions, no misalignment patterns")

        prob_str = f"{max(0.0, healthy_prob):.2f} %"
        sev = "None" if healthy_prob >= 85.0 else "Low"
        desc = ". ".join(healthy_reasons)
        all_faults.append(_build_result("Healthy", prob_str, sev, desc))

    return all_faults
|
| 928 |
+
|
| 929 |
+
|
| 930 |
+
def classify_secondary_faults(features, phases, kpis, primary_faults):
    """
    Detect SECONDARY MECHANICAL/OPERATIONAL DEFECTS (Classes 6-10).
    Uses EXTREME STRICTNESS as per Gemini Agent 2 logic.
    Only reports if confidence >75% AND overwhelming evidence.

    Args:
        features: dict of waveform-derived features (durations, plateau
            mean/std, bounce/telegraph/shelf counts, ...).
        phases: phase segmentation data (not read directly in this function).
        kpis: dict of KPI values keyed as "Name (unit)"; entries may be absent.
        primary_faults: list of fault dicts from the primary classifier, each
            holding 'defect_name', 'Confidence', 'Severity', 'description'.

    Returns:
        List of fault dicts (same shape as primary classifier output) for
        Classes 6-10; empty list when no secondary fault scores above zero.
    """
    secondary_faults = []

    # Get primary defect names for context
    primary_names = [f['defect_name'] for f in primary_faults]

    # ========================================================================
    # CLASS 6: OPERATING MECHANISM MALFUNCTION (Slow/Fast Operation)
    # ========================================================================
    closing_time = kpis.get('Closing Time (ms)', None)
    opening_time = kpis.get('Opening Time (ms)', None)
    contact_speed = kpis.get('Contact Speed (m/s)', None)

    mechanism_score = 0
    mechanism_reasons = []
    kpi_count = 0  # number of independent timing KPIs that deviate

    # Check Closing Time (flag only outside nominal band widened by threshold)
    if closing_time is not None:
        if closing_time > CLOSING_TIME_NOM[1] * (1 + TIMING_DEVIATION_THRESHOLD):
            deviation = ((closing_time - CLOSING_TIME_NOM[1]) / CLOSING_TIME_NOM[1]) * 100
            mechanism_score += 35
            mechanism_reasons.append(f"Slow closing: {closing_time:.1f} ms (nominal 80-100 ms, {deviation:.1f}% slower)")
            kpi_count += 1
        elif closing_time < CLOSING_TIME_NOM[0] * (1 - TIMING_DEVIATION_THRESHOLD):
            deviation = ((CLOSING_TIME_NOM[0] - closing_time) / CLOSING_TIME_NOM[0]) * 100
            mechanism_score += 35
            mechanism_reasons.append(f"Fast closing: {closing_time:.1f} ms ({deviation:.1f}% faster)")
            kpi_count += 1

    # Check Opening Time
    if opening_time is not None:
        if opening_time > OPENING_TIME_NOM[1] * (1 + TIMING_DEVIATION_THRESHOLD):
            deviation = ((opening_time - OPENING_TIME_NOM[1]) / OPENING_TIME_NOM[1]) * 100
            mechanism_score += 35
            mechanism_reasons.append(f"Slow opening: {opening_time:.1f} ms (nominal 30-40 ms, {deviation:.1f}% slower)")
            kpi_count += 1
        elif opening_time < OPENING_TIME_NOM[0] * (1 - TIMING_DEVIATION_THRESHOLD):
            deviation = ((OPENING_TIME_NOM[0] - opening_time) / OPENING_TIME_NOM[0]) * 100
            mechanism_score += 35
            mechanism_reasons.append(f"Fast opening: {opening_time:.1f} ms ({deviation:.1f}% faster)")
            kpi_count += 1

    # Check Contact Speed
    if contact_speed is not None:
        if contact_speed < CONTACT_SPEED_NOM[0] * (1 - TIMING_DEVIATION_THRESHOLD):
            deviation = ((CONTACT_SPEED_NOM[0] - contact_speed) / CONTACT_SPEED_NOM[0]) * 100
            mechanism_score += 30
            mechanism_reasons.append(f"Low contact speed: {contact_speed:.2f} m/s (nominal 4.5-6.5 m/s, {deviation:.1f}% slower)")
            kpi_count += 1
        elif contact_speed > CONTACT_SPEED_NOM[1] * (1 + TIMING_DEVIATION_THRESHOLD):
            deviation = ((contact_speed - CONTACT_SPEED_NOM[1]) / CONTACT_SPEED_NOM[1]) * 100
            mechanism_score += 30
            mechanism_reasons.append(f"High contact speed: {contact_speed:.2f} m/s ({deviation:.1f}% faster)")
            kpi_count += 1

    # Confidence boost if multiple KPIs affected
    if kpi_count >= 2:
        mechanism_score += 15
        mechanism_reasons.append("Multiple timing parameters affected - confirms mechanism malfunction")

    if mechanism_score > 0:
        conf = min(95.0, mechanism_score)
        sev = _get_severity(conf)
        desc = ". ".join(mechanism_reasons)
        secondary_faults.append(_build_result("Operating Mechanism Malfunction", f"{conf:.2f} %", sev, desc))

    # ========================================================================
    # CLASS 7: DAMPING SYSTEM FAULT (Excessive Bouncing/Oscillation)
    # ========================================================================
    damping_score = 0
    damping_reasons = []

    # Count bounces in main contact zone (>5 distinct bounces with >100 µΩ amplitude)
    if features['num_bounces'] > BOUNCE_COUNT_THRESHOLD:
        damping_score += 50
        damping_reasons.append(f"Excessive bouncing detected: {features['num_bounces']} distinct bounces in main contact zone (>5 indicates damper failure)")
    elif features['num_bounces'] >= 5:
        damping_score += 35
        damping_reasons.append(f"{features['num_bounces']} bounces detected")

    # Check for oscillation pattern (not random noise)
    if features['main_std'] > 50 and features['num_bounces'] >= 5:
        damping_score += 25
        damping_reasons.append(f"Oscillation pattern in main contact (Std={features['main_std']:.1f} µΩ with structured bounces)")

    if damping_score > 0:
        conf = min(95.0, damping_score)
        sev = _get_severity(conf)
        desc = ". ".join(damping_reasons)
        secondary_faults.append(_build_result("Damping System Fault", f"{conf:.2f} %", sev, desc))

    # ========================================================================
    # CLASS 8: SF6 PRESSURE LEAKAGE
    # ========================================================================
    sf6_pressure = kpis.get('SF6 Pressure (bar)', None)

    sf6_score = 0
    sf6_reasons = []

    if sf6_pressure is not None:
        if sf6_pressure < SF6_PRESSURE_CRITICAL:
            sf6_score += 60
            sf6_reasons.append(f"SF6 pressure critically low: {sf6_pressure:.2f} bar (normal: 5.5-6.5 bar)")

            # Check for prolonged arc (secondary evidence)
            # NOTE(review): nesting reconstructed from a formatting-stripped
            # source; the two supporting checks are assumed to apply only once
            # the pressure reading itself is critical - confirm against the
            # original file.
            if features['dur_opening'] > ARC_QUENCH_DURATION_MAX:
                sf6_score += 25
                sf6_reasons.append(f"Prolonged arc quenching ({features['dur_opening']} ms) confirms gas leak")

            # Check if primary defect is Arcing Wear (supports SF6 leak hypothesis)
            if "Arcing Contact Wear" in primary_names:
                sf6_score += 10
                sf6_reasons.append("Arcing wear detected as primary defect - consistent with SF6 leak")
    else:
        # No SF6 KPI: Can only infer from waveform (low confidence)
        if features['dur_opening'] > ARC_QUENCH_DURATION_MAX + 10:
            if "Arcing Contact Wear" in primary_names:
                # Get arcing wear confidence (Confidence is stored as "NN.NN %")
                arcing_conf = 0
                for pf in primary_faults:
                    if pf['defect_name'] == "Arcing Contact Wear":
                        arcing_conf = float(pf['Confidence'].replace('%', '').strip())

                if arcing_conf > 85:
                    sf6_score += 55
                    sf6_reasons.append(f"Prolonged arc quenching ({features['dur_opening']} ms, normal <25 ms) with severe arcing wear - indicates possible SF6 leak")
                    sf6_reasons.append("WARNING: No SF6 pressure sensor data. Confidence limited to 70%")
                    sf6_score = min(sf6_score, 70)  # Cap at 70% without pressure KPI

    if sf6_score > 0:
        conf = min(95.0, sf6_score)
        sev = _get_severity(conf)
        desc = ". ".join(sf6_reasons)
        secondary_faults.append(_build_result("Pressure System Leakage (SF6 Gas Chamber)", f"{conf:.2f} %", sev, desc))

    # ========================================================================
    # CLASS 9: LINKAGE/CONNECTING ROD OBSTRUCTION
    # ========================================================================
    linkage_score = 0
    linkage_reasons = []

    # Detect "stutters" (flat plateaus within transitions)
    # This requires analyzing slope changes in transitions (complex detection)
    # Simplified: Use telegraph in main contact + increased duration as proxy

    if features['telegraph_jumps'] > STUTTER_COUNT_MIN and features['num_shelves'] > 3:
        # Check if operating time is increased
        total_op_time = features['dur_closing'] + features['dur_main'] + features['dur_opening']
        expected_time = 250  # Nominal ~250 ms total

        if total_op_time > expected_time * 1.15:  # >15% longer
            linkage_score += 50
            linkage_reasons.append(f"Detected {features['telegraph_jumps']} mechanical stutters with {features['num_shelves']} stepped plateaus")
            linkage_reasons.append(f"Total operation time {total_op_time} ms (expected ~{expected_time} ms, {((total_op_time/expected_time - 1)*100):.1f}% longer)")
    elif features['num_shelves'] >= 5:
        linkage_score += 35
        linkage_reasons.append(f"Multiple stepped plateaus ({features['num_shelves']}) indicate mechanical impedance")

    if linkage_score > 0:
        conf = min(95.0, linkage_score)
        sev = _get_severity(conf)
        desc = ". ".join(linkage_reasons)
        secondary_faults.append(_build_result("Linkage/Connecting Rod Obstruction/Damage", f"{conf:.2f} %", sev, desc))

    # ========================================================================
    # CLASS 10: FIXED CONTACT DAMAGE/DEFORMATION
    # ========================================================================
    dlro_value = kpis.get('DLRO Value (µΩ)', None)

    fixed_contact_score = 0
    fixed_contact_reasons = []

    if dlro_value is not None:
        if dlro_value > DLRO_CRITICAL:
            # Check if curve is smooth (not wear)
            if features['main_std'] < FIXED_CONTACT_STD_MAX:
                fixed_contact_score += 50
                fixed_contact_reasons.append(f"DLRO critically high: {dlro_value:.1f} µΩ (normal <50 µΩ, critical >100 µΩ)")
                fixed_contact_reasons.append(f"Smooth plateau (Std={features['main_std']:.1f} µΩ) indicates fixed contact/connection issue, not wear")
            else:
                # High DLRO but noisy = likely Main Contact Wear already detected
                if "Main Contact Wear" not in primary_names:
                    fixed_contact_score += 40
                    fixed_contact_reasons.append(f"DLRO high: {dlro_value:.1f} µΩ with noisy plateau")
                else:
                    # List as secondary with reduced confidence
                    fixed_contact_score += 30
                    fixed_contact_reasons.append(f"DLRO high: {dlro_value:.1f} µΩ (secondary to Main Contact Wear)")

        elif dlro_value > DLRO_MODERATE:
            if features['main_std'] < FIXED_CONTACT_STD_MAX:
                fixed_contact_score += 35
                fixed_contact_reasons.append(f"DLRO moderately elevated: {dlro_value:.1f} µΩ (normal <50 µΩ)")
    else:
        # No DLRO KPI: Can infer from smooth elevated plateau
        if features['main_mean'] > DLRO_MODERATE and features['main_std'] < FIXED_CONTACT_STD_MAX:
            if "Main Contact Wear" not in primary_names:
                fixed_contact_score += 30
                fixed_contact_reasons.append(f"Elevated but smooth plateau (Mean={features['main_mean']:.1f} µΩ, Std={features['main_std']:.1f} µΩ) suggests fixed contact issue")
                fixed_contact_reasons.append("WARNING: No DLRO sensor data. Confidence limited to 65%")
                fixed_contact_score = min(fixed_contact_score, 65)

    if fixed_contact_score > 0:
        # NOTE: this class caps at 90 rather than the 95 used elsewhere.
        conf = min(90.0, fixed_contact_score)
        sev = _get_severity(conf)
        desc = ". ".join(fixed_contact_reasons)
        secondary_faults.append(_build_result("Fixed Contact Damage/Deformation", f"{conf:.2f} %", sev, desc))

    return secondary_faults
|
| 1145 |
+
|
| 1146 |
+
|
| 1147 |
+
def _get_severity(probability):
|
| 1148 |
+
"""
|
| 1149 |
+
Determine severity based on defect probability score.
|
| 1150 |
+
|
| 1151 |
+
Args:
|
| 1152 |
+
probability: Float 0-100 representing defect probability
|
| 1153 |
+
|
| 1154 |
+
Returns:
|
| 1155 |
+
String: "Critical", "High", "Medium", "Low", or "None"
|
| 1156 |
+
"""
|
| 1157 |
+
if probability >= 90:
|
| 1158 |
+
return "Critical"
|
| 1159 |
+
elif probability >= 75:
|
| 1160 |
+
return "High"
|
| 1161 |
+
elif probability >= 60:
|
| 1162 |
+
return "Medium"
|
| 1163 |
+
elif probability >= 50:
|
| 1164 |
+
return "Low"
|
| 1165 |
+
else:
|
| 1166 |
+
return "None"
|
| 1167 |
+
|
| 1168 |
+
|
| 1169 |
+
def _build_result(name, conf, sev, desc):
|
| 1170 |
+
"""Helper to build fault result dictionary with proper Unicode handling"""
|
| 1171 |
+
return {
|
| 1172 |
+
"defect_name": name,
|
| 1173 |
+
"Confidence": conf,
|
| 1174 |
+
"Severity": sev,
|
| 1175 |
+
"description": desc
|
| 1176 |
+
}
|
| 1177 |
+
|
| 1178 |
+
|
| 1179 |
+
# =============================================================================
|
| 1180 |
+
# KPI HELPER FUNCTIONS
|
| 1181 |
+
# =============================================================================
|
| 1182 |
+
def parse_kpis_from_json(kpis_json):
    """
    Convert KPI JSON format to dictionary for internal use.

    Input format:
    {
        "kpis": [
            {"name": "Closing Time", "unit": "ms", "value": 87.8},
            ...
        ]
    }

    Output format:
    {
        "Closing Time (ms)": 87.8,
        ...
    }
    """
    if kpis_json is None:
        return {}

    # Already flattened ("Name (unit)": value) - pass through untouched.
    if isinstance(kpis_json, dict) and "kpis" not in kpis_json:
        return kpis_json

    # Flatten every entry of the "kpis" list into a "Name (unit)" key.
    flattened = {}
    for entry in kpis_json.get("kpis", []):
        label = entry.get("name", "")
        unit = entry.get("unit", "")
        if unit:
            label = f"{label} ({unit})"
        flattened[label] = entry.get("value", None)

    return flattened
|
| 1221 |
+
|
| 1222 |
+
|
| 1223 |
+
# =============================================================================
|
| 1224 |
+
# CENTRAL PIPELINE FUNCTION
|
| 1225 |
+
# =============================================================================
|
| 1226 |
+
def analyze_dcrm_from_dataframe(df, kpis=None):
    """
    Central pipeline function to analyze DCRM data from DataFrame.

    Args:
        df: DataFrame with Resistance column (401 points)
        kpis: KPI data in JSON format or dict format
            JSON format: {"kpis": [{"name": "...", "unit": "...", "value": ...}, ...]}
            Dict format: {"Name (unit)": value, ...}

    Returns:
        JSON with fault detection results and classifications
    """
    # Normalize the raw DataFrame into the canonical column layout.
    clean_df = standardize_input(df)

    # Waveform samples live in the T_* columns; only the first row is analyzed.
    sample_cols = [col for col in clean_df.columns if col.startswith('T_')]
    waveform = clean_df.iloc[0][sample_cols].values

    # Accept either the JSON list format or an already-flat dict for KPIs,
    # then hand off to the advanced analyzer.
    return analyze_dcrm_advanced(waveform, kpis=parse_kpis_from_json(kpis))
|
| 1255 |
+
|
| 1256 |
+
|
| 1257 |
+
if __name__ == "__main__":
    # Demo entry point: run the full pipeline on a local sample file and
    # pretty-print the resulting JSON.
    # NOTE(review): hard-coded absolute Windows path - this only works on the
    # original author's machine; parameterize (argv/env) before deploying.
    df = pd.read_csv('C:\\Users\\rkhanke\\Downloads\\parallel_proccessing\\combined\\data\\df3_final.csv')

    # New JSON format for KPIs (list of {name, unit, value} entries, flattened
    # by parse_kpis_from_json inside the pipeline).
    sample_kpis = {
        "kpis": [
            {"name": "Closing Time", "unit": "ms", "value": 90.0},
            {"name": "Opening Time", "unit": "ms", "value": 35.0},
            {"name": "DLRO Value", "unit": "µΩ", "value": 299.93},
            {"name": "Peak Resistance", "unit": "µΩ", "value": 408.0},
            {"name": "Main Wipe", "unit": "mm", "value": 46.0},
            {"name": "Arc Wipe", "unit": "mm", "value": 63.0},
            {"name": "Contact Travel Distance", "unit": "mm", "value": 550.0},
            {"name": "Contact Speed", "unit": "m/s", "value": 5.5},
            {"name": "Peak Close Coil Current", "unit": "A", "value": 5.2},
            {"name": "Peak Trip Coil 1 Current", "unit": "A", "value": 5.0},
            {"name": "Peak Trip Coil 2 Current", "unit": "A", "value": 4.8},
            {"name": "Ambient Temperature", "unit": "°C", "value": 28.4}
        ]
    }

    result = analyze_dcrm_from_dataframe(df, kpis=sample_kpis)
    # ensure_ascii=False keeps µΩ/°C readable in the printed JSON.
    print(json.dumps(result, indent=2, ensure_ascii=False))
|
| 1328 |
+
|
core/models/__pycache__/vit_classifier.cpython-313.pyc
ADDED
|
Binary file (12.2 kB). View file
|
|
|
core/models/__pycache__/vit_classifier.cpython-39.pyc
ADDED
|
Binary file (9.19 kB). View file
|
|
|
core/models/vit_classifier.py
ADDED
|
@@ -0,0 +1,261 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Previous Name: analysis/models/vit_classifier.py
|
| 2 |
+
import os
|
| 3 |
+
import matplotlib
|
| 4 |
+
matplotlib.use('Agg') # Set backend to non-interactive
|
| 5 |
+
import matplotlib.pyplot as plt
|
| 6 |
+
import numpy as np
|
| 7 |
+
from PIL import Image
|
| 8 |
+
import requests
|
| 9 |
+
import google.generativeai as genai
|
| 10 |
+
import json
|
| 11 |
+
|
| 12 |
+
# Parameters
# Image dimensions - presumably the ViT input size; defined here but not
# referenced by the functions in this module (TODO confirm external use).
IMG_HEIGHT = 224
IMG_WIDTH = 224

# Define classes (must match training - sorted alphabetically)
# NOTE(review): the label spellings are deliberately inconsistent
# (underscores vs spaces vs case) because they must match the training
# labels byte-for-byte - do not "normalize" them.
CLASSES = sorted([
    "Healthy",
    "Arcing_Contact_Misalignment",
    "Arcing_Contact_Wear",
    "Main Contact Misalignment",
    "main_contact_wear"
])

# Deployed ViT Model URL
# NOTE(review): hard-coded plain-HTTP IP endpoint; consider moving to
# configuration/environment before production use.
DEPLOYED_VIT_URL = "http://143.110.244.235/predict"
|
| 27 |
+
|
| 28 |
+
def plot_resistance_for_vit(df, save_path="temp_vit_plot.png"):
    """
    Generates and saves the resistance plot in the format expected by the ViT model.
    Plots all three curves: Green (Resistance), Blue (Current), Red (Travel)
    """
    fig, ax = plt.subplots(figsize=(10, 6))

    # Draw whichever of the three expected columns is present, with the
    # colour convention the ViT model was trained on.
    curve_styles = (
        ('Resistance', 'green'),
        ('Current', 'blue'),
        ('Travel', 'red'),
    )
    for column, colour in curve_styles:
        if column in df.columns:
            ax.plot(df[column], color=colour, label=column)

    ax.legend()
    ax.set_title("DCRM Trace")
    ax.set_xlabel("Time (Samples)")
    ax.set_ylabel("Value")
    ax.grid(True)

    # Persist to disk and release the figure (Agg backend, no display).
    plt.savefig(save_path)
    plt.close(fig)
    return True
|
| 54 |
+
|
| 55 |
+
def get_remote_vit_probabilities(image_path):
    """
    Get probability distribution for all classes from the deployed ViT API.
    Returns: dict of {class_name: probability}

    Returns an empty dict on any failure (missing file, non-200 status,
    malformed payload, or network error); errors are printed, not raised.
    """
    try:
        if not os.path.exists(image_path):
            print(f"Error: Image file not found at {image_path}")
            return {}

        with open(image_path, "rb") as f:
            # Explicitly set the filename and MIME type
            # NOTE(review): MIME type is always "image/jpeg" even for PNG
            # inputs - presumably the server ignores it; confirm.
            files = {"file": (os.path.basename(image_path), f, "image/jpeg")}
            # Add headers
            headers = {"accept": "application/json"}

            print(f"Sending request to {DEPLOYED_VIT_URL}...")
            response = requests.post(DEPLOYED_VIT_URL, headers=headers, files=files, timeout=10)

            if response.status_code != 200:
                print(f"Error: API returned status code {response.status_code}")
                print(f"Response: {response.text}")
                return {}

            data = response.json()

            if "probabilities" in data:
                return data["probabilities"]
            else:
                print(f"Error: 'probabilities' key not found in response: {data}")
                return {}

    except requests.exceptions.RequestException as e:
        # Network-level failures (timeout, DNS, connection refused, ...)
        print(f"Request Error: {e}")
        return {}
    except Exception as e:
        # Anything else (e.g. invalid JSON body)
        print(f"Error processing image with remote API: {e}")
        return {}
|
| 93 |
+
|
| 94 |
+
|
| 95 |
+
def get_gemini_prediction(image, api_key=None):
    """
    Get Gemini's expert analysis of the DCRM trace.
    Returns: (probabilities_dict, error_message)

    Exactly one element of the pair is non-None: on success the parsed
    per-class probability dict and None; on failure None and the error text.
    Falls back to GOOGLE_API_KEY / GEMINI_API_KEY env vars when api_key
    is not supplied.
    """
    if not api_key:
        api_key = os.environ.get("GOOGLE_API_KEY") or os.environ.get("GEMINI_API_KEY")

    if not api_key:
        return None, "API Key missing"

    try:
        genai.configure(api_key=api_key)
        model = genai.GenerativeModel('gemini-2.0-flash')
        # Prompt instructs the model to emit a bare JSON object mapping each
        # entry of CLASSES to a probability; {CLASSES} is interpolated and
        # {{ }} are literal braces in the example block.
        prompt = f"""
System Role: Principal DCRM & Kinematic Analyst
Role:
You are an expert High-Voltage Circuit Breaker Diagnostician. Your task is to interpret Dynamic Contact Resistance (DCRM) traces to detect specific electrical and mechanical faults.

The input image contains 3 line charts:
- Green: Resistance profile
- Blue: Current profile
- Red: Travel profile

1. Diagnostic Heuristics & Defect Taxonomy
Map the visual DCRM trace to ONLY the following defect types. Use the specific Visual Heuristics to confirm detection.

Defect Type | Visual Heuristic (The "Hint") | Mechanical Significance (Root Cause)
--- | --- | ---
Main Contact Issue (Corrosion/Oxidation) | "The Significant Grass"<br>In the fully closed plateau, look for pronounced, erratic instability. <br>• Ignore: Uniform, low-amplitude fuzz (sensor noise).<br>• Flag: Jagged, irregular peaks/valleys with significant amplitude (e.g., > 15–20 μΩ variance). The trace looks like a "rough rocky road," not just a "gravel path." | Surface Pathology: The Silver (Ag) plating is compromised (fretting corrosion) or heavy oxidation has occurred. The current path is constantly shifting through microscopic non-conductive spots.
Arcing Contact Wear | "Big Spikes & Short Wipe"<br>Resistance spikes are frequent and significantly large (high amplitude). Crucially, the duration of the arcing zone (the time between first touch and main contact touch) is noticeably shorter than expected. | Ablation: The Tungsten-Copper (W-Cu) tips are heavily eroded. The contact length has physically diminished, risking failure to commutate current during opening.
Misalignment (Main) | "The Struggle to Settle"<br>There are significant, high-amplitude peaks just before the trace tries to settle into the stable plateau. These are not bounces; they are "struggles" to mate that persist longer than 3-5ms. | Mechanical Centering: The moving contact pin is hitting the side or edge of the stationary rosette fingers before forcing its way in. Caused by loose nuts, kinematic play, or guide ring failure.
Misalignment (Arcing) | "Rough Entry"<br>Erratic resistance spikes occurring specifically during the initial entry (commutation), well before the main contacts engage. | Tip Eccentricity: The arcing pin is not entering the nozzle concentrically. It is scraping the nozzle throat or hitting the side, indicating a bent rod or skewed interrupter.
Slow Mechanism | "Stretched Time"<br>The entire resistance profile is elongated along the X-axis. Events happen later than normal. | Energy Starvation: Low spring charge, hydraulic pressure loss, or high friction due to hardened grease in the linkage.

2. Analysis Logic (The "Signal-to-Noise" Filter)
Before declaring a defect, run these logic checks:
The "Noise Floor" Test (For Main Contacts):
Is the plateau variance uniform and small (< 10 μΩ)? -> Classify as Healthy (Sensor/Manufacturing artifact).
Is the variance erratic, jagged, and large (> 15 μΩ)? -> Classify as Corrosion/Oxidation.
The "Duration" Test (For Misalignment):
Are the pre-plateau peaks < 2ms? -> Ignore (Benign Bounce).
Do the peaks persist > 3-5ms before settling? -> Classify as Misalignment.
The "Combination" Check:
Does the trace show both "Rough Entry" AND "Stretched Time"? -> Report Both (Misalignment + Slow Mechanism).

3. Output Structure
You must return a JSON object containing the probability (confidence score between 0.0 and 1.0) for EACH of the following classes. The sum of probabilities should ideally be close to 1.0.

Classes: {CLASSES}

Example Output Format:
{{
"Healthy": 0.1,
"Arcing_Contact_Misalignment": 0.05,
"Arcing_Contact_Wear": 0.8,
"Main Contact Misalignment": 0.02,
"main_contact_wear": 0.03
}}

RETURN ONLY THE JSON OBJECT. NO MARKDOWN. NO EXPLANATION.
"""
        response = model.generate_content([prompt, image])

        # Clean response to ensure valid JSON
        text = response.text.strip()
        # Remove markdown code blocks if present
        # (handles both ```json fences and bare ``` fences)
        if text.startswith("```json"):
            text = text[7:]
        if text.startswith("```"):
            text = text[3:]
        if text.endswith("```"):
            text = text[:-3]
        text = text.strip()

        probs = json.loads(text)
        return probs, None
    except Exception as e:
        # Any SDK/parse failure is reported as a string, never raised.
        return None, str(e)
|
| 174 |
+
|
| 175 |
+
|
| 176 |
+
def predict_dcrm_image(image_path, model_path=None, api_key=None):
    """
    Predicts the class of the DCRM image using Deployed ViT + Gemini Ensemble.
    Returns: (predicted_class, confidence_score, details_dict)

    The details_dict contains:
    - vit_probs: Dictionary of ViT probabilities for each class (from deployed API)
    - gemini_probs: Dictionary of Gemini probabilities for each class
    - ensemble_scores: Dictionary of combined scores for each class

    NOTE(review): model_path is accepted for interface compatibility but is
    never read in this implementation (prediction is fully remote).
    """
    try:
        # Get API key from environment if not provided
        if not api_key:
            api_key = os.environ.get("GOOGLE_API_KEY") or os.environ.get("GEMINI_API_KEY")

        # Load image for Gemini
        image = Image.open(image_path).convert('RGB')

        # 1. ViT Prediction (Remote)
        vit_probs = get_remote_vit_probabilities(image_path)

        if not vit_probs:
            print("Warning: Failed to get ViT probabilities from remote API.")
            # We can continue if we want to rely on Gemini, or return failure.
            # For now, let's continue but note the failure.

        # 2. Gemini Prediction
        # NOTE(review): the returned error string is ignored here; only the
        # truthiness of gemini_probs is used downstream.
        gemini_probs, error = get_gemini_prediction(image, api_key)

        # Ensemble Logic: sum raw probabilities from both sources.
        ensemble_scores = {}

        # Initialize with 0.0 for all classes
        for cls in CLASSES:
            ensemble_scores[cls] = 0.0

        # Add ViT scores (unknown class names from the API are dropped)
        if vit_probs:
            for cls, prob in vit_probs.items():
                if cls in ensemble_scores:
                    ensemble_scores[cls] += prob

        # Add Gemini scores
        if gemini_probs:
            for cls, prob in gemini_probs.items():
                if cls in ensemble_scores:
                    ensemble_scores[cls] += prob

        # If both failed, return error
        if not vit_probs and not gemini_probs:
            return None, 0.0, {}

        # Sort classes by score (descending)
        sorted_classes = sorted(ensemble_scores.items(), key=lambda item: item[1], reverse=True)

        best_class = sorted_classes[0][0]
        best_score = sorted_classes[0][1]

        # Conditional Logic - Use fallback only when Healthy is at top with low score
        # NOTE(review): `best_score < 1.0` is redundant given `best_score < 0.8`
        # in the same condition - kept as-is to preserve behavior.
        if best_score < 1.0 and best_class == "Healthy" and best_score < 0.8:
            if len(sorted_classes) > 1:
                best_class = sorted_classes[1][0]
                best_score = sorted_classes[1][1]

        # Normalize score
        # If we had both models, max score is 2.0. If only one, max is 1.0.
        divisor = 0.0
        if vit_probs: divisor += 1.0
        if gemini_probs: divisor += 1.0

        if divisor > 0:
            normalized_confidence = best_score / divisor
        else:
            normalized_confidence = 0.0

        return best_class, normalized_confidence, {
            "vit_probs": vit_probs,
            "gemini_probs": gemini_probs,
            "ensemble_scores": ensemble_scores
        }

    except Exception as e:
        # Broad catch: any failure (bad image path, PIL error, ...) degrades
        # to a (None, 0.0, {}) result with a printed traceback.
        print(f"Error in predict_dcrm_image: {e}")
        import traceback
        traceback.print_exc()
        return None, 0.0, {}
|
core/signal/__pycache__/arcing.cpython-313.pyc
ADDED
|
Binary file (5.82 kB). View file
|
|
|
core/signal/__pycache__/phases.cpython-313.pyc
ADDED
|
Binary file (41.2 kB). View file
|
|
|
core/signal/__pycache__/phases.cpython-39.pyc
ADDED
|
Binary file (24.4 kB). View file
|
|
|
core/signal/__pycache__/segmentation.cpython-313.pyc
ADDED
|
Binary file (6.04 kB). View file
|
|
|
core/signal/arcing.py
ADDED
|
@@ -0,0 +1,143 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Previous Name: analysis/arcing_analysis.py
|
| 2 |
+
import pandas as pd
|
| 3 |
+
import numpy as np
|
| 4 |
+
from .segmentation import identify_arcing_events
|
| 5 |
+
|
| 6 |
+
def calculate_velocity(df, window=5):
    """Return the absolute contact velocity (m/s) from Travel (mm) vs Time (ms).

    Travel is smoothed with a centred rolling mean before differentiation to
    suppress sensor noise; edge samples the rolling window cannot cover fall
    back to the raw Travel values.

    Args:
        df (pd.DataFrame): Must contain 'Travel' (mm) and 'Time_ms' columns.
        window (int): Rolling-mean window width used for smoothing.

    Returns:
        np.ndarray: Per-sample absolute velocity (mm/ms == m/s).
    """
    # Operate on a private copy to avoid SettingWithCopyWarning on the caller's frame.
    frame = df.copy()
    smoothed = frame['Travel'].rolling(window=window, center=True).mean()
    frame['Travel_Smooth'] = smoothed.fillna(frame['Travel'])

    # Numerical derivative d(travel)/d(time), guarding against zero time steps.
    travel_delta = np.gradient(frame['Travel_Smooth'])
    time_delta = np.gradient(frame['Time_ms'])
    speed = np.where(time_delta != 0, travel_delta / time_delta, 0)
    return np.abs(speed)
|
| 17 |
+
|
| 18 |
+
def calculate_arcing_parameters(df, llm=None):
    """
    Calculates T0, T1, T2, T3, T4 and derived arcing health metrics.

    Event timestamps (all in ms):
        T0 - breaker closed (nominal origin), T1 - motion start,
        T2 - main contact separation, T4 - arcing contact separation,
        T3 - arcing duration (T4 - T2).

    If `llm` is provided, it uses the AI agent to identify the event timestamps (Robust).
    Otherwise, it uses deterministic threshold logic (Fallback).

    Args:
        df (pd.DataFrame): DCRM trace with 'Time_ms', 'Travel' and 'Resistance' columns.
        llm: Optional LangChain LLM passed through to `identify_arcing_events`.

    Returns:
        dict: {"events": {...}, "metrics": {...}, "status": str, "method": str}.
              The caller's DataFrame is never mutated.
    """
    results = {
        "events": {},
        "metrics": {},
        "status": "Unknown",
        "method": "Deterministic"
    }

    try:
        # Work on a private copy so the 'Velocity' column does not leak into
        # the caller's DataFrame (the previous implementation mutated the input).
        df = df.copy()

        # 1. Calculate Velocity
        df['Velocity'] = calculate_velocity(df)

        t0, t1, t2, t4 = None, None, None, None

        # --- A. LLM-Based Detection (Preferred) ---
        if llm:
            try:
                ai_result = identify_arcing_events(df, llm)
                if "events" in ai_result:
                    events = ai_result["events"]
                    t0 = events.get("T0_breaker_closed")
                    t1 = events.get("T1_motion_start")
                    t2 = events.get("T2_main_separation")
                    t4 = events.get("T4_arcing_separation")
                    results["method"] = "AI-Enhanced"
                    results["ai_reasoning"] = ai_result.get("reasoning", "")
            except Exception as e:
                print(f"AI Segmentation failed, falling back to deterministic: {e}")

        # --- B. Deterministic Fallback ---
        if t2 is None or t4 is None:
            # T0: Breaker Closed (nominal time origin)
            t0 = 0.0

            # T1: Motion Starts - first sample whose travel departs > 1 mm from rest
            start_pos = df['Travel'].iloc[0]
            motion_mask = abs(df['Travel'] - start_pos) > 1.0
            if motion_mask.any():
                t1 = float(df.loc[motion_mask.idxmax(), 'Time_ms'])

            # T2 & T4: State-Based resistance thresholds (micro-ohms)
            R_ARC_MIN = 150.0    # resistance level indicating arcing-contact conduction
            R_OPEN_MIN = 1500.0  # resistance level indicating fully open contacts

            # Explicit None check so a legitimate t1 == 0.0 is honoured
            # (the old `t1 if t1 else 0` collapsed 0.0 to 0, same value, but
            # the intent is "fall back only when T1 was not found").
            search_start = t1 if t1 is not None else 0
            arcing_candidates = df[(df['Time_ms'] > search_start) & (df['Resistance'] >= R_ARC_MIN)]

            if not arcing_candidates.empty:
                t2 = float(df.loc[arcing_candidates.index[0], 'Time_ms'])

                open_candidates = df[(df['Time_ms'] > t2) & (df['Resistance'] >= R_OPEN_MIN)]
                if not open_candidates.empty:
                    t4 = float(df.loc[open_candidates.index[0], 'Time_ms'])

        # --- Store Events ---
        results['events']['T0_static_resistance'] = 0.0  # Placeholder, recalculated below
        if t0 is not None: results['events']['T0_time'] = t0
        if t1 is not None: results['events']['T1_motion_start'] = t1
        if t2 is not None: results['events']['T2_main_separation'] = t2
        if t4 is not None: results['events']['T4_arcing_separation'] = t4

        # T3: Duration. Use `is not None` so a T2/T4 at time 0.0 is not
        # silently treated as "missing" (truthiness bug in the old code).
        if t2 is not None and t4 is not None:
            results['events']['T3_duration_ms'] = round(t4 - t2, 2)

        # --- Metric Calculation (Deterministic Math) ---
        if t2 is not None and t4 is not None:
            # T0 Resistance Value: static resistance averaged over the first 10 ms
            t0_val = df[df['Time_ms'] < 10]['Resistance'].mean()
            results['events']['T0_static_resistance'] = round(t0_val, 2) if not np.isnan(t0_val) else 0.0

            # Ra: Average Arcing Resistance. A 0.5 ms buffer on each side
            # excludes the T2 rising edge and the T4 open-circuit spike.
            arcing_zone = df[(df['Time_ms'] >= t2 + 0.5) & (df['Time_ms'] < t4 - 0.5)]

            if not arcing_zone.empty:
                ra = arcing_zone['Resistance'].mean()
            else:
                # Fallback if zone is too small (single point)
                ra = df[(df['Time_ms'] >= t2) & (df['Time_ms'] <= t4)]['Resistance'].mean()

            results['metrics']['Ra_avg_arcing_res'] = round(ra, 2)

            # Speed at Separation: mean contact velocity while arcing
            speed_during_arc = arcing_zone['Velocity'].mean() if not arcing_zone.empty else 0.0
            results['metrics']['Speed_at_separation'] = round(speed_during_arc, 2)

            # Da: Arcing Contact Wipe = duration * mean separation speed
            da = (t4 - t2) * speed_during_arc
            results['metrics']['Da_arcing_wipe'] = round(da, 2)

            # Wear Index: Ra (converted to mOhm) * Da
            ra_mOhm = ra / 1000.0
            wear_index = ra_mOhm * da
            results['metrics']['Wear_Index'] = round(wear_index, 2)

            # --- Health Assessment ---
            if da < 10.0: da_status = "Critical (Short Wipe)"
            elif da < 15.0: da_status = "Warning (Low Wipe)"
            else: da_status = "Healthy"
            results['metrics']['Da_Status'] = da_status

            if wear_index > 10.0: wear_status = "Critical (Replace Interrupter)"
            elif wear_index > 5.0: wear_status = "Warning (Worn)"
            else: wear_status = "Healthy"
            results['metrics']['Wear_Status'] = wear_status

            results['status'] = "Success"

        else:
            results['status'] = "Incomplete Data (Could not find T2/T4)"

    except Exception as e:
        results['status'] = f"Error: {str(e)}"

    return results
|
core/signal/phases.py
ADDED
|
@@ -0,0 +1,687 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Previous Name: analysis/phase_analysis.py
|
| 2 |
+
import pandas as pd
|
| 3 |
+
import numpy as np
|
| 4 |
+
import json
|
| 5 |
+
from langchain_core.messages import HumanMessage
|
| 6 |
+
|
| 7 |
+
def get_coil_state(coil_series, active_threshold=0.1):
    """Classify a coil's behaviour (Close, Trip 1, Trip 2) over a phase window.

    Args:
        coil_series (pd.Series): Coil current samples for the phase.
        active_threshold (float): Mean level above which the coil counts as active.

    Returns:
        str: 'Active', 'Inactive', or 'Unknown' when no samples are available.
    """
    # No data for this window -> state cannot be determined.
    if coil_series.empty:
        return "Unknown"
    # A simple mean-level check is often sufficient for coil states in DCRM
    # phases: sustained energisation pulls the average above the threshold.
    return "Active" if coil_series.mean() > active_threshold else "Inactive"
|
| 25 |
+
|
| 26 |
+
|
| 27 |
+
def generate_llm_description_prompt(phase_data_summary, phase_info):
    """
    Generates an optimized prompt for Gemini to describe a DCRM phase
    and its diagnostic verdict based on numerical summary.

    Args:
        phase_data_summary (dict): A dictionary containing summarized numerical data.
            Keys read here: time_interval, resistance_min/max/avg/std,
            current_min/max/avg/std, travel_min/max/avg/std,
            close_coil_state/avg, trip_coil_1_state/avg, trip_coil_2_state/avg,
            programmatic_health_status.
        phase_info (dict): A dictionary containing static phase information.
            Keys read here: name, phaseTitle, description, id, phaseNumber.

    Returns:
        str: The optimized prompt string.
    """
    # DCRM Phase-Wise Diagnostic Reference
    # Static domain-knowledge text embedded verbatim in the prompt so the LLM
    # judges the numerical summary against fixed healthy/unhealthy criteria.
    dcrm_diagnostic_reference = """
    **DCRM Phase-Wise Diagnostic Criteria Reference:**

    Phase 1 - Pre-Contact Travel:
    Healthy: High/stable resistance (around open circuit baseline), zero/low current (around open circuit baseline), smooth increasing travel, clean close coil energization.
    Unhealthy: Premature drop in resistance or current rise (leakage/insulation breakdown), erratic/non-linear travel (mechanical binding), hesitations/plateaus (obstructions), slow/incomplete close coil activation.

    Phase 2 - Arcing Contact Engagement:
    Healthy: Rapid sharp resistance drop from high to low, rapid current rise from low to high, smooth travel with minimal bounce (<5% amplitude, <10ms duration).
    Unhealthy: Slow/incomplete resistance drop (contamination/erosion/misalignment), excessive/prolonged bounce (worn contacts/weak springs), current fails to reach expected level or rises slowly, abnormal current oscillations.

    Phase 3 - Main Contact Conduction:
    Healthy: Very low stable resistance (around closed circuit baseline, low StdDev), high constant current at test level (around closed circuit baseline, low StdDev), stable travel at maximum (fully closed, low StdDev).
    Unhealthy: Elevated resistance (> 50-100 μΩ above baseline - severe erosion/contamination), fluctuating resistance (poor pressure/loose connections), gradual resistance increase (progressive degradation), unstable travel (insufficient overtravel).

    Phase 4 - Contact Parting & Arc Elongation:
    Healthy: Smooth, near-linear resistance rise from low to high with moderate deviation (not excessive spikes), possible short intermediate plateau during main contact parting before arc elongation, steady current drop from high to low, smooth consistent travel decrease at expected opening speed, clean trip coil energization. Resistance should show controlled progressive increase without large erratic spikes.
    Unhealthy: Excessively spiked resistance with large deviations (severe arcing/welding), abrupt or erratic resistance changes, current drop too slow or fails to decrease steadily, erratic/non-linear travel (binding/friction), hesitations in travel, slow/inconsistent opening speed, prolonged arcing indicated by chaotic resistance pattern.

    Phase 5 - Final Open State:
    Healthy: High/stable resistance (around open circuit baseline), zero/low current (around open circuit baseline), stable travel at minimum (fully open position, no rebound/drift).
    Unhealthy: Resistance not reaching open circuit baseline or current not reaching open circuit baseline (leakage/contaminated insulation), travel rebound/drift (damping issues), travel not reaching full open position (obstructions).
    """

    # Reference key_characteristics for each phase
    # Canonical phrases per phase number (1-5); looked up below with
    # phase_info['phaseNumber'] and shown to the LLM as examples of the
    # expected granularity for its own "key_characteristics" output.
    reference_key_characteristics = {
        1: [
            "Contacts are physically separated",
            "No electrical conduction",
            "Travel distance increasing as contacts approach",
            "High resistance due to air gap"
        ],
        2: [
            "Sharp drop in resistance",
            "Current injection initiation",
            "Initial contact touch point",
            "Potential for contact bounce"
        ],
        3: [
            "Minimum contact resistance",
            "Maximum current flow",
            "No mechanical movement",
            "Trip coil energization initiated (towards end)"
        ],
        4: [
            "Physical contact separation",
            "Arc elongation",
            "Current interruption",
            "Rapid decrease in travel"
        ],
        5: [
            "Circuit fully isolated",
            "Contacts at rest position",
            "Arc fully extinguished",
            "Trip coil de-energization"
        ]
    }

    # Assemble the final prompt. The doubled braces {{...}} at the end render
    # as literal braces in the f-string, giving the LLM a concrete JSON
    # template with the four required keys.
    prompt = f"""
    You are an expert in Dynamic Contact Resistance Measurement (DCRM) analysis for circuit breakers.
    Your task is to generate FOUR specific outputs for a DCRM phase based on numerical data analysis:

    1. "key_characteristics" - Output 3-4 short, direct phrases describing the main physical/electrical events of this phase.
    2. "event_synopsis" - A concise 1-2 sentence summary of what actually occurred in this phase.
    3. "diagnostic_verdict_details" - A detailed 2-4 sentence analysis explaining the health verdict with specific numerical evidence from the "Numerical Data Summary" and justifying it against the "DCRM Phase-Wise Diagnostic Criteria Reference". Be explicit with numbers and thresholds.
    4. "confidence_score" - An integer from 0 to 100 indicating your confidence in the diagnostic verdict.

    **Phase Context:**
    - Phase Name: {phase_info['name']}
    - Operation Title: {phase_info['phaseTitle']}
    - Subheading: {phase_info['description']}
    - Time Interval: {phase_data_summary['time_interval']}
    - Phase ID: {phase_info['id']}

    **Numerical Data Summary for this Phase:**
    - Resistance: Min={phase_data_summary['resistance_min']:.2f} μΩ, Max={phase_data_summary['resistance_max']:.2f} μΩ, Avg={phase_data_summary['resistance_avg']:.2f} μΩ, StdDev={phase_data_summary['resistance_std']:.2f} μΩ
    - Current: Min={phase_data_summary['current_min']:.2f} A, Max={phase_data_summary['current_max']:.2f} A, Avg={phase_data_summary['current_avg']:.2f} A, StdDev={phase_data_summary['current_std']:.2f} A
    - Travel: Min={phase_data_summary['travel_min']:.2f} mm, Max={phase_data_summary['travel_max']:.2f} mm, Avg={phase_data_summary['travel_avg']:.2f} mm, StdDev={phase_data_summary['travel_std']:.2f} mm
    - Close Coil: State = '{phase_data_summary['close_coil_state']}' (Avg: {phase_data_summary['close_coil_avg']:.2f} A)
    - Trip Coil 1: State = '{phase_data_summary['trip_coil_1_state']}' (Avg: {phase_data_summary['trip_coil_1_avg']:.2f} A)
    - Trip Coil 2: State = '{phase_data_summary['trip_coil_2_state']}' (Avg: {phase_data_summary['trip_coil_2_avg']:.2f} A)

    **Programmatic Health Status:** {phase_data_summary['programmatic_health_status']}

    {dcrm_diagnostic_reference}

    **Reference key_characteristics for this phase:**
    {reference_key_characteristics.get(phase_info['phaseNumber'], [])}

    **Instructions:**
    - Analyze the numerical data against the diagnostic criteria reference above for the specific phase.
    - Focus the 'diagnostic_verdict_details' on explaining *why* the 'Programmatic Health Status' was assigned, using the specific measured values provided.
    - For 'key_characteristics', output 3-4 short, direct phrases describing the main physical/electrical events of this phase.
    - Provide specific numerical evidence in diagnostic_verdict_details.
    - For 'confidence_score', assess how well the measured data aligns with the expected patterns for the given health status. Higher alignment = higher confidence.
    - Output ONLY a valid JSON object with these exact keys:
    {{"key_characteristics": ["characteristic1", "characteristic2"], "event_synopsis": "synopsis text", "diagnostic_verdict_details": "details text", "confidence_score": 85}}
    """
    return prompt
|
| 139 |
+
|
| 140 |
+
# --- Core DCRM Analysis Function ---
|
| 141 |
+
def analyze_dcrm_data(df, llm=None):
|
| 142 |
+
"""
|
| 143 |
+
Analyzes DCRM DataFrame to segment phases, determine health, and format JSON output.
|
| 144 |
+
|
| 145 |
+
Args:
|
| 146 |
+
df (pd.DataFrame): Input DataFrame with DCRM data.
|
| 147 |
+
Expected columns: Time_ms, Resistance, Current, Travel,
|
| 148 |
+
Close_Coil, Trip_Coil_1, Trip_Coil_2.
|
| 149 |
+
llm (LangChain LLM object, optional): LLM to use for generating descriptions.
|
| 150 |
+
|
| 151 |
+
Returns:
|
| 152 |
+
dict: A dictionary representing the DCRM phase-wise interpretation in JSON format.
|
| 153 |
+
"""
|
| 154 |
+
|
| 155 |
+
# Ensure Time_ms is the index for easier slicing
|
| 156 |
+
if 'Time_ms' in df.columns:
|
| 157 |
+
df = df.set_index('Time_ms')
|
| 158 |
+
else:
|
| 159 |
+
# If no Time_ms, assume index is time or create it
|
| 160 |
+
if df.index.name != 'Time_ms':
|
| 161 |
+
# Create Time_ms if not present, assuming 1ms steps
|
| 162 |
+
df['Time_ms'] = range(len(df))
|
| 163 |
+
df = df.set_index('Time_ms')
|
| 164 |
+
|
| 165 |
+
# Convert columns to numeric, coercing errors to NaN
|
| 166 |
+
numeric_cols = ['Resistance', 'Current', 'Travel', 'Close_Coil', 'Trip_Coil_1', 'Trip_Coil_2']
|
| 167 |
+
for col in numeric_cols:
|
| 168 |
+
if col in df.columns:
|
| 169 |
+
df[col] = pd.to_numeric(df[col], errors='coerce')
|
| 170 |
+
else:
|
| 171 |
+
# Handle missing columns gracefully
|
| 172 |
+
df[col] = 0.0
|
| 173 |
+
|
| 174 |
+
df = df.dropna() # Drop rows with any NaN created by coercion
|
| 175 |
+
|
| 176 |
+
if df.empty:
|
| 177 |
+
return {"error": "Input DataFrame is empty after cleaning."}
|
| 178 |
+
|
| 179 |
+
# --- Optimized Dynamic Phase Boundary Detection Thresholds ---
|
| 180 |
+
# Based on EHV DCRM domain knowledge and international standards (IEC, IEEE)
|
| 181 |
+
|
| 182 |
+
# OPEN CIRCUIT THRESHOLDS (Phase 1 & 5)
|
| 183 |
+
OPEN_CIRCUIT_RESISTANCE_IDEAL = 100000 # Ideal infinite resistance (≥ 10⁵ μΩ) for perfect open circuit
|
| 184 |
+
OPEN_CIRCUIT_RESISTANCE_MIN = 750 # Minimum acceptable high resistance (straight line threshold for EHV)
|
| 185 |
+
OPEN_CIRCUIT_VARIATION_THRESHOLD = 30 # ±30 μΩ flat variation is healthy for open circuit (straight line)
|
| 186 |
+
|
| 187 |
+
# CLOSED CIRCUIT THRESHOLDS (Phase 3 - Main Contact Plateau)
|
| 188 |
+
CLOSED_CIRCUIT_RESISTANCE_BASELINE = 200 # Typical ~200 μΩ for healthy main contact plateau in EHV
|
| 189 |
+
CLOSED_CIRCUIT_RESISTANCE_MAX = 350 # Upper limit for acceptable main contact (allowing tolerance for EHV)
|
| 190 |
+
CLOSED_CIRCUIT_VARIATION_THRESHOLD = 50 # ±50 μΩ deviation for healthy stable plateau
|
| 191 |
+
|
| 192 |
+
# ARCING THRESHOLDS (Phase 2 & 4)
|
| 193 |
+
ARCING_RESISTANCE_MIN = 300 # Arcing typically starts around 300-500 μΩ
|
| 194 |
+
ARCING_RESISTANCE_MAX = 5000 # Arcing spikes can reach 3500-5000 μΩ in healthy operation
|
| 195 |
+
ARCING_STDDEV_HEALTHY_MIN = 80 # Minimum StdDev for healthy arcing (indicates arcing activity)
|
| 196 |
+
ARCING_SPIKE_FACTOR = 1.8 # Max should be at least 1.8x Min for healthy arcing spikes
|
| 197 |
+
|
| 198 |
+
# CURRENT & TRAVEL BASELINES (from observed data)
|
| 199 |
+
OPEN_CIRCUIT_CURRENT_BASELINE = 230 # Typical open circuit current (leakage/capacitive)
|
| 200 |
+
CLOSED_CIRCUIT_CURRENT_BASELINE = 715 # Test current injection level during conduction
|
| 201 |
+
TRAVEL_MIN_POSITION = 200 # Fully open position (minimum travel)
|
| 202 |
+
TRAVEL_MAX_POSITION = 750 # Fully closed position (maximum travel/overtravel)
|
| 203 |
+
|
| 204 |
+
# PHASE DETECTION THRESHOLDS (for boundary identification)
|
| 205 |
+
COIL_ACTIVE_THRESHOLD = 0.5 # Coil current above this indicates activation (A)
|
| 206 |
+
GRADIENT_R_CHANGE = 50 # Minimum resistance gradient for phase transition detection (μΩ/ms)
|
| 207 |
+
GRADIENT_I_CHANGE = 50 # Minimum current gradient for phase transition detection (A/ms)
|
| 208 |
+
TRAVEL_MOVEMENT_RATE_THRESHOLD = 1.0 # Minimum travel rate to detect active movement (mm/ms)
|
| 209 |
+
|
| 210 |
+
time_min = df.index.min()
|
| 211 |
+
time_max = df.index.max()
|
| 212 |
+
time_step = np.mean(np.diff(df.index)) if len(df.index) > 1 else 1.0 # Average time step, default to 1ms if only one point
|
| 213 |
+
|
| 214 |
+
# Initialize phase start and end times
|
| 215 |
+
phase_start_times = {1: time_min}
|
| 216 |
+
phase_end_times = {5: time_max} # Phase 5 always extends to the end of the data
|
| 217 |
+
|
| 218 |
+
# Helper to calculate rolling mean of differences (gradient)
|
| 219 |
+
def rolling_gradient(series, window=5):
    """Smoothed point-to-point gradient of *series*.

    Takes first differences (per-sample change, i.e. the discrete
    gradient) and averages them over a rolling window to suppress
    sample-to-sample noise. When the series has fewer samples than
    the window, the raw (unsmoothed) diff is returned instead.
    """
    diffs = series.diff()
    # Too few points to smooth meaningfully — hand back the raw gradient.
    if len(series) < window:
        return diffs
    return diffs.rolling(window=window, min_periods=1).mean()
|
| 223 |
+
|
| 224 |
+
# --- Detection Logic (Refined) ---
|
| 225 |
+
|
| 226 |
+
# 1. Detect Contact Make (End of Phase 1 / Start of Phase 2)
|
| 227 |
+
# Look for a sharp drop in Resistance OR sharp rise in Current, AFTER Close_Coil activation.
|
| 228 |
+
close_coil_active_idx = df[df['Close_Coil'] > COIL_ACTIVE_THRESHOLD].index
|
| 229 |
+
close_op_start = close_coil_active_idx.min() if not close_coil_active_idx.empty else time_min # Start from beginning if no clear close coil activation
|
| 230 |
+
|
| 231 |
+
resistance_gradient_neg = rolling_gradient(df['Resistance'], window=3) # Smaller window for sharper detection
|
| 232 |
+
current_gradient_pos = rolling_gradient(df['Current'], window=3)
|
| 233 |
+
|
| 234 |
+
contact_make_candidates = df.loc[df.index >= close_op_start].index[
|
| 235 |
+
(resistance_gradient_neg.loc[df.index >= close_op_start] < -GRADIENT_R_CHANGE) | # Significant drop in R
|
| 236 |
+
(current_gradient_pos.loc[df.index >= close_op_start] > GRADIENT_I_CHANGE) # Significant rise in I
|
| 237 |
+
]
|
| 238 |
+
|
| 239 |
+
contact_make_time = time_max # Default to end
|
| 240 |
+
if not contact_make_candidates.empty:
|
| 241 |
+
contact_make_time = contact_make_candidates.min()
|
| 242 |
+
else: # Fallback: first time current is significantly active after close op start AND resistance has dropped
|
| 243 |
+
current_active_after_close_op = df.loc[df.index >= close_op_start].index[
|
| 244 |
+
(df['Current'].loc[df.index >= close_op_start] > OPEN_CIRCUIT_CURRENT_BASELINE + 50) & # Current rising significantly
|
| 245 |
+
(df['Resistance'].loc[df.index >= close_op_start] < OPEN_CIRCUIT_RESISTANCE_MIN) # Resistance has dropped significantly from open circuit
|
| 246 |
+
]
|
| 247 |
+
if not current_active_after_close_op.empty:
|
| 248 |
+
contact_make_time = current_active_after_close_op.min()
|
| 249 |
+
else:
|
| 250 |
+
contact_make_time = df.index.min() + (df.index.max() - df.index.min()) * 0.20 # Fallback to 20% into data if no clear event
|
| 251 |
+
|
| 252 |
+
phase_end_times[1] = contact_make_time - time_step
|
| 253 |
+
phase_start_times[2] = contact_make_time
|
| 254 |
+
|
| 255 |
+
# 2. Detect Conduction Stabilization (End of Phase 2 / Start of Phase 3)
|
| 256 |
+
# Look for Resistance near CLOSED_CIRCUIT_RESISTANCE_BASELINE AND Current near CLOSED_CIRCUIT_CURRENT_BASELINE AND both are stable.
|
| 257 |
+
stable_conduction_candidates = df.loc[df.index >= phase_start_times[2]].index[
|
| 258 |
+
(df['Resistance'].loc[df.index >= phase_start_times[2]].between(CLOSED_CIRCUIT_RESISTANCE_BASELINE - 100, CLOSED_CIRCUIT_RESISTANCE_MAX + 50)) &
|
| 259 |
+
(df['Current'].loc[df.index >= phase_start_times[2]].between(CLOSED_CIRCUIT_CURRENT_BASELINE - 100, CLOSED_CIRCUIT_CURRENT_BASELINE + 50)) &
|
| 260 |
+
(df['Resistance'].loc[df.index >= phase_start_times[2]].rolling(window=10, min_periods=1).std() < 30) & # Low std dev for stability (increased tolerance)
|
| 261 |
+
(df['Current'].loc[df.index >= phase_start_times[2]].rolling(window=10, min_periods=1).std() < 30)
|
| 262 |
+
]
|
| 263 |
+
|
| 264 |
+
conduction_start_time = time_max
|
| 265 |
+
if not stable_conduction_candidates.empty:
|
| 266 |
+
conduction_start_time = stable_conduction_candidates.min()
|
| 267 |
+
else: # Fallback: assume conduction starts when resistance first drops very low and current is high
|
| 268 |
+
low_res_high_curr_first_hit = df.loc[df.index >= phase_start_times[2]].index[
|
| 269 |
+
(df['Resistance'].loc[df.index >= phase_start_times[2]] < CLOSED_CIRCUIT_RESISTANCE_MAX) & # Clearly dropped to conduction level
|
| 270 |
+
(df['Current'].loc[df.index >= phase_start_times[2]] > CLOSED_CIRCUIT_CURRENT_BASELINE - 100) # Clearly risen to conduction level
|
| 271 |
+
]
|
| 272 |
+
if not low_res_high_curr_first_hit.empty:
|
| 273 |
+
conduction_start_time = low_res_high_curr_first_hit.min()
|
| 274 |
+
else:
|
| 275 |
+
conduction_start_time = phase_start_times[2] + 10.0 # Ensure a minimum 10ms duration for phase 2 if no clear event
|
| 276 |
+
|
| 277 |
+
phase_end_times[2] = conduction_start_time - time_step
|
| 278 |
+
phase_start_times[3] = conduction_start_time
|
| 279 |
+
|
| 280 |
+
# 3. Detect Contact Break (End of Phase 3 / Start of Phase 4)
|
| 281 |
+
# Look for Trip_Coil_1 activation OR a sharp rise in Resistance OR sharp drop in Current.
|
| 282 |
+
trip_coil_active_idx = df[df['Trip_Coil_1'] > COIL_ACTIVE_THRESHOLD].index
|
| 283 |
+
trip_op_start = trip_coil_active_idx.min() if not trip_coil_active_idx.empty else phase_start_times[3] + (df.index.max() - phase_start_times[3]) * 0.5 # Midpoint if no trip coil
|
| 284 |
+
|
| 285 |
+
resistance_gradient_pos = rolling_gradient(df['Resistance'], window=3)
|
| 286 |
+
current_gradient_neg = rolling_gradient(df['Current'], window=3)
|
| 287 |
+
|
| 288 |
+
contact_break_candidates = df.loc[df.index >= trip_op_start].index[
|
| 289 |
+
(resistance_gradient_pos.loc[df.index >= trip_op_start] > GRADIENT_R_CHANGE) | # Significant rise in R
|
| 290 |
+
(current_gradient_neg.loc[df.index >= trip_op_start] < -GRADIENT_I_CHANGE) # Significant drop in I
|
| 291 |
+
]
|
| 292 |
+
|
| 293 |
+
contact_break_time = time_max
|
| 294 |
+
if not contact_break_candidates.empty:
|
| 295 |
+
contact_break_time = contact_break_candidates.min()
|
| 296 |
+
else: # Fallback: first time current is low again and resistance is high after trip op start
|
| 297 |
+
current_low_high_res_after_trip_op = df.loc[df.index >= trip_op_start].index[
|
| 298 |
+
(df['Current'].loc[df.index >= trip_op_start] < CLOSED_CIRCUIT_CURRENT_BASELINE - 100) & # Current dropping significantly
|
| 299 |
+
(df['Resistance'].loc[df.index >= trip_op_start] > CLOSED_CIRCUIT_RESISTANCE_BASELINE + 100) # Resistance rising significantly
|
| 300 |
+
]
|
| 301 |
+
if not current_low_high_res_after_trip_op.empty:
|
| 302 |
+
contact_break_time = current_low_high_res_after_trip_op.min()
|
| 303 |
+
else:
|
| 304 |
+
contact_break_time = phase_start_times[3] + (df.index.max() - phase_start_times[3]) * 0.75 # Default to later if no clear break
|
| 305 |
+
|
| 306 |
+
phase_end_times[3] = contact_break_time - time_step
|
| 307 |
+
phase_start_times[4] = contact_break_time
|
| 308 |
+
|
| 309 |
+
# 4. Detect Final Open State Stabilization (End of Phase 4 / Start of Phase 5)
|
| 310 |
+
# Look for Resistance high AND Current near OPEN_CIRCUIT_CURRENT_BASELINE AND Travel is stable at min position.
|
| 311 |
+
final_open_stable_candidates = df.loc[df.index >= phase_start_times[4]].index[
|
| 312 |
+
(df['Resistance'].loc[df.index >= phase_start_times[4]] > OPEN_CIRCUIT_RESISTANCE_MIN - 50) & # High resistance
|
| 313 |
+
(df['Current'].loc[df.index >= phase_start_times[4]].between(OPEN_CIRCUIT_CURRENT_BASELINE - 50, OPEN_CIRCUIT_CURRENT_BASELINE + 50)) &
|
| 314 |
+
(df['Travel'].loc[df.index >= phase_start_times[4]].rolling(window=10, min_periods=1).std() < 2) # Travel stability
|
| 315 |
+
]
|
| 316 |
+
|
| 317 |
+
final_open_time = time_max
|
| 318 |
+
if not final_open_stable_candidates.empty:
|
| 319 |
+
final_open_time = final_open_stable_candidates.min()
|
| 320 |
+
else: # Fallback: when travel stops moving after contact break
|
| 321 |
+
travel_still_moving = df.loc[df.index >= phase_start_times[4]].index[rolling_gradient(df['Travel']).loc[df.index >= phase_start_times[4]].abs() > TRAVEL_MOVEMENT_RATE_THRESHOLD]
|
| 322 |
+
if not travel_still_moving.empty:
|
| 323 |
+
final_open_time = travel_still_moving.max() + 5 * time_step # End of significant travel movement
|
| 324 |
+
else:
|
| 325 |
+
final_open_time = df.index.max() - 20.0 # Default to near end if no clear stabilization
|
| 326 |
+
|
| 327 |
+
phase_end_times[4] = final_open_time - time_step
|
| 328 |
+
phase_start_times[5] = final_open_time
|
| 329 |
+
|
| 330 |
+
# --- Ensure Phase Times are Valid and Sequential ---
|
| 331 |
+
current_end = df.index.min() - time_step
|
| 332 |
+
for p_id in sorted(phase_start_times.keys()):
|
| 333 |
+
start_time_candidate = phase_start_times[p_id]
|
| 334 |
+
end_time_candidate = phase_end_times[p_id]
|
| 335 |
+
|
| 336 |
+
start_idx_loc = df.index.get_indexer([start_time_candidate], method='nearest')[0]
|
| 337 |
+
end_idx_loc = df.index.get_indexer([end_time_candidate], method='nearest')[0]
|
| 338 |
+
|
| 339 |
+
phase_start_times[p_id] = df.index[start_idx_loc]
|
| 340 |
+
phase_end_times[p_id] = df.index[end_idx_loc]
|
| 341 |
+
|
| 342 |
+
if phase_start_times[p_id] < current_end:
|
| 343 |
+
phase_start_times[p_id] = current_end
|
| 344 |
+
if phase_start_times[p_id] >= phase_end_times[p_id]:
|
| 345 |
+
# Ensure minimum 5ms duration for a phase, or until max_time if near end
|
| 346 |
+
phase_end_times[p_id] = min(phase_start_times[p_id] + time_step * 5, time_max)
|
| 347 |
+
# If still problematic, try to give it at least 1 point
|
| 348 |
+
if phase_start_times[p_id] == phase_end_times[p_id] and phase_start_times[p_id] < time_max:
|
| 349 |
+
next_idx = df.index.get_indexer([phase_start_times[p_id] + time_step], method='nearest')[0]
|
| 350 |
+
if next_idx < len(df.index):
|
| 351 |
+
phase_end_times[p_id] = df.index[next_idx]
|
| 352 |
+
|
| 353 |
+
current_end = phase_end_times[p_id]
|
| 354 |
+
|
| 355 |
+
if 5 in phase_end_times:
|
| 356 |
+
phase_end_times[5] = time_max # Ensure the last phase always goes to the end
|
| 357 |
+
|
| 358 |
+
|
| 359 |
+
# --- JSON Structure Template (static definitions) ---
|
| 360 |
+
json_output_template = {
|
| 361 |
+
"phaseWiseAnalysis": [
|
| 362 |
+
{
|
| 363 |
+
"phaseNumber": 1,
|
| 364 |
+
"id": "pre-contact-travel",
|
| 365 |
+
"name": "Pre-Contact Travel",
|
| 366 |
+
"phaseTitle": "Closing Operation — Pre-Contact Travel",
|
| 367 |
+
"description": "Initial state before arc contact occurs",
|
| 368 |
+
"color": "#ff9800"
|
| 369 |
+
},
|
| 370 |
+
{
|
| 371 |
+
"phaseNumber": 2,
|
| 372 |
+
"id": "arcing-contact-engagement-arc-initiation",
|
| 373 |
+
"name": "Arcing Contact Engagement & Arc Initiation",
|
| 374 |
+
"phaseTitle": "Closing Operation — Arcing Contact Engagement & Arc Initiation",
|
| 375 |
+
"description": "Arcing contact engagement and arc initiation",
|
| 376 |
+
"color": "#ff26bd"
|
| 377 |
+
},
|
| 378 |
+
{
|
| 379 |
+
"phaseNumber": 3,
|
| 380 |
+
"id": "main-contact-conduction",
|
| 381 |
+
"name": "Main Contact Conduction",
|
| 382 |
+
"phaseTitle": "Fully Closed State — Main Contact Conduction",
|
| 383 |
+
"description": "Main contact conduction",
|
| 384 |
+
"color": "#4caf50"
|
| 385 |
+
},
|
| 386 |
+
{
|
| 387 |
+
"phaseNumber": 4,
|
| 388 |
+
"id": "main-contact-parting-arc-elongation",
|
| 389 |
+
"name": "Main Contact Parting & Arc Elongation",
|
| 390 |
+
"phaseTitle": "Opening Operation — Main Contact Parting & Arc Elongation",
|
| 391 |
+
"description": "Main contact parting and arc elongation",
|
| 392 |
+
"color": "#2196f3"
|
| 393 |
+
},
|
| 394 |
+
{
|
| 395 |
+
"phaseNumber": 5,
|
| 396 |
+
"id": "final-open-state",
|
| 397 |
+
"name": "Final Open State",
|
| 398 |
+
"phaseTitle": "Post-Interruption — Final Open State",
|
| 399 |
+
"description": "Final open state",
|
| 400 |
+
"color": "#a629ff"
|
| 401 |
+
}
|
| 402 |
+
]
|
| 403 |
+
}
|
| 404 |
+
|
| 405 |
+
final_segmented_phases = []
|
| 406 |
+
|
| 407 |
+
# Populate the JSON structure dynamically
|
| 408 |
+
for phase_data_template in json_output_template['phaseWiseAnalysis']:
|
| 409 |
+
phase_id = phase_data_template['phaseNumber']
|
| 410 |
+
start_time = phase_start_times.get(phase_id)
|
| 411 |
+
end_time = phase_end_times.get(phase_id)
|
| 412 |
+
|
| 413 |
+
if start_time is None or end_time is None or start_time >= end_time:
|
| 414 |
+
continue
|
| 415 |
+
|
| 416 |
+
phase_data = df.loc[(df.index >= start_time) & (df.index <= end_time)]
|
| 417 |
+
|
| 418 |
+
if phase_data.empty:
|
| 419 |
+
continue
|
| 420 |
+
|
| 421 |
+
current_phase_entry = phase_data_template.copy()
|
| 422 |
+
current_phase_entry['startTime'] = int(start_time)
|
| 423 |
+
current_phase_entry['endTime'] = int(end_time)
|
| 424 |
+
current_phase_entry['details'] = {
|
| 425 |
+
"resistance": "",
|
| 426 |
+
"current": "",
|
| 427 |
+
"travel": "",
|
| 428 |
+
"characteristics": []
|
| 429 |
+
}
|
| 430 |
+
current_phase_entry['waveformAnalysis'] = {
|
| 431 |
+
"resistance": "",
|
| 432 |
+
"current": "",
|
| 433 |
+
"travel": "",
|
| 434 |
+
"coilAnalysis": {
|
| 435 |
+
"closeCoil": {"status": "", "description": "", "measuredValues": ""},
|
| 436 |
+
"tripCoil1": {"status": "", "description": "", "measuredValues": ""},
|
| 437 |
+
"tripCoil2": {"status": "", "description": "", "measuredValues": ""}
|
| 438 |
+
}
|
| 439 |
+
}
|
| 440 |
+
current_phase_entry['diagnosticVerdict'] = ""
|
| 441 |
+
current_phase_entry['eventSynopsis'] = ""
|
| 442 |
+
current_phase_entry['confidence'] = 0
|
| 443 |
+
current_phase_entry['status'] = "Unknown"
|
| 444 |
+
|
| 445 |
+
current_data_summary = {
|
| 446 |
+
'time_interval': f"{start_time:.1f}ms - {end_time:.1f}ms",
|
| 447 |
+
'resistance_min': phase_data['Resistance'].min(),
|
| 448 |
+
'resistance_max': phase_data['Resistance'].max(),
|
| 449 |
+
'resistance_avg': phase_data['Resistance'].mean(),
|
| 450 |
+
'resistance_std': phase_data['Resistance'].std() if len(phase_data) > 1 else 0,
|
| 451 |
+
'current_min': phase_data['Current'].min(),
|
| 452 |
+
'current_max': phase_data['Current'].max(),
|
| 453 |
+
'current_avg': phase_data['Current'].mean(),
|
| 454 |
+
'current_std': phase_data['Current'].std() if len(phase_data) > 1 else 0,
|
| 455 |
+
'travel_min': phase_data['Travel'].min(),
|
| 456 |
+
'travel_max': phase_data['Travel'].max(),
|
| 457 |
+
'travel_avg': phase_data['Travel'].mean(),
|
| 458 |
+
'travel_std': phase_data['Travel'].std() if len(phase_data) > 1 else 0,
|
| 459 |
+
'close_coil_avg': phase_data['Close_Coil'].mean(),
|
| 460 |
+
'trip_coil_1_avg': phase_data['Trip_Coil_1'].mean(),
|
| 461 |
+
'trip_coil_2_avg': phase_data['Trip_Coil_2'].mean(),
|
| 462 |
+
'close_coil_state': get_coil_state(phase_data['Close_Coil']),
|
| 463 |
+
'trip_coil_1_state': get_coil_state(phase_data['Trip_Coil_1']),
|
| 464 |
+
'trip_coil_2_state': get_coil_state(phase_data['Trip_Coil_2']),
|
| 465 |
+
'programmatic_health_status': "Unknown",
|
| 466 |
+
'resistance_status': "",
|
| 467 |
+
'current_status': "",
|
| 468 |
+
'travel_status': ""
|
| 469 |
+
}
|
| 470 |
+
|
| 471 |
+
# --- Health Verdict Logic (Optimized) ---
|
| 472 |
+
# (Same logic as backup_DCRM2.py, ensuring it populates current_data_summary and current_phase_entry)
|
| 473 |
+
|
| 474 |
+
if phase_id == 1: # PRE-CONTACT TRAVEL
|
| 475 |
+
is_resistance_infinite = current_data_summary['resistance_avg'] >= OPEN_CIRCUIT_RESISTANCE_IDEAL
|
| 476 |
+
is_resistance_high_and_flat = (current_data_summary['resistance_avg'] >= OPEN_CIRCUIT_RESISTANCE_MIN and
|
| 477 |
+
current_data_summary['resistance_std'] <= OPEN_CIRCUIT_VARIATION_THRESHOLD)
|
| 478 |
+
|
| 479 |
+
if is_resistance_infinite or is_resistance_high_and_flat:
|
| 480 |
+
current_data_summary['programmatic_health_status'] = "Healthy"
|
| 481 |
+
current_data_summary['resistance_status'] = "High and Stable (Open Circuit)"
|
| 482 |
+
if is_resistance_infinite:
|
| 483 |
+
resistance_desc = f"Resistance is infinite (Avg: {current_data_summary['resistance_avg']:.1f} μΩ ≥ {OPEN_CIRCUIT_RESISTANCE_IDEAL} μΩ) with minimal variation (StdDev: {current_data_summary['resistance_std']:.1f} μΩ), indicating proper open circuit with clean air gap and no premature contact."
|
| 484 |
+
else:
|
| 485 |
+
resistance_desc = f"Resistance is high and stable (Avg: {current_data_summary['resistance_avg']:.1f} μΩ ≥ {OPEN_CIRCUIT_RESISTANCE_MIN} μΩ) with flat straight-line pattern (StdDev: {current_data_summary['resistance_std']:.1f} μΩ ≤ ±{OPEN_CIRCUIT_VARIATION_THRESHOLD} μΩ), indicating proper open circuit with clean separation and no premature contact."
|
| 486 |
+
|
| 487 |
+
current_phase_entry['waveformAnalysis']['resistance'] = resistance_desc
|
| 488 |
+
current_phase_entry['waveformAnalysis']['current'] = f"Current remains stable around its open circuit baseline of {OPEN_CIRCUIT_CURRENT_BASELINE:.1f} A (Avg: {current_data_summary['current_avg']:.1f}), reinforcing no premature electrical connection."
|
| 489 |
+
current_phase_entry['waveformAnalysis']['travel'] = f"Travel shows a smooth, continuous increase from {current_data_summary['travel_min']:.1f} mm to {current_data_summary['travel_max']:.1f} mm, representing unimpeded mechanical movement."
|
| 490 |
+
|
| 491 |
+
current_phase_entry['details']['resistance'] = f"High resistance (~{current_data_summary['resistance_avg']:.0f} µΩ) - contacts are open"
|
| 492 |
+
current_phase_entry['details']['current'] = f"Low current (~{current_data_summary['current_avg']:.0f}) - no current flow"
|
| 493 |
+
current_phase_entry['details']['travel'] = f"Gradual increase from {current_data_summary['travel_min']:.0f} to {current_data_summary['travel_max']:.0f} - contact approaching"
|
| 494 |
+
else:
|
| 495 |
+
current_data_summary['programmatic_health_status'] = "Unhealthy - Pre-Contact Issues"
|
| 496 |
+
current_phase_entry['waveformAnalysis']['resistance'] = f"Resistance is too low (Avg: {current_data_summary['resistance_avg']:.1f} μΩ < {OPEN_CIRCUIT_RESISTANCE_MIN} μΩ) or shows excessive variation (StdDev: {current_data_summary['resistance_std']:.1f} μΩ > ±{OPEN_CIRCUIT_VARIATION_THRESHOLD} μΩ), suggesting premature contact, leakage, or insulation breakdown."
|
| 497 |
+
current_phase_entry['waveformAnalysis']['current'] = f"Current was not consistently around {OPEN_CIRCUIT_CURRENT_BASELINE:.1f} A (Avg: {current_data_summary['current_avg']:.1f} A), indicating a premature electrical connection or abnormal current path."
|
| 498 |
+
current_phase_entry['waveformAnalysis']['travel'] = f"Travel was not smooth, failed to ramp up sufficiently (Range: {current_data_summary['travel_min']:.1f} to {current_data_summary['travel_max']:.1f} mm), or showed erratic movement, indicating mechanical binding or obstruction."
|
| 499 |
+
|
| 500 |
+
current_phase_entry['details']['resistance'] = f"Abnormal resistance (~{current_data_summary['resistance_avg']:.0f} µΩ) - premature contact"
|
| 501 |
+
current_phase_entry['details']['current'] = f"Abnormal current (~{current_data_summary['current_avg']:.0f}) - unexpected flow"
|
| 502 |
+
current_phase_entry['details']['travel'] = f"Erratic travel from {current_data_summary['travel_min']:.0f} to {current_data_summary['travel_max']:.0f} - binding detected"
|
| 503 |
+
|
| 504 |
+
elif phase_id == 2: # Arcing Contact Engagement
|
| 505 |
+
resistance_range = current_data_summary['resistance_max'] - current_data_summary['resistance_min']
|
| 506 |
+
current_rise_magnitude = current_data_summary['current_max'] - current_data_summary['current_min']
|
| 507 |
+
|
| 508 |
+
resistance_drops_significantly = (current_data_summary['resistance_max'] > 500 and
|
| 509 |
+
current_data_summary['resistance_min'] < CLOSED_CIRCUIT_RESISTANCE_MAX + 100 and
|
| 510 |
+
resistance_range > 200)
|
| 511 |
+
|
| 512 |
+
has_healthy_arcing_spikes = (current_data_summary['resistance_max'] > ARCING_RESISTANCE_MIN * 2 and
|
| 513 |
+
current_data_summary['resistance_std'] > 80 and
|
| 514 |
+
current_data_summary['resistance_max'] / max(current_data_summary['resistance_min'], 1) > 1.8)
|
| 515 |
+
|
| 516 |
+
current_rises_properly = (current_rise_magnitude > 250 and
|
| 517 |
+
current_data_summary['current_max'] > CLOSED_CIRCUIT_CURRENT_BASELINE * 0.6)
|
| 518 |
+
|
| 519 |
+
travel_increasing = (current_data_summary['travel_max'] - current_data_summary['travel_min']) > 100
|
| 520 |
+
|
| 521 |
+
if resistance_drops_significantly and has_healthy_arcing_spikes and current_rises_properly and travel_increasing:
|
| 522 |
+
current_data_summary['programmatic_health_status'] = "Healthy"
|
| 523 |
+
current_phase_entry['waveformAnalysis']['resistance'] = f"Resistance exhibits healthy arcing behavior: sharp drop from {current_data_summary['resistance_max']:.1f} μΩ to {current_data_summary['resistance_min']:.1f} μΩ (Range: {resistance_range:.1f} μΩ) with prominent high-frequency arcing spikes (StdDev: {current_data_summary['resistance_std']:.1f} μΩ > 80), indicating proper arc initiation as arcing contacts engage."
|
| 524 |
+
current_phase_entry['waveformAnalysis']['current'] = f"Current rises sharply from {current_data_summary['current_min']:.1f} A to {current_data_summary['current_max']:.1f} A (Rise: {current_rise_magnitude:.1f} A), establishing conduction path."
|
| 525 |
+
current_phase_entry['waveformAnalysis']['travel'] = f"Travel increases smoothly from {current_data_summary['travel_min']:.1f} mm to {current_data_summary['travel_max']:.1f} mm."
|
| 526 |
+
|
| 527 |
+
current_phase_entry['details']['resistance'] = f"Highly variable ({current_data_summary['resistance_min']:.0f}-{current_data_summary['resistance_max']:.0f} µΩ) - arcing occurs"
|
| 528 |
+
current_phase_entry['details']['current'] = f"Sharp rise to ~{current_data_summary['current_max']:.0f} - current flow begins"
|
| 529 |
+
current_phase_entry['details']['travel'] = f"Continues to increase to ~{current_data_summary['travel_max']:.0f} - contacts closing"
|
| 530 |
+
else:
|
| 531 |
+
current_data_summary['programmatic_health_status'] = "Unhealthy - Arcing/Contact Issue"
|
| 532 |
+
if not has_healthy_arcing_spikes:
|
| 533 |
+
current_phase_entry['waveformAnalysis']['resistance'] = f"CRITICAL ISSUE: Resistance pattern lacks expected high-frequency arcing spikes (Max: {current_data_summary['resistance_max']:.1f} μΩ, StdDev: {current_data_summary['resistance_std']:.1f} μΩ << 80 μΩ expected). This indicates SEVERELY WORN arcing contacts."
|
| 534 |
+
elif not resistance_drops_significantly:
|
| 535 |
+
current_phase_entry['waveformAnalysis']['resistance'] = f"CRITICAL ISSUE: Resistance fails to drop adequately (Max: {current_data_summary['resistance_max']:.1f} μΩ, Min: {current_data_summary['resistance_min']:.1f} μΩ). This indicates CONTACT MISALIGNMENT or mechanical binding."
|
| 536 |
+
else:
|
| 537 |
+
current_phase_entry['waveformAnalysis']['resistance'] = f"Resistance pattern shows abnormal arcing behavior (Range: {current_data_summary['resistance_min']:.1f} to {current_data_summary['resistance_max']:.1f} μΩ)."
|
| 538 |
+
|
| 539 |
+
current_phase_entry['waveformAnalysis']['current'] = f"Current rise is inadequate or abnormal (Rise: {current_rise_magnitude:.1f} A)."
|
| 540 |
+
current_phase_entry['waveformAnalysis']['travel'] = f"Travel pattern: {current_data_summary['travel_min']:.1f} to {current_data_summary['travel_max']:.1f} mm."
|
| 541 |
+
|
| 542 |
+
current_phase_entry['details']['resistance'] = f"Abnormal ({current_data_summary['resistance_min']:.0f}-{current_data_summary['resistance_max']:.0f} µΩ) - contact issue"
|
| 543 |
+
current_phase_entry['details']['current'] = f"Poor rise to ~{current_data_summary['current_max']:.0f} - impedance detected"
|
| 544 |
+
current_phase_entry['details']['travel'] = f"Range to ~{current_data_summary['travel_max']:.0f} - issue detected"
|
| 545 |
+
|
| 546 |
+
elif phase_id == 3: # Main Contact Conduction
|
| 547 |
+
is_resistance_low = current_data_summary['resistance_avg'] <= CLOSED_CIRCUIT_RESISTANCE_MAX
|
| 548 |
+
is_resistance_flat_plateau = current_data_summary['resistance_std'] <= CLOSED_CIRCUIT_VARIATION_THRESHOLD
|
| 549 |
+
has_abnormal_spikes = current_data_summary['resistance_max'] > CLOSED_CIRCUIT_RESISTANCE_MAX + 100
|
| 550 |
+
is_current_stable = abs(current_data_summary['current_avg'] - CLOSED_CIRCUIT_CURRENT_BASELINE) < 50 and current_data_summary['current_std'] < 30
|
| 551 |
+
is_travel_stable = current_data_summary['travel_std'] < 5 and abs(current_data_summary['travel_avg'] - TRAVEL_MAX_POSITION) < 20
|
| 552 |
+
|
| 553 |
+
if is_resistance_low and is_resistance_flat_plateau and not has_abnormal_spikes and is_current_stable and is_travel_stable:
|
| 554 |
+
current_data_summary['programmatic_health_status'] = "Healthy"
|
| 555 |
+
current_phase_entry['waveformAnalysis']['resistance'] = f"Resistance maintains a healthy flat plateau at low values (Avg: {current_data_summary['resistance_avg']:.1f} μΩ) with minimal deviation."
|
| 556 |
+
current_phase_entry['waveformAnalysis']['current'] = f"Current remains high and stable, around {CLOSED_CIRCUIT_CURRENT_BASELINE:.1f} A (Avg: {current_data_summary['current_avg']:.1f})."
|
| 557 |
+
current_phase_entry['waveformAnalysis']['travel'] = f"Travel remains perfectly stable at its maximum stroke of approximately {TRAVEL_MAX_POSITION:.1f} mm."
|
| 558 |
+
|
| 559 |
+
current_phase_entry['details']['resistance'] = f"Stable low resistance (~{current_data_summary['resistance_avg']:.0f} µΩ) - good contact"
|
| 560 |
+
current_phase_entry['details']['current'] = f"Gradual decline from {current_data_summary['current_max']:.0f} to {current_data_summary['current_min']:.0f} - stable flow"
|
| 561 |
+
current_phase_entry['details']['travel'] = f"Constant at ~{current_data_summary['travel_avg']:.0f} - contacts fully closed"
|
| 562 |
+
else:
|
| 563 |
+
current_data_summary['programmatic_health_status'] = "Unhealthy - Contact Degradation"
|
| 564 |
+
current_phase_entry['waveformAnalysis']['resistance'] = f"Resistance is unhealthy: either elevated (Avg: {current_data_summary['resistance_avg']:.1f} μΩ), shows high abnormal spikes, or lacks flat plateau."
|
| 565 |
+
current_phase_entry['waveformAnalysis']['current'] = f"Current was unstable or lower than expected (Avg: {current_data_summary['current_avg']:.1f} A)."
|
| 566 |
+
current_phase_entry['waveformAnalysis']['travel'] = f"Travel was not stable at its maximum (Avg: {current_data_summary['travel_avg']:.1f} mm)."
|
| 567 |
+
|
| 568 |
+
current_phase_entry['details']['resistance'] = f"Elevated resistance (~{current_data_summary['resistance_avg']:.0f} µΩ) - degradation"
|
| 569 |
+
current_phase_entry['details']['current'] = f"Unstable current (~{current_data_summary['current_avg']:.0f}) - poor conduction"
|
| 570 |
+
current_phase_entry['details']['travel'] = f"Unstable at ~{current_data_summary['travel_avg']:.0f} - insufficient overtravel"
|
| 571 |
+
|
| 572 |
+
elif phase_id == 4: # Main Contact Parting
|
| 573 |
+
resistance_rise_magnitude = current_data_summary['resistance_max'] - current_data_summary['resistance_min']
|
| 574 |
+
current_drop_magnitude = current_data_summary['current_max'] - current_data_summary['current_min']
|
| 575 |
+
travel_drop_magnitude = current_data_summary['travel_max'] - current_data_summary['travel_min']
|
| 576 |
+
|
| 577 |
+
resistance_rises_smoothly = (current_data_summary['resistance_min'] < CLOSED_CIRCUIT_RESISTANCE_MAX + 100 and
|
| 578 |
+
resistance_rise_magnitude > 300)
|
| 579 |
+
current_drops_steadily = current_drop_magnitude > 200
|
| 580 |
+
travel_decreases_properly = travel_drop_magnitude > 400
|
| 581 |
+
has_smooth_progressive_rise = current_data_summary['resistance_std'] < 250
|
| 582 |
+
|
| 583 |
+
if resistance_rises_smoothly and current_drops_steadily and travel_decreases_properly and has_smooth_progressive_rise:
|
| 584 |
+
current_data_summary['programmatic_health_status'] = "Healthy"
|
| 585 |
+
current_phase_entry['waveformAnalysis']['resistance'] = f"Resistance rises smoothly and progressively from {current_data_summary['resistance_min']:.1f} μΩ to {current_data_summary['resistance_max']:.1f} μΩ."
|
| 586 |
+
current_phase_entry['waveformAnalysis']['current'] = f"Current drops steadily from {current_data_summary['current_max']:.1f} A to {current_data_summary['current_min']:.1f} A."
|
| 587 |
+
current_phase_entry['waveformAnalysis']['travel'] = f"Travel decreases smoothly from {current_data_summary['travel_max']:.1f} mm to {current_data_summary['travel_min']:.1f} mm."
|
| 588 |
+
|
| 589 |
+
current_phase_entry['details']['resistance'] = f"Rising from {current_data_summary['resistance_min']:.0f} to {current_data_summary['resistance_max']:.0f} µΩ - contacts separating"
|
| 590 |
+
current_phase_entry['details']['current'] = f"Sharp drop from {current_data_summary['current_max']:.0f} to {current_data_summary['current_min']:.0f} - current flow stops"
|
| 591 |
+
current_phase_entry['details']['travel'] = f"Sharp drop from {current_data_summary['travel_max']:.0f} to {current_data_summary['travel_min']:.0f} - contacts opening"
|
| 592 |
+
else:
|
| 593 |
+
current_data_summary['programmatic_health_status'] = "Unhealthy - Opening/Interruption Issue"
|
| 594 |
+
current_phase_entry['waveformAnalysis']['resistance'] = f"Resistance rise pattern is abnormal (Rise: {resistance_rise_magnitude:.1f} μΩ, StdDev: {current_data_summary['resistance_std']:.1f} μΩ)."
|
| 595 |
+
current_phase_entry['waveformAnalysis']['current'] = f"Current drop is insufficient or erratic (Drop: {current_drop_magnitude:.1f} A)."
|
| 596 |
+
current_phase_entry['waveformAnalysis']['travel'] = f"Travel decrease is insufficient or erratic (Drop: {travel_drop_magnitude:.1f} mm)."
|
| 597 |
+
|
| 598 |
+
current_phase_entry['details']['resistance'] = f"Abnormal rise {current_data_summary['resistance_min']:.0f} to {current_data_summary['resistance_max']:.0f} µΩ - issue detected"
|
| 599 |
+
current_phase_entry['details']['current'] = f"Erratic drop {current_data_summary['current_max']:.0f} to {current_data_summary['current_min']:.0f} - interruption problem"
|
| 600 |
+
current_phase_entry['details']['travel'] = f"Abnormal drop {current_data_summary['travel_max']:.0f} to {current_data_summary['travel_min']:.0f} - mechanical issue"
|
| 601 |
+
|
| 602 |
+
elif phase_id == 5: # Final Open State
|
| 603 |
+
is_resistance_infinite = current_data_summary['resistance_avg'] >= OPEN_CIRCUIT_RESISTANCE_IDEAL
|
| 604 |
+
is_resistance_high_and_flat = (current_data_summary['resistance_avg'] >= OPEN_CIRCUIT_RESISTANCE_MIN and
|
| 605 |
+
current_data_summary['resistance_std'] <= OPEN_CIRCUIT_VARIATION_THRESHOLD)
|
| 606 |
+
is_current_healthy = abs(current_data_summary['current_avg'] - OPEN_CIRCUIT_CURRENT_BASELINE) < 20 and current_data_summary['current_std'] < 20
|
| 607 |
+
is_travel_low_stable = current_data_summary['travel_std'] < 5 and abs(current_data_summary['travel_avg'] - TRAVEL_MIN_POSITION) < 20
|
| 608 |
+
|
| 609 |
+
if (is_resistance_infinite or is_resistance_high_and_flat) and is_current_healthy and is_travel_low_stable:
|
| 610 |
+
current_data_summary['programmatic_health_status'] = "Healthy"
|
| 611 |
+
current_phase_entry['waveformAnalysis']['resistance'] = f"Resistance remains high and stable (Avg: {current_data_summary['resistance_avg']:.1f} μΩ)."
|
| 612 |
+
current_phase_entry['waveformAnalysis']['current'] = f"Current remains stable around its open circuit baseline."
|
| 613 |
+
current_phase_entry['waveformAnalysis']['travel'] = f"Travel remains stable at the fully open position."
|
| 614 |
+
|
| 615 |
+
current_phase_entry['details']['resistance'] = f"High resistance (~{current_data_summary['resistance_avg']:.0f} µΩ) - contacts fully open"
|
| 616 |
+
current_phase_entry['details']['current'] = f"Low current (~{current_data_summary['current_avg']:.0f}) - no current flow"
|
| 617 |
+
current_phase_entry['details']['travel'] = f"Constant at ~{current_data_summary['travel_avg']:.0f} - contacts fully separated"
|
| 618 |
+
else:
|
| 619 |
+
current_data_summary['programmatic_health_status'] = "Unhealthy - Final State Issue"
|
| 620 |
+
current_phase_entry['waveformAnalysis']['resistance'] = f"Resistance did not return to its expected high value or was unstable."
|
| 621 |
+
current_phase_entry['waveformAnalysis']['current'] = f"Current was not stable around its open circuit baseline."
|
| 622 |
+
current_phase_entry['waveformAnalysis']['travel'] = f"Travel was unstable or did not reach the full open position."
|
| 623 |
+
|
| 624 |
+
current_phase_entry['details']['resistance'] = f"Abnormal resistance (~{current_data_summary['resistance_avg']:.0f} µΩ) - leakage detected"
|
| 625 |
+
current_phase_entry['details']['current'] = f"Abnormal current (~{current_data_summary['current_avg']:.0f}) - residual flow"
|
| 626 |
+
current_phase_entry['details']['travel'] = f"Unstable at ~{current_data_summary['travel_avg']:.0f} - rebound/drift"
|
| 627 |
+
|
| 628 |
+
# Update Coil Analysis with descriptions
|
| 629 |
+
current_phase_entry['waveformAnalysis']['coilAnalysis']['closeCoil']['status'] = current_data_summary['close_coil_state']
|
| 630 |
+
current_phase_entry['waveformAnalysis']['coilAnalysis']['closeCoil']['measuredValues'] = f"Avg: {current_data_summary['close_coil_avg']:.2f} A"
|
| 631 |
+
if current_data_summary['close_coil_state'] == 'Active':
|
| 632 |
+
current_phase_entry['waveformAnalysis']['coilAnalysis']['closeCoil']['description'] = "Close coil is energized, driving contacts to closed position"
|
| 633 |
+
else:
|
| 634 |
+
current_phase_entry['waveformAnalysis']['coilAnalysis']['closeCoil']['description'] = "Close coil is de-energized"
|
| 635 |
+
|
| 636 |
+
current_phase_entry['waveformAnalysis']['coilAnalysis']['tripCoil1']['status'] = current_data_summary['trip_coil_1_state']
|
| 637 |
+
current_phase_entry['waveformAnalysis']['coilAnalysis']['tripCoil1']['measuredValues'] = f"Avg: {current_data_summary['trip_coil_1_avg']:.2f} A"
|
| 638 |
+
if current_data_summary['trip_coil_1_state'] == 'Active':
|
| 639 |
+
current_phase_entry['waveformAnalysis']['coilAnalysis']['tripCoil1']['description'] = "Trip coil 1 is energized, initiating opening operation"
|
| 640 |
+
else:
|
| 641 |
+
current_phase_entry['waveformAnalysis']['coilAnalysis']['tripCoil1']['description'] = "Trip coil 1 is de-energized"
|
| 642 |
+
|
| 643 |
+
current_phase_entry['waveformAnalysis']['coilAnalysis']['tripCoil2']['status'] = current_data_summary['trip_coil_2_state']
|
| 644 |
+
current_phase_entry['waveformAnalysis']['coilAnalysis']['tripCoil2']['measuredValues'] = f"Avg: {current_data_summary['trip_coil_2_avg']:.2f} A"
|
| 645 |
+
if current_data_summary['trip_coil_2_state'] == 'Active':
|
| 646 |
+
current_phase_entry['waveformAnalysis']['coilAnalysis']['tripCoil2']['description'] = "Trip coil 2 is energized, providing redundant trip capability"
|
| 647 |
+
else:
|
| 648 |
+
# Check if it's failed or just inactive
|
| 649 |
+
if current_data_summary['trip_coil_2_avg'] == 0.0 and current_data_summary['trip_coil_1_state'] == 'Active':
|
| 650 |
+
current_phase_entry['waveformAnalysis']['coilAnalysis']['tripCoil2']['description'] = "Trip coil 2 appears non-functional - potential redundancy loss"
|
| 651 |
+
else:
|
| 652 |
+
current_phase_entry['waveformAnalysis']['coilAnalysis']['tripCoil2']['description'] = "Trip coil 2 is de-energized"
|
| 653 |
+
|
| 654 |
+
# --- LLM Enhancement ---
|
| 655 |
+
if llm:
|
| 656 |
+
try:
|
| 657 |
+
llm_prompt = generate_llm_description_prompt(current_data_summary, current_phase_entry)
|
| 658 |
+
|
| 659 |
+
# Invoke LLM
|
| 660 |
+
response = llm.invoke([HumanMessage(content=llm_prompt)])
|
| 661 |
+
content = response.content.replace("```json", "").replace("```", "").strip()
|
| 662 |
+
llm_output = json.loads(content)
|
| 663 |
+
|
| 664 |
+
current_phase_entry['details']['characteristics'] = llm_output.get('key_characteristics', [])
|
| 665 |
+
current_phase_entry['eventSynopsis'] = llm_output.get('event_synopsis', "")
|
| 666 |
+
current_phase_entry['diagnosticVerdict'] = llm_output.get('diagnostic_verdict_details', "")
|
| 667 |
+
|
| 668 |
+
# Extract confidence score
|
| 669 |
+
llm_confidence = llm_output.get('confidence_score', 0)
|
| 670 |
+
if isinstance(llm_confidence, (int, float)):
|
| 671 |
+
current_phase_entry['confidence'] = max(0, min(100, int(llm_confidence)))
|
| 672 |
+
else:
|
| 673 |
+
current_phase_entry['confidence'] = 50
|
| 674 |
+
|
| 675 |
+
except Exception as e:
|
| 676 |
+
print(f"Error calling LLM for phase {phase_id}: {e}")
|
| 677 |
+
# Fallback is already set by programmatic logic or defaults
|
| 678 |
+
current_phase_entry['diagnosticVerdict'] = f"Analysis based on programmatic data: {current_data_summary['programmatic_health_status']}. (LLM enhancement failed)"
|
| 679 |
+
current_phase_entry['confidence'] = 50
|
| 680 |
+
|
| 681 |
+
# Assign status for CBHI calculation
|
| 682 |
+
current_phase_entry['status'] = current_data_summary.get('programmatic_health_status', "Unknown")
|
| 683 |
+
|
| 684 |
+
final_segmented_phases.append(current_phase_entry)
|
| 685 |
+
|
| 686 |
+
json_output_template['phaseWiseAnalysis'] = final_segmented_phases
|
| 687 |
+
return json_output_template
|
core/signal/segmentation.py
ADDED
|
@@ -0,0 +1,134 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Previous Name: analysis/agents/arcing_segmentation.py
|
| 2 |
+
import pandas as pd
|
| 3 |
+
import json
|
| 4 |
+
import matplotlib.pyplot as plt
|
| 5 |
+
import io
|
| 6 |
+
import base64
|
| 7 |
+
from langchain_core.messages import HumanMessage
|
| 8 |
+
from langchain_google_genai import ChatGoogleGenerativeAI
|
| 9 |
+
|
| 10 |
+
def get_arcing_prompt(data_str: str) -> str:
    """Build the multimodal LLM prompt for locating arcing-contact events T0-T4.

    The prompt teaches the model the electrical signature of each timepoint
    (closed plateau, motion start, main-contact step, arcing plateau,
    open-circuit jump) and pins the reply to a strict JSON schema so the
    caller can parse it with ``json.loads``.

    Args:
        data_str: Sampled DCRM data rendered as a plain-text table with
            Time_ms, Resistance and Travel columns (see
            ``identify_arcing_events`` for how it is produced).

    Returns:
        The complete instruction prompt with ``data_str`` interpolated.
    """
    return f"""
I have extracted data from a DCRM (Dynamic Contact Resistance Measurement) graph.
Data (Sampled): {data_str}

The columns are:
- 'Time_ms': Time in milliseconds.
- 'Resistance': Contact resistance in micro-ohms (µΩ).
- 'Travel': Mechanical position/displacement (mm).

I have also provided the image of the graph.

=== TASK: IDENTIFY ARCING CONTACT EVENTS (T0-T4) ===

Your goal is to identify 5 specific timepoints (T0, T1, T2, T3, T4) that define the arcing contact behavior.

**T0 (Breaker Closed)**:
- State: Main Contacts are fully closed.
- Signature: Resistance is LOW and STABLE (DLRO value, typically 30-60 µΩ).
- Time: t=0 or very early.

**T1 (Motion Starts)**:
- Event: The breaker mechanism begins to move.
- Signature: First significant deviation in the TRAVEL curve (Red). Resistance might rise slightly due to vibration.

**T2 (Main Contact Separation)**:
- Event: The Main Contacts (Silver) separate, forcing current to the Arcing Contacts (Tungsten).
- **VISUAL SIGNATURE**: Look for a sharp "STEP" increase in Resistance.
- **DATA SIGNATURE**: Resistance jumps from Low (~40 µΩ) to Medium (~150-300 µΩ).
- This is the START of the "Arcing Plateau".

**T3 (Arcing Contact Wipe Zone)**:
- Event: Only Arcing Contacts are touching.
- Signature: The "Plateau" phase between T2 and T4.
- Healthy: Flat plateau. Unhealthy: Noisy/Spiky.
- **Action**: Return the approximate MIDPOINT time of this plateau.

**T4 (Arcing Contact Separation)**:
- Event: The Arcing Contacts part.
- **VISUAL SIGNATURE**: Resistance shoots VERTICALLY to INFINITY (Open Circuit).
- **DATA SIGNATURE**: Resistance jumps from Medium (~200 µΩ) to High/Infinity (>2000 µΩ).
- This is the END of the electrical connection.

=== IMPORTANT: JSON FORMATTING ===
- You must return valid JSON.
- Do NOT include markdown formatting (like ```json).
- If you cannot find a point with certainty, return null.

=== OUTPUT FORMAT (Strict JSON) ===
{{
"events": {{
"T0_breaker_closed": float,
"T1_motion_start": float,
"T2_main_separation": float,
"T3_arcing_plateau_mid": float,
"T4_arcing_separation": float
}},
"confidence": "High"|"Medium"|"Low",
"reasoning": "Brief explanation of how you found T2 and T4"
}}
"""
|
| 71 |
+
|
| 72 |
+
def _render_dcrm_plot(df_subset):
    """Render Travel (red) vs Resistance (green) over time; return base64 PNG.

    The Resistance axis is clamped to 0-2000 µΩ so the T2 "step" stays
    visible — the open-circuit spike would otherwise flatten the region of
    interest.
    """
    fig = plt.figure(figsize=(10, 6))
    try:
        # Primary axis: mechanical travel (red).
        ax1 = plt.gca()
        ax1.set_xlabel('Time (ms)')
        ax1.set_ylabel('Travel (mm)', color='tab:red')
        ax1.plot(df_subset['Time_ms'], df_subset['Travel'], color='tab:red', label='Travel')
        ax1.tick_params(axis='y', labelcolor='tab:red')

        # Secondary axis: contact resistance (green).
        ax2 = ax1.twinx()
        ax2.set_ylabel('Resistance (µΩ)', color='tab:green')
        ax2.plot(df_subset['Time_ms'], df_subset['Resistance'], color='tab:green', label='Resistance')
        ax2.tick_params(axis='y', labelcolor='tab:green')
        # Limit Resistance view to see the "step" (ignore infinity spike)
        ax2.set_ylim(0, 2000)

        plt.title("DCRM Signature: Identify T0, T1, T2, T4")
        plt.grid(True, linestyle='--', alpha=0.7)

        buf = io.BytesIO()
        plt.savefig(buf, format='png')
        buf.seek(0)
        return base64.b64encode(buf.read()).decode('utf-8')
    finally:
        # Always release the figure — even if plotting/saving raises —
        # so repeated calls do not leak matplotlib figures.
        plt.close(fig)


def _parse_llm_json(raw):
    """Strip optional markdown code fences from *raw* and parse it as JSON.

    Raises:
        json.JSONDecodeError: if the cleaned text is not valid JSON.
    """
    content = raw.strip()
    if "```json" in content:
        content = content.split("```json")[1].split("```")[0].strip()
    elif "```" in content:
        content = content.split("```")[1].strip()
    return json.loads(content)


def identify_arcing_events(df, llm):
    """
    Identify the arcing-contact timepoints (T0-T4) of a DCRM record via an LLM.

    Downsamples the first 200 ms of the record to ~100 rows (token budget),
    renders the Resistance/Travel curves to a base64 PNG, and sends both the
    text table and the image to a multimodal chat model, parsing its
    strict-JSON answer.

    Args:
        df: DataFrame with 'Time_ms', 'Resistance' and 'Travel' columns.
        llm: LangChain-compatible chat model accepting multimodal
            HumanMessage content (text + image_url).

    Returns:
        dict: the parsed LLM output ('events', 'confidence', 'reasoning') on
        success, or {'error': <message>} on any failure — callers check for
        the 'error' key rather than catching exceptions.
    """
    try:
        # 1. Prepare a downsampled data sample; the switching action happens
        #    within the first 200 ms.
        df_subset = df[df['Time_ms'] < 200].copy()
        step = max(1, len(df_subset) // 100)
        data_str = df_subset.iloc[::step][['Time_ms', 'Resistance', 'Travel']].to_string(index=False)

        # 2. Generate the plot image (base64-encoded PNG).
        image_data = _render_dcrm_plot(df_subset)

        # 3. Call the LLM with text + image and parse its JSON reply.
        prompt = get_arcing_prompt(data_str)
        message = HumanMessage(
            content=[
                {"type": "text", "text": prompt},
                {"type": "image_url", "image_url": {"url": f"data:image/png;base64,{image_data}"}},
            ]
        )
        response = llm.invoke([message])
        return _parse_llm_json(response.content)

    except Exception as e:
        # Best-effort boundary: surface the failure as data instead of raising.
        return {"error": str(e)}
|
core/utils/__pycache__/report_generator.cpython-313.pyc
ADDED
|
Binary file (11.7 kB). View file
|
|
|