TheDeepDas commited on
Commit
6bbbfda
·
1 Parent(s): ac86e10
This view is limited to 50 files because it contains too many changes.   See raw diff
Files changed (50) hide show
  1. .dockerignore.prod +48 -0
  2. DEPLOYMENT.md +69 -0
  3. Dockerfile.prod +56 -0
  4. app/__pycache__/config.cpython-311.pyc +0 -0
  5. app/__pycache__/main.cpython-311.pyc +0 -0
  6. app/routers/__pycache__/incidents.cpython-311.pyc +0 -0
  7. app/routers/incidents.py +41 -9
  8. app/services/fallback_detection.py +371 -0
  9. app/services/image_processing.py +0 -0
  10. app/services/incidents.py +70 -12
  11. app/uploads/04855a39eece42338f57c20e7ccd614e_fallback.jpg +0 -0
  12. app/uploads/056e4270decd4bbdb4f167fb5187e794_fallback.jpg +0 -0
  13. app/uploads/0b4254a6e390450788b09c09da1e3855_fallback.jpg +0 -0
  14. app/uploads/0ba1833ae7364b728731d27591299af4_fallback.jpg +0 -0
  15. app/uploads/27473c83a5804ccfa352678205b6f53a_fallback.jpg +0 -0
  16. app/uploads/378dc0125670418e87cf5d66c71407f3_fallback.jpg +0 -0
  17. app/uploads/380dc542da37443099c79b4e675f8bd8_fallback.jpg +0 -0
  18. app/uploads/3ab6170095414cfaa558737e8d6b1cfc_fallback.jpg +0 -0
  19. app/uploads/43a9a75deefd4bee90103f5f5c65957a_fallback.jpg +0 -0
  20. app/uploads/4551c22d8ccb4614ba968679fc7efb10_fallback.jpg +0 -0
  21. app/uploads/48341c52bea94fb28f1ea9805952a4b1_fallback.jpg +0 -0
  22. app/uploads/48ab17d5999341a4b9dd285ed3e9b725_fallback.jpg +0 -0
  23. app/uploads/5af32e63ad214fb6953b4dea49c8368c_fallback.jpg +0 -0
  24. app/uploads/66bc109be3164dc6965c9e4191060f0e_fallback.jpg +0 -0
  25. app/uploads/71b91fd020494a3e94275f85b5e141db_fallback.jpg +0 -0
  26. app/uploads/7384a3002d4b45bfa09a929e6b17642e_fallback.jpg +0 -0
  27. app/uploads/757ef3a2d1634d328e58ed19d21e73d9_fallback.jpg +0 -0
  28. app/uploads/75a12b79967a4be592eab289968e5112_fallback.jpg +0 -0
  29. app/uploads/882c2f10490c4efc8372081d0d8e43be_fallback.jpg +0 -0
  30. app/uploads/9b99947b87a34e148ad2b3196644c34d_fallback.jpg +0 -0
  31. app/uploads/9c7c203c03de4f06a0851e13993ffe6c_fallback.jpg +0 -0
  32. app/uploads/a023c3e35d8645d49e21f2fab542c249_fallback.jpg +0 -0
  33. app/uploads/a9b6706011614e8d9bef5910da9b5ae3_fallback.jpg +0 -0
  34. app/uploads/aa28976bd67a4d0da3cc54fa5dde327a_fallback.jpg +0 -0
  35. app/uploads/b10f689ac76841adaff7d8db685e9b7d_fallback.jpg +0 -0
  36. app/uploads/b570b412fba0453d9d756502ef2e4040_fallback.jpg +0 -0
  37. app/uploads/ca1c8a06f4254c168e6db112af99fe5b_fallback.jpg +0 -0
  38. app/uploads/d51141c54e1345c3bae2ea67925c2d1e_fallback.jpg +0 -0
  39. app/uploads/d9549ce35f2e4e23945184875826a5ef_fallback.jpg +0 -0
  40. app/uploads/e10d84d682bc4eadb6e1397facfebeba_fallback.jpg +0 -0
  41. app/uploads/e1c1da5ca1bb4ef2ae260c644e3786ba_fallback.jpg +0 -0
  42. app/uploads/e321ecccfada4b9bbe797faaefd2c58a_fallback.jpg +0 -0
  43. app/uploads/ef720721f92b41918e372dd5f53b2a0f_fallback.jpg +0 -0
  44. prepare_deployment.bat +48 -0
  45. prepare_deployment.sh +41 -0
  46. requirements-version-fix.txt +8 -0
  47. requirements.txt +5 -1
  48. test_beach_plastic_detection.py +177 -0
  49. test_end_to_end_flow.py +473 -0
  50. test_enhanced_detection.py +149 -0
.dockerignore.prod ADDED
@@ -0,0 +1,48 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
# Ignore all test files and notebooks (not needed at runtime)
test_*.py
tests/
test_files/
test_output/
eda.ipynb
*.ipynb

# Temporary files and Python bytecode caches
tmp*
__pycache__/
*.py[cod]
*$py.class
.pytest_cache/

# Ignore smaller YOLOv8 models (we only need YOLOv8x)
yolov8n.pt
yolov8s.pt
yolov8m.pt
yolov8l.pt
# Note: don't ignore yolov8x.pt - we need it!

# Documentation files not needed for runtime
# (later "!requirements.txt" re-includes the one .txt we do need)
*.md
*.txt
!requirements.txt

# Debug scripts
debug_*.py
create_test_*.py
generate_*.py
list_*.py
train_*.py

# Git files
.git/
.gitattributes
.gitignore

# Environment and IDE files
.env
.vscode/
.idea/

# Only needed for development
# (requirements-*.txt does not match requirements.txt, but the explicit
# re-include below keeps the rule order safe)
requirements-*.txt
!requirements.txt
Dockerfile.simple
DEPLOYMENT.md ADDED
@@ -0,0 +1,69 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Marine Pollution Detection System - Deployment Guide
2
+
3
+ This guide provides instructions for deploying the Marine Pollution Detection system using YOLOv8x for optimal detection accuracy.
4
+
5
+ ## Deployment Preparation
6
+
7
+ ### Option 1: Using the Automated Script (Recommended)
8
+
9
+ 1. Run the deployment preparation script:
10
+
11
+ **For Windows:**
12
+ ```
13
+ prepare_deployment.bat
14
+ ```
15
+
16
+ **For Linux/Mac:**
17
+ ```
18
+ chmod +x prepare_deployment.sh
19
+ ./prepare_deployment.sh
20
+ ```
21
+
22
+ 2. Build the Docker container:
23
+ ```
24
+ docker build -t marine-pollution-api .
25
+ ```
26
+
27
+ 3. Run the container:
28
+ ```
29
+ docker run -p 7860:7860 marine-pollution-api
30
+ ```
31
+
32
+ ### Option 2: Manual Deployment
33
+
34
+ 1. Clean up unnecessary files:
35
+ - Remove all test files (`test_*.py`, `test_files/`, `test_output/`, etc.)
36
+ - Remove smaller YOLO models (keep only `yolov8x.pt`)
37
+ - Remove development utilities (`debug_*.py`, etc.)
38
+
39
+ 2. Use the production Dockerfile:
40
+ ```
41
+ cp Dockerfile.prod Dockerfile
42
+ cp .dockerignore.prod .dockerignore
43
+ ```
44
+
45
+ 3. Build and run the Docker container as described in Option 1.
46
+
47
+ ## Important Notes
48
+
49
+ 1. **YOLOv8x Model**: The system now exclusively uses YOLOv8x (the largest/most accurate model) for marine pollution detection. The model file will be downloaded automatically on the first run if it doesn't exist.
50
+
51
+ 2. **Image Annotation**: The output images now have more subtle scene annotations in small text to improve readability.
52
+
53
+ 3. **Deployment Size**: The Docker image is optimized to include only necessary files for production use.
54
+
55
+ 4. **First Run**: The first time the system runs, it will download the YOLOv8x model (approximately 136MB). Subsequent runs will use the downloaded model.
56
+
57
+ 5. **Requirements**: Make sure the deployment environment has sufficient memory and processing power to run YOLOv8x effectively.
58
+
59
+ ## Troubleshooting
60
+
61
+ 1. **Model Download Issues**: If the model download fails, check your internet connection. If you want to manually provide the YOLOv8x model, place the file in the root directory of the project.
62
+
63
+ 2. **Performance Optimization**: For better performance on low-resource environments, consider adding memory management optimizations or serving the model with ONNX Runtime.
64
+
65
+ 3. **Errors**: Check the logs for detailed error messages. Most issues are related to model loading or file paths.
66
+
67
+ ## Contact
68
+
69
+ For any deployment issues or questions, please contact the development team.
Dockerfile.prod ADDED
@@ -0,0 +1,56 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
# Production Dockerfile for Marine Pollution Detection API
# Optimized for deployment with only necessary files included

FROM python:3.11-slim

# Set working directory
WORKDIR /app

# Set environment variables:
#   PYTHONDONTWRITEBYTECODE - don't write .pyc files into the image
#   PYTHONUNBUFFERED        - stream stdout/stderr logs immediately
#   PORT                    - Hugging Face Spaces default port
ENV PYTHONDONTWRITEBYTECODE=1
ENV PYTHONUNBUFFERED=1
ENV PORT=7860

# Create a non-root user for security
RUN useradd --create-home --shell /bin/bash app

# Install system dependencies
# (build-essential for native wheels, curl for the HEALTHCHECK below)
RUN apt-get update && apt-get install -y \
build-essential \
curl \
&& rm -rf /var/lib/apt/lists/*

# Copy only the requirements file first for better layer caching
COPY requirements.txt .

# Install Python dependencies
RUN pip install --no-cache-dir --upgrade pip
RUN pip install --no-cache-dir -r requirements.txt

# Copy only the necessary application files (explicitly excluding test files)
COPY app/ /app/app/
COPY models/ /app/models/
COPY start-hf.sh /app/
COPY Procfile /app/
COPY Procfile.railway /app/

# Create necessary directories with proper permissions
# (app/uploads for persisted images, /tmp/uploads for scratch files)
RUN mkdir -p app/uploads /tmp/uploads && \
chown -R app:app /app /tmp/uploads && \
chmod -R 755 /app /tmp/uploads

# Make startup script executable
RUN chmod +x start-hf.sh && chown app:app start-hf.sh

# Switch to non-root user
USER app

# Expose port 7860 (Hugging Face Spaces default)
EXPOSE 7860

# Health check against the app's /health endpoint
HEALTHCHECK --interval=30s --timeout=10s --start-period=5s --retries=3 \
CMD curl -f http://localhost:7860/health || exit 1

# Command to run the application
CMD ["./start-hf.sh"]
app/__pycache__/config.cpython-311.pyc CHANGED
Binary files a/app/__pycache__/config.cpython-311.pyc and b/app/__pycache__/config.cpython-311.pyc differ
 
app/__pycache__/main.cpython-311.pyc CHANGED
Binary files a/app/__pycache__/main.cpython-311.pyc and b/app/__pycache__/main.cpython-311.pyc differ
 
app/routers/__pycache__/incidents.cpython-311.pyc CHANGED
Binary files a/app/routers/__pycache__/incidents.cpython-311.pyc and b/app/routers/__pycache__/incidents.cpython-311.pyc differ
 
app/routers/incidents.py CHANGED
@@ -49,16 +49,24 @@ async def classify_incident_report(
49
  incident_class, severity = classification_result
50
  confidence_scores = None
51
 
52
- # Upload image to Cloudinary
53
  image_path = None
 
 
 
54
  if image:
55
- image_path = await store_image(image)
56
- if not image_path:
57
  # If Cloudinary upload fails, raise an error since we're not using local fallback
58
  raise HTTPException(
59
  status_code=status.HTTP_500_INTERNAL_SERVER_ERROR,
60
  detail="Failed to upload image to cloud storage"
61
  )
 
 
 
 
 
62
 
63
  document = {
64
  "name": name,
@@ -69,6 +77,8 @@ async def classify_incident_report(
69
  "severity": severity,
70
  "reporter_id": current_user["id"],
71
  "image_path": image_path,
 
 
72
  "created_at": datetime.utcnow(),
73
  }
74
 
@@ -129,10 +139,18 @@ async def list_incidents(current_user=Depends(get_current_user)):
129
  @router.post("/update-status/{incident_id}")
130
  async def update_status(
131
  incident_id: str,
132
- status: str = Body(..., embed=True),
133
  current_user=Depends(get_current_user),
134
  ):
135
- """Update the status of an incident (validated, rejected, investigating)"""
 
 
 
 
 
 
 
 
136
  if not is_database_available():
137
  raise HTTPException(
138
  status_code=status.HTTP_503_SERVICE_UNAVAILABLE,
@@ -146,8 +164,18 @@ async def update_status(
146
  detail="Only validators can update incident status"
147
  )
148
 
 
 
 
 
 
 
 
 
 
 
149
  # Validate the status value
150
- if status not in ["validated", "rejected", "investigating"]:
151
  raise HTTPException(
152
  status_code=status.HTTP_400_BAD_REQUEST,
153
  detail="Invalid status. Must be one of: validated, rejected, investigating"
@@ -156,12 +184,16 @@ async def update_status(
156
  try:
157
  success = await update_incident_status(
158
  incident_id=incident_id,
159
- status=status,
160
- validator_id=current_user["id"]
 
161
  )
162
 
163
  if success:
164
- return {"message": f"Incident status updated to {status}"}
 
 
 
165
  else:
166
  raise HTTPException(
167
  status_code=status.HTTP_404_NOT_FOUND,
 
49
  incident_class, severity = classification_result
50
  confidence_scores = None
51
 
52
+ # Upload image to Cloudinary and process with object detection
53
  image_path = None
54
+ annotated_image_path = None
55
+ detection_results = None
56
+
57
  if image:
58
+ image_result = await store_image(image)
59
+ if not image_result:
60
  # If Cloudinary upload fails, raise an error since we're not using local fallback
61
  raise HTTPException(
62
  status_code=status.HTTP_500_INTERNAL_SERVER_ERROR,
63
  detail="Failed to upload image to cloud storage"
64
  )
65
+
66
+ # Extract the image paths and detection results
67
+ image_path = image_result["image_url"]
68
+ annotated_image_path = image_result["annotated_image_url"]
69
+ detection_results = image_result["detection_results"]
70
 
71
  document = {
72
  "name": name,
 
77
  "severity": severity,
78
  "reporter_id": current_user["id"],
79
  "image_path": image_path,
80
+ "annotated_image_path": annotated_image_path,
81
+ "detection_results": detection_results,
82
  "created_at": datetime.utcnow(),
83
  }
84
 
 
139
  @router.post("/update-status/{incident_id}")
140
  async def update_status(
141
  incident_id: str,
142
+ data: dict = Body(...),
143
  current_user=Depends(get_current_user),
144
  ):
145
+ """
146
+ Update the status of an incident (validated, rejected, investigating)
147
+
148
+ Request body format:
149
+ {
150
+ "status": "validated" | "rejected" | "investigating",
151
+ "comment": "Optional explanation for the status change" (optional)
152
+ }
153
+ """
154
  if not is_database_available():
155
  raise HTTPException(
156
  status_code=status.HTTP_503_SERVICE_UNAVAILABLE,
 
164
  detail="Only validators can update incident status"
165
  )
166
 
167
+ # Extract status and optional comment from request body
168
+ if not data or "status" not in data:
169
+ raise HTTPException(
170
+ status_code=status.HTTP_400_BAD_REQUEST,
171
+ detail="Missing required field 'status' in request body"
172
+ )
173
+
174
+ incident_status = data["status"]
175
+ comment = data.get("comment") # Optional field
176
+
177
  # Validate the status value
178
+ if incident_status not in ["validated", "rejected", "investigating"]:
179
  raise HTTPException(
180
  status_code=status.HTTP_400_BAD_REQUEST,
181
  detail="Invalid status. Must be one of: validated, rejected, investigating"
 
184
  try:
185
  success = await update_incident_status(
186
  incident_id=incident_id,
187
+ status=incident_status,
188
+ validator_id=current_user["id"],
189
+ comment=comment
190
  )
191
 
192
  if success:
193
+ response_message = f"Incident status updated to {incident_status}"
194
+ if comment:
195
+ response_message += " with comment"
196
+ return {"message": response_message}
197
  else:
198
  raise HTTPException(
199
  status_code=status.HTTP_404_NOT_FOUND,
app/services/fallback_detection.py ADDED
@@ -0,0 +1,371 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """
2
+ Fallback detection module for when the main YOLO model fails due to
3
+ torchvision or other dependency issues.
4
+
5
+ This provides a simple detection mechanism without dependencies on
6
+ PyTorch or torchvision.
7
+ """
8
+
9
+ import logging
10
+ import numpy as np
11
+ import os
12
+ import tempfile
13
+ from typing import Dict, List, Optional, Tuple, Union
14
+ import uuid
15
+
16
+ # Initialize logger
17
+ logger = logging.getLogger(__name__)
18
+
19
+ # Try to import OpenCV, but don't fail if not available
20
+ try:
21
+ import cv2
22
+ HAS_CV2 = True
23
+ except ImportError:
24
+ HAS_CV2 = False
25
+ logger.warning("OpenCV (cv2) not available in fallback_detection module")
26
+
27
# Basic color detection thresholds.
# These are simple HSV thresholds for detecting common pollution colors.
# Each entry maps a pollution type to:
#   lower/upper  - inclusive HSV bounds passed to cv2.inRange
#   label        - class name reported in detections
#   confidence   - fixed confidence assigned to matches of this type
COLOR_THRESHOLDS = {
    # Dark regions on water — low value across all hues.
    "oil_spill": {
        "lower": np.array([0, 0, 0]),
        "upper": np.array([180, 255, 80]),
        "label": "Potential Oil Spill",
        "confidence": 0.6
    },
    # Bright, saturated warm tones.
    "plastic_bright": {
        "lower": np.array([0, 50, 180]),
        "upper": np.array([30, 255, 255]),
        "label": "Potential Plastic Debris",
        "confidence": 0.7
    },
    # Near-white, low-saturation, very bright regions.
    "foam_pollution": {
        "lower": np.array([0, 0, 200]),
        "upper": np.array([180, 30, 255]),
        "label": "Potential Foam/Chemical Pollution",
        "confidence": 0.65
    },
    # Enhanced plastic bottle detection thresholds
    "plastic_bottles_clear": {
        "lower": np.array([0, 0, 140]),
        "upper": np.array([180, 60, 255]),
        "label": "plastic bottle",  # Updated label to match YOLO naming
        "confidence": 0.80
    },
    "plastic_bottles_blue": {
        "lower": np.array([90, 40, 100]),
        "upper": np.array([130, 255, 255]),
        "label": "plastic bottle",
        "confidence": 0.75
    },
    "plastic_bottles_green": {
        "lower": np.array([35, 40, 100]),
        "upper": np.array([85, 255, 255]),
        "label": "plastic bottle",
        "confidence": 0.75
    },
    "plastic_bottles_white": {
        "lower": np.array([0, 0, 180]),
        "upper": np.array([180, 30, 255]),
        "label": "plastic bottle",
        "confidence": 0.75
    },
    "plastic_bottles_cap": {
        "lower": np.array([100, 100, 100]),
        "upper": np.array([140, 255, 255]),
        "label": "plastic bottle cap",
        "confidence": 0.85
    },
    # Broader plastic-waste colors, lower confidence than the bottle entries.
    "blue_plastic": {
        "lower": np.array([90, 50, 50]),
        "upper": np.array([130, 255, 255]),
        "label": "plastic waste",  # Updated label for consistency
        "confidence": 0.6
    },
    "green_plastic": {
        "lower": np.array([35, 50, 50]),
        "upper": np.array([85, 255, 255]),
        "label": "plastic waste",
        "confidence": 0.6
    },
    "white_plastic": {
        "lower": np.array([0, 0, 190]),
        "upper": np.array([180, 30, 255]),
        "label": "plastic waste",
        "confidence": 0.6
    }
}
98
+
99
def analyze_texture_for_pollution(img):
    """
    Analyze image texture to detect unnatural patterns that could be debris.

    Uses edge detection and morphological operations to find potential plastic
    debris, plus a dedicated HSV color-mask pass tuned for plastic bottles.

    Args:
        img: OpenCV image in BGR format

    Returns:
        List of dicts with "bbox" ([x1, y1, x2, y2]), "confidence" and
        "class" keys for each potential debris region; empty list when
        OpenCV is unavailable or analysis fails.
    """
    # Fix: guard explicitly on HAS_CV2 like the sibling functions in this
    # module. Previously a missing OpenCV install surfaced as a NameError
    # that was swallowed by the broad except below and logged as a
    # texture-analysis error on every call.
    if not HAS_CV2:
        logger.warning("OpenCV not available for texture analysis")
        return []

    try:
        # Edge map: grayscale -> Gaussian blur (noise reduction) -> Canny ->
        # dilation to connect nearby edges into contiguous contours.
        gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
        blurred = cv2.GaussianBlur(gray, (5, 5), 0)
        edges = cv2.Canny(blurred, 50, 150)
        kernel = np.ones((3, 3), np.uint8)
        dilated_edges = cv2.dilate(edges, kernel, iterations=2)
        contours, _ = cv2.findContours(dilated_edges, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)

        # Dedicated search for bottle-shaped objects via HSV color filtering.
        hsv = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)
        bottle_mask = np.zeros_like(gray)

        # Clear/translucent plastic
        clear_mask = cv2.inRange(hsv, np.array([0, 0, 140]), np.array([180, 60, 255]))
        bottle_mask = cv2.bitwise_or(bottle_mask, clear_mask)

        # Blue plastic
        blue_mask = cv2.inRange(hsv, np.array([90, 40, 100]), np.array([130, 255, 255]))
        bottle_mask = cv2.bitwise_or(bottle_mask, blue_mask)

        # Close then open to fill small holes and drop speckle noise.
        bottle_mask = cv2.morphologyEx(bottle_mask, cv2.MORPH_CLOSE, kernel)
        bottle_mask = cv2.morphologyEx(bottle_mask, cv2.MORPH_OPEN, kernel)
        bottle_contours, _ = cv2.findContours(bottle_mask, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)

        debris_regions = []

        # Pass 1: generic edge contours filtered by shape/texture heuristics.
        for contour in contours:
            area = cv2.contourArea(contour)
            perimeter = cv2.arcLength(contour, True)

            # Skip very small contours
            if area < 100:
                continue

            if perimeter > 0:
                circularity = 4 * np.pi * area / (perimeter * perimeter)

                # Unnatural objects tend to sit in a mid circularity range
                # (not too circular, not too irregular).
                if 0.2 < circularity < 0.8:
                    x, y, w, h = cv2.boundingRect(contour)
                    aspect_ratio = float(w) / h if h > 0 else 0

                    # Most natural objects don't have extreme aspect ratios
                    if 0.2 < aspect_ratio < 5:
                        roi = gray[y:y+h, x:x+w]
                        if roi.size > 0:
                            # Man-made objects often have uniform textures,
                            # i.e. a low standard deviation of pixel values.
                            std_dev = np.std(roi)
                            if std_dev < 40:
                                debris_regions.append({
                                    "bbox": [x, y, x+w, y+h],
                                    "confidence": 0.55,
                                    "class": "Potential Debris (Texture)"
                                })

        # Pass 2: bottle-specific contours, reported with higher confidence.
        for contour in bottle_contours:
            area = cv2.contourArea(contour)
            if area < 200:  # Higher threshold for bottles
                continue

            perimeter = cv2.arcLength(contour, True)
            if perimeter <= 0:
                continue

            x, y, w, h = cv2.boundingRect(contour)

            # Bottles are usually taller than wide
            # (aspect ratio roughly 0.2-0.7) and reasonably tall.
            aspect_ratio = float(w) / h if h > 0 else 0
            if 0.2 < aspect_ratio < 0.7 and h > 50:
                bottle_confidence = 0.70

                roi_hsv = hsv[y:y+h, x:x+w]
                if roi_hsv.size > 0:
                    # Bottles often have uniform hue and saturation.
                    h_std = np.std(roi_hsv[:, :, 0])
                    s_std = np.std(roi_hsv[:, :, 1])
                    if h_std < 30 and s_std < 60:
                        bottle_confidence = 0.85  # Higher confidence for uniform color

                debris_regions.append({
                    "bbox": [x, y, x+w, y+h],
                    "confidence": bottle_confidence,
                    "class": "Plastic Bottle"
                })

        return debris_regions
    except Exception as e:
        logger.error(f"Texture analysis failed: {str(e)}")
        return []
233
+
234
def fallback_detect_objects(image_path: str) -> Dict:
    """
    Perform a simple color-based detection when ML detection fails.

    Uses basic computer vision techniques (texture analysis via
    analyze_texture_for_pollution plus HSV thresholding driven by
    COLOR_THRESHOLDS) to detect potential pollution.

    Args:
        image_path: Path to the image file

    Returns:
        Dict with detections in the same format as the main detection
        function. Fix: every return path now carries both the
        "annotated_image_path" and "annotated_image_url" keys — previously
        the success path returned only *_path while failure paths returned
        only *_url, so callers reading either key could hit a missing key.
    """
    # Shared result for every failure path, with a consistent key set.
    empty_result = {
        "detections": [],
        "detection_count": 0,
        "annotated_image_path": None,
        "annotated_image_url": None,
    }

    if not HAS_CV2:
        logger.warning("OpenCV not available for fallback detection")
        return empty_result

    try:
        # Read the image
        img = cv2.imread(image_path)
        if img is None:
            logger.error(f"Failed to read image at {image_path} in fallback detection")
            return empty_result

        # Convert to HSV for better color detection
        hsv = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)

        # Make a copy for annotation
        annotated = img.copy()

        # Initialize detections
        detections = []

        # First check if the image contains water — logged for diagnostics
        # only; detection proceeds either way.
        has_water = detect_water_body(image_path)
        logger.info(f"Water detection result: {'water detected' if has_water else 'no significant water detected'}")

        # Run texture-based detection for potential debris
        texture_detections = analyze_texture_for_pollution(img)
        detections.extend(texture_detections)
        logger.info(f"Texture analysis found {len(texture_detections)} potential debris objects")

        # Hoisted loop invariant: the same kernel serves every pollution type.
        kernel = np.ones((5, 5), np.uint8)

        # Detect potential pollution based on color profiles
        for pollution_type, thresholds in COLOR_THRESHOLDS.items():
            # Create mask using HSV thresholds
            mask = cv2.inRange(hsv, thresholds["lower"], thresholds["upper"])

            # Morphological open/close to remove speckle and fill holes.
            mask = cv2.morphologyEx(mask, cv2.MORPH_OPEN, kernel)
            mask = cv2.morphologyEx(mask, cv2.MORPH_CLOSE, kernel)

            # Find contours
            contours, _ = cv2.findContours(mask, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)

            # Filter out small contours, with a more sensitive area threshold
            # for plastic items (0.5% of image) than for other pollution
            # types (1% of image).
            if "plastic" in pollution_type:
                min_area = img.shape[0] * img.shape[1] * 0.005
            else:
                min_area = img.shape[0] * img.shape[1] * 0.01

            filtered_contours = [cnt for cnt in contours if cv2.contourArea(cnt) > min_area]

            # Process filtered contours
            for contour in filtered_contours:
                # Get bounding box
                x, y, w, h = cv2.boundingRect(contour)

                # Add to detections
                detections.append({
                    "class": thresholds["label"],
                    "confidence": thresholds["confidence"],
                    "bbox": [x, y, x + w, y + h]
                })

                # Draw bounding box and label on the annotated copy
                cv2.rectangle(annotated, (x, y), (x + w, y + h), (0, 255, 0), 2)
                label = f"{thresholds['label']}: {thresholds['confidence']:.2f}"
                cv2.putText(annotated, label, (x, y - 10),
                            cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255, 0), 2)

        # Save the annotated image next to the original
        annotated_image_path = f"{image_path}_fallback_annotated.jpg"
        cv2.imwrite(annotated_image_path, annotated)

        # Return the results - would normally upload image in real implementation
        return {
            "detections": detections,
            "detection_count": len(detections),
            "annotated_image_path": annotated_image_path,
            "annotated_image_url": None,
            "method": "fallback_color_detection"
        }

    except Exception as e:
        logger.error(f"Fallback detection failed: {str(e)}")
        return empty_result
332
+
333
def detect_water_body(image_path: str) -> bool:
    """
    Heuristic check for whether an image contains a large water body.

    Helps validate that the image is related to a marine environment.

    Args:
        image_path: Path to the image file

    Returns:
        True if a significant water body is detected. Also returns True
        when the check cannot be performed (OpenCV missing or an internal
        error), so callers treat "unknown" as "possibly water".
    """
    # Without OpenCV we cannot inspect the image; err on the side of "yes".
    if not HAS_CV2:
        return True

    try:
        frame = cv2.imread(image_path)
        if frame is None:
            return False

        # Blue/green hues in HSV count as water-like pixels.
        hsv_frame = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)
        water_mask = cv2.inRange(
            hsv_frame,
            np.array([90, 50, 50]),    # lower bound: blue/green hues
            np.array([150, 255, 255])  # upper bound
        )

        # Fraction of pixels falling inside the water color range.
        coverage = np.count_nonzero(water_mask) / water_mask.size

        # More than 30% coverage is treated as "contains water".
        return coverage > 0.3

    except Exception as e:
        logger.error(f"Water detection failed: {str(e)}")
        return True  # Assume yes if detection fails
app/services/image_processing.py ADDED
The diff for this file is too large to render. See raw diff
 
app/services/incidents.py CHANGED
@@ -1,5 +1,5 @@
1
  from pathlib import Path
2
- from typing import Optional
3
  from uuid import uuid4
4
  import logging
5
  import cloudinary
@@ -7,8 +7,11 @@ import cloudinary.uploader
7
  import tempfile
8
  import os
9
  from datetime import datetime
 
 
10
  from ..database import get_collection
11
  from ..config import get_settings
 
12
 
13
  logger = logging.getLogger(__name__)
14
  INCIDENTS_COLLECTION = "incidents"
@@ -35,7 +38,12 @@ async def get_all_incidents() -> list:
35
  return await cursor.to_list(length=None)
36
 
37
 
38
- async def update_incident_status(incident_id: str, status: str, validator_id: str) -> bool:
 
 
 
 
 
39
  """
40
  Update the status of an incident
41
 
@@ -43,6 +51,7 @@ async def update_incident_status(incident_id: str, status: str, validator_id: st
43
  incident_id: The ID of the incident to update
44
  status: The new status (validated, rejected, investigating)
45
  validator_id: The ID of the validator who updated the status
 
46
 
47
  Returns:
48
  True if the update was successful, False otherwise
@@ -54,14 +63,21 @@ async def update_incident_status(incident_id: str, status: str, validator_id: st
54
  from bson import ObjectId
55
  object_id = ObjectId(incident_id)
56
 
 
 
 
 
 
 
 
 
 
 
 
57
  # Update the incident with the new status and validator information
58
  result = await collection.update_one(
59
  {"_id": object_id},
60
- {"$set": {
61
- "status": status,
62
- "validated_by": validator_id,
63
- "validated_at": datetime.utcnow()
64
- }}
65
  )
66
 
67
  return result.modified_count > 0
@@ -70,10 +86,15 @@ async def update_incident_status(incident_id: str, status: str, validator_id: st
70
  return False
71
 
72
 
73
- async def store_image(upload_file) -> Optional[str]:
74
  """
75
- Store an uploaded image using Cloudinary only.
76
- No local fallback - if Cloudinary upload fails, the function will return None.
 
 
 
 
 
77
  """
78
  if upload_file is None:
79
  return None
@@ -102,12 +123,49 @@ async def store_image(upload_file) -> Optional[str]:
102
  resource_type="auto"
103
  )
104
 
105
- # Return the Cloudinary URL
106
  cloudinary_url = upload_result["secure_url"]
107
  logger.info(f"Cloudinary upload successful. URL: {cloudinary_url}")
108
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
109
  await upload_file.close()
110
- return cloudinary_url
111
 
112
  except Exception as e:
113
  logger.error(f"Failed to upload image to Cloudinary: {e}", exc_info=True)
 
1
  from pathlib import Path
2
+ from typing import Optional, Dict, Any, List, Union
3
  from uuid import uuid4
4
  import logging
5
  import cloudinary
 
7
  import tempfile
8
  import os
9
  from datetime import datetime
10
+ import asyncio
11
+
12
  from ..database import get_collection
13
  from ..config import get_settings
14
+ from .image_processing import detect_objects_in_image
15
 
16
  logger = logging.getLogger(__name__)
17
  INCIDENTS_COLLECTION = "incidents"
 
38
  return await cursor.to_list(length=None)
39
 
40
 
41
+ async def update_incident_status(
42
+ incident_id: str,
43
+ status: str,
44
+ validator_id: str,
45
+ comment: Optional[str] = None
46
+ ) -> bool:
47
  """
48
  Update the status of an incident
49
 
 
51
  incident_id: The ID of the incident to update
52
  status: The new status (validated, rejected, investigating)
53
  validator_id: The ID of the validator who updated the status
54
+ comment: Optional comment from the validator explaining the decision
55
 
56
  Returns:
57
  True if the update was successful, False otherwise
 
63
  from bson import ObjectId
64
  object_id = ObjectId(incident_id)
65
 
66
+ # Prepare the update with status and validator information
67
+ update_data = {
68
+ "status": status,
69
+ "validated_by": validator_id,
70
+ "validated_at": datetime.utcnow(),
71
+ }
72
+
73
+ # Add comment if provided
74
+ if comment:
75
+ update_data["validator_comment"] = comment
76
+
77
  # Update the incident with the new status and validator information
78
  result = await collection.update_one(
79
  {"_id": object_id},
80
+ {"$set": update_data}
 
 
 
 
81
  )
82
 
83
  return result.modified_count > 0
 
86
  return False
87
 
88
 
89
+ async def store_image(upload_file) -> Dict[str, Any]:
90
  """
91
+ Store an uploaded image using Cloudinary and process it with object detection.
92
+ Returns a dictionary with:
93
+ - image_url: The URL of the original uploaded image
94
+ - annotated_image_url: The URL of the image with object detection boxes (if available)
95
+ - detection_results: Object detection results (if available)
96
+
97
+ If upload fails, returns None
98
  """
99
  if upload_file is None:
100
  return None
 
123
  resource_type="auto"
124
  )
125
 
126
+ # Get the Cloudinary URL
127
  cloudinary_url = upload_result["secure_url"]
128
  logger.info(f"Cloudinary upload successful. URL: {cloudinary_url}")
129
 
130
+ # Initialize the result dictionary
131
+ result = {
132
+ "image_url": cloudinary_url,
133
+ "annotated_image_url": None,
134
+ "detection_results": None
135
+ }
136
+
137
+ # Run object detection on the uploaded image
138
+ try:
139
+ logger.info("Running object detection on uploaded image")
140
+ detection_result = await detect_objects_in_image(cloudinary_url)
141
+
142
+ if detection_result:
143
+ result["detection_results"] = detection_result["detections"]
144
+ result["annotated_image_url"] = detection_result["annotated_image_url"]
145
+
146
+ if detection_result["detection_count"] > 0:
147
+ logger.info(f"Object detection successful. Found {detection_result['detection_count']} objects.")
148
+ # Log the detected classes
149
+ classes = [f"{d['class']} ({int(d['confidence']*100)}%)" for d in detection_result["detections"]]
150
+ logger.info(f"Detected objects: {', '.join(classes)}")
151
+ else:
152
+ logger.info("Object detection completed but no relevant objects found in image")
153
+ else:
154
+ logger.warning("Object detection failed or returned None")
155
+
156
+ # Since detection failed, we'll use the original image as the annotated one
157
+ # This ensures the frontend still works properly
158
+ result["annotated_image_url"] = cloudinary_url
159
+ result["detection_results"] = []
160
+
161
+ except Exception as e:
162
+ logger.error(f"Error in object detection: {e}", exc_info=True)
163
+ # Provide fallback values to ensure frontend functionality
164
+ result["annotated_image_url"] = cloudinary_url # Use original as fallback
165
+ result["detection_results"] = [] # Empty detection results
166
+
167
  await upload_file.close()
168
+ return result
169
 
170
  except Exception as e:
171
  logger.error(f"Failed to upload image to Cloudinary: {e}", exc_info=True)
app/uploads/04855a39eece42338f57c20e7ccd614e_fallback.jpg ADDED
app/uploads/056e4270decd4bbdb4f167fb5187e794_fallback.jpg ADDED
app/uploads/0b4254a6e390450788b09c09da1e3855_fallback.jpg ADDED
app/uploads/0ba1833ae7364b728731d27591299af4_fallback.jpg ADDED
app/uploads/27473c83a5804ccfa352678205b6f53a_fallback.jpg ADDED
app/uploads/378dc0125670418e87cf5d66c71407f3_fallback.jpg ADDED
app/uploads/380dc542da37443099c79b4e675f8bd8_fallback.jpg ADDED
app/uploads/3ab6170095414cfaa558737e8d6b1cfc_fallback.jpg ADDED
app/uploads/43a9a75deefd4bee90103f5f5c65957a_fallback.jpg ADDED
app/uploads/4551c22d8ccb4614ba968679fc7efb10_fallback.jpg ADDED
app/uploads/48341c52bea94fb28f1ea9805952a4b1_fallback.jpg ADDED
app/uploads/48ab17d5999341a4b9dd285ed3e9b725_fallback.jpg ADDED
app/uploads/5af32e63ad214fb6953b4dea49c8368c_fallback.jpg ADDED
app/uploads/66bc109be3164dc6965c9e4191060f0e_fallback.jpg ADDED
app/uploads/71b91fd020494a3e94275f85b5e141db_fallback.jpg ADDED
app/uploads/7384a3002d4b45bfa09a929e6b17642e_fallback.jpg ADDED
app/uploads/757ef3a2d1634d328e58ed19d21e73d9_fallback.jpg ADDED
app/uploads/75a12b79967a4be592eab289968e5112_fallback.jpg ADDED
app/uploads/882c2f10490c4efc8372081d0d8e43be_fallback.jpg ADDED
app/uploads/9b99947b87a34e148ad2b3196644c34d_fallback.jpg ADDED
app/uploads/9c7c203c03de4f06a0851e13993ffe6c_fallback.jpg ADDED
app/uploads/a023c3e35d8645d49e21f2fab542c249_fallback.jpg ADDED
app/uploads/a9b6706011614e8d9bef5910da9b5ae3_fallback.jpg ADDED
app/uploads/aa28976bd67a4d0da3cc54fa5dde327a_fallback.jpg ADDED
app/uploads/b10f689ac76841adaff7d8db685e9b7d_fallback.jpg ADDED
app/uploads/b570b412fba0453d9d756502ef2e4040_fallback.jpg ADDED
app/uploads/ca1c8a06f4254c168e6db112af99fe5b_fallback.jpg ADDED
app/uploads/d51141c54e1345c3bae2ea67925c2d1e_fallback.jpg ADDED
app/uploads/d9549ce35f2e4e23945184875826a5ef_fallback.jpg ADDED
app/uploads/e10d84d682bc4eadb6e1397facfebeba_fallback.jpg ADDED
app/uploads/e1c1da5ca1bb4ef2ae260c644e3786ba_fallback.jpg ADDED
app/uploads/e321ecccfada4b9bbe797faaefd2c58a_fallback.jpg ADDED
app/uploads/ef720721f92b41918e372dd5f53b2a0f_fallback.jpg ADDED
prepare_deployment.bat ADDED
@@ -0,0 +1,48 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ @echo off
2
+ REM Deployment script for Marine Pollution Detection API
3
+ REM This script cleans up unnecessary files and prepares for deployment
4
+
5
+ echo Starting deployment preparation...
6
+
7
+ REM 1. Remove all test files
8
+ echo Removing test files...
9
+ for /r %%i in (test_*.py) do del "%%i"
10
+ if exist test_files\ rd /s /q test_files
11
+ if exist test_output\ rd /s /q test_output
12
+ if exist tests\ rd /s /q tests
13
+
14
+ REM 2. Remove unnecessary Python files
15
+ echo Removing unnecessary Python files...
16
+ if exist debug_cloudinary.py del debug_cloudinary.py
17
+ if exist create_test_user.py del create_test_user.py
18
+ if exist generate_test_incidents.py del generate_test_incidents.py
19
+ if exist list_incidents.py del list_incidents.py
20
+ if exist train_models.py del train_models.py
21
+
22
+ REM 3. Remove smaller YOLOv8 models (we only need YOLOv8x)
23
+ echo Removing smaller YOLO models...
24
+ if exist yolov8n.pt del yolov8n.pt
25
+ if exist yolov8s.pt del yolov8s.pt
26
+ if exist yolov8m.pt del yolov8m.pt
27
+ if exist yolov8l.pt del yolov8l.pt
28
+ REM Note: Keep yolov8x.pt as it's required
29
+
30
+ REM 4. Use production Dockerfile and .dockerignore
31
+ echo Setting up production Docker files...
32
+ copy Dockerfile.prod Dockerfile /Y
33
+ copy .dockerignore.prod .dockerignore /Y
34
+
35
+ REM 5. Clean up Python cache files
36
+ echo Cleaning up Python cache files...
37
+ for /d /r %%i in (__pycache__) do rd /s /q "%%i"
38
+ for /r %%i in (*.pyc *.pyo *.pyd) do del "%%i"
39
+ for /d /r %%i in (.pytest_cache) do rd /s /q "%%i"
40
+
41
+ REM 6. Keep only necessary requirements
42
+ echo Setting up production requirements...
43
+ copy requirements.txt requirements.bak /Y
44
+ REM Use specific requirements file for deployment
45
+ copy requirements-docker.txt requirements.txt /Y
46
+
47
+ echo Deployment preparation completed successfully!
48
+ echo Use 'docker build -t marine-pollution-api .' to build the production container
prepare_deployment.sh ADDED
@@ -0,0 +1,41 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #!/bin/bash
2
+ # Deployment script for Marine Pollution Detection API
3
+ # This script cleans up unnecessary files and prepares for deployment
4
+
5
+ echo "Starting deployment preparation..."
6
+
7
+ # 1. Remove all test files
8
+ echo "Removing test files..."
9
+ find . -name "test_*.py" -type f -delete
10
+ rm -rf test_files/ test_output/ tests/
11
+
12
+ # 2. Remove unnecessary Python files
13
+ echo "Removing unnecessary Python files..."
14
+ rm -f debug_cloudinary.py create_test_user.py generate_test_incidents.py list_incidents.py train_models.py
15
+
16
+ # 3. Remove smaller YOLOv8 models (we only need YOLOv8x)
17
+ echo "Removing smaller YOLO models..."
18
+ rm -f yolov8n.pt yolov8s.pt yolov8m.pt yolov8l.pt
19
+ # Note: Keep yolov8x.pt as it's required
20
+
21
+ # 4. Use production Dockerfile and .dockerignore
22
+ echo "Setting up production Docker files..."
23
+ cp Dockerfile.prod Dockerfile
24
+ cp .dockerignore.prod .dockerignore
25
+
26
+ # 5. Clean up Python cache files
27
+ echo "Cleaning up Python cache files..."
28
+ find . -name "__pycache__" -type d -exec rm -rf {} +
29
+ find . -name "*.pyc" -type f -delete
30
+ find . -name "*.pyo" -type f -delete
31
+ find . -name "*.pyd" -type f -delete
32
+ find . -name ".pytest_cache" -type d -exec rm -rf {} +
33
+
34
+ # 6. Keep only necessary requirements
35
+ echo "Setting up production requirements..."
36
+ cp requirements.txt requirements.bak
37
+ # Use specific requirements file for deployment
38
+ cp requirements-docker.txt requirements.txt
39
+
40
+ echo "Deployment preparation completed successfully!"
41
+ echo "Use 'docker build -t marine-pollution-api .' to build the production container"
requirements-version-fix.txt ADDED
@@ -0,0 +1,8 @@
 
 
 
 
 
 
 
 
 
1
+ # Successfully tested on 2025-10-16 09:05:50
2
+ torch==2.0.1+cpu
3
+ torchvision==0.15.2+cpu
4
+ ultralytics
5
+ opencv-python
6
+ cloudinary
7
+ numpy
8
+ requests
requirements.txt CHANGED
@@ -8,7 +8,11 @@ python-jose==3.3.0
8
  python-multipart==0.0.9
9
  pydantic[email]==2.9.2
10
  pydantic-settings==2.6.1
11
- python-dotenv==1.0.1
 
 
 
 
12
  cloudinary
13
 
14
  # Testing dependencies (optional for production)
 
8
  python-multipart==0.0.9
9
  pydantic[email]==2.9.2
10
  pydantic-settings==2.6.1
11
+ python-dotenv==1.0.1
+ # NOTE: +cpu wheels are not on PyPI; install with --extra-index-url https://download.pytorch.org/whl/cpu
+ torch==2.0.1+cpu
12
+ torchvision==0.15.2+cpu
13
+ ultralytics
14
+ opencv-python
15
+ requests
16
  cloudinary
17
 
18
  # Testing dependencies (optional for production)
test_beach_plastic_detection.py ADDED
@@ -0,0 +1,177 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import os
2
+ import sys
3
+ import logging
4
+ import cv2
5
+ import numpy as np
6
+ from pathlib import Path
7
+ import urllib.request
8
+
9
+ # Configure logging
10
+ logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(levelname)s - %(message)s')
11
+ logger = logging.getLogger(__name__)
12
+
13
+ # Add the app directory to the path so we can import modules
14
+ sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
15
+ from app.services.image_processing import (
16
+ detect_beach_scene, detect_water_scene, detect_plastic_bottles,
17
+ detect_plastic_bottles_in_beach, check_for_plastic_bottle
18
+ )
19
+
20
+ # Sample beach plastic images (public domain or creative commons)
21
+ SAMPLE_IMAGES = [
22
+ # Beach with plastic bottles
23
+ "https://cdn.pixabay.com/photo/2019/07/30/11/13/plastic-waste-4372436_1280.jpg",
24
+ # Beach with plastic bottles
25
+ "https://live.staticflickr.com/4499/37193114384_25b662f3b3_b.jpg",
26
+ # Plastic bottle on beach
27
+ "https://cdn.pixabay.com/photo/2019/06/15/16/28/plastic-4275696_1280.jpg"
28
+ ]
29
+
30
+ def download_sample_images():
31
+ """Download sample images for testing"""
32
+ output_dir = Path("test_files/beach_plastic")
33
+ output_dir.mkdir(parents=True, exist_ok=True)
34
+
35
+ downloaded_files = []
36
+
37
+ for i, url in enumerate(SAMPLE_IMAGES):
38
+ try:
39
+ output_path = output_dir / f"beach_plastic_{i+1}.jpg"
40
+
41
+ # Skip if already downloaded
42
+ if output_path.exists():
43
+ logger.info(f"File already exists: {output_path}")
44
+ downloaded_files.append(str(output_path))
45
+ continue
46
+
47
+ # Download the image
48
+ logger.info(f"Downloading: {url}")
49
+ urllib.request.urlretrieve(url, output_path)
50
+ downloaded_files.append(str(output_path))
51
+ logger.info(f"Downloaded to: {output_path}")
52
+ except Exception as e:
53
+ logger.error(f"Error downloading {url}: {e}")
54
+
55
+ return downloaded_files
56
+
57
+ def test_on_image(image_path):
58
+ """Test all detection functions on a single image"""
59
+ logger.info(f"Testing detection on: {image_path}")
60
+
61
+ # Read the image
62
+ img = cv2.imread(image_path)
63
+ if img is None:
64
+ logger.error(f"Could not read image: {image_path}")
65
+ return False
66
+
67
+ # Get image dimensions
68
+ height, width = img.shape[:2]
69
+ logger.info(f"Image dimensions: {width}x{height}")
70
+
71
+ # Create a copy for drawing results
72
+ img_result = img.copy()
73
+
74
+ # Convert to HSV for color-based detection
75
+ hsv = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)
76
+
77
+ # Detect scene type
78
+ is_beach = detect_beach_scene(img, hsv)
79
+ is_water = detect_water_scene(img, hsv)
80
+
81
+ scene_type = "unknown"
82
+ if is_beach and is_water:
83
+ scene_type = "coastal"
84
+ elif is_beach:
85
+ scene_type = "beach"
86
+ elif is_water:
87
+ scene_type = "water"
88
+
89
+ logger.info(f"Scene type: {scene_type}")
90
+
91
+ # Add scene type text to image
92
+ cv2.putText(img_result, f"Scene: {scene_type}", (10, 30),
93
+ cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 255), 2)
94
+
95
+ # Detect plastic bottles with both methods for comparison
96
+
97
+ # Standard detection
98
+ standard_bottles = detect_plastic_bottles(img, hsv)
99
+ logger.info(f"Standard detection found {len(standard_bottles)} bottles")
100
+
101
+ # Beach-specific detection
102
+ beach_bottles = detect_plastic_bottles_in_beach(img, hsv)
103
+ logger.info(f"Beach-specific detection found {len(beach_bottles)} bottles")
104
+
105
+ # Use the appropriate detection based on scene type
106
+ if is_beach:
107
+ bottle_detections = beach_bottles
108
+ logger.info("Using beach-specific bottle detection")
109
+ else:
110
+ bottle_detections = standard_bottles
111
+ logger.info("Using standard bottle detection")
112
+
113
+ # Draw standard detection in green
114
+ for det in standard_bottles:
115
+ x1, y1, x2, y2 = det["bbox"]
116
+ conf = det["confidence"]
117
+
118
+ # Draw green rectangle for standard detection
119
+ cv2.rectangle(img_result, (x1, y1), (x2, y2), (0, 255, 0), 1)
120
+ cv2.putText(img_result, f"Std: {conf:.2f}", (x1, y1-10),
121
+ cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255, 0), 1)
122
+
123
+ # Draw beach-specific detection in red (thicker line)
124
+ for det in beach_bottles:
125
+ x1, y1, x2, y2 = det["bbox"]
126
+ conf = det["confidence"]
127
+
128
+ # Draw red rectangle for beach-specific detection
129
+ cv2.rectangle(img_result, (x1, y1), (x2, y2), (0, 0, 255), 2)
130
+ cv2.putText(img_result, f"Beach: {conf:.2f}", (x1, y1-25),
131
+ cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 0, 255), 2)
132
+
133
+ # Save the result
134
+ output_dir = Path("test_output/beach_plastic")
135
+ output_dir.mkdir(parents=True, exist_ok=True)
136
+
137
+ base_name = os.path.basename(image_path)
138
+ output_path = output_dir / f"result_{base_name}"
139
+
140
+ cv2.imwrite(str(output_path), img_result)
141
+ logger.info(f"Result saved to: {output_path}")
142
+
143
+ return {
144
+ "scene_type": scene_type,
145
+ "standard_bottles": len(standard_bottles),
146
+ "beach_bottles": len(beach_bottles),
147
+ "output_path": str(output_path)
148
+ }
149
+
150
+ def main():
151
+ """Main function to test beach plastic bottle detection"""
152
+ # Download sample images
153
+ image_paths = download_sample_images()
154
+
155
+ if not image_paths:
156
+ logger.error("No images to test")
157
+ return
158
+
159
+ results = {}
160
+
161
+ # Process each image
162
+ for img_path in image_paths:
163
+ results[os.path.basename(img_path)] = test_on_image(img_path)
164
+
165
+ # Print summary
166
+ logger.info("\n\n--- Beach Plastic Detection Results Summary ---")
167
+ for img_file, result in results.items():
168
+ if result:
169
+ logger.info(f"{img_file}:")
170
+ logger.info(f" Scene type: {result['scene_type']}")
171
+ logger.info(f" Standard detection: {result['standard_bottles']} bottles")
172
+ logger.info(f" Beach-specific detection: {result['beach_bottles']} bottles")
173
+ logger.info(f" Output: {result['output_path']}")
174
+ logger.info("---")
175
+
176
+ if __name__ == "__main__":
177
+ main()
test_end_to_end_flow.py ADDED
@@ -0,0 +1,473 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #!/usr/bin/env python
2
+ """
3
+ End-to-end test script for Marine Pollution Detection system.
4
+ Simulates the entire flow from citizen incident reporting to cleanup.
5
+
6
+ This script mimics:
7
+ 1. Creating a test user
8
+ 2. Authenticating and getting access token
9
+ 3. Uploading test images
10
+ 4. Submitting incident reports
11
+ 5. Processing images with YOLO detection
12
+ 6. Saving annotated images locally
13
+ 7. Querying the database for results
14
+ 8. Cleanup of test data from MongoDB and Cloudinary
15
+ """
16
+
17
+ import asyncio
18
+ import os
19
+ import sys
20
+ import json
21
+ import time
22
+ import logging
23
+ import requests
24
+ import uuid
25
+ from datetime import datetime
26
+ from pathlib import Path
27
+ import shutil
28
+ from pymongo import MongoClient
29
+ import cloudinary
30
+ import cloudinary.uploader
31
+ import cloudinary.api
32
+ from pprint import pprint
33
+
34
+ # Configure logging
35
+ logging.basicConfig(
36
+ level=logging.INFO,
37
+ format="%(asctime)s - %(name)s - %(levelname)s - %(message)s",
38
+ handlers=[logging.StreamHandler(sys.stdout)]
39
+ )
40
+ logger = logging.getLogger("end_to_end_test")
41
+
42
+ # Configuration
43
+ BASE_URL = "http://localhost:8000" # FastAPI backend URL
44
+ TEST_IMAGE_DIR = Path("test_files") # Directory with test images
45
+ OUTPUT_DIR = Path("test_output") # Directory to save annotated images
46
+ MONGODB_URI = "mongodb://localhost:27017" # MongoDB connection URI
47
+ DB_NAME = "marine_pollution" # MongoDB database name
48
+
49
+ # Test user credentials
50
+ TEST_USER = {
51
+ "email": f"test_user_{uuid.uuid4().hex[:8]}@example.com",
52
+ "password": "Test@password123",
53
+ "name": "Test User"
54
+ }
55
+
56
+ # Test incident data template
57
+ TEST_INCIDENT = {
58
+ "title": "Test Marine Pollution Incident",
59
+ "description": "This is a test incident created by the end-to-end test script",
60
+ "location": {
61
+ "latitude": 19.0760, # Mumbai coast coordinates
62
+ "longitude": 72.8777
63
+ },
64
+ "severity": "medium",
65
+ "pollution_type": "plastic",
66
+ "date_observed": datetime.now().isoformat(),
67
+ "reported_by": None # Will be filled in after user creation
68
+ }
69
+
70
+ class EndToEndTest:
71
+ """Class to manage the end-to-end testing flow"""
72
+
73
+ def __init__(self):
74
+ """Initialize the test environment"""
75
+ self.access_token = None
76
+ self.user_id = None
77
+ self.test_incidents = []
78
+ self.uploaded_images = []
79
+ self.annotated_images = []
80
+ self.mongo_client = None
81
+ self.db = None
82
+
83
+ # Create output directory if it doesn't exist
84
+ OUTPUT_DIR.mkdir(exist_ok=True)
85
+
86
+ # Initialize Cloudinary (will use env vars or settings from app config)
87
+ try:
88
+ # Try to import app settings
89
+ sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
90
+ from app.config import get_settings
91
+ settings = get_settings()
92
+
93
+ # Configure Cloudinary
94
+ cloudinary.config(
95
+ cloud_name=settings.cloudinary_cloud_name,
96
+ api_key=settings.cloudinary_api_key,
97
+ api_secret=settings.cloudinary_api_secret,
98
+ secure=True
99
+ )
100
+ logger.info("Configured Cloudinary from app settings")
101
+ except Exception as e:
102
+ logger.warning(f"Could not load app settings: {e}")
103
+ logger.warning("Make sure Cloudinary env variables are set")
104
+
105
+ def connect_to_mongodb(self):
106
+ """Connect to MongoDB database"""
107
+ try:
108
+ self.mongo_client = MongoClient(MONGODB_URI)
109
+ self.db = self.mongo_client[DB_NAME]
110
+ logger.info(f"Connected to MongoDB: {MONGODB_URI}, database: {DB_NAME}")
111
+ return True
112
+ except Exception as e:
113
+ logger.error(f"Failed to connect to MongoDB: {e}")
114
+ return False
115
+
116
+ async def register_test_user(self):
117
+ """Register a test user and return the user_id"""
118
+ try:
119
+ response = requests.post(
120
+ f"{BASE_URL}/auth/register",
121
+ json=TEST_USER
122
+ )
123
+ response.raise_for_status()
124
+ user_data = response.json()
125
+ self.user_id = user_data.get("id")
126
+ logger.info(f"Created test user with ID: {self.user_id}")
127
+ return True
128
+ except Exception as e:
129
+ logger.error(f"Failed to register test user: {e}")
130
+ # Check if user already exists (409 Conflict)
131
+ if getattr(e, "response", None) and getattr(e.response, "status_code", None) == 409:
132
+ logger.info("User already exists, trying to authenticate instead")
133
+ return await self.authenticate()
134
+ return False
135
+
136
+ async def authenticate(self):
137
+ """Authenticate the test user and get access token"""
138
+ try:
139
+ response = requests.post(
140
+ f"{BASE_URL}/auth/token",
141
+ data={
142
+ "username": TEST_USER["email"],
143
+ "password": TEST_USER["password"]
144
+ },
145
+ headers={"Content-Type": "application/x-www-form-urlencoded"}
146
+ )
147
+ response.raise_for_status()
148
+ token_data = response.json()
149
+ self.access_token = token_data.get("access_token")
150
+
151
+ # Get user ID if we don't have it yet
152
+ if not self.user_id:
153
+ me_response = requests.get(
154
+ f"{BASE_URL}/auth/me",
155
+ headers={"Authorization": f"Bearer {self.access_token}"}
156
+ )
157
+ me_response.raise_for_status()
158
+ me_data = me_response.json()
159
+ self.user_id = me_data.get("id")
160
+
161
+ logger.info(f"Authenticated as user ID: {self.user_id}")
162
+ return True
163
+ except Exception as e:
164
+ logger.error(f"Failed to authenticate: {e}")
165
+ return False
166
+
167
+ async def upload_test_images(self):
168
+ """Upload test images to the system"""
169
+ try:
170
+ headers = {"Authorization": f"Bearer {self.access_token}"}
171
+
172
+ # Find all images in the test directory
173
+ image_files = list(TEST_IMAGE_DIR.glob("*.jpg")) + list(TEST_IMAGE_DIR.glob("*.png"))
174
+
175
+ if not image_files:
176
+ logger.warning(f"No test images found in {TEST_IMAGE_DIR}")
177
+
178
+ # Create a demo image if no test images are found
179
+ from PIL import Image
180
+ import numpy as np
181
+
182
+ # Create a simple synthetic image with a blue background and a black blob
183
+ img = np.zeros((300, 500, 3), dtype=np.uint8)
184
+ img[:, :] = [200, 150, 100] # Brownish water
185
+ img[100:200, 200:300] = [0, 0, 0] # Black "oil spill"
186
+
187
+ # Save the test image
188
+ TEST_IMAGE_DIR.mkdir(exist_ok=True)
189
+ demo_image_path = TEST_IMAGE_DIR / "demo_oil_spill.jpg"
190
+ Image.fromarray(img).save(demo_image_path)
191
+ image_files = [demo_image_path]
192
+ logger.info(f"Created demo test image: {demo_image_path}")
193
+
194
+ for image_path in image_files:
195
+ # Upload the image
196
+ with open(image_path, "rb") as f:
197
+ files = {"file": (image_path.name, f, "image/jpeg")}
198
+ response = requests.post(
199
+ f"{BASE_URL}/incidents/upload-image",
200
+ files=files,
201
+ headers=headers
202
+ )
203
+ response.raise_for_status()
204
+ image_data = response.json()
205
+ image_url = image_data.get("image_url")
206
+ self.uploaded_images.append({
207
+ "url": image_url,
208
+ "original_path": str(image_path),
209
+ "filename": image_path.name
210
+ })
211
+ logger.info(f"Uploaded image: {image_path.name} -> {image_url}")
212
+
213
+ logger.info(f"Uploaded {len(self.uploaded_images)} test images")
214
+ return True
215
+ except Exception as e:
216
+ logger.error(f"Failed to upload test images: {e}")
217
+ return False
218
+
219
+ async def create_test_incidents(self):
220
+ """Create test incidents using the uploaded images"""
221
+ try:
222
+ headers = {
223
+ "Authorization": f"Bearer {self.access_token}",
224
+ "Content-Type": "application/json"
225
+ }
226
+
227
+ for i, image_data in enumerate(self.uploaded_images):
228
+ # Create an incident for each uploaded image
229
+ incident_data = TEST_INCIDENT.copy()
230
+ incident_data["title"] = f"Test Incident {i+1}: {image_data['filename']}"
231
+ incident_data["image_url"] = image_data["url"]
232
+ incident_data["reported_by"] = self.user_id
233
+
234
+ # Submit the incident
235
+ response = requests.post(
236
+ f"{BASE_URL}/incidents/",
237
+ json=incident_data,
238
+ headers=headers
239
+ )
240
+ response.raise_for_status()
241
+ created_incident = response.json()
242
+ self.test_incidents.append(created_incident)
243
+ logger.info(f"Created test incident: {created_incident.get('id')} with image {image_data['filename']}")
244
+
245
+ logger.info(f"Created {len(self.test_incidents)} test incidents")
246
+ return True
247
+ except Exception as e:
248
+ logger.error(f"Failed to create test incidents: {e}")
249
+ return False
250
+
251
+ async def wait_for_detection_processing(self, timeout=60):
252
+ """
253
+ Wait for the object detection to be processed
254
+ This polls the API to check if annotated images are available
255
+ """
256
+ try:
257
+ headers = {"Authorization": f"Bearer {self.access_token}"}
258
+ start_time = time.time()
259
+ processed_count = 0
260
+
261
+ logger.info(f"Waiting for detection processing (timeout: {timeout}s)...")
262
+
263
+ while processed_count < len(self.test_incidents) and time.time() - start_time < timeout:
264
+ processed_count = 0
265
+
266
+ for incident in self.test_incidents:
267
+ incident_id = incident.get("id")
268
+ response = requests.get(
269
+ f"{BASE_URL}/incidents/{incident_id}",
270
+ headers=headers
271
+ )
272
+ response.raise_for_status()
273
+ incident_data = response.json()
274
+
275
+ # Check if detection has been processed
276
+ if incident_data.get("detection_results"):
277
+ if incident_data["detection_results"].get("annotated_image_url"):
278
+ processed_count += 1
279
+
280
+ if processed_count < len(self.test_incidents):
281
+ logger.info(f"Processed {processed_count}/{len(self.test_incidents)} incidents, waiting...")
282
+ await asyncio.sleep(2)
283
+
284
+ if processed_count < len(self.test_incidents):
285
+ logger.warning(f"Not all incidents were processed within the timeout")
286
+ logger.warning(f"Processed {processed_count}/{len(self.test_incidents)} incidents")
287
+ else:
288
+ logger.info(f"All {len(self.test_incidents)} incidents processed successfully")
289
+
290
+ return processed_count > 0
291
+ except Exception as e:
292
+ logger.error(f"Error while waiting for detection processing: {e}")
293
+ return False
294
+
295
+ async def download_annotated_images(self):
296
+ """Download annotated images from the incidents"""
297
+ try:
298
+ headers = {"Authorization": f"Bearer {self.access_token}"}
299
+
300
+ for incident in self.test_incidents:
301
+ incident_id = incident.get("id")
302
+ response = requests.get(
303
+ f"{BASE_URL}/incidents/{incident_id}",
304
+ headers=headers
305
+ )
306
+ response.raise_for_status()
307
+ incident_data = response.json()
308
+
309
+ # Check if detection results and annotated image exist
310
+ if (incident_data.get("detection_results") and
311
+ incident_data["detection_results"].get("annotated_image_url")):
312
+
313
+ # Download the annotated image
314
+ annotated_url = incident_data["detection_results"]["annotated_image_url"]
315
+ img_response = requests.get(annotated_url, stream=True)
316
+
317
+ if img_response.status_code == 200:
318
+ # Save the image locally
319
+ local_filename = f"incident_{incident_id}_annotated.jpg"
320
+ local_path = OUTPUT_DIR / local_filename
321
+
322
+ with open(local_path, "wb") as f:
323
+ for chunk in img_response.iter_content(chunk_size=8192):
324
+ f.write(chunk)
325
+
326
+ self.annotated_images.append({
327
+ "incident_id": incident_id,
328
+ "url": annotated_url,
329
+ "local_path": str(local_path),
330
+ "detection_count": incident_data["detection_results"].get("detection_count", 0)
331
+ })
332
+
333
+ logger.info(f"Downloaded annotated image for incident {incident_id}")
334
+ logger.info(f"Found {incident_data['detection_results'].get('detection_count', 0)} objects")
335
+
336
+ logger.info(f"Downloaded {len(self.annotated_images)} annotated images")
337
+ return True
338
+ except Exception as e:
339
+ logger.error(f"Failed to download annotated images: {e}")
340
+ return False
341
+
342
+ async def print_mongodb_records(self):
343
+ """Print relevant MongoDB records for verification"""
344
+ if not self.db:
345
+ if not self.connect_to_mongodb():
346
+ return False
347
+
348
+ try:
349
+ # Print incident records
350
+ logger.info("--- MongoDB Incident Records ---")
351
+ incidents = list(self.db.incidents.find({"reported_by": self.user_id}))
352
+ for incident in incidents:
353
+ logger.info(f"Incident: {incident['_id']} - {incident['title']}")
354
+ if "detection_results" in incident and incident["detection_results"]:
355
+ logger.info(f" Detection count: {incident['detection_results'].get('detection_count', 0)}")
356
+ if "detections" in incident["detection_results"]:
357
+ for det in incident["detection_results"]["detections"]:
358
+ logger.info(f" - {det.get('class')}: {det.get('confidence')}")
359
+
360
+ return True
361
+ except Exception as e:
362
+ logger.error(f"Error accessing MongoDB: {e}")
363
+ return False
364
+
365
+ async def cleanup_test_data(self, keep_local=True):
366
+ """Clean up all test data from MongoDB and Cloudinary"""
367
+ try:
368
+ if not self.db:
369
+ if not self.connect_to_mongodb():
370
+ return False
371
+
372
+ # Delete incidents from MongoDB
373
+ if self.user_id:
374
+ result = self.db.incidents.delete_many({"reported_by": self.user_id})
375
+ logger.info(f"Deleted {result.deleted_count} incidents from MongoDB")
376
+
377
+ # Delete images from Cloudinary
378
+ for image_data in self.uploaded_images + self.annotated_images:
379
+ url = image_data.get("url", "")
380
+ if "cloudinary" in url:
381
+ try:
382
+ # Extract public ID from URL
383
+ # URL format: https://res.cloudinary.com/{cloud_name}/image/upload/{transformations}/{public_id}.{format}
384
+ parts = url.split("/")
385
+ public_id_with_ext = parts[-1]
386
+ public_id = public_id_with_ext.split(".")[0]
387
+
388
+ # Delete from Cloudinary
389
+ result = cloudinary.uploader.destroy(public_id)
390
+ logger.info(f"Deleted image from Cloudinary: {public_id}, result: {result}")
391
+ except Exception as e:
392
+ logger.warning(f"Could not delete Cloudinary image {url}: {e}")
393
+
394
+ # Optionally delete local files
395
+ if not keep_local:
396
+ # Clean up the output directory
397
+ for file_path in OUTPUT_DIR.glob("*"):
398
+ if file_path.is_file():
399
+ file_path.unlink()
400
+
401
+ # Clean up demo test images if we created them
402
+ demo_image = TEST_IMAGE_DIR / "demo_oil_spill.jpg"
403
+ if demo_image.exists():
404
+ demo_image.unlink()
405
+
406
+ logger.info("Test data cleanup completed")
407
+ return True
408
+ except Exception as e:
409
+ logger.error(f"Error during cleanup: {e}")
410
+ return False
411
+
412
+ async def run_test(self):
413
+ """Run the complete end-to-end test flow"""
414
+ logger.info("=== Starting End-to-End Test Flow ===")
415
+
416
+ # Step 1: Register test user (or authenticate if exists)
417
+ if not await self.register_test_user():
418
+ if not await self.authenticate():
419
+ logger.error("Failed to register or authenticate test user")
420
+ return False
421
+
422
+ # Step 2: Upload test images
423
+ if not await self.upload_test_images():
424
+ logger.error("Failed to upload test images")
425
+ return False
426
+
427
+ # Step 3: Create test incidents
428
+ if not await self.create_test_incidents():
429
+ logger.error("Failed to create test incidents")
430
+ return False
431
+
432
+ # Step 4: Wait for detection processing
433
+ if not await self.wait_for_detection_processing():
434
+ logger.warning("Detection processing may not be complete")
435
+ # Continue anyway as some may be processed
436
+
437
+ # Step 5: Download annotated images
438
+ if not await self.download_annotated_images():
439
+ logger.error("Failed to download annotated images")
440
+ return False
441
+
442
+ # Step 6: Print MongoDB records
443
+ await self.print_mongodb_records()
444
+
445
+ logger.info("=== End-to-End Test Flow Completed Successfully ===")
446
+ logger.info(f"Test user ID: {self.user_id}")
447
+ logger.info(f"Created {len(self.test_incidents)} test incidents")
448
+ logger.info(f"Processed {len(self.annotated_images)} annotated images")
449
+ logger.info(f"Annotated images saved to: {OUTPUT_DIR}")
450
+
451
+ return True
452
+
453
async def main():
    """Main entry point for the script.

    Returns:
        int: Process exit code — 0 on success, 1 on failure.
    """
    test = EndToEndTest()
    success = await test.run_test()

    # Ask whether to clean up test data. A closed/absent stdin (e.g. when run
    # non-interactively in CI) raises EOFError — treat that as "no" instead of
    # crashing after an otherwise successful run.
    try:
        cleanup = input("Do you want to clean up test data? (y/n): ").lower().strip() == "y"
    except EOFError:
        cleanup = False

    if cleanup:
        await test.cleanup_test_data()
        logger.info("Test data cleaned up")
    else:
        logger.info("Test data preserved for inspection")

    return 0 if success else 1
467
+
468
if __name__ == "__main__":
    try:
        # Propagate main()'s return value as the process exit code.
        sys.exit(asyncio.run(main()))
    except KeyboardInterrupt:
        # 130 is the conventional exit code for termination by SIGINT.
        print("\nTest interrupted by user")
        sys.exit(130)
test_enhanced_detection.py ADDED
@@ -0,0 +1,149 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import os
2
+ import sys
3
+ import logging
4
+ import cv2
5
+ import numpy as np
6
+ from pathlib import Path
7
+
8
+ # Configure logging
9
+ logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(levelname)s - %(message)s')
10
+ logger = logging.getLogger(__name__)
11
+
12
+ # Add the app directory to the path so we can import modules
13
+ sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
14
+ from app.services.image_processing import (
15
+ detect_beach_scene, detect_water_scene, detect_plastic_bottles,
16
+ detect_plastic_bottles_in_beach, detect_ships, check_for_plastic_bottle,
17
+ check_for_ship, check_for_plastic_waste
18
+ )
19
+
20
def _draw_detections(img, detections, label, color):
    """Draw labelled bounding boxes for a list of detections onto *img* in place.

    Args:
        img: BGR image (numpy array) to annotate.
        detections: iterable of dicts with "bbox" (x1, y1, x2, y2) and "confidence".
        label: text prefix drawn above each box (e.g. "Bottle", "Ship").
        color: BGR tuple used for both rectangle and text.
    """
    for det in detections:
        x1, y1, x2, y2 = det["bbox"]
        conf = det["confidence"]
        cv2.rectangle(img, (x1, y1), (x2, y2), color, 2)
        cv2.putText(img, f"{label}: {conf:.2f}", (x1, y1 - 10),
                    cv2.FONT_HERSHEY_SIMPLEX, 0.5, color, 2)


def test_on_image(image_path):
    """Test all detection functions on a single image.

    Classifies the scene (beach/water/coastal/unknown), runs the matching
    bottle detector (plus ship detection on water scenes), draws all
    detections on a copy of the image and saves it under
    ``test_output/enhanced_detection``.

    Args:
        image_path: Path to the image file to analyse.

    Returns:
        dict with keys ``scene_type``, ``bottle_detections``,
        ``ship_detections`` and ``output_path`` on success, or False if the
        image could not be read.
    """
    logger.info(f"Testing detection on: {image_path}")

    # Read the image
    img = cv2.imread(image_path)
    if img is None:
        logger.error(f"Could not read image: {image_path}")
        return False

    # Get image dimensions
    height, width = img.shape[:2]
    logger.info(f"Image dimensions: {width}x{height}")

    # Work on a copy so the source pixels stay untouched for the detectors.
    img_result = img.copy()

    # Convert to HSV for color-based detection
    hsv = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)

    # Detect scene type
    is_beach = detect_beach_scene(img, hsv)
    is_water = detect_water_scene(img, hsv)

    if is_beach and is_water:
        scene_type = "coastal"
    elif is_beach:
        scene_type = "beach"
    elif is_water:
        scene_type = "water"
    else:
        scene_type = "unknown"

    logger.info(f"Scene type: {scene_type}")

    # Add scene type text to image
    cv2.putText(img_result, f"Scene: {scene_type}", (10, 30),
                cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 255), 2)

    # Beach scenes get the specialised bottle detector.
    if is_beach:
        logger.info("Using beach-specific bottle detection")
        bottle_detections = detect_plastic_bottles_in_beach(img, hsv)
    else:
        logger.info("Using standard bottle detection")
        bottle_detections = detect_plastic_bottles(img, hsv)

    logger.info(f"Detected {len(bottle_detections)} potential plastic bottles")

    # Red boxes for bottles.
    _draw_detections(img_result, bottle_detections, "Bottle", (0, 0, 255))

    # Detect ships only in water scenes.
    ship_detections = []
    if is_water:
        logger.info("Detecting ships in water scene")
        ship_detections = detect_ships(img, hsv)
        logger.info(f"Detected {len(ship_detections)} potential ships")

        # Blue boxes for ships.
        _draw_detections(img_result, ship_detections, "Ship", (255, 0, 0))

    # Save the annotated result.
    output_dir = Path("test_output/enhanced_detection")
    output_dir.mkdir(parents=True, exist_ok=True)

    base_name = os.path.basename(image_path)
    output_path = output_dir / f"result_{base_name}"

    cv2.imwrite(str(output_path), img_result)
    logger.info(f"Result saved to: {output_path}")

    return {
        "scene_type": scene_type,
        "bottle_detections": len(bottle_detections),
        "ship_detections": len(ship_detections),
        "output_path": str(output_path),
    }
112
+
113
def main():
    """Run enhanced detection on every sample image in ``test_files``.

    Processes each image with :func:`test_on_image` and logs a per-image
    summary of scene type and detection counts.
    """
    # Test directory
    test_dir = "test_files"

    # Check if test directory exists
    if not os.path.isdir(test_dir):
        logger.error(f"Test directory not found: {test_dir}")
        return

    # os.listdir order is filesystem-dependent; sort so the processing and
    # summary order is deterministic across runs and platforms.
    image_files = sorted(
        f for f in os.listdir(test_dir)
        if f.lower().endswith(('.png', '.jpg', '.jpeg'))
    )

    if not image_files:
        logger.error(f"No image files found in {test_dir}")
        return

    results = {}

    # Process each image
    for img_file in image_files:
        img_path = os.path.join(test_dir, img_file)
        results[img_file] = test_on_image(img_path)

    # Print summary (skip images that failed to load — their result is False).
    logger.info("\n\n--- Detection Results Summary ---")
    for img_file, result in results.items():
        if result:
            logger.info(f"{img_file}:")
            logger.info(f"  Scene type: {result['scene_type']}")
            logger.info(f"  Plastic bottles: {result['bottle_detections']}")
            logger.info(f"  Ships: {result['ship_detections']}")
            logger.info(f"  Output: {result['output_path']}")
            logger.info("---")
148
if __name__ == "__main__":
    # Allow running this module directly as a script.
    main()