TheDeepDas committed on
Commit
6c9c901
·
1 Parent(s): 1de8f23
This view is limited to 50 files because it contains too many changes. See raw diff
Files changed (50) hide show
  1. .dockerignore +66 -0
  2. .env +3 -0
  3. Dockerfile +42 -0
  4. HUGGINGFACE_DEPLOYMENT.md +83 -0
  5. Procfile +1 -0
  6. Procfile.railway +2 -0
  7. README.md +5 -7
  8. app/__init__.py +0 -0
  9. app/__pycache__/__init__.cpython-311.pyc +0 -0
  10. app/__pycache__/__init__.cpython-39.pyc +0 -0
  11. app/__pycache__/config.cpython-311.pyc +0 -0
  12. app/__pycache__/config.cpython-39.pyc +0 -0
  13. app/__pycache__/core_security.cpython-311.pyc +0 -0
  14. app/__pycache__/database.cpython-311.pyc +0 -0
  15. app/__pycache__/dependencies.cpython-311.pyc +0 -0
  16. app/__pycache__/main.cpython-311.pyc +0 -0
  17. app/__pycache__/main.cpython-39.pyc +0 -0
  18. app/__pycache__/schemas.cpython-311.pyc +0 -0
  19. app/config.py +27 -0
  20. app/core_security.py +27 -0
  21. app/database.py +55 -0
  22. app/dependencies.py +28 -0
  23. app/main.py +89 -0
  24. app/routers/__init__.py +0 -0
  25. app/routers/__pycache__/__init__.cpython-311.pyc +0 -0
  26. app/routers/__pycache__/auth.cpython-311.pyc +0 -0
  27. app/routers/__pycache__/incidents.cpython-311.pyc +0 -0
  28. app/routers/auth.py +69 -0
  29. app/routers/incidents.py +113 -0
  30. app/schemas.py +56 -0
  31. app/services/__init__.py +0 -0
  32. app/services/fallback_storage.py +0 -0
  33. app/services/incidents.py +32 -0
  34. app/services/ml_model.py +158 -0
  35. app/services/ml_model_training.py +277 -0
  36. app/services/nlp.py +105 -0
  37. app/services/users.py +53 -0
  38. eda.ipynb +265 -0
  39. incidents.csv +0 -0
  40. models/severity_model.pkl +3 -0
  41. models/threat_model.pkl +3 -0
  42. requirements-docker.txt +21 -0
  43. requirements-railway-light.txt +13 -0
  44. requirements-railway.txt +22 -0
  45. requirements-training.txt +15 -0
  46. requirements.txt +22 -0
  47. start-hf.sh +16 -0
  48. start.sh +13 -0
  49. tests/__pycache__/conftest.cpython-311-pytest-8.3.3.pyc +0 -0
  50. tests/__pycache__/test_auth.cpython-311-pytest-8.3.3.pyc +0 -0
.dockerignore ADDED
@@ -0,0 +1,66 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Python
2
+ __pycache__/
3
+ *.py[cod]
4
+ *$py.class
5
+ *.so
6
+ .Python
7
+ build/
8
+ develop-eggs/
9
+ dist/
10
+ downloads/
11
+ eggs/
12
+ .eggs/
13
+ lib/
14
+ lib64/
15
+ parts/
16
+ sdist/
17
+ var/
18
+ wheels/
19
+ *.egg-info/
20
+ .installed.cfg
21
+ *.egg
22
+
23
+ # Virtual environments
24
+ venv/
25
+ env/
26
+ ENV/
27
+
28
+ # IDE
29
+ .vscode/
30
+ .idea/
31
+ *.swp
32
+ *.swo
33
+
34
+ # OS
35
+ .DS_Store
36
+ Thumbs.db
37
+
38
+ # Git
39
+ .git/
40
+ .gitignore
41
+
42
+ # Documentation
43
+ README.md
44
+ *.md
45
+
46
+ # Test files
47
+ tests/
48
+ test_*.py
49
+ *_test.py
50
+
51
+ # Jupyter notebooks
52
+ *.ipynb
53
+ .ipynb_checkpoints/
54
+
55
+ # Environment files (will be set in Hugging Face Spaces)
56
+ .env
57
+ .env.local
58
+ .env.production
59
+
60
+ # Logs
61
+ logs/
62
+ *.log
63
+
64
+ # Temporary files
65
+ tmp/
66
+ temp/
.env ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ JWT_SECRET_KEY=change_this_secret_in_production
2
+ MONGODB_URI=mongodb+srv://<db_user>:<db_password>@cluster0.0u1vpow.mongodb.net/?retryWrites=true&w=majority&appName=Cluster0
3
+ ALLOWED_ORIGINS=http://localhost:3000,http://localhost:5173,http://127.0.0.1:3000,http://127.0.0.1:5173,http://localhost:8080,https://marine-pollution-detection.onrender.com,https://marine-pollution-detection-production.up.railway.app,https://marine-pollution-detection.vercel.app
Dockerfile ADDED
@@ -0,0 +1,42 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Use Python 3.11 slim image for better performance
2
+ FROM python:3.11-slim
3
+
4
+ # Set working directory
5
+ WORKDIR /app
6
+
7
+ # Set environment variables
8
+ ENV PYTHONDONTWRITEBYTECODE=1
9
+ ENV PYTHONUNBUFFERED=1
10
+ ENV PORT=7860
11
+
12
+ # Install system dependencies
13
+ RUN apt-get update && apt-get install -y \
14
+ build-essential \
15
+ curl \
16
+ && rm -rf /var/lib/apt/lists/*
17
+
18
+ # Copy requirements first for better caching
19
+ COPY requirements-docker.txt requirements.txt
20
+
21
+ # Install Python dependencies
22
+ RUN pip install --no-cache-dir --upgrade pip
23
+ RUN pip install --no-cache-dir -r requirements.txt
24
+
25
+ # Copy the application code
26
+ COPY . .
27
+
28
+ # Make startup script executable
29
+ RUN chmod +x start-hf.sh
30
+
31
+ # Create models directory if it doesn't exist
32
+ RUN mkdir -p models
33
+
34
+ # Expose port 7860 (Hugging Face Spaces default)
35
+ EXPOSE 7860
36
+
37
+ # Health check
38
+ HEALTHCHECK --interval=30s --timeout=10s --start-period=5s --retries=3 \
39
+ CMD curl -f http://localhost:7860/health || exit 1
40
+
41
+ # Command to run the application
42
+ CMD ["./start-hf.sh"]
HUGGINGFACE_DEPLOYMENT.md ADDED
@@ -0,0 +1,83 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # 🚀 Hugging Face Spaces Deployment Guide
2
+
3
+ ## πŸ“ Files Created for Docker Deployment:
4
+ - `Dockerfile` - Main Docker configuration
5
+ - `requirements-docker.txt` - Optimized dependencies for Docker
6
+ - `.dockerignore` - Excludes unnecessary files from build
7
+ - `start-hf.sh` - Startup script for Hugging Face Spaces
8
+ - `README.md` - Hugging Face Spaces metadata
9
+
10
+ ## 🔧 Deployment Steps:
11
+
12
+ ### 1. **Create New Hugging Face Space**
13
+ 1. Go to [Hugging Face Spaces](https://huggingface.co/spaces)
14
+ 2. Click "Create new Space"
15
+ 3. Choose:
16
+ - **Space name**: `marine-guard-api`
17
+ - **License**: MIT
18
+ - **SDK**: Docker
19
+ - **Hardware**: CPU basic (free tier)
20
+
21
+ ### 2. **Upload Backend Files**
22
+ Upload these files to your Hugging Face Space:
23
+ ```
24
+ Dockerfile
25
+ requirements-docker.txt
26
+ start-hf.sh
27
+ README.md
28
+ app/
29
+ ├── __init__.py
30
+ ├── main.py
31
+ ├── config.py
32
+ ├── database.py
33
+ ├── dependencies.py
34
+ ├── schemas.py
35
+ ├── core_security.py
36
+ ├── routers/
37
+ │ ├── __init__.py
38
+ │ ├── auth.py
39
+ │ └── incidents.py
40
+ └── services/
41
+ ├── __init__.py
42
+ ├── incidents.py
43
+ ├── ml_model.py
44
+ ├── nlp.py
45
+ └── users.py
46
+ models/
47
+ ├── threat_model.pkl
48
+ └── severity_model.pkl
49
+ ```
50
+
51
+ ### 3. **Set Environment Variables in Hugging Face**
52
+ In your Space settings, add these environment variables:
53
+ ```
54
+ MONGODB_URI=mongodb+srv://<db_user>:<db_password>@cluster0.0u1vpow.mongodb.net/?retryWrites=true&w=majority&appName=Cluster0
55
+ JWT_SECRET_KEY=your-secure-secret-key-here
56
+ ALLOWED_ORIGINS=https://marine-pollution-detection.vercel.app,http://localhost:3000
57
+ ```
58
+
59
+ ### 4. **Update Frontend Configuration**
60
+ Once deployed, update your frontend `.env` file:
61
+ ```
62
+ VITE_API_BASE_URL=https://your-username-marine-guard-api.hf.space/api
63
+ ```
64
+
65
+ ## 🎯 Expected URLs:
66
+ - **API Base**: `https://your-username-marine-guard-api.hf.space`
67
+ - **Health Check**: `https://your-username-marine-guard-api.hf.space/health`
68
+ - **Auth Endpoint**: `https://your-username-marine-guard-api.hf.space/api/auth/login`
69
+
70
+ ## πŸ” Troubleshooting:
71
+ - Check the **Logs** tab in your Hugging Face Space for any errors
72
+ - Ensure all environment variables are set correctly
73
+ - The space will take 2-3 minutes to build and start
74
+ - Models will be loaded automatically on startup
75
+
76
+ ## 📊 Features Included:
77
+ - ✅ FastAPI backend with all endpoints
78
+ - ✅ MongoDB Atlas connection
79
+ - ✅ JWT authentication
80
+ - ✅ ML model inference (threat & severity classification)
81
+ - ✅ CORS configured for Vercel frontend
82
+ - ✅ Health check endpoint
83
+ - ✅ Automatic model loading on startup
Procfile ADDED
@@ -0,0 +1 @@
 
 
1
+ web: uvicorn app.main:app --host 0.0.0.0 --port $PORT
Procfile.railway ADDED
@@ -0,0 +1,2 @@
 
 
 
1
+ # Procfile for Railway - lightweight deployment without ML
2
+ web: pip install -r requirements-railway-light.txt && uvicorn app.main:app --host 0.0.0.0 --port $PORT
README.md CHANGED
@@ -1,10 +1,8 @@
1
- ---
2
- title: Sanjay
3
- emoji: 🐠
4
- colorFrom: green
5
  colorTo: green
6
  sdk: docker
7
  pinned: false
8
- ---
9
-
10
- Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
 
1
+ title: Marine Guard API
2
+ emoji: 🌊
3
+ colorFrom: blue
 
4
  colorTo: green
5
  sdk: docker
6
  pinned: false
7
+ license: mit
8
+ app_port: 7860
 
app/__init__.py ADDED
File without changes
app/__pycache__/__init__.cpython-311.pyc ADDED
Binary file (142 Bytes). View file
 
app/__pycache__/__init__.cpython-39.pyc ADDED
Binary file (124 Bytes). View file
 
app/__pycache__/config.cpython-311.pyc ADDED
Binary file (1.5 kB). View file
 
app/__pycache__/config.cpython-39.pyc ADDED
Binary file (1.07 kB). View file
 
app/__pycache__/core_security.cpython-311.pyc ADDED
Binary file (1.84 kB). View file
 
app/__pycache__/database.cpython-311.pyc ADDED
Binary file (2.49 kB). View file
 
app/__pycache__/dependencies.cpython-311.pyc ADDED
Binary file (1.82 kB). View file
 
app/__pycache__/main.cpython-311.pyc ADDED
Binary file (4.3 kB). View file
 
app/__pycache__/main.cpython-39.pyc ADDED
Binary file (1.28 kB). View file
 
app/__pycache__/schemas.cpython-311.pyc ADDED
Binary file (3.25 kB). View file
 
app/config.py ADDED
@@ -0,0 +1,27 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from functools import lru_cache
2
+ from typing import List, Union
3
+ import os
4
+
5
+ from pydantic_settings import BaseSettings
6
+
7
+
8
+ class Settings(BaseSettings):
9
+ app_name: str = "Marine Guard Backend"
10
+ mongodb_uri: str
11
+ database_name: str = "marine_guard"
12
+ jwt_secret_key: str
13
+ jwt_algorithm: str = "HS256"
14
+ access_token_expire_minutes: int = 60 * 24
15
+ allowed_origins: Union[str, List[str]] = "http://localhost:5173"
16
+
17
+ # Hugging Face Spaces specific settings
18
+ space_id: str = os.getenv("SPACE_ID", "")
19
+
20
+ class Config:
21
+ env_file = ".env"
22
+ extra = "allow" # Allow extra fields for Hugging Face environment variables
23
+
24
+
25
+ @lru_cache
26
+ def get_settings() -> Settings:
27
+ return Settings()
app/core_security.py ADDED
@@ -0,0 +1,27 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from datetime import datetime, timedelta, timezone
2
+ from typing import Optional
3
+
4
+ from jose import jwt
5
+ from passlib.context import CryptContext
6
+
7
+ from .config import get_settings
8
+
9
+ pwd_context = CryptContext(schemes=["bcrypt"], deprecated="auto")
10
+
11
+
12
+ def verify_password(plain_password: str, hashed_password: str) -> bool:
13
+ return pwd_context.verify(plain_password, hashed_password)
14
+
15
+
16
+ def get_password_hash(password: str) -> str:
17
+ return pwd_context.hash(password)
18
+
19
+
20
+ def create_access_token(subject: str, expires_delta: Optional[timedelta] = None) -> str:
21
+ settings = get_settings()
22
+ if expires_delta is None:
23
+ expires_delta = timedelta(minutes=settings.access_token_expire_minutes)
24
+
25
+ expire = datetime.now(timezone.utc) + expires_delta
26
+ to_encode = {"sub": subject, "exp": expire}
27
+ return jwt.encode(to_encode, settings.jwt_secret_key, algorithm=settings.jwt_algorithm)
app/database.py ADDED
@@ -0,0 +1,55 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from motor.motor_asyncio import AsyncIOMotorClient, AsyncIOMotorDatabase
2
+ from typing import Optional
3
+ import logging
4
+
5
+ from .config import get_settings
6
+
7
+ logger = logging.getLogger(__name__)
8
+
9
+ _client: Optional[AsyncIOMotorClient] = None
10
+ _db: Optional[AsyncIOMotorDatabase] = None
11
+ _connection_healthy = False
12
+
13
+
14
+ async def test_connection() -> bool:
15
+ """Test if the database connection is healthy"""
16
+ global _connection_healthy
17
+ try:
18
+ client = get_client()
19
+ # Try to ping the database
20
+ await client.admin.command('ping')
21
+ _connection_healthy = True
22
+ logger.info("Database connection established successfully")
23
+ return True
24
+ except Exception as e:
25
+ _connection_healthy = False
26
+ logger.warning(f"Database connection failed: {e}")
27
+ return False
28
+
29
+
30
+ def get_client() -> AsyncIOMotorClient:
31
+ global _client
32
+ if _client is None:
33
+ settings = get_settings()
34
+ # Use the MongoDB URI exactly as provided, without additional parameters
35
+ _client = AsyncIOMotorClient(settings.mongodb_uri)
36
+ return _client
37
+
38
+
39
+ def get_database() -> AsyncIOMotorDatabase:
40
+ global _db
41
+ if _db is None:
42
+ client = get_client()
43
+ settings = get_settings()
44
+ _db = client[settings.database_name]
45
+ return _db
46
+
47
+
48
+ def get_collection(name: str):
49
+ db = get_database()
50
+ return db[name]
51
+
52
+
53
+ def is_database_available() -> bool:
54
+ """Check if database is available for operations"""
55
+ return _connection_healthy
app/dependencies.py ADDED
@@ -0,0 +1,28 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from fastapi import Depends, HTTPException, status
2
+ from fastapi.security import HTTPAuthorizationCredentials, HTTPBearer
3
+ from jose import JWTError, jwt
4
+
5
+ from .config import get_settings
6
+ from .services.users import get_user_by_id, serialize_user
7
+
8
+ security = HTTPBearer()
9
+
10
+
11
+ async def get_current_user(credentials: HTTPAuthorizationCredentials = Depends(security)):
12
+ token = credentials.credentials
13
+ settings = get_settings()
14
+
15
+ try:
16
+ payload = jwt.decode(token, settings.jwt_secret_key, algorithms=[settings.jwt_algorithm])
17
+ except JWTError as exc: # pragma: no cover - error path
18
+ raise HTTPException(status_code=status.HTTP_401_UNAUTHORIZED, detail="Could not validate credentials") from exc
19
+
20
+ user_id: str = payload.get("sub")
21
+ if user_id is None:
22
+ raise HTTPException(status_code=status.HTTP_401_UNAUTHORIZED, detail="Invalid token payload")
23
+
24
+ user_doc = await get_user_by_id(user_id)
25
+ if user_doc is None:
26
+ raise HTTPException(status_code=status.HTTP_401_UNAUTHORIZED, detail="User not found")
27
+
28
+ return serialize_user(user_doc)
app/main.py ADDED
@@ -0,0 +1,89 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import logging
2
+ from contextlib import asynccontextmanager
3
+
4
+ from fastapi import FastAPI
5
+ from fastapi.middleware.cors import CORSMiddleware
6
+
7
+ from .config import get_settings
8
+ from .database import get_collection, test_connection
9
+ from .routers import auth, incidents
10
+
11
+ logger = logging.getLogger(__name__)
12
+
13
+
14
+ async def setup_database_indexes():
15
+ """Set up database indexes with error handling."""
16
+ try:
17
+ # Test connection first
18
+ connection_ok = await test_connection()
19
+ if not connection_ok:
20
+ logger.warning("Database connection failed - skipping index creation")
21
+ return
22
+
23
+ # Create indexes
24
+ users = get_collection("users")
25
+ incidents_collection = get_collection("incidents")
26
+
27
+ await users.create_index("email", unique=True)
28
+ await incidents_collection.create_index("created_at")
29
+ logger.info("Database indexes created successfully")
30
+ except Exception as e:
31
+ logger.warning(f"Database setup failed: {e}")
32
+ logger.warning("Application will continue without database indexes")
33
+
34
+
35
+ @asynccontextmanager
36
+ async def lifespan(app: FastAPI):
37
+ # Startup
38
+ await setup_database_indexes()
39
+ yield
40
+ # Shutdown (if needed in future)
41
+
42
+
43
+ settings = get_settings()
44
+
45
+ app = FastAPI(title=settings.app_name, lifespan=lifespan)
46
+
47
+ allowed_origins = settings.allowed_origins
48
+ if isinstance(allowed_origins, str):
49
+ allowed_origins = [origin.strip() for origin in allowed_origins.split(",") if origin.strip()]
50
+
51
+ # Ensure we have the allowed origins for development and production
52
+ if not allowed_origins:
53
+ allowed_origins = ["*"] # Fallback to allow all if not configured
54
+
55
+ app.add_middleware(
56
+ CORSMiddleware,
57
+ allow_origins=allowed_origins,
58
+ allow_credentials=True,
59
+ allow_methods=["GET", "POST", "PUT", "DELETE", "OPTIONS"],
60
+ allow_headers=["*"],
61
+ expose_headers=["*"],
62
+ )
63
+
64
+ # Debug: Log the allowed origins in startup
65
+ logger.info(f"CORS allowed origins: {allowed_origins}")
66
+
67
+
68
+ @app.get("/health")
69
+ async def health_check():
70
+ connection_ok = await test_connection()
71
+ if connection_ok:
72
+ return {"status": "ok", "database": "connected"}
73
+ else:
74
+ return {"status": "degraded", "database": "disconnected"}
75
+
76
+
77
+ @app.get("/")
78
+ async def root():
79
+ return {"message": "Marine Guard API", "status": "running"}
80
+
81
+
82
+ @app.options("/{path:path}")
83
+ async def options_handler(path: str):
84
+ """Handle CORS preflight requests."""
85
+ return {"message": "OK"}
86
+
87
+
88
+ app.include_router(auth.router, prefix="/api")
89
+ app.include_router(incidents.router, prefix="/api")
app/routers/__init__.py ADDED
File without changes
app/routers/__pycache__/__init__.cpython-311.pyc ADDED
Binary file (150 Bytes). View file
 
app/routers/__pycache__/auth.cpython-311.pyc ADDED
Binary file (4.01 kB). View file
 
app/routers/__pycache__/incidents.cpython-311.pyc ADDED
Binary file (4.73 kB). View file
 
app/routers/auth.py ADDED
@@ -0,0 +1,69 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from fastapi import APIRouter, Depends, HTTPException, status
2
+
3
+ from ..core_security import create_access_token, get_password_hash, verify_password
4
+ from ..schemas import TokenResponse, UserCreate, UserInDB, UserLogin
5
+ from ..services.users import create_user, get_user_by_email, serialize_user
6
+ from ..dependencies import get_current_user
7
+
8
+ router = APIRouter(prefix="/auth", tags=["auth"])
9
+
10
+
11
+ @router.post("/signup", response_model=TokenResponse)
12
+ async def signup(payload: UserCreate):
13
+ try:
14
+ existing_user = await get_user_by_email(payload.email)
15
+ if existing_user:
16
+ raise HTTPException(status_code=status.HTTP_400_BAD_REQUEST, detail="Email already registered")
17
+
18
+ user_doc = await create_user(
19
+ {
20
+ "email": payload.email,
21
+ "display_name": payload.display_name,
22
+ "organization": payload.organization,
23
+ "role": payload.role,
24
+ "password_hash": get_password_hash(payload.password),
25
+ }
26
+ )
27
+
28
+ user_data = serialize_user(user_doc)
29
+ token = create_access_token(user_data["id"])
30
+
31
+ return TokenResponse(access_token=token, user=UserInDB.model_validate(user_data))
32
+ except HTTPException:
33
+ raise
34
+ except Exception as e:
35
+ raise HTTPException(
36
+ status_code=status.HTTP_503_SERVICE_UNAVAILABLE,
37
+ detail="Database service temporarily unavailable"
38
+ )
39
+
40
+
41
+ @router.post("/login", response_model=TokenResponse)
42
+ async def login(payload: UserLogin):
43
+ try:
44
+ user_doc = await get_user_by_email(payload.email)
45
+ if not user_doc or not verify_password(payload.password, user_doc.get("password_hash", "")):
46
+ raise HTTPException(status_code=status.HTTP_401_UNAUTHORIZED, detail="Incorrect email or password")
47
+
48
+ user_data = serialize_user(user_doc)
49
+ token = create_access_token(user_data["id"])
50
+
51
+ return TokenResponse(access_token=token, user=UserInDB.model_validate(user_data))
52
+ except HTTPException:
53
+ raise
54
+ except Exception as e:
55
+ raise HTTPException(
56
+ status_code=status.HTTP_503_SERVICE_UNAVAILABLE,
57
+ detail="Database service temporarily unavailable"
58
+ )
59
+
60
+
61
+ @router.get("/me", response_model=UserInDB)
62
+ async def get_me(current_user: dict = Depends(get_current_user)):
63
+ try:
64
+ return UserInDB.model_validate(current_user)
65
+ except Exception as e:
66
+ raise HTTPException(
67
+ status_code=status.HTTP_503_SERVICE_UNAVAILABLE,
68
+ detail="Database service temporarily unavailable"
69
+ )
app/routers/incidents.py ADDED
@@ -0,0 +1,113 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from datetime import datetime
2
+
3
+ from fastapi import APIRouter, Depends, File, Form, HTTPException, status, UploadFile
4
+
5
+ from ..dependencies import get_current_user
6
+ from ..schemas import IncidentResponse
7
+ from ..services.incidents import save_incident_document, store_image
8
+ from ..services.nlp import classify_incident, get_model_info
9
+ from ..database import is_database_available
10
+
11
+ router = APIRouter(prefix="/incidents", tags=["incidents"])
12
+
13
+
14
+ @router.post("/classify", response_model=IncidentResponse)
15
+ async def classify_incident_report(
16
+ description: str = Form(...),
17
+ latitude: float = Form(...),
18
+ longitude: float = Form(...),
19
+ name: str = Form(""),
20
+ image: UploadFile | None = File(None),
21
+ current_user=Depends(get_current_user),
22
+ ):
23
+ if not is_database_available():
24
+ raise HTTPException(
25
+ status_code=status.HTTP_503_SERVICE_UNAVAILABLE,
26
+ detail="Database service is currently unavailable. Please try again later."
27
+ )
28
+
29
+ try:
30
+ # Use the ML model for classification
31
+ classification_result = classify_incident(description, name)
32
+
33
+ if isinstance(classification_result, dict):
34
+ # ML model returned detailed results with confidence
35
+ incident_class = classification_result['threat']
36
+ severity = classification_result['severity']
37
+ confidence_scores = {
38
+ 'threat_confidence': classification_result.get('threat_confidence'),
39
+ 'severity_confidence': classification_result.get('severity_confidence')
40
+ }
41
+ else:
42
+ # Fallback classification returned simple tuple
43
+ incident_class, severity = classification_result
44
+ confidence_scores = None
45
+
46
+ image_path = await store_image(image)
47
+
48
+ document = {
49
+ "name": name,
50
+ "description": description,
51
+ "latitude": latitude,
52
+ "longitude": longitude,
53
+ "incident_class": incident_class,
54
+ "severity": severity,
55
+ "reporter_id": current_user["id"],
56
+ "image_path": image_path,
57
+ "created_at": datetime.utcnow(),
58
+ }
59
+
60
+ saved = await save_incident_document(document)
61
+
62
+ return IncidentResponse(
63
+ incident_class=incident_class,
64
+ severity=severity,
65
+ incident_id=str(saved["_id"]),
66
+ confidence_scores=confidence_scores,
67
+ )
68
+ except HTTPException:
69
+ raise
70
+ except Exception as e:
71
+ raise HTTPException(
72
+ status_code=status.HTTP_500_INTERNAL_SERVER_ERROR,
73
+ detail="Failed to process incident report"
74
+ )
75
+
76
+
77
+ @router.get("/model-info")
78
+ async def get_classification_model_info():
79
+ """Get information about the current classification model"""
80
+ return get_model_info()
81
+
82
+
83
+ @router.post("/test-classify")
84
+ async def test_classification(
85
+ description: str = Form(...),
86
+ name: str = Form(""),
87
+ ):
88
+ """Test endpoint for classification without saving to database"""
89
+ try:
90
+ classification_result = classify_incident(description, name)
91
+
92
+ if isinstance(classification_result, dict):
93
+ return {
94
+ "threat": classification_result['threat'],
95
+ "severity": classification_result['severity'],
96
+ "confidence_scores": {
97
+ 'threat_confidence': classification_result.get('threat_confidence'),
98
+ 'severity_confidence': classification_result.get('severity_confidence')
99
+ },
100
+ "model_type": "machine_learning"
101
+ }
102
+ else:
103
+ threat, severity = classification_result
104
+ return {
105
+ "threat": threat,
106
+ "severity": severity,
107
+ "model_type": "rule_based_fallback"
108
+ }
109
+ except Exception as e:
110
+ raise HTTPException(
111
+ status_code=status.HTTP_500_INTERNAL_SERVER_ERROR,
112
+ detail=f"Classification failed: {str(e)}"
113
+ )
app/schemas.py ADDED
@@ -0,0 +1,56 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from typing import Optional
2
+ from datetime import datetime
3
+ from pydantic import BaseModel, EmailStr, Field
4
+
5
+
6
+ class UserCreate(BaseModel):
7
+ email: EmailStr
8
+ password: str = Field(min_length=6)
9
+ display_name: str
10
+ organization: Optional[str] = None
11
+ role: Optional[str] = Field(default="citizen")
12
+
13
+
14
+ class UserLogin(BaseModel):
15
+ email: EmailStr
16
+ password: str
17
+
18
+
19
+ class UserInDB(BaseModel):
20
+ id: str
21
+ email: EmailStr
22
+ display_name: str
23
+ organization: Optional[str]
24
+ role: Optional[str]
25
+ created_at: datetime
26
+
27
+
28
+ class TokenResponse(BaseModel):
29
+ access_token: str
30
+ token_type: str = "bearer"
31
+ user: UserInDB
32
+
33
+
34
+ class IncidentCreate(BaseModel):
35
+ description: str
36
+ latitude: float
37
+ longitude: float
38
+
39
+
40
+ class IncidentInDB(BaseModel):
41
+ id: str
42
+ description: str
43
+ latitude: float
44
+ longitude: float
45
+ incident_class: str
46
+ severity: str
47
+ created_at: datetime
48
+ reporter_id: Optional[str]
49
+ image_path: Optional[str]
50
+
51
+
52
+ class IncidentResponse(BaseModel):
53
+ incident_class: str
54
+ severity: str
55
+ incident_id: str
56
+ confidence_scores: Optional[dict] = None
app/services/__init__.py ADDED
File without changes
app/services/fallback_storage.py ADDED
File without changes
app/services/incidents.py ADDED
@@ -0,0 +1,32 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from pathlib import Path
2
+ from typing import Optional
3
+ from uuid import uuid4
4
+
5
+ from ..database import get_collection
6
+
7
+ INCIDENTS_COLLECTION = "incidents"
8
+ UPLOAD_DIR = Path(__file__).resolve().parent.parent / "uploads"
9
+ UPLOAD_DIR.mkdir(exist_ok=True)
10
+
11
+
12
+ async def save_incident_document(document: dict) -> dict:
13
+ collection = get_collection(INCIDENTS_COLLECTION)
14
+ result = await collection.insert_one(document)
15
+ document["_id"] = result.inserted_id
16
+ return document
17
+
18
+
19
+ async def store_image(upload_file) -> Optional[str]:
20
+ if upload_file is None:
21
+ return None
22
+
23
+ UPLOAD_DIR.mkdir(parents=True, exist_ok=True)
24
+ file_extension = Path(upload_file.filename).suffix
25
+ filename = f"{uuid4().hex}{file_extension}"
26
+ file_path = UPLOAD_DIR / filename
27
+
28
+ contents = await upload_file.read()
29
+ file_path.write_bytes(contents)
30
+
31
+ await upload_file.close()
32
+ return str(Path("uploads") / filename)
app/services/ml_model.py ADDED
@@ -0,0 +1,158 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Inference-only ML model service
2
+ # Models are pre-trained and saved as .pkl files
3
+
4
+ import numpy as np
5
+ import re
6
+ from pathlib import Path
7
+ import joblib
8
+ import logging
9
+
10
+ logger = logging.getLogger(__name__)
11
+
12
+ # Minimal sklearn imports for model loading
13
+ try:
14
+ from sklearn.feature_extraction.text import TfidfVectorizer
15
+ from sklearn.ensemble import RandomForestClassifier
16
+ SKLEARN_AVAILABLE = True
17
+ logger.info("sklearn imported successfully")
18
+ except ImportError as e:
19
+ SKLEARN_AVAILABLE = False
20
+ logger.warning(f"sklearn not available: {e}. Using rule-based classification.")
21
+
22
+ # Get the directory where this file is located
23
+ BASE_DIR = Path(__file__).resolve().parent.parent.parent
24
+ MODEL_DIR = BASE_DIR / "models"
25
+ MODEL_DIR.mkdir(exist_ok=True)
26
+
27
+ class IncidentClassifier:
28
+ def __init__(self):
29
+ self.threat_model = None
30
+ self.severity_model = None
31
+ self.is_trained = False
32
+
33
+ # Try to load pre-trained models automatically
34
+ try:
35
+ if self.load_models():
36
+ logger.info("Pre-trained models loaded successfully")
37
+ else:
38
+ logger.warning("No pre-trained models found. Classification will use fallback rules.")
39
+ except Exception as e:
40
+ logger.warning(f"Failed to load models on initialization: {e}")
41
+
42
+ def preprocess_text(self, text):
43
+ """Clean and preprocess text data"""
44
+ if text is None or text == "":
45
+ return ""
46
+
47
+ # Convert to lowercase
48
+ text = str(text).lower()
49
+
50
+ # Remove special characters but keep spaces
51
+ text = re.sub(r'[^a-zA-Z0-9\s]', ' ', text)
52
+
53
+ # Remove extra whitespaces
54
+ text = re.sub(r'\s+', ' ', text).strip()
55
+
56
+ return text
57
+
58
+ def load_models(self):
59
+ """Load trained models from disk"""
60
+ if not SKLEARN_AVAILABLE:
61
+ logger.warning("sklearn not available, cannot load models")
62
+ return False
63
+
64
+ try:
65
+ threat_model_path = MODEL_DIR / "threat_model.pkl"
66
+ severity_model_path = MODEL_DIR / "severity_model.pkl"
67
+
68
+ if threat_model_path.exists() and severity_model_path.exists():
69
+ self.threat_model = joblib.load(threat_model_path)
70
+ self.severity_model = joblib.load(severity_model_path)
71
+ self.is_trained = True
72
+ logger.info("Models loaded successfully")
73
+ return True
74
+ else:
75
+ logger.warning("Model files not found")
76
+ return False
77
+ except Exception as e:
78
+ logger.error(f"Error loading models: {e}")
79
+ return False
80
+
81
+ def predict(self, description, name=""):
82
+ """Predict threat type and severity for an incident"""
83
+ if not self.is_trained:
84
+ # Fallback to rule-based classification
85
+ return self._rule_based_classification(description, name)
86
+
87
+ try:
88
+ # Combine name and description
89
+ combined_text = f"{name} {description}".strip()
90
+ preprocessed_text = self.preprocess_text(combined_text)
91
+
92
+ if not preprocessed_text:
93
+ return self._rule_based_classification(description, name)
94
+
95
+ # Make predictions using loaded models
96
+ threat_pred = self.threat_model.predict([preprocessed_text])[0]
97
+ severity_pred = self.severity_model.predict([preprocessed_text])[0]
98
+
99
+ # Get prediction probabilities for confidence scores
100
+ threat_proba = self.threat_model.predict_proba([preprocessed_text])[0]
101
+ severity_proba = self.severity_model.predict_proba([preprocessed_text])[0]
102
+
103
+ # Get confidence scores (max probability)
104
+ threat_confidence = float(np.max(threat_proba))
105
+ severity_confidence = float(np.max(severity_proba))
106
+
107
+ return {
108
+ 'threat': threat_pred,
109
+ 'severity': severity_pred,
110
+ 'threat_confidence': threat_confidence,
111
+ 'severity_confidence': severity_confidence
112
+ }
113
+ except Exception as e:
114
+ logger.error(f"Prediction error: {e}")
115
+ return self._rule_based_classification(description, name)
116
+
117
+ def _rule_based_classification(self, description, name=""):
118
+ """Rule-based classification when ML models are not available"""
119
+ combined_text = f"{name} {description}".lower()
120
+
121
+ # Threat classification
122
+ if any(keyword in combined_text for keyword in ['oil', 'petroleum', 'crude', 'spill', 'tanker']):
123
+ threat = 'Oil'
124
+ elif any(keyword in combined_text for keyword in ['chemical', 'toxic', 'hazardous', 'acid', 'industrial']):
125
+ threat = 'Chemical'
126
+ else:
127
+ threat = 'Other'
128
+
129
+ # Severity classification
130
+ high_indicators = ['major', 'massive', 'large', 'explosion', 'fire', 'emergency', 'critical', 'severe']
131
+ medium_indicators = ['moderate', 'contained', 'limited', 'minor']
132
+
133
+ if any(indicator in combined_text for indicator in high_indicators):
134
+ severity = 'high'
135
+ elif any(indicator in combined_text for indicator in medium_indicators):
136
+ severity = 'medium'
137
+ else:
138
+ severity = 'low'
139
+
140
+ # Return with confidence scores for consistency
141
+ return {
142
+ 'threat': threat,
143
+ 'severity': severity,
144
+ 'threat_confidence': 0.8, # Mock confidence for rule-based
145
+ 'severity_confidence': 0.7
146
+ }
147
+
148
+ # Global classifier instance
149
+ incident_classifier = IncidentClassifier()
150
+
151
+ def get_classifier():
152
+ """Get the global classifier instance"""
153
+ return incident_classifier
154
+
155
+ def predict_incident(description, name=""):
156
+ """Predict threat and severity for an incident"""
157
+ classifier = get_classifier()
158
+ return classifier.predict(description, name)
app/services/ml_model_training.py ADDED
@@ -0,0 +1,277 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
# Core dependencies (always available)
import numpy as np
import pickle
import re
from pathlib import Path
import joblib
import logging

# Training dependencies (only imported when needed)
# The import is guarded so that merely importing this module never fails
# when pandas/scikit-learn are missing; train_models() checks the flag
# and raises explicitly instead.
# NOTE(review): unpickling the saved sklearn pipelines presumably also
# requires scikit-learn at runtime — confirm for inference-only deploys.
try:
    import pandas as pd
    from sklearn.model_selection import train_test_split
    from sklearn.feature_extraction.text import TfidfVectorizer
    from sklearn.ensemble import RandomForestClassifier
    from sklearn.preprocessing import LabelEncoder
    from sklearn.metrics import classification_report, accuracy_score
    from sklearn.pipeline import Pipeline
    TRAINING_DEPENDENCIES_AVAILABLE = True
except ImportError:
    TRAINING_DEPENDENCIES_AVAILABLE = False
    # These will be None if training dependencies are not available
    pd = None
    train_test_split = None
    TfidfVectorizer = None
    RandomForestClassifier = None
    LabelEncoder = None
    classification_report = None
    accuracy_score = None
    Pipeline = None

logger = logging.getLogger(__name__)

# Get the directory where this file is located
# BASE_DIR is three levels up from this file (the project root); the
# trained model pickles live in <project root>/models.
BASE_DIR = Path(__file__).resolve().parent.parent.parent
MODEL_DIR = BASE_DIR / "models"
MODEL_DIR.mkdir(exist_ok=True)
37
+
38
class IncidentClassifier:
    """Threat-type and severity classifier for incident reports.

    Wraps two independent scikit-learn pipelines (TF-IDF features +
    random forest): one predicts the 'threat' column of incidents.csv,
    the other a severity label derived from keyword rules. Trained
    pipelines are persisted as joblib pickles under MODEL_DIR.
    """

    def __init__(self):
        # Trained pipeline objects (None until trained or loaded).
        self.threat_model = None
        self.severity_model = None
        # Reserved attributes; the current pipelines consume raw string
        # labels, so no encoders are used.
        self.threat_encoder = None
        self.severity_encoder = None
        self.is_trained = False

        # Try to load pre-trained models automatically so deployments
        # that ship pickled models need no explicit training step.
        try:
            if self.load_models():
                logger.info("Pre-trained models loaded successfully")
            else:
                logger.warning("No pre-trained models found. Classification will use fallback rules.")
        except Exception as e:
            logger.warning(f"Failed to load models on initialization: {e}")

    def preprocess_text(self, text):
        """Clean and preprocess text data.

        Lowercases, replaces non-alphanumeric characters with spaces and
        collapses repeated whitespace. None/NaN input becomes "".
        """
        if text is None or (pd and pd.isna(text)):
            return ""

        # Convert to lowercase
        text = str(text).lower()

        # Remove special characters but keep spaces
        text = re.sub(r'[^a-zA-Z0-9\s]', ' ', text)

        # Remove extra whitespaces
        text = re.sub(r'\s+', ' ', text).strip()

        return text

    def create_severity_labels(self, df):
        """Create severity labels based on description content and threat type.

        Args:
            df: DataFrame with 'description' and 'threat' columns.

        Returns:
            List of 'high'/'medium'/'low' labels, one per row.
        """
        # The indicator keyword lists are loop-invariant: build them once
        # up front instead of re-creating all three lists for every row
        # (the original rebuilt them on each iteration).
        high_indicators = [
            'major', 'massive', 'large scale', 'explosion', 'fire', 'fatality',
            'death', 'significant', 'extensive', 'severe', 'critical',
            'emergency', 'disaster', 'toxic', 'hazardous', 'dangerous',
            'thousands', 'gallons', 'barrels', 'tons'
        ]
        medium_indicators = [
            'moderate', 'contained', 'limited', 'minor leak', 'small spill',
            'hundreds', 'investigation', 'response', 'cleanup'
        ]
        low_indicators = [
            'minor', 'small', 'trace', 'minimal', 'observation', 'potential',
            'suspected', 'no injuries', 'no damage', 'monitoring'
        ]

        severity_labels = []

        for _, row in df.iterrows():
            description = str(row['description']).lower()
            threat = row['threat']

            # Count keyword hits per severity bucket.
            high_count = sum(1 for indicator in high_indicators if indicator in description)
            medium_count = sum(1 for indicator in medium_indicators if indicator in description)
            low_count = sum(1 for indicator in low_indicators if indicator in description)

            # Classify based on threat type and indicators: Chemical
            # incidents default to high; the rest falls through from the
            # strongest evidence downward.
            if threat == 'Chemical' or high_count >= 2:
                severity = 'high'
            elif threat == 'Oil' and (high_count >= 1 or medium_count >= 2):
                severity = 'medium'
            elif low_count >= 2 or 'minor' in description:
                severity = 'low'
            elif high_count >= 1:
                severity = 'high'
            elif medium_count >= 1:
                severity = 'medium'
            else:
                severity = 'low'

            severity_labels.append(severity)

        return severity_labels

    def train_models(self, csv_path=None):
        """Train both threat classification and severity assessment models.

        Args:
            csv_path: Optional path to the training CSV; defaults to
                incidents.csv in the project root (BASE_DIR).

        Returns:
            Dict with test-set accuracies and label distributions.

        Raises:
            ImportError: if pandas/scikit-learn are not installed.
        """
        if not TRAINING_DEPENDENCIES_AVAILABLE:
            logger.error("Training dependencies (pandas, scikit-learn) not available. Install with: pip install -r requirements-training.txt")
            raise ImportError("Training dependencies not available. This method requires pandas and scikit-learn.")

        try:
            if csv_path is None:
                csv_path = BASE_DIR / "incidents.csv"

            logger.info(f"Loading dataset from {csv_path}")
            df = pd.read_csv(csv_path)

            # Drop rows missing either the text or the target label.
            df = df.dropna(subset=['description', 'threat'])

            # Combine name and description into a single text feature.
            df['combined_text'] = df['name'].fillna('') + ' ' + df['description'].fillna('')
            df['combined_text'] = df['combined_text'].apply(self.preprocess_text)

            # Derive the severity training targets from keyword rules.
            df['severity'] = self.create_severity_labels(df)

            # Prepare features
            X = df['combined_text']
            y_threat = df['threat']
            y_severity = df['severity']

            # One shared split for both targets, stratified on threat.
            X_train, X_test, y_threat_train, y_threat_test, y_severity_train, y_severity_test = train_test_split(
                X, y_threat, y_severity, test_size=0.2, random_state=42, stratify=y_threat
            )

            # Train threat classification model
            logger.info("Training threat classification model...")
            self.threat_model = Pipeline([
                ('tfidf', TfidfVectorizer(max_features=5000, stop_words='english', ngram_range=(1, 2))),
                ('classifier', RandomForestClassifier(n_estimators=100, random_state=42))
            ])

            self.threat_model.fit(X_train, y_threat_train)

            # Train severity assessment model
            logger.info("Training severity assessment model...")
            self.severity_model = Pipeline([
                ('tfidf', TfidfVectorizer(max_features=5000, stop_words='english', ngram_range=(1, 2))),
                ('classifier', RandomForestClassifier(n_estimators=100, random_state=42))
            ])

            self.severity_model.fit(X_train, y_severity_train)

            # Evaluate both models on the held-out split.
            threat_pred = self.threat_model.predict(X_test)
            severity_pred = self.severity_model.predict(X_test)

            logger.info("Threat Classification Results:")
            logger.info(f"Accuracy: {accuracy_score(y_threat_test, threat_pred):.3f}")
            logger.info("\n" + classification_report(y_threat_test, threat_pred))

            logger.info("Severity Assessment Results:")
            logger.info(f"Accuracy: {accuracy_score(y_severity_test, severity_pred):.3f}")
            logger.info("\n" + classification_report(y_severity_test, severity_pred))

            # Save models
            self.save_models()
            self.is_trained = True

            logger.info("Models trained and saved successfully!")

            return {
                'threat_accuracy': accuracy_score(y_threat_test, threat_pred),
                'severity_accuracy': accuracy_score(y_severity_test, severity_pred),
                'threat_distribution': df['threat'].value_counts().to_dict(),
                'severity_distribution': df['severity'].value_counts().to_dict()
            }

        except Exception as e:
            logger.error(f"Error training models: {e}")
            raise

    def save_models(self):
        """Persist both trained pipelines to MODEL_DIR as joblib pickles."""
        try:
            joblib.dump(self.threat_model, MODEL_DIR / "threat_model.pkl")
            joblib.dump(self.severity_model, MODEL_DIR / "severity_model.pkl")
            logger.info("Models saved successfully")
        except Exception as e:
            logger.error(f"Error saving models: {e}")
            raise

    def load_models(self):
        """Load trained models from disk.

        Returns:
            True if both pickles existed and loaded; False otherwise,
            including on any load error (which is logged, not raised).
        """
        try:
            threat_model_path = MODEL_DIR / "threat_model.pkl"
            severity_model_path = MODEL_DIR / "severity_model.pkl"

            if threat_model_path.exists() and severity_model_path.exists():
                self.threat_model = joblib.load(threat_model_path)
                self.severity_model = joblib.load(severity_model_path)
                self.is_trained = True
                logger.info("Models loaded successfully")
                return True
            else:
                logger.warning("Model files not found")
                return False
        except Exception as e:
            logger.error(f"Error loading models: {e}")
            return False

    def predict(self, description, name=""):
        """Predict threat type and severity for a given incident description.

        Args:
            description: Incident description text.
            name: Optional incident title, combined with the description.

        Returns:
            Dict with 'threat', 'severity' and float confidence scores
            (top class probability from each pipeline).

        Raises:
            ValueError: if no trained models are available.
        """
        if not self.is_trained:
            if not self.load_models():
                raise ValueError("Models not trained or loaded")

        # Preprocess input
        combined_text = self.preprocess_text(f"{name} {description}")

        # Make predictions
        threat_pred = self.threat_model.predict([combined_text])[0]
        severity_pred = self.severity_model.predict([combined_text])[0]

        # Get prediction probabilities for confidence scores
        threat_proba = self.threat_model.predict_proba([combined_text])[0]
        severity_proba = self.severity_model.predict_proba([combined_text])[0]

        threat_confidence = max(threat_proba)
        severity_confidence = max(severity_proba)

        return {
            'threat': threat_pred,
            'severity': severity_pred,
            'threat_confidence': float(threat_confidence),
            'severity_confidence': float(severity_confidence)
        }
257
+
258
# Global instance
# Module-level singleton: constructed once at import time so loaded or
# trained models are shared across callers of this module.
incident_classifier = IncidentClassifier()
260
+
261
def get_classifier():
    """Accessor for the shared module-level IncidentClassifier instance."""
    classifier = incident_classifier
    return classifier
264
+
265
def train_models():
    """Train the threat/severity models from the incidents dataset.

    Returns False (after logging an error) when pandas/scikit-learn are
    missing; otherwise delegates to IncidentClassifier.train_models().
    """
    if TRAINING_DEPENDENCIES_AVAILABLE:
        return get_classifier().train_models()
    logger.error("Training dependencies not available. Models should be pre-trained for deployment.")
    return False
273
+
274
def predict_incident(description, name=""):
    """Classify one incident via the shared classifier.

    Args:
        description: Free-text incident description.
        name: Optional incident title, combined with the description.

    Returns:
        Dict with 'threat', 'severity' and confidence scores.
    """
    return get_classifier().predict(description, name)
app/services/nlp.py ADDED
@@ -0,0 +1,105 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import logging
2
+ from typing import Tuple, Union, Dict
3
+ from .ml_model import predict_incident, get_classifier
4
+
5
+ logger = logging.getLogger(__name__)
6
+
7
def classify_incident(description: str, name: str = "") -> Union[Tuple[str, str], Dict]:
    """Classify an incident, preferring the trained ML model.

    Args:
        description: The incident description.
        name: The incident name/title (optional).

    Returns:
        A dict with threat/severity plus confidence scores when the ML
        model is available, otherwise a (threat, severity) tuple from
        the rule-based fallback.
    """
    try:
        classifier = get_classifier()
        # Guard clause: fall back immediately when no model can be used.
        if not (classifier.is_trained or classifier.load_models()):
            logger.warning("ML model not available, using fallback classification")
            return _fallback_classify(description)
        return predict_incident(description, name)
    except Exception as e:
        logger.error(f"Error in ML classification: {e}")
        return _fallback_classify(description)
31
+
32
+ def _fallback_classify(description: str) -> Tuple[str, str]:
33
+ """
34
+ Fallback rule-based classification when ML model is not available.
35
+ """
36
+ description_lower = description.lower()
37
+
38
+ # Determine threat type
39
+ oil_keywords = ['oil', 'fuel', 'diesel', 'gasoline', 'petroleum', 'crude', 'spill']
40
+ chemical_keywords = ['chemical', 'acid', 'toxic', 'hazardous', 'styrene', 'acetic']
41
+
42
+ if any(keyword in description_lower for keyword in chemical_keywords):
43
+ threat = "Chemical"
44
+ elif any(keyword in description_lower for keyword in oil_keywords):
45
+ threat = "Oil"
46
+ else:
47
+ threat = "Other"
48
+
49
+ # Determine severity
50
+ high_severity_keywords = [
51
+ 'major', 'massive', 'explosion', 'fire', 'fatality', 'death',
52
+ 'significant', 'extensive', 'severe', 'critical', 'emergency',
53
+ 'disaster', 'thousands', 'gallons', 'barrels'
54
+ ]
55
+
56
+ medium_severity_keywords = [
57
+ 'moderate', 'contained', 'limited', 'hundreds', 'investigation',
58
+ 'response', 'cleanup', 'leak'
59
+ ]
60
+
61
+ low_severity_keywords = [
62
+ 'minor', 'small', 'trace', 'minimal', 'observation', 'potential',
63
+ 'suspected', 'no injuries', 'no damage'
64
+ ]
65
+
66
+ if any(keyword in description_lower for keyword in high_severity_keywords):
67
+ severity = "high"
68
+ elif any(keyword in description_lower for keyword in medium_severity_keywords):
69
+ severity = "medium"
70
+ elif any(keyword in description_lower for keyword in low_severity_keywords):
71
+ severity = "low"
72
+ else:
73
+ # Default based on threat type
74
+ if threat == "Chemical":
75
+ severity = "high"
76
+ elif threat == "Oil":
77
+ severity = "medium"
78
+ else:
79
+ severity = "low"
80
+
81
+ return threat, severity
82
+
83
def get_model_info():
    """Report whether ML classification is active or the fallback is in use.

    Returns:
        A status dict; on unexpected failure it includes the error text.
    """
    try:
        classifier = get_classifier()
        ml_ready = classifier.is_trained or classifier.load_models()
        if ml_ready:
            return {
                "model_available": True,
                "model_type": "machine_learning",
                "status": "active"
            }
        return {
            "model_available": False,
            "model_type": "rule_based_fallback",
            "status": "fallback"
        }
    except Exception as e:
        return {
            "model_available": False,
            "model_type": "rule_based_fallback",
            "status": "error",
            "error": str(e)
        }
app/services/users.py ADDED
@@ -0,0 +1,53 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from datetime import datetime
2
+ from typing import Optional
3
+
4
+ from bson import ObjectId
5
+
6
+ from ..database import get_collection
7
+
8
+
9
# Name of the database collection that stores user documents.
USERS_COLLECTION = "users"
10
+
11
+
12
def serialize_user(document) -> Optional[dict]:
    """Convert a stored user document into an API-safe dict.

    Returns None for falsy input; otherwise maps the '_id' to a string
    and copies the public profile fields (missing fields become None).
    """
    if not document:
        return None
    serialized = {"id": str(document.get("_id"))}
    for field in ("email", "display_name", "organization", "role", "created_at"):
        serialized[field] = document.get(field)
    return serialized
23
+
24
+
25
async def get_users_collection():
    """Resolve the backing collection for user documents."""
    users_collection = get_collection(USERS_COLLECTION)
    return users_collection
27
+
28
+
29
async def get_user_by_email(email: str) -> Optional[dict]:
    """Look up a single user document by exact email match."""
    collection = await get_users_collection()
    return await collection.find_one({"email": email})
32
+
33
+
34
async def create_user(data: dict) -> dict:
    """Insert a new user document, stamping created_at/updated_at.

    Returns:
        The stored payload including the generated '_id'.
    """
    collection = await get_users_collection()
    timestamp = datetime.utcnow()
    document = dict(data)
    document["created_at"] = timestamp
    document["updated_at"] = timestamp
    insert_result = await collection.insert_one(document)
    document["_id"] = insert_result.inserted_id
    return document
45
+
46
+
47
async def get_user_by_id(user_id: str) -> Optional[dict]:
    """Fetch one user document by its string ObjectId.

    Returns None when the id is not a valid ObjectId or no user matches.
    """
    collection = await get_users_collection()
    try:
        object_id = ObjectId(user_id)
    except Exception:
        # Malformed id strings are treated the same as a missing user.
        return None
    return await collection.find_one({"_id": object_id})
eda.ipynb ADDED
@@ -0,0 +1,265 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "cells": [
3
+ {
4
+ "cell_type": "code",
5
+ "execution_count": 1,
6
+ "id": "fed833c7",
7
+ "metadata": {},
8
+ "outputs": [],
9
+ "source": [
10
+ "import pandas as pd \n",
11
+ "import numpy as np"
12
+ ]
13
+ },
14
+ {
15
+ "cell_type": "code",
16
+ "execution_count": 2,
17
+ "id": "e5605712",
18
+ "metadata": {},
19
+ "outputs": [
20
+ {
21
+ "data": {
22
+ "text/html": [
23
+ "<div>\n",
24
+ "<style scoped>\n",
25
+ " .dataframe tbody tr th:only-of-type {\n",
26
+ " vertical-align: middle;\n",
27
+ " }\n",
28
+ "\n",
29
+ " .dataframe tbody tr th {\n",
30
+ " vertical-align: top;\n",
31
+ " }\n",
32
+ "\n",
33
+ " .dataframe thead th {\n",
34
+ " text-align: right;\n",
35
+ " }\n",
36
+ "</style>\n",
37
+ "<table border=\"1\" class=\"dataframe\">\n",
38
+ " <thead>\n",
39
+ " <tr style=\"text-align: right;\">\n",
40
+ " <th></th>\n",
41
+ " <th>id</th>\n",
42
+ " <th>open_date</th>\n",
43
+ " <th>name</th>\n",
44
+ " <th>location</th>\n",
45
+ " <th>lat</th>\n",
46
+ " <th>lon</th>\n",
47
+ " <th>threat</th>\n",
48
+ " <th>tags</th>\n",
49
+ " <th>commodity</th>\n",
50
+ " <th>measure_skim</th>\n",
51
+ " <th>measure_shore</th>\n",
52
+ " <th>measure_bio</th>\n",
53
+ " <th>measure_disperse</th>\n",
54
+ " <th>measure_burn</th>\n",
55
+ " <th>max_ptl_release_gallons</th>\n",
56
+ " <th>posts</th>\n",
57
+ " <th>description</th>\n",
58
+ " </tr>\n",
59
+ " </thead>\n",
60
+ " <tbody>\n",
61
+ " <tr>\n",
62
+ " <th>0</th>\n",
63
+ " <td>10431</td>\n",
64
+ " <td>2022-03-21</td>\n",
65
+ " <td>Tug Vessel Loses Power, Grounds, and Leaks Die...</td>\n",
66
+ " <td>Neva Strait, Sitka, AK</td>\n",
67
+ " <td>57.270000</td>\n",
68
+ " <td>-135.593330</td>\n",
69
+ " <td>Oil</td>\n",
70
+ " <td>NaN</td>\n",
71
+ " <td>NaN</td>\n",
72
+ " <td>NaN</td>\n",
73
+ " <td>NaN</td>\n",
74
+ " <td>NaN</td>\n",
75
+ " <td>NaN</td>\n",
76
+ " <td>NaN</td>\n",
77
+ " <td>NaN</td>\n",
78
+ " <td>0</td>\n",
79
+ " <td>At approximately 0400 on 21-Mar02922, the tug ...</td>\n",
80
+ " </tr>\n",
81
+ " <tr>\n",
82
+ " <th>1</th>\n",
83
+ " <td>10430</td>\n",
84
+ " <td>2022-03-17</td>\n",
85
+ " <td>Compromised Fuel Transfer Pipe Spills Oil into...</td>\n",
86
+ " <td>Oswego, NY</td>\n",
87
+ " <td>43.459410</td>\n",
88
+ " <td>-76.531650</td>\n",
89
+ " <td>Oil</td>\n",
90
+ " <td>NaN</td>\n",
91
+ " <td>NaN</td>\n",
92
+ " <td>NaN</td>\n",
93
+ " <td>NaN</td>\n",
94
+ " <td>NaN</td>\n",
95
+ " <td>NaN</td>\n",
96
+ " <td>NaN</td>\n",
97
+ " <td>NaN</td>\n",
98
+ " <td>0</td>\n",
99
+ " <td>On March 17, 2022, NOAA ERD was notified by Mi...</td>\n",
100
+ " </tr>\n",
101
+ " <tr>\n",
102
+ " <th>2</th>\n",
103
+ " <td>10429</td>\n",
104
+ " <td>2022-03-16</td>\n",
105
+ " <td>Floating Humpback Whale Carcass off of Carolin...</td>\n",
106
+ " <td>Carolina Beach, NC, USA</td>\n",
107
+ " <td>34.031323</td>\n",
108
+ " <td>-77.830343</td>\n",
109
+ " <td>Other</td>\n",
110
+ " <td>NaN</td>\n",
111
+ " <td>NaN</td>\n",
112
+ " <td>NaN</td>\n",
113
+ " <td>NaN</td>\n",
114
+ " <td>NaN</td>\n",
115
+ " <td>NaN</td>\n",
116
+ " <td>NaN</td>\n",
117
+ " <td>NaN</td>\n",
118
+ " <td>0</td>\n",
119
+ " <td>On March 16, 2022, the Gulf of Mexico Marine M...</td>\n",
120
+ " </tr>\n",
121
+ " <tr>\n",
122
+ " <th>3</th>\n",
123
+ " <td>10428</td>\n",
124
+ " <td>2022-03-15</td>\n",
125
+ " <td>Containership Grounded off Gibson Island in Ch...</td>\n",
126
+ " <td>Gibson Island, MD, USA</td>\n",
127
+ " <td>39.070000</td>\n",
128
+ " <td>-76.410000</td>\n",
129
+ " <td>Oil</td>\n",
130
+ " <td>NaN</td>\n",
131
+ " <td>NaN</td>\n",
132
+ " <td>NaN</td>\n",
133
+ " <td>NaN</td>\n",
134
+ " <td>NaN</td>\n",
135
+ " <td>NaN</td>\n",
136
+ " <td>NaN</td>\n",
137
+ " <td>NaN</td>\n",
138
+ " <td>2</td>\n",
139
+ " <td>On 15 March 2022, USCG Sector Maryland NCR not...</td>\n",
140
+ " </tr>\n",
141
+ " <tr>\n",
142
+ " <th>4</th>\n",
143
+ " <td>10426</td>\n",
144
+ " <td>2022-03-14</td>\n",
145
+ " <td>Oil Pipeline Discharge into Cahokia Canal, Edw...</td>\n",
146
+ " <td>Cahokia Canal, Edwardsville, IL</td>\n",
147
+ " <td>38.824034</td>\n",
148
+ " <td>-89.974600</td>\n",
149
+ " <td>Oil</td>\n",
150
+ " <td>NaN</td>\n",
151
+ " <td>NaN</td>\n",
152
+ " <td>NaN</td>\n",
153
+ " <td>NaN</td>\n",
154
+ " <td>NaN</td>\n",
155
+ " <td>NaN</td>\n",
156
+ " <td>NaN</td>\n",
157
+ " <td>NaN</td>\n",
158
+ " <td>0</td>\n",
159
+ " <td>On March 14, 2022, USEPA Region 5 contacted th...</td>\n",
160
+ " </tr>\n",
161
+ " </tbody>\n",
162
+ "</table>\n",
163
+ "</div>"
164
+ ],
165
+ "text/plain": [
166
+ " id open_date name \\\n",
167
+ "0 10431 2022-03-21 Tug Vessel Loses Power, Grounds, and Leaks Die... \n",
168
+ "1 10430 2022-03-17 Compromised Fuel Transfer Pipe Spills Oil into... \n",
169
+ "2 10429 2022-03-16 Floating Humpback Whale Carcass off of Carolin... \n",
170
+ "3 10428 2022-03-15 Containership Grounded off Gibson Island in Ch... \n",
171
+ "4 10426 2022-03-14 Oil Pipeline Discharge into Cahokia Canal, Edw... \n",
172
+ "\n",
173
+ " location lat lon threat tags \\\n",
174
+ "0 Neva Strait, Sitka, AK 57.270000 -135.593330 Oil NaN \n",
175
+ "1 Oswego, NY 43.459410 -76.531650 Oil NaN \n",
176
+ "2 Carolina Beach, NC, USA 34.031323 -77.830343 Other NaN \n",
177
+ "3 Gibson Island, MD, USA 39.070000 -76.410000 Oil NaN \n",
178
+ "4 Cahokia Canal, Edwardsville, IL 38.824034 -89.974600 Oil NaN \n",
179
+ "\n",
180
+ " commodity measure_skim measure_shore measure_bio measure_disperse \\\n",
181
+ "0 NaN NaN NaN NaN NaN \n",
182
+ "1 NaN NaN NaN NaN NaN \n",
183
+ "2 NaN NaN NaN NaN NaN \n",
184
+ "3 NaN NaN NaN NaN NaN \n",
185
+ "4 NaN NaN NaN NaN NaN \n",
186
+ "\n",
187
+ " measure_burn max_ptl_release_gallons posts \\\n",
188
+ "0 NaN NaN 0 \n",
189
+ "1 NaN NaN 0 \n",
190
+ "2 NaN NaN 0 \n",
191
+ "3 NaN NaN 2 \n",
192
+ "4 NaN NaN 0 \n",
193
+ "\n",
194
+ " description \n",
195
+ "0 At approximately 0400 on 21-Mar02922, the tug ... \n",
196
+ "1 On March 17, 2022, NOAA ERD was notified by Mi... \n",
197
+ "2 On March 16, 2022, the Gulf of Mexico Marine M... \n",
198
+ "3 On 15 March 2022, USCG Sector Maryland NCR not... \n",
199
+ "4 On March 14, 2022, USEPA Region 5 contacted th... "
200
+ ]
201
+ },
202
+ "execution_count": 2,
203
+ "metadata": {},
204
+ "output_type": "execute_result"
205
+ }
206
+ ],
207
+ "source": [
208
+ "df = pd.read_csv('incidents.csv')\n",
209
+ "df.head()"
210
+ ]
211
+ },
212
+ {
213
+ "cell_type": "code",
214
+ "execution_count": 3,
215
+ "id": "20914440",
216
+ "metadata": {},
217
+ "outputs": [
218
+ {
219
+ "data": {
220
+ "text/plain": [
221
+ "<Axes: xlabel='threat'>"
222
+ ]
223
+ },
224
+ "execution_count": 3,
225
+ "metadata": {},
226
+ "output_type": "execute_result"
227
+ },
228
+ {
229
+ "data": {
230
+ "image/png": "iVBORw0KGgoAAAANSUhEUgAAAjEAAAHjCAYAAADScU5NAAAAOXRFWHRTb2Z0d2FyZQBNYXRwbG90bGliIHZlcnNpb24zLjkuMiwgaHR0cHM6Ly9tYXRwbG90bGliLm9yZy8hTgPZAAAACXBIWXMAAA9hAAAPYQGoP6dpAAAnbElEQVR4nO3dfXRU9Z3H8c+QkJFgMuSBzCRlJEEiBYO2gCcEq0KBBASitl1w8aS4ZUHLU1NEC7XbUo4LllVAy8pStaVSrfZ0xdKVDQSRBwWEZI0goAc04UEyBEgyAYwTSGb/8HBPhwAaILnzy7xf58w5yb2/Cd9pp/DunXtvHMFgMCgAAADDdLB7AAAAgCtBxAAAACMRMQAAwEhEDAAAMBIRAwAAjETEAAAAIxExAADASEQMAAAwUrTdA7SWpqYmHT16VHFxcXI4HHaPAwAAvoZgMKhTp04pLS1NHTpc/lhLu42Yo0ePyuv12j0GAAC4AocPH1a3bt0uu6bdRkxcXJykL/9DiI+Pt3kaAADwddTV1cnr9Vr/jl9Ou42Y8x8hxcfHEzEAABjm65wKwom9AADASEQMAAAwEhEDAACMRMQAAAAjETEAAMBIRAwAADASEQMAAIxExAAAACMRMQAAwEhEDAAAMBIRAwAAjETEAAAAIxExAADASEQMAAAwUrTdA0BKn/2m3SO0CxVPjrJ7BABAG+JIDAAAMBIRAwAAjETEAAAAIxExAADASEQMAAAwEhEDAACMRMQAAAAjETEAAMBIRAwAADASEQMAAIxExAAAACMRMQAAwEhEDAAAMBIRAwAAjETEAAAAIxExAADASEQMAAAwEhEDAACMRMQAAAAjETEAAMBIRAwAADASEQMAAIxExAAAACMRMQAAwEhEDAAAMBIRAwAAjETEAAAAIxExAADASEQMAAAwEhEDAACMRMQAAAAjETEAAMBIRAwAADASEQMAAIxExAAAACMRMQAAwEhEDAAAMBIRAwAAjETEAAAAIxExAADASEQMAAAwEhEDAACMRMQAAAAjETEAAMBIRAwAADASEQMAAIxExAAAACO1KGIWLFig2267TXFxcUpJSdG9996rjz/+OGRNMBjU3LlzlZaWpk6dOmnw4MHas2dPyJpAIKDp06crOTlZnTt3Vn5+vo4cORKypqamRgUFBXK5XHK5XCooKFBtbe2VvUoAANDutChiNm3apKlTp2r79u0qLi7WuXPnlJubqzNnzlhrFi5cqEWLFmnp0qXauXOnPB6Phg8frlOnTllrCgsLtWrVKr366qt65513dPr0aY0ePVqNjY3WmvHjx6usrExFRUUqKipSWVmZCgoKrsFLBgAA7YEjGAwGr/TJx48fV0pKijZt2qQ777xTwWBQaWlpKiws1M9+9jNJXx51cbvd+s1vfqOHHnpIfr9fXbt21cqVKzVu3DhJ0tGjR+X1erVmzRrl5eVp37596tOnj7Zv367s7GxJ0vbt25WTk6OPPvpIvXr1+srZ6urq5HK55Pf7FR8ff6UvsU2kz37T7hHahYonR9k9AgDgKrXk3++rOifG7/dLkhITEyVJ5eXl8vl8ys3NtdY4nU7ddddd2rp1qySptLRUZ8+eDVmTlpamrKwsa822bdvkcrmsgJGkgQMHyuVyWWsuFAgEVFdXF/IAAADt1xVHTDAY1MyZM/Wd73xHWVlZkiSfzydJcrvdIWvdbre1z+fzKSYmRgkJCZddk5KS0uzPTElJsdZcaMGCBdb5My6XS16v90pfGgAAMMAVR8y0adO0a9cu/fnPf262z+FwhHwfDAabbbvQhWsutv5yP2fOnDny+/3W4/Dhw1/nZQAAAENdUcRMnz5dq1ev1ttvv61u3bpZ2z0ejyQ1O1pSVVVlHZ3xeDxqaGhQTU3NZdccO3as2Z97/PjxZkd5znM6nYqPjw95AACA9qtFERMMBjVt2jS9/vrr2rBhgzIyMkL2Z2RkyOPxqLi42NrW
0NCgTZs2adCgQZKk/v37q2PHjiFrKisr9eGHH1prcnJy5Pf7tWPHDmvNe++9J7/fb60BAACRLboli6dOnapXXnlFf/vb3xQXF2cdcXG5XOrUqZMcDocKCws1f/58ZWZmKjMzU/Pnz1dsbKzGjx9vrZ04caIeeeQRJSUlKTExUbNmzVLfvn01bNgwSVLv3r01YsQITZo0ScuXL5ckTZ48WaNHj/5aVyYBAID2r0URs2zZMknS4MGDQ7b/4Q9/0IMPPihJeuyxx1RfX68pU6aopqZG2dnZWrduneLi4qz1ixcvVnR0tMaOHav6+noNHTpUK1asUFRUlLXm5Zdf1owZM6yrmPLz87V06dIreY0AAKAduqr7xIQz7hMTebhPDACYr83uEwMAAGAXIgYAABiJiAEAAEYiYgAAgJGIGAAAYCQiBgAAGImIAQAARiJiAACAkYgYAABgJCIGAAAYiYgBAABGImIAAICRiBgAAGAkIgYAABiJiAEAAEYiYgAAgJGIGAAAYCQiBgAAGImIAQAARiJiAACAkYgYAABgJCIGAAAYiYgBAABGImIAAICRiBgAAGAkIgYAABiJiAEAAEYiYgAAgJGIGAAAYCQiBgAAGImIAQAARiJiAACAkYgYAABgJCIGAAAYiYgBAABGImIAAICRiBgAAGAkIgYAABiJiAEAAEYiYgAAgJGIGAAAYCQiBgAAGImIAQAARiJiAACAkYgYAABgJCIGAAAYiYgBAABGImIAAICRiBgAAGAkIgYAABiJiAEAAEYiYgAAgJGIGAAAYCQiBgAAGImIAQAARiJiAACAkYgYAABgJCIGAAAYiYgBAABGImIAAICRiBgAAGAkIgYAABiJiAEAAEYiYgAAgJFaHDGbN2/WmDFjlJaWJofDoTfeeCNk/4MPPiiHwxHyGDhwYMiaQCCg6dOnKzk5WZ07d1Z+fr6OHDkSsqampkYFBQVyuVxyuVwqKChQbW1ti18gAABon1ocMWfOnNGtt96qpUuXXnLNiBEjVFlZaT3WrFkTsr+wsFCrVq3Sq6++qnfeeUenT5/W6NGj1djYaK0ZP368ysrKVFRUpKKiIpWVlamgoKCl4wIAgHYquqVPGDlypEaOHHnZNU6nUx6P56L7/H6/XnzxRa1cuVLDhg2TJP3pT3+S1+vV+vXrlZeXp3379qmoqEjbt29Xdna2JOn5559XTk6OPv74Y/Xq1aulYwMAgHamVc6J2bhxo1JSUnTTTTdp0qRJqqqqsvaVlpbq7Nmzys3NtbalpaUpKytLW7dulSRt27ZNLpfLChhJGjhwoFwul7XmQoFAQHV1dSEPAADQfl3ziBk5cqRefvllbdiwQU8//bR27typ7373uwoEApIkn8+nmJgYJSQkhDzP7XbL5/NZa1JSUpr97JSUFGvNhRYsWGCdP+NyueT1eq/xKwMAAOGkxR8nfZVx48ZZX2dlZWnAgAHq3r273nzzTX3ve9+75POCwaAcDof1/T9+fak1/2jOnDmaOXOm9X1dXR0hAwBAO9bql1inpqaqe/fu2r9/vyTJ4/GooaFBNTU1IeuqqqrkdrutNceOHWv2s44fP26tuZDT6VR8fHzIAwAAtF+tHjEnT57U4cOHlZqaKknq37+/OnbsqOLiYmtNZWWlPvzwQw0aNEiSlJOTI7/frx07dlhr3nvvPfn9fmsNAACIbC3+OOn06dM6cOCA9X15ebnKysqUmJioxMREzZ07V9///veVmpqqiooK/fznP1dycrLuu+8+SZLL5dLEiRP1yCOPKCkpSYmJiZo1a5b69u1rXa3Uu3dvjRgxQpMmTdLy5cslSZMnT9bo0aO5MgkAAEi6gogpKSnRkCFDrO/Pn4cyYcIELVu2TLt379ZLL72k2tpapaamasiQIXrttdcUFxdnPWfx4sWKjo7W2LFjVV9fr6FDh2rFihWKioqy1rz88suaMWOGdRVTfn7+Ze9NAwAAIosjGAwG7R6iNdTV1cnlcsnv
94f9+THps9+0e4R2oeLJUXaPAAC4Si3595vfnQQAAIxExAAAACMRMQAAwEhEDAAAMBIRAwAAjETEAAAAIxExAADASEQMAAAwEhEDAACMRMQAAAAjETEAAMBIRAwAADASEQMAAIxExAAAACMRMQAAwEhEDAAAMBIRAwAAjETEAAAAIxExAADASEQMAAAwEhEDAACMRMQAAAAjETEAAMBIRAwAADASEQMAAIxExAAAACMRMQAAwEhEDAAAMBIRAwAAjETEAAAAIxExAADASEQMAAAwEhEDAACMRMQAAAAjETEAAMBIRAwAADASEQMAAIxExAAAACMRMQAAwEhEDAAAMBIRAwAAjETEAAAAIxExAADASEQMAAAwEhEDAACMRMQAAAAjETEAAMBIRAwAADASEQMAAIxExAAAACMRMQAAwEhEDAAAMBIRAwAAjETEAAAAIxExAADASEQMAAAwEhEDAACMRMQAAAAjETEAAMBIRAwAADASEQMAAIxExAAAACMRMQAAwEhEDAAAMFKLI2bz5s0aM2aM0tLS5HA49MYbb4TsDwaDmjt3rtLS0tSpUycNHjxYe/bsCVkTCAQ0ffp0JScnq3PnzsrPz9eRI0dC1tTU1KigoEAul0sul0sFBQWqra1t8QsEAADtU4sj5syZM7r11lu1dOnSi+5fuHChFi1apKVLl2rnzp3yeDwaPny4Tp06Za0pLCzUqlWr9Oqrr+qdd97R6dOnNXr0aDU2Nlprxo8fr7KyMhUVFamoqEhlZWUqKCi4gpcIAADaI0cwGAxe8ZMdDq1atUr33nuvpC+PwqSlpamwsFA/+9nPJH151MXtdus3v/mNHnroIfn9fnXt2lUrV67UuHHjJElHjx6V1+vVmjVrlJeXp3379qlPnz7avn27srOzJUnbt29XTk6OPvroI/Xq1esrZ6urq5PL5ZLf71d8fPyVvsQ2kT77TbtHaBcqnhxl9wgAgKvUkn+/r+k5MeXl5fL5fMrNzbW2OZ1O3XXXXdq6daskqbS0VGfPng1Zk5aWpqysLGvNtm3b5HK5rICRpIEDB8rlcllrLhQIBFRXVxfyAAAA7dc1jRifzydJcrvdIdvdbre1z+fzKSYmRgkJCZddk5KS0uznp6SkWGsutGDBAuv8GZfLJa/Xe9WvBwAAhK9WuTrJ4XCEfB8MBpttu9CFay62/nI/Z86cOfL7/dbj8OHDVzA5AAAwxTWNGI/HI0nNjpZUVVVZR2c8Ho8aGhpUU1Nz2TXHjh1r9vOPHz/e7CjPeU6nU/Hx8SEPAADQfl3TiMnIyJDH41FxcbG1raGhQZs2bdKgQYMkSf3791fHjh1D1lRWVurDDz+01uTk5Mjv92vHjh3Wmvfee09+v99aAwAAIlt0S59w+vRpHThwwPq+vLxcZWVlSkxM1A033KDCwkLNnz9fmZmZyszM1Pz58xUbG6vx48dLklwulyZOnKhHHnlESUlJSkxM1KxZs9S3b18NGzZMktS7d2+NGDFCkyZN0vLlyyVJkydP1ujRo7/WlUkAAKD9a3HElJSUaMiQIdb3M2fOlCRNmDBBK1as0GOPPab6+npNmTJFNTU1ys7O1rp16xQXF2c9Z/HixYqOjtbYsWNVX1+voUOHasWKFYqKirLWvPzyy5oxY4Z1FVN+fv4l700DAAAiz1XdJyaccZ+YyMN9YgDAfLbdJwYAAKCtEDEAAMBIRAwAADASEQMAAIxExAAAACMRMQAAwEhEDAAAMBIRAwAAjETEAAAAIxExAADASEQMAAAwEhEDAACMRMQAAAAjETEAAMBIRAwAADASEQMAAIxExAAAACMRMQAAwEhEDAAAMBIRAwAAjETEAAAAIxExAADASEQMAAAwEhEDAACMRMQAAAAjETEAAMBIRAwAADASEQMAAIxExAAAACMRMQAAwEhEDAAAMBIRAwAAjETEAAAAIxExAADASEQMAAAwEhEDAACMRMQAAAAjETEAAMBIRAwAADASEQMA
AIxExAAAACMRMQAAwEhEDAAAMBIRAwAAjETEAAAAIxExAADASEQMAAAwEhEDAACMRMQAAAAjETEAAMBIRAwAADASEQMAAIxExAAAACMRMQAAwEhEDAAAMBIRAwAAjETEAAAAIxExAADASEQMAAAwEhEDAACMRMQAAAAjETEAAMBIRAwAADASEQMAAIx0zSNm7ty5cjgcIQ+Px2PtDwaDmjt3rtLS0tSpUycNHjxYe/bsCfkZgUBA06dPV3Jysjp37qz8/HwdOXLkWo8KAAAM1ipHYm6++WZVVlZaj927d1v7Fi5cqEWLFmnp0qXauXOnPB6Phg8frlOnTllrCgsLtWrVKr366qt65513dPr0aY0ePVqNjY2tMS4AADBQdKv80OjokKMv5wWDQS1ZskSPP/64vve970mS/vjHP8rtduuVV17RQw89JL/frxdffFErV67UsGHDJEl/+tOf5PV6tX79euXl5bXGyAAAwDCtciRm//79SktLU0ZGhu6//359+umnkqTy8nL5fD7l5uZaa51Op+666y5t3bpVklRaWqqzZ8+GrElLS1NWVpa15mICgYDq6upCHgAAoP265hGTnZ2tl156SWvXrtXzzz8vn8+nQYMG6eTJk/L5fJIkt9sd8hy3223t8/l8iomJUUJCwiXXXMyCBQvkcrmsh9frvcavDAAAhJNrHjEjR47U97//ffXt21fDhg3Tm2++KenLj43OczgcIc8JBoPNtl3oq9bMmTNHfr/fehw+fPgqXgUAAAh3rX6JdefOndW3b1/t37/fOk/mwiMqVVVV1tEZj8ejhoYG1dTUXHLNxTidTsXHx4c8AABA+9XqERMIBLRv3z6lpqYqIyNDHo9HxcXF1v6GhgZt2rRJgwYNkiT1799fHTt2DFlTWVmpDz/80FoDAABwza9OmjVrlsaMGaMbbrhBVVVVeuKJJ1RXV6cJEybI4XCosLBQ8+fPV2ZmpjIzMzV//nzFxsZq/PjxkiSXy6WJEyfqkUceUVJSkhITEzVr1izr4ykAAACpFSLmyJEj+ud//medOHFCXbt21cCBA7V9+3Z1795dkvTYY4+pvr5eU6ZMUU1NjbKzs7Vu3TrFxcVZP2Px4sWKjo7W2LFjVV9fr6FDh2rFihWKioq61uMCAABDOYLBYNDuIVpDXV2dXC6X/H5/2J8fkz77TbtHaBcqnhxl9wgAgKvUkn+/+d1JAADASEQMAAAwEhEDAACMRMQAAAAjETEAAMBIRAwAADASEQMAAIxExAAAACMRMQAAwEhEDAAAMBIRAwAAjETEAAAAIxExAADASEQMAAAwEhEDAACMRMQAAAAjETEAAMBIRAwAADASEQMAAIxExAAAACMRMQAAwEhEDAAAMBIRAwAAjETEAAAAIxExAADASEQMAAAwEhEDAACMFG33AADCT/rsN+0eod2oeHKU3SMA7RZHYgAAgJGIGAAAYCQiBgAAGIlzYgAAYY/ztK6d9nSeFkdiAACAkYgYAABgJCIGAAAYiYgBAABGImIAAICRiBgAAGAkIgYAABiJiAEAAEYiYgAAgJGIGAAAYCQiBgAAGImIAQAARiJiAACAkYgYAABgJCIGAAAYiYgBAABGImIAAICRiBgAAGAkIgYAABiJiAEAAEYiYgAAgJGIGAAAYCQiBgAAGImIAQAARiJiAACAkYgYAABgJCIGAAAYiYgBAABGImIAAICRiBgAAGAkIgYAABiJiAEAAEYK+4h57rnnlJGRoeuuu079+/fXli1b7B4JAACEgbCOmNdee02FhYV6/PHH9f777+uOO+7QyJEjdejQIbtHAwAANgvriFm0aJEmTpyof/3Xf1Xv3r21ZMkSeb1eLVu2zO7RAACAzaLtHuBSGhoaVFpaqtmzZ4dsz83N1datW5utDwQCCgQC1vd+v1+SVFdX17qDXgNNgc/tHqFdMOG/a1Pwnrx2eF9eG7wnr51wf0+eny8YDH7l2rCNmBMnTqixsVFutztk
u9vtls/na7Z+wYIF+vWvf91su9frbbUZEV5cS+yeAGiO9yXCjSnvyVOnTsnlcl12TdhGzHkOhyPk+2Aw2GybJM2ZM0czZ860vm9qalJ1dbWSkpIuuh5fX11dnbxerw4fPqz4+Hi7xwF4TyIs8b68NoLBoE6dOqW0tLSvXBu2EZOcnKyoqKhmR12qqqqaHZ2RJKfTKafTGbKtS5curTlixImPj+d/mAgrvCcRjnhfXr2vOgJzXtie2BsTE6P+/furuLg4ZHtxcbEGDRpk01QAACBchO2RGEmaOXOmCgoKNGDAAOXk5Oh3v/udDh06pIcfftju0QAAgM3COmLGjRunkydPat68eaqsrFRWVpbWrFmj7t272z1aRHE6nfrVr37V7OM6wC68JxGOeF+2PUfw61zDBAAAEGbC9pwYAACAyyFiAACAkYgYAABgJCIGAAAYiYgBAABGImIAAGihc+fO6Y9//ONFf5cf2g6XWMOyevXqr702Pz+/FScBvrRr166vvfaWW25pxUmA5mJjY7Vv3z7uXWYjIgaWDh2+3oE5h8OhxsbGVp4G+PI96XA4dKm/ps7v4z0JOwwZMkSFhYW655577B4lYoX1HXvRtpqamuweAQhRXl5u9wjAJU2ZMkUzZ87U4cOH1b9/f3Xu3DlkP0cHWx9HYgAAuAIXO3rN0cG2xZEYWJ599llNnjxZ1113nZ599tnLrp0xY0YbTQWE2rt3rw4dOqSGhoaQ7ZynhbbGkUL7cSQGloyMDJWUlCgpKUkZGRmXXOdwOPTpp5+24WSA9Omnn+q+++7T7t27Q86TcTgcksT/6wUiEJdYw1JeXq6kpCTr6/Lycu3YsUMlJSXW9+Xl5QQMbPGTn/xEGRkZOnbsmGJjY7Vnzx5t3rxZAwYM0MaNG+0eDxFq5cqVuv3225WWlqaDBw9KkpYsWaK//e1vNk8WGYgYNFNbW6upU6cqOTlZHo9HKSkpSk5O1rRp0+T3++0eDxFq27Ztmjdvnrp27aoOHTqoQ4cO+s53vqMFCxbw8SZssWzZMs2cOVN33323amtrraOBXbp00ZIlS+wdLkJwTgxCVFdXKycnR5999pkeeOAB9e7dW8FgUPv27dOKFSv01ltvaevWrUpISLB7VESYxsZGXX/99ZKk5ORkHT16VL169VL37t318ccf2zwdItFvf/tbPf/887r33nv15JNPWtsHDBigWbNm2ThZ5CBiEGLevHmKiYnRJ598Irfb3Wxfbm6u5s2bp8WLF9s0ISJVVlaWdu3apR49eig7O1sLFy5UTEyMfve736lHjx52j4cIVF5erm9/+9vNtjudTp05c8aGiSIPHychxBtvvKGnnnqqWcBIksfj0cKFC7Vq1SobJkOk+8UvfmHdy+iJJ57QwYMHdccdd2jNmjVfeTUd0BoyMjJUVlbWbPv//u//qk+fPm0/UATiSAxCVFZW6uabb77k/qysLH5XCGyRl5dnfd2jRw/t3btX1dXVSkhIsK5QAtrSo48+qqlTp+qLL75QMBjUjh079Oc//1kLFizQCy+8YPd4EYGIQYjk5GRVVFSoW7duF93/j1cwAW3J7/ersbFRiYmJ1rbExERVV1crOjpa8fHxNk6HSPQv//IvOnfunB577DF9/vnnGj9+vL7xjW/omWee0f3332/3eBGB+8QgxMSJE3XgwAEVFxcrJiYmZF8gEFBeXp5uvPFGvfjiizZNiEg1cuRIjRkzRlOmTAnZ/l//9V9avXq11qxZY9NkgHTixAk1NTUpJSXF7lEiChGDEEeOHNGAAQPkdDo1depUffOb35T05V1Sn3vuOQUCAZWUlMjr9do8KSJNYmKi3n33XfXu3Ttk+0cffaTbb79dJ0+etGkyAHbh4ySE6Natm7Zt26YpU6Zozpw5IXdFHT58uJYuXUrAwBaBQEDnzp1rtv3s2bOqr6+3YSJEumPHjmnWrFl66623VFVV1ey3rXMX6dbHkRhcUk1Njfbv3y9J6tmz
Z8i5CEBbGzx4sPr27avf/va3IdunTp2qXbt2acuWLTZNhkg1cuRIHTp0SNOmTVNqamqzE8zvuecemyaLHEQMACO8++67GjZsmG677TYNHTpUkvTWW29p586dWrdune644w6bJ0SkiYuL05YtW/Stb33L7lEiFveJAWCE22+/Xdu2bZPX69Vf/vIX/f3vf1fPnj21a9cuAga28Hq9zT5CQtviSAwAAFdg3bp1evrpp7V8+XKlp6fbPU5EImIAhK26ujrr/i91dXWXXct9YtAWLry54pkzZ3Tu3DnFxsaqY8eOIWurq6vberyIw9VJAMJWQkKCKisrlZKSoi5dulz0zrzBYFAOh4MrQdAm+O3U4YWIARC2NmzYYF0V9/bbb9s8DSBNmDDB7hHwD/g4CQCAKxAVFWUdKfxHJ0+eVEpKCkcH2wBHYgAY44svvtCuXbtUVVVl/Ubr8/Lz822aCpHqUscAAoFAs1/bgtZBxAAwQlFRkX74wx/qxIkTzfZxTgza0rPPPivpy/fdCy+8oOuvv97a19jYqM2bN1u/sgWti4+TABihZ8+eysvL0y9/+Uu53W67x0EEy8jIkCQdPHhQ3bp1U1RUlLUvJiZG6enpmjdvnrKzs+0aMWIQMQCMEB8fr/fff1833nij3aMAkqQhQ4bo9ddf17lz59ShQwclJSXZPVLE4Y69AIzwgx/8QBs3brR7DECSVFtbq969eyszM1Mej0cpKSlKTk7WtGnTVFtba/d4EYMjMQCM8Pnnn+uf/umf1LVrV/Xt27fZjcVmzJhh02SINNXV1crJydFnn32mBx54QL1791YwGNS+ffv0yiuvyOv1auvWrUpISLB71HaPiAFghBdeeEEPP/ywOnXqpKSkpJAb3zkcDn366ac2TodIUlhYqLfeekvr169vdn6Wz+dTbm6uhg4dqsWLF9s0YeQgYgAYwePxaMaMGZo9e7Y6dOCTcNgnPT1dy5cvV15e3kX3FxUV6eGHH1ZFRUXbDhaB+JsAgBEaGho0btw4Aga2q6ys1M0333zJ/VlZWfL5fG04UeTibwMARpgwYYJee+01u8cAlJycfNmjLOXl5Vyp1Ea42R0AIzQ2NmrhwoVau3atbrnllmYn9i5atMimyRBpRowYoccff1zFxcXN7swbCAT0b//2bxoxYoRN00UWzokBYIQhQ4Zccp/D4dCGDRvacBpEsiNHjmjAgAFyOp2aOnWqdXfevXv36rnnnlMgEFBJSYm8Xq/Nk7Z/RAwAAC1UXl6uKVOmaN26ddbvUHI4HBo+fLiWLl2qnj172jxhZCBiABjlwIED+uSTT3TnnXeqU6dOCgaDIZdbA22ppqZG+/fvl/Tlr8ZITEy0eaLIQsQAMMLJkyc1duxYvf3223I4HNq/f7969OihiRMnqkuXLnr66aftHhFAG+PqJABG+OlPf6qOHTvq0KFDio2NtbaPGzdORUVFNk4GwC5cnQTACOvWrdPatWvVrVu3kO2ZmZk6ePCgTVMBsBNHYgAY4cyZMyFHYM47ceKEnE6nDRMBsBsRA8AId955p1566SXre4fDoaamJv3Hf/zHZS+/BtB+cWIvACPs3btXgwcPVv/+/bVhwwbl5+drz549qq6u1rvvvqsbb7zR7hEBtDEiBoAxfD6fli1bptLSUjU1Nalfv36aOnWqUlNT7R4NgA2IGAAAYCSuTgJgjNraWu3YsUNVVVVqamoK2ffDH/7QpqkA2IUjMQCM8Pe//10PPPCAzpw5o7i4uJC79DocDlVXV9s4HQA7EDEAjHDTTTfp7rvv1vz58y96qTWAyEPEADBC586dtXv3bvXo0cPuUQCECe4TA8AIeXl5KikpsXsMAGGEE3sBhK3Vq1dbX48aNUqPPvqo9u7dq759+6pjx44ha/Pz89t6PAA24+MkAGGrQ4evd7DY4XCosbGxlacBEG6IGAAAYCTOiQEQ1jZs2KA+ffqorq6u2T6/36+bb75ZW7ZssWEyAHYjYgCE
tSVLlmjSpEmKj49vts/lcumhhx7SokWLbJgMgN2IGABh7YMPPtCIESMuuT83N1elpaVtOBGAcEHEAAhrx44da3Yl0j+Kjo7W8ePH23AiAOGCiAEQ1r7xjW9o9+7dl9y/a9cufos1EKGIGABh7e6779Yvf/lLffHFF8321dfX61e/+pVGjx5tw2QA7MYl1gDC2rFjx9SvXz9FRUVp2rRp6tWrlxwOh/bt26f//M//VGNjo/7v//5Pbrfb7lEBtDEiBkDYO3jwoH784x9r7dq1Ov9XlsPhUF5enp577jmlp6fbOyAAWxAxAIxRU1OjAwcOKBgMKjMzUwkJCXaPBMBGRAwAADASJ/YCAAAjETEAAMBIRAwAADASEQPAFhs3bpTD4VBtba3dowAwFBEDoE0MHjxYhYWFdo9hSU9P15IlS+weA8BVIGIAGOPs2bN2jwAgjBAxAFrdgw8+qE2bNumZZ56Rw+GQw+FQRUWFJKm0tFQDBgxQbGysBg0apI8//th63ty5c/Wtb31Lv//979WjRw85nU4Fg0H5/X5NnjxZKSkpio+P13e/+1198MEH1vM++eQT3XPPPXK73br++ut12223af369db+wYMH6+DBg/rpT39qzQPAPEQMgFb3zDPPKCcnR5MmTVJlZaUqKyvl9XolSY8//riefvpplZSUKDo6Wj/60Y9CnnvgwAH95S9/0X//93+rrKxMkjRq1Cj5fD6tWbNGpaWl6tevn4YOHarq6mpJ0unTp3X33Xdr/fr1ev/995WXl6cxY8bo0KFDkqTXX39d3bp107x586x5AJgn2u4BALR/LpdLMTExio2NlcfjkSR99NFHkqR///d/11133SVJmj17tkaNGqUvvvhC1113nSSpoaFBK1euVNeuXSVJGzZs0O7du1VVVSWn0ylJeuqpp/TGG2/or3/9qyZPnqxbb71Vt956q/XnP/HEE1q1apVWr16tadOmKTExUVFRUYqLi7PmAWAeIgaArW655Rbr69TUVElSVVWVbrjhBklS9+7drYCRvvz46fTp00pKSgr5OfX19frkk08kSWfOnNGvf/1r/c///I+OHj2qc+fOqb6+3joSA6B9IGIA2Kpjx47W1+fPTWlqarK2de7cOWR9U1OTUlNTtXHjxmY/q0uXLpKkRx99VGvXrtVTTz2lnj17qlOnTvrBD36ghoaGa/8CANiGiAHQJmJiYtTY2HjVP6dfv37y+XyKjo6+5G+v3rJlix588EHdd999kr48R+b8icTXeh4A9uHEXgBtIj09Xe+9954qKip04sSJkKMtLTFs2DDl5OTo3nvv1dq1a1VRUaGtW7fqF7/4hUpKSiRJPXv21Ouvv66ysjJ98MEHGj9+fLM/Lz09XZs3b9Znn32mEydOXPXrA9D2iBgAbWLWrFmKiopSnz591LVr1ys+P8XhcGjNmjW688479aMf/Ug33XST7r//flVUVMjtdkuSFi9erISEBA0aNEhjxoxRXl6e+vXrF/Jz5s2bp4qKCt14440h59wAMIcjGAwG7R4CAACgpTgSAwAAjETEAAAAIxExAADASEQMAAAwEhEDAACMRMQAAAAjETEAAMBIRAwAADASEQMAAIxExAAAACMRMQAAwEhEDAAAMNL/A7/2DeJ7Cz28AAAAAElFTkSuQmCC",
231
+ "text/plain": [
232
+ "<Figure size 640x480 with 1 Axes>"
233
+ ]
234
+ },
235
+ "metadata": {},
236
+ "output_type": "display_data"
237
+ }
238
+ ],
239
+ "source": [
240
+ "df['threat'].value_counts().plot(kind='bar')"
241
+ ]
242
+ }
243
+ ],
244
+ "metadata": {
245
+ "kernelspec": {
246
+ "display_name": "ai-gpu",
247
+ "language": "python",
248
+ "name": "python3"
249
+ },
250
+ "language_info": {
251
+ "codemirror_mode": {
252
+ "name": "ipython",
253
+ "version": 3
254
+ },
255
+ "file_extension": ".py",
256
+ "mimetype": "text/x-python",
257
+ "name": "python",
258
+ "nbconvert_exporter": "python",
259
+ "pygments_lexer": "ipython3",
260
+ "version": "3.9.21"
261
+ }
262
+ },
263
+ "nbformat": 4,
264
+ "nbformat_minor": 5
265
+ }
incidents.csv ADDED
The diff for this file is too large to render. See raw diff
 
models/severity_model.pkl ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:be23df7113f24fe40e5c4dcc6bbd64244573f5fbc7d5f48ca82dc3d290f3a8ba
3
+ size 9374205
models/threat_model.pkl ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:3f75285ae4727cfe85b1dd64c0af91a668c8884dc225e892cdfa75ba4c9ba0f7
3
+ size 4653005
requirements-docker.txt ADDED
@@ -0,0 +1,21 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Core FastAPI dependencies
2
+ fastapi==0.115.2
3
+ uvicorn[standard]==0.32.1
4
+ motor==3.6.0
5
+ passlib[bcrypt]==1.7.4
6
+ python-jose==3.3.0
7
+ python-multipart==0.0.9
8
+ pydantic[email]==2.9.2
9
+ pydantic-settings==2.6.1
10
+ python-dotenv==1.0.1
11
+
12
+ # Testing dependencies (optional for production)
13
+ pytest==8.3.3
14
+ httpx==0.27.2
15
+
16
+ # ML inference dependencies (optimized for Docker)
17
+ numpy==1.26.4
18
+ joblib==1.4.2
19
+ scikit-learn==1.5.2
20
+
21
+ # Note: Using sklearn 1.5.2 for better Docker compatibility while maintaining model loading capability
requirements-railway-light.txt ADDED
@@ -0,0 +1,13 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Railway deployment without ML dependencies (rule-based classification only)
2
+ fastapi==0.115.2
3
+ uvicorn[standard]==0.32.1
4
+ motor==3.6.0
5
+ passlib[bcrypt]==1.7.4
6
+ python-jose==3.3.0
7
+ python-multipart==0.0.9
8
+ pydantic[email]==2.9.2
9
+ pydantic-settings==2.6.1
10
+ python-dotenv==1.0.1
11
+
12
+ # No ML dependencies - will use rule-based classification
13
+ # This should make Railway deployment much lighter and faster
requirements-railway.txt ADDED
@@ -0,0 +1,22 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Railway-optimized requirements - lighter build
2
+ # Core FastAPI dependencies
3
+ fastapi==0.115.2
4
+ uvicorn[standard]==0.32.1
5
+ motor==3.6.0
6
+ passlib[bcrypt]==1.7.4
7
+ python-jose==3.3.0
8
+ python-multipart==0.0.9
9
+ pydantic[email]==2.9.2
10
+ pydantic-settings==2.6.1
11
+ python-dotenv==1.0.1
12
+
13
+ # Testing dependencies (optional for production)
14
+ pytest==8.3.3
15
+ httpx==0.27.2
16
+
17
+ # Minimal ML dependencies for Railway (lighter build)
18
+ numpy==1.24.3
19
+ joblib==1.3.2
20
+ scikit-learn==1.3.2
21
+
22
+ # Note: Using lighter/older versions to reduce build time and memory usage on Railway
requirements-training.txt ADDED
@@ -0,0 +1,15 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Training Requirements (for local model development only)
2
+ # Install with: pip install -r requirements-training.txt
3
+
4
+ # All production dependencies
5
+ -r requirements.txt
6
+
7
+ # Heavy ML training libraries (only needed for model training/EDA)
8
+ pandas==2.2.2
9
+ scikit-learn==1.5.1
10
+ matplotlib==3.8.0
11
+ seaborn==0.13.0
12
+ jupyter==1.0.0
13
+
14
+ # Additional analysis tools
15
+ plotly==5.17.0
requirements.txt ADDED
@@ -0,0 +1,22 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Core FastAPI dependencies
2
+ fastapi==0.115.2
3
+ uvicorn[standard]==0.32.1
4
+ motor==3.6.0
5
+ passlib[bcrypt]==1.7.4
6
+ python-jose==3.3.0
7
+ python-multipart==0.0.9
8
+ pydantic[email]==2.9.2
9
+ pydantic-settings==2.6.1
10
+ python-dotenv==1.0.1
11
+
12
+ # Testing dependencies (optional for production)
13
+ pytest==8.3.3
14
+ httpx==0.27.2
15
+
16
+ # ML inference dependencies (required for loading pre-trained models)
17
+ numpy==1.26.4
18
+ joblib==1.4.2
19
+ scikit-learn==1.7.0
20
+
21
+ # Note: scikit-learn version 1.7.0 matches the version used for training
22
+ # This eliminates version warnings during model loading
start-hf.sh ADDED
@@ -0,0 +1,16 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #!/bin/bash
2
+
3
+ # Hugging Face Spaces startup script
4
+ echo "🌊 Starting Marine Guard API on Hugging Face Spaces..."
5
+
6
+ # Set default environment variables if not provided
7
+ export MONGODB_URI=${MONGODB_URI:-""}
8
+ export JWT_SECRET_KEY=${JWT_SECRET_KEY:-"huggingface-default-secret-change-in-production"}
9
+ export ALLOWED_ORIGINS=${ALLOWED_ORIGINS:-"*"}
10
+
11
+ # Log environment info
12
+ echo "πŸ“‘ Port: ${PORT:-7860}"
13
+ echo "πŸ”— Allowed Origins: $ALLOWED_ORIGINS"
14
+
15
+ # Start the FastAPI application
16
+ exec uvicorn app.main:app --host 0.0.0.0 --port ${PORT:-7860} --workers 1
start.sh ADDED
@@ -0,0 +1,13 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #!/bin/bash
2
+
3
+ # Render startup script for Marine Guard API
4
+
5
+ echo "🌊 Starting Marine Guard API..."
6
+
7
+ # Set default port if not provided
8
+ export PORT=${PORT:-8000}
9
+
10
+ echo "πŸ“‘ Starting uvicorn on port $PORT..."
11
+
12
+ # Start the FastAPI application
13
+ exec uvicorn app.main:app --host 0.0.0.0 --port $PORT --workers 1
tests/__pycache__/conftest.cpython-311-pytest-8.3.3.pyc ADDED
Binary file (5.35 kB). View file
 
tests/__pycache__/test_auth.cpython-311-pytest-8.3.3.pyc ADDED
Binary file (9.43 kB). View file