Upload 53 files
Browse files. This view is limited to 50 files because it contains too many changes.
See raw diff
- .gitignore +127 -0
- DEPLOYMENT_GUIDE.md +529 -0
- Dockerfile +52 -0
- __init__.py +0 -0
- __pycache__/__init__.cpython-311.pyc +0 -0
- __pycache__/app.cpython-311.pyc +0 -0
- __pycache__/main.cpython-311.pyc +0 -0
- __pycache__/main_isp.cpython-311.pyc +0 -0
- app.py +190 -0
- config.json +98 -0
- core/__init__.py +2 -0
- core/__pycache__/__init__.cpython-311.pyc +0 -0
- core/__pycache__/dhcp_server.cpython-311.pyc +0 -0
- core/__pycache__/firewall.cpython-311.pyc +0 -0
- core/__pycache__/ip_parser.cpython-311.pyc +0 -0
- core/__pycache__/logger.cpython-311.pyc +0 -0
- core/__pycache__/nat_engine.cpython-311.pyc +0 -0
- core/__pycache__/openvpn_manager.cpython-311.pyc +0 -0
- core/__pycache__/packet_bridge.cpython-311.pyc +0 -0
- core/__pycache__/session_tracker.cpython-311.pyc +0 -0
- core/__pycache__/socket_translator.cpython-311.pyc +0 -0
- core/__pycache__/tcp_engine.cpython-311.pyc +0 -0
- core/__pycache__/virtual_router.cpython-311.pyc +0 -0
- core/dhcp_server.py +391 -0
- core/firewall.py +523 -0
- core/ip_parser.py +546 -0
- core/logger.py +555 -0
- core/nat_engine.py +516 -0
- core/openvpn_manager.py +608 -0
- core/packet_bridge.py +664 -0
- core/session_tracker.py +602 -0
- core/socket_translator.py +653 -0
- core/tcp_engine.py +716 -0
- core/virtual_router.py +565 -0
- database/app.db +0 -0
- main.py +62 -0
- main_isp.py +273 -0
- models/__pycache__/user.cpython-311.pyc +0 -0
- models/user.py +18 -0
- openvpn/ca.crt +20 -0
- openvpn/dh.pem +8 -0
- openvpn/server.conf +21 -0
- openvpn/server.crt +86 -0
- openvpn/server.key +28 -0
- requirements.txt +34 -0
- routes/__pycache__/isp_api.cpython-311.pyc +0 -0
- routes/__pycache__/user.cpython-311.pyc +0 -0
- routes/isp_api.py +1107 -0
- routes/user.py +39 -0
- static/app.js +1095 -0
.gitignore
ADDED
|
@@ -0,0 +1,127 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Python
|
| 2 |
+
__pycache__/
|
| 3 |
+
*.py[cod]
|
| 4 |
+
*$py.class
|
| 5 |
+
*.so
|
| 6 |
+
.Python
|
| 7 |
+
build/
|
| 8 |
+
develop-eggs/
|
| 9 |
+
dist/
|
| 10 |
+
downloads/
|
| 11 |
+
eggs/
|
| 12 |
+
.eggs/
|
| 13 |
+
lib/
|
| 14 |
+
lib64/
|
| 15 |
+
parts/
|
| 16 |
+
sdist/
|
| 17 |
+
var/
|
| 18 |
+
wheels/
|
| 19 |
+
pip-wheel-metadata/
|
| 20 |
+
share/python-wheels/
|
| 21 |
+
*.egg-info/
|
| 22 |
+
.installed.cfg
|
| 23 |
+
*.egg
|
| 24 |
+
MANIFEST
|
| 25 |
+
|
| 26 |
+
# Virtual environments
|
| 27 |
+
.env
|
| 28 |
+
.venv
|
| 29 |
+
env/
|
| 30 |
+
venv/
|
| 31 |
+
ENV/
|
| 32 |
+
env.bak/
|
| 33 |
+
venv.bak/
|
| 34 |
+
|
| 35 |
+
# IDEs
|
| 36 |
+
.vscode/
|
| 37 |
+
.idea/
|
| 38 |
+
*.swp
|
| 39 |
+
*.swo
|
| 40 |
+
*~
|
| 41 |
+
|
| 42 |
+
# OS
|
| 43 |
+
.DS_Store
|
| 44 |
+
.DS_Store?
|
| 45 |
+
._*
|
| 46 |
+
.Spotlight-V100
|
| 47 |
+
.Trashes
|
| 48 |
+
ehthumbs.db
|
| 49 |
+
Thumbs.db
|
| 50 |
+
|
| 51 |
+
# Logs
|
| 52 |
+
*.log
|
| 53 |
+
logs/
|
| 54 |
+
/var/log/
|
| 55 |
+
|
| 56 |
+
# Database
|
| 57 |
+
*.db
|
| 58 |
+
*.sqlite
|
| 59 |
+
*.sqlite3
|
| 60 |
+
|
| 61 |
+
# VPN configurations (for security)
|
| 62 |
+
*.ovpn
|
| 63 |
+
*.crt
|
| 64 |
+
*.key
|
| 65 |
+
*.pem
|
| 66 |
+
/tmp/vpn_client_configs/
|
| 67 |
+
|
| 68 |
+
# OpenVPN
|
| 69 |
+
/etc/openvpn/
|
| 70 |
+
/var/log/openvpn/
|
| 71 |
+
|
| 72 |
+
# Temporary files
|
| 73 |
+
/tmp/
|
| 74 |
+
*.tmp
|
| 75 |
+
*.temp
|
| 76 |
+
|
| 77 |
+
# Flask
|
| 78 |
+
instance/
|
| 79 |
+
.webassets-cache
|
| 80 |
+
|
| 81 |
+
# Environment variables
|
| 82 |
+
.env.local
|
| 83 |
+
.env.development.local
|
| 84 |
+
.env.test.local
|
| 85 |
+
.env.production.local
|
| 86 |
+
|
| 87 |
+
# Coverage reports
|
| 88 |
+
htmlcov/
|
| 89 |
+
.tox/
|
| 90 |
+
.coverage
|
| 91 |
+
.coverage.*
|
| 92 |
+
.cache
|
| 93 |
+
nosetests.xml
|
| 94 |
+
coverage.xml
|
| 95 |
+
*.cover
|
| 96 |
+
*.py,cover
|
| 97 |
+
.hypothesis/
|
| 98 |
+
.pytest_cache/
|
| 99 |
+
|
| 100 |
+
# Jupyter Notebook
|
| 101 |
+
.ipynb_checkpoints
|
| 102 |
+
|
| 103 |
+
# pyenv
|
| 104 |
+
.python-version
|
| 105 |
+
|
| 106 |
+
# Celery
|
| 107 |
+
celerybeat-schedule
|
| 108 |
+
celerybeat.pid
|
| 109 |
+
|
| 110 |
+
# SageMath parsed files
|
| 111 |
+
*.sage.py
|
| 112 |
+
|
| 113 |
+
# Spyder project settings
|
| 114 |
+
.spyderproject
|
| 115 |
+
.spyproject
|
| 116 |
+
|
| 117 |
+
# Rope project settings
|
| 118 |
+
.ropeproject
|
| 119 |
+
|
| 120 |
+
# mkdocs documentation
|
| 121 |
+
/site
|
| 122 |
+
|
| 123 |
+
# mypy
|
| 124 |
+
.mypy_cache/
|
| 125 |
+
.dmypy.json
|
| 126 |
+
dmypy.json
|
| 127 |
+
|
DEPLOYMENT_GUIDE.md
ADDED
|
@@ -0,0 +1,529 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Virtual ISP Stack with OpenVPN Integration - Complete Deployment Guide
|
| 2 |
+
|
| 3 |
+
**Author**: Manus AI
|
| 4 |
+
**Version**: 1.0.0
|
| 5 |
+
**Last Updated**: August 2025
|
| 6 |
+
|
| 7 |
+
## Table of Contents
|
| 8 |
+
|
| 9 |
+
1. [Introduction](#introduction)
|
| 10 |
+
2. [Architecture Overview](#architecture-overview)
|
| 11 |
+
3. [Prerequisites](#prerequisites)
|
| 12 |
+
4. [Local Development Setup](#local-development-setup)
|
| 13 |
+
5. [HuggingFace Spaces Deployment](#huggingface-spaces-deployment)
|
| 14 |
+
6. [Docker Deployment](#docker-deployment)
|
| 15 |
+
7. [API Reference](#api-reference)
|
| 16 |
+
8. [Client Connection Workflow](#client-connection-workflow)
|
| 17 |
+
9. [Security Considerations](#security-considerations)
|
| 18 |
+
10. [Troubleshooting](#troubleshooting)
|
| 19 |
+
11. [Performance Optimization](#performance-optimization)
|
| 20 |
+
12. [Monitoring and Maintenance](#monitoring-and-maintenance)
|
| 21 |
+
|
| 22 |
+
## Introduction
|
| 23 |
+
|
| 24 |
+
The Virtual ISP Stack with OpenVPN Integration represents a comprehensive solution for creating a complete Internet Service Provider (ISP) infrastructure with integrated Virtual Private Network (VPN) capabilities. This system is designed to provide educational insights into network infrastructure management while offering practical VPN services that can be deployed in various environments.
|
| 25 |
+
|
| 26 |
+
The application combines traditional ISP stack components including Dynamic Host Configuration Protocol (DHCP) services, Network Address Translation (NAT) engines, firewall management, and routing capabilities with a fully integrated OpenVPN server. This integration allows for seamless VPN client management, automated configuration generation, and comprehensive network monitoring through a unified RESTful API interface.
|
| 27 |
+
|
| 28 |
+
The system has been specifically architected to support multiple deployment scenarios, from local development environments to cloud-based platforms like HuggingFace Spaces, Docker containers, and traditional server deployments. The modular design ensures that components can be independently managed while maintaining tight integration for optimal performance and reliability.
|
| 29 |
+
|
| 30 |
+
## Architecture Overview
|
| 31 |
+
|
| 32 |
+
### Core Components
|
| 33 |
+
|
| 34 |
+
The Virtual ISP Stack is built upon several interconnected components that work together to provide comprehensive network services. The architecture follows a microservices-inspired design pattern where each component maintains its own responsibilities while communicating through well-defined interfaces.
|
| 35 |
+
|
| 36 |
+
#### ISP Stack Components
|
| 37 |
+
|
| 38 |
+
The foundational ISP stack consists of multiple specialized engines that handle different aspects of network management. The DHCP Server component manages dynamic IP address allocation within the configured network ranges, maintaining lease tables and handling client requests for network configuration. This component supports both dynamic allocation and static lease assignment, allowing for flexible network management strategies.
|
| 39 |
+
|
| 40 |
+
The NAT Engine provides Network Address Translation services, enabling multiple clients to share a single public IP address while maintaining session tracking and port management. The engine maintains comprehensive session tables that track active connections, monitor bandwidth usage, and provide detailed statistics for network analysis.
|
| 41 |
+
|
| 42 |
+
The Firewall Engine implements a rule-based packet filtering system that can be dynamically configured through the API. It supports various rule types including source and destination IP filtering, port-based rules, protocol-specific filtering, and time-based access controls. The firewall maintains detailed logs of all filtered traffic and provides real-time statistics on rule effectiveness.
|
| 43 |
+
|
| 44 |
+
The Virtual Router component manages routing tables and handles packet forwarding between different network segments. It maintains ARP tables, manages network interfaces, and provides comprehensive routing statistics. The router integrates closely with other components to ensure optimal packet flow and network performance.
|
| 45 |
+
|
| 46 |
+
#### OpenVPN Integration
|
| 47 |
+
|
| 48 |
+
The OpenVPN integration layer provides comprehensive VPN server management capabilities through a dedicated OpenVPN Manager component. This component handles server lifecycle management, client connection monitoring, and configuration generation. The integration is designed to work seamlessly with the existing ISP stack components, allowing VPN clients to participate fully in the managed network environment.
|
| 49 |
+
|
| 50 |
+
The OpenVPN Manager maintains real-time monitoring of connected clients, tracking connection statistics, bandwidth usage, and session duration. It provides automated client configuration generation with embedded certificates, eliminating the need for separate certificate distribution mechanisms.
|
| 51 |
+
|
| 52 |
+
#### API Layer
|
| 53 |
+
|
| 54 |
+
The RESTful API layer provides a unified interface for managing all system components. Built using Flask with comprehensive CORS support, the API enables both programmatic access and web-based management interfaces. The API follows RESTful design principles with consistent response formats, proper HTTP status codes, and comprehensive error handling.
|
| 55 |
+
|
| 56 |
+
### Data Flow Architecture
|
| 57 |
+
|
| 58 |
+
The system implements a sophisticated data flow architecture that ensures efficient packet processing and minimal latency. Incoming client requests are processed through multiple stages, beginning with firewall evaluation, proceeding through NAT translation if required, and finally reaching the appropriate destination through the routing engine.
|
| 59 |
+
|
| 60 |
+
VPN traffic follows a specialized path that integrates with the standard ISP stack processing. VPN clients connect through the OpenVPN server, receive IP addresses from the integrated DHCP system, and have their traffic processed through the same NAT and firewall engines as traditional clients. This unified approach ensures consistent policy application and comprehensive monitoring across all network traffic.
|
| 61 |
+
|
| 62 |
+
## Prerequisites
|
| 63 |
+
|
| 64 |
+
### System Requirements
|
| 65 |
+
|
| 66 |
+
The Virtual ISP Stack requires a Linux-based operating system with kernel version 3.10 or higher. The system should have at least 2GB of available RAM and 10GB of free disk space. Network connectivity is essential, and the system should have at least one network interface configured with internet access.
|
| 67 |
+
|
| 68 |
+
For production deployments, the system should have a static IP address or dynamic DNS configuration to ensure consistent client connectivity. The deployment environment should support the creation of virtual network interfaces and have appropriate permissions for network configuration management.
|
| 69 |
+
|
| 70 |
+
### Software Dependencies
|
| 71 |
+
|
| 72 |
+
The application requires Python 3.11 or higher with pip package manager. Flask 3.1.1 serves as the primary web framework, with Flask-CORS providing cross-origin request support and Flask-SQLAlchemy handling database operations. The system uses SQLite for data persistence, though other database backends can be configured if needed.
|
| 73 |
+
|
| 74 |
+
OpenVPN server software is required for VPN functionality, though the application can operate in ISP-only mode if OpenVPN is not available. Additional Python packages include aiohttp for asynchronous operations, websockets for real-time communication, and various utility libraries for network operations and data processing.
|
| 75 |
+
|
| 76 |
+
### Network Configuration
|
| 77 |
+
|
| 78 |
+
The system requires specific network configuration to operate effectively. The default configuration uses the 10.0.0.0/24 network range for ISP services and 10.8.0.0/24 for VPN clients. These ranges can be customized through the application configuration, but care should be taken to avoid conflicts with existing network infrastructure.
|
| 79 |
+
|
| 80 |
+
Firewall rules on the host system should allow traffic on the configured ports. The default configuration uses port 7860 for the web API, port 1194 for OpenVPN services, and various high-numbered ports for internal communication between components.
|
| 81 |
+
|
| 82 |
+
## Local Development Setup
|
| 83 |
+
|
| 84 |
+
### Installation Process
|
| 85 |
+
|
| 86 |
+
Setting up the Virtual ISP Stack for local development begins with cloning or downloading the application source code to a suitable directory. The recommended approach is to create a dedicated directory for the application and ensure that the Python environment has appropriate permissions for network operations.
|
| 87 |
+
|
| 88 |
+
Begin by creating a virtual environment to isolate the application dependencies from the system Python installation. This approach prevents conflicts with other Python applications and ensures consistent dependency versions across different deployment environments.
|
| 89 |
+
|
| 90 |
+
```bash
|
| 91 |
+
python3 -m venv venv
|
| 92 |
+
source venv/bin/activate
|
| 93 |
+
pip install -r requirements.txt
|
| 94 |
+
```
|
| 95 |
+
|
| 96 |
+
The requirements.txt file contains all necessary Python dependencies with specific version numbers to ensure compatibility. The installation process typically takes several minutes depending on network connectivity and system performance.
|
| 97 |
+
|
| 98 |
+
### Configuration Setup
|
| 99 |
+
|
| 100 |
+
The application uses a configuration-based approach that allows customization of network ranges, service ports, and operational parameters. The default configuration is suitable for most development environments, but production deployments should review and customize these settings based on specific requirements.
|
| 101 |
+
|
| 102 |
+
Database initialization occurs automatically during the first application startup. The system creates necessary tables and initializes default configuration values. For development environments, the SQLite database provides adequate performance and simplifies deployment requirements.
|
| 103 |
+
|
| 104 |
+
Network configuration should be reviewed to ensure that the selected IP ranges do not conflict with existing network infrastructure. The application provides configuration validation during startup and will report any detected conflicts or configuration issues.
|
| 105 |
+
|
| 106 |
+
### Starting the Development Server
|
| 107 |
+
|
| 108 |
+
The development server can be started using the provided app.py entry point. The server will initialize all components, establish database connections, and begin listening for client connections on the configured port.
|
| 109 |
+
|
| 110 |
+
```bash
|
| 111 |
+
python app.py
|
| 112 |
+
```
|
| 113 |
+
|
| 114 |
+
The startup process includes comprehensive logging that provides visibility into component initialization and any potential issues. The server typically requires 10-15 seconds to fully initialize all components and begin accepting connections.
|
| 115 |
+
|
| 116 |
+
During development, the server provides detailed logging output that includes API requests, component status changes, and error conditions. This information is valuable for debugging and understanding system behavior during development and testing.
|
| 117 |
+
|
| 118 |
+
### Development Testing
|
| 119 |
+
|
| 120 |
+
The development environment includes comprehensive testing capabilities that allow verification of all system components. The API endpoints can be tested using standard HTTP clients like curl or specialized API testing tools.
|
| 121 |
+
|
| 122 |
+
Basic functionality testing should include health check verification, component status queries, and configuration generation testing. The system provides detailed error messages and logging to assist with troubleshooting during development.
|
| 123 |
+
|
| 124 |
+
```bash
|
| 125 |
+
curl http://localhost:7860/health
|
| 126 |
+
curl http://localhost:7860/api/openvpn/status
|
| 127 |
+
curl -o test-client.ovpn "http://localhost:7860/api/openvpn/config/test-client?server_ip=127.0.0.1"
|
| 128 |
+
```
|
| 129 |
+
|
| 130 |
+
## HuggingFace Spaces Deployment
|
| 131 |
+
|
| 132 |
+
### Platform Overview
|
| 133 |
+
|
| 134 |
+
HuggingFace Spaces provides a cloud-based platform for deploying machine learning applications and web services. The platform supports various frameworks including Gradio, Streamlit, and custom applications using Docker or Python environments. The Virtual ISP Stack has been specifically optimized for deployment on HuggingFace Spaces with minimal configuration requirements.
|
| 135 |
+
|
| 136 |
+
The platform provides automatic scaling, SSL certificate management, and integrated monitoring capabilities. Applications deployed on HuggingFace Spaces receive a unique URL and can be configured for public or private access. The platform handles infrastructure management, allowing developers to focus on application functionality rather than server administration.
|
| 137 |
+
|
| 138 |
+
### Deployment Preparation
|
| 139 |
+
|
| 140 |
+
Preparing the Virtual ISP Stack for HuggingFace Spaces deployment involves organizing the application files in the correct structure and ensuring that all dependencies are properly specified. The application includes a pre-configured app.py file that serves as the entry point for HuggingFace Spaces deployment.
|
| 141 |
+
|
| 142 |
+
The requirements.txt file has been optimized for the HuggingFace Spaces environment, including only necessary dependencies and specifying compatible versions. The application structure follows HuggingFace Spaces conventions with the main application file at the root level and supporting modules organized in subdirectories.
|
| 143 |
+
|
| 144 |
+
Configuration management for HuggingFace Spaces deployment uses environment variables and default values that work within the platform constraints. The application automatically detects the HuggingFace Spaces environment and adjusts configuration parameters accordingly.
|
| 145 |
+
|
| 146 |
+
### Deployment Process
|
| 147 |
+
|
| 148 |
+
Deploying to HuggingFace Spaces involves creating a new Space on the platform and uploading the application files. The platform supports both web-based file upload and Git repository integration for more advanced deployment workflows.
|
| 149 |
+
|
| 150 |
+
The deployment process begins by creating a new Space with the "Gradio" or "Custom" template, depending on the specific requirements. The application files should be uploaded to the Space repository, maintaining the directory structure and file permissions.
|
| 151 |
+
|
| 152 |
+
Once the files are uploaded, HuggingFace Spaces automatically detects the application type and begins the deployment process. The platform installs dependencies, initializes the application, and provides a public URL for access. The deployment typically takes 5-10 minutes depending on the complexity of dependencies and platform load.
|
| 153 |
+
|
| 154 |
+
### Post-Deployment Configuration
|
| 155 |
+
|
| 156 |
+
After successful deployment, the application requires minimal configuration to begin operation. The HuggingFace Spaces environment provides automatic port assignment and SSL certificate management, eliminating the need for manual network configuration.
|
| 157 |
+
|
| 158 |
+
The application includes health check endpoints that can be used to verify successful deployment and monitor ongoing operation. These endpoints provide detailed status information about all system components and can be used for automated monitoring and alerting.
|
| 159 |
+
|
| 160 |
+
Access to the deployed application is provided through the HuggingFace Spaces URL, which includes automatic SSL encryption and global content delivery network acceleration. The platform provides usage analytics and performance monitoring through the Spaces dashboard.
|
| 161 |
+
|
| 162 |
+
### Limitations and Considerations
|
| 163 |
+
|
| 164 |
+
HuggingFace Spaces deployment includes certain limitations that should be considered when planning production use. The platform provides limited computational resources and may not be suitable for high-traffic applications or resource-intensive operations.
|
| 165 |
+
|
| 166 |
+
Network configuration options are limited in the HuggingFace Spaces environment, which may affect certain advanced networking features. The OpenVPN server functionality may be restricted due to platform security policies and network isolation requirements.
|
| 167 |
+
|
| 168 |
+
Data persistence is limited in the HuggingFace Spaces environment, with temporary storage being cleared during application restarts. Applications requiring persistent data storage should implement external database connections or cloud storage integration.
|
| 169 |
+
|
| 170 |
+
## Docker Deployment
|
| 171 |
+
|
| 172 |
+
### Container Architecture
|
| 173 |
+
|
| 174 |
+
The Docker deployment option provides a fully containerized environment that includes all necessary dependencies and system components. The provided Dockerfile creates a complete Linux environment with OpenVPN server capabilities, Python runtime, and all required system utilities.
|
| 175 |
+
|
| 176 |
+
The container architecture follows best practices for security and performance, using a minimal base image and installing only necessary components. The container includes comprehensive health checking capabilities and supports various orchestration platforms including Docker Compose, Kubernetes, and Docker Swarm.
|
| 177 |
+
|
| 178 |
+
Network configuration within the container environment requires careful consideration of port mapping and network isolation requirements. The container exposes necessary ports for API access and VPN connectivity while maintaining security through proper firewall configuration.
|
| 179 |
+
|
| 180 |
+
### Building the Container
|
| 181 |
+
|
| 182 |
+
Building the Docker container involves using the provided Dockerfile to create a complete application image. The build process includes system dependency installation, Python environment setup, and application configuration.
|
| 183 |
+
|
| 184 |
+
```bash
|
| 185 |
+
docker build -t virtual-isp-stack .
|
| 186 |
+
```
|
| 187 |
+
|
| 188 |
+
The build process typically takes 10-15 minutes depending on network connectivity and system performance. The resulting image includes all necessary components and can be deployed on any Docker-compatible platform.
|
| 189 |
+
|
| 190 |
+
Container optimization includes multi-stage builds to minimize image size and security scanning to identify potential vulnerabilities. The final image is designed to be production-ready with appropriate security configurations and performance optimizations.
|
| 191 |
+
|
| 192 |
+
### Container Deployment
|
| 193 |
+
|
| 194 |
+
Deploying the containerized application involves running the Docker image with appropriate port mappings and volume mounts. The container requires privileged access for network operations and OpenVPN functionality.
|
| 195 |
+
|
| 196 |
+
```bash
|
| 197 |
+
docker run -d --name virtual-isp-stack \
|
| 198 |
+
--privileged \
|
| 199 |
+
-p 7860:7860 \
|
| 200 |
+
-p 1194:1194/udp \
|
| 201 |
+
-v /path/to/data:/app/data \
|
| 202 |
+
virtual-isp-stack
|
| 203 |
+
```
|
| 204 |
+
|
| 205 |
+
The deployment configuration includes port mappings for API access and VPN connectivity, volume mounts for persistent data storage, and environment variable configuration for customization. The container includes comprehensive logging and monitoring capabilities for production deployment.
|
| 206 |
+
|
| 207 |
+
### Orchestration Support
|
| 208 |
+
|
| 209 |
+
The containerized application supports various orchestration platforms including Docker Compose for simple multi-container deployments and Kubernetes for enterprise-scale deployments. The application includes configuration templates for common orchestration scenarios.
|
| 210 |
+
|
| 211 |
+
Docker Compose deployment provides a simple way to deploy the application with supporting services like databases or monitoring tools. The provided docker-compose.yml file includes all necessary service definitions and network configurations.
|
| 212 |
+
|
| 213 |
+
Kubernetes deployment involves creating appropriate manifests for pods, services, and ingress controllers. The application supports horizontal scaling through load balancing, though certain components require careful consideration for distributed deployment.
|
| 214 |
+
|
| 215 |
+
## API Reference
|
| 216 |
+
|
| 217 |
+
### Authentication and Authorization
|
| 218 |
+
|
| 219 |
+
The Virtual ISP Stack API currently operates without authentication requirements for development and testing purposes. Production deployments should implement appropriate authentication mechanisms based on specific security requirements and deployment environments.
|
| 220 |
+
|
| 221 |
+
The API supports Cross-Origin Resource Sharing (CORS) to enable web-based management interfaces and third-party integrations. CORS configuration can be customized through application settings to restrict access to specific domains or origins.
|
| 222 |
+
|
| 223 |
+
Rate limiting and request throttling are not implemented in the current version but should be considered for production deployments to prevent abuse and ensure fair resource allocation among users.
|
| 224 |
+
|
| 225 |
+
### OpenVPN Management Endpoints
|
| 226 |
+
|
| 227 |
+
The OpenVPN management API provides comprehensive control over VPN server operations and client management. These endpoints enable programmatic management of the VPN service without requiring direct server access.
|
| 228 |
+
|
| 229 |
+
#### Server Control Operations
|
| 230 |
+
|
| 231 |
+
The server control endpoints provide basic lifecycle management for the OpenVPN server. The status endpoint returns comprehensive information about server state, connected clients, and performance metrics.
|
| 232 |
+
|
| 233 |
+
**GET /api/openvpn/status**
|
| 234 |
+
|
| 235 |
+
Returns current OpenVPN server status including operational state, connected client count, traffic statistics, and uptime information. The response includes detailed metrics that can be used for monitoring and capacity planning.
|
| 236 |
+
|
| 237 |
+
```json
|
| 238 |
+
{
|
| 239 |
+
"status": "success",
|
| 240 |
+
"openvpn_status": {
|
| 241 |
+
"is_running": false,
|
| 242 |
+
"connected_clients": 0,
|
| 243 |
+
"total_bytes_received": 0,
|
| 244 |
+
"total_bytes_sent": 0,
|
| 245 |
+
"uptime": 0,
|
| 246 |
+
"server_ip": "10.8.0.1",
|
| 247 |
+
"server_port": 1194
|
| 248 |
+
}
|
| 249 |
+
}
|
| 250 |
+
```
|
| 251 |
+
|
| 252 |
+
**POST /api/openvpn/start**
|
| 253 |
+
|
| 254 |
+
Initiates the OpenVPN server startup process. The endpoint performs comprehensive validation of server configuration and system requirements before attempting to start the service. The response indicates success or failure with detailed error information if startup fails.
|
| 255 |
+
|
| 256 |
+
**POST /api/openvpn/stop**
|
| 257 |
+
|
| 258 |
+
Gracefully shuts down the OpenVPN server, disconnecting all clients and cleaning up system resources. The endpoint ensures that all client connections are properly terminated and that system state is restored to a clean condition.
|
| 259 |
+
|
| 260 |
+
#### Client Management Operations
|
| 261 |
+
|
| 262 |
+
Client management endpoints provide detailed control over individual VPN client connections and comprehensive monitoring of client activity.
|
| 263 |
+
|
| 264 |
+
**GET /api/openvpn/clients**
|
| 265 |
+
|
| 266 |
+
Returns a list of currently connected VPN clients with detailed information about each connection including client identifier, IP address assignment, connection duration, and traffic statistics.
|
| 267 |
+
|
| 268 |
+
**POST /api/openvpn/clients/{client_id}/disconnect**
|
| 269 |
+
|
| 270 |
+
Forcibly disconnects a specific VPN client by client identifier. The operation is performed gracefully when possible but can force disconnection if necessary. The endpoint provides confirmation of disconnection success or failure.
|
| 271 |
+
|
| 272 |
+
#### Configuration Management Operations
|
| 273 |
+
|
| 274 |
+
Configuration management endpoints handle the generation, storage, and retrieval of client VPN configurations. These endpoints support both on-demand generation and persistent storage of client configurations.
|
| 275 |
+
|
| 276 |
+
**GET /api/openvpn/config/{client_name}**
|
| 277 |
+
|
| 278 |
+
Generates and returns a complete OpenVPN client configuration file for the specified client name. The configuration includes embedded certificates and all necessary connection parameters. The server IP address must be provided as a query parameter.
|
| 279 |
+
|
| 280 |
+
**GET /api/openvpn/configs**
|
| 281 |
+
|
| 282 |
+
Returns a list of all stored client configurations. This endpoint provides an overview of all clients that have been configured for VPN access.
|
| 283 |
+
|
| 284 |
+
**POST /api/openvpn/configs/{client_name}/generate**
|
| 285 |
+
|
| 286 |
+
Generates a new client configuration and stores it for future retrieval. This endpoint combines configuration generation with persistent storage, allowing for consistent client configuration management.
|
| 287 |
+
|
| 288 |
+
**DELETE /api/openvpn/configs/{client_name}**
|
| 289 |
+
|
| 290 |
+
Removes a stored client configuration from the system. This operation is permanent and cannot be undone, though new configurations can be generated for the same client name.
|
| 291 |
+
|
| 292 |
+
### ISP Stack Management Endpoints
|
| 293 |
+
|
| 294 |
+
The ISP stack management endpoints provide control and monitoring capabilities for the core networking components including DHCP, NAT, firewall, and routing services.
|
| 295 |
+
|
| 296 |
+
#### DHCP Management
|
| 297 |
+
|
| 298 |
+
DHCP management endpoints provide visibility into IP address allocation and lease management. These endpoints enable monitoring of network utilization and troubleshooting of connectivity issues.
|
| 299 |
+
|
| 300 |
+
**GET /api/dhcp/leases**
|
| 301 |
+
|
| 302 |
+
Returns the current DHCP lease table showing all active IP address assignments. The response includes client MAC addresses, assigned IP addresses, lease expiration times, and client hostnames when available.
|
| 303 |
+
|
| 304 |
+
**DELETE /api/dhcp/leases/{mac_address}**
|
| 305 |
+
|
| 306 |
+
Releases a specific DHCP lease by MAC address. This operation forces the immediate release of an IP address assignment and makes the address available for reassignment to other clients.
|
| 307 |
+
|
| 308 |
+
#### NAT and Firewall Management
|
| 309 |
+
|
| 310 |
+
NAT and firewall management endpoints provide comprehensive control over network address translation and packet filtering operations.
|
| 311 |
+
|
| 312 |
+
**GET /api/nat/sessions**
|
| 313 |
+
|
| 314 |
+
Returns the current NAT session table showing all active network address translations. The response includes source and destination addresses, port mappings, session duration, and traffic statistics.
|
| 315 |
+
|
| 316 |
+
**GET /api/firewall/rules**
|
| 317 |
+
|
| 318 |
+
Returns the current firewall rule set with detailed information about each rule including priority, action, matching criteria, and usage statistics.
|
| 319 |
+
|
| 320 |
+
**POST /api/firewall/rules**
|
| 321 |
+
|
| 322 |
+
Creates a new firewall rule with specified matching criteria and action. The endpoint validates rule syntax and checks for conflicts with existing rules before adding the new rule to the active rule set.
|
| 323 |
+
|
| 324 |
+
## Client Connection Workflow
|
| 325 |
+
|
| 326 |
+
### Configuration Generation Process
|
| 327 |
+
|
| 328 |
+
The client connection workflow begins with the generation of appropriate OpenVPN client configuration files. This process involves several steps that ensure proper authentication, network configuration, and security policy application.
|
| 329 |
+
|
| 330 |
+
The configuration generation process starts with the client making a request to the configuration generation endpoint, specifying the desired client name and server IP address. The system validates the request parameters and checks for any existing configurations with the same client name.
|
| 331 |
+
|
| 332 |
+
Upon successful validation, the system generates a complete OpenVPN client configuration that includes all necessary connection parameters, embedded certificates, and security settings. The configuration is customized for the specific client and includes unique identifiers that enable proper tracking and management.
|
| 333 |
+
|
| 334 |
+
The generated configuration includes comprehensive network settings that ensure proper integration with the ISP stack components. DNS server assignments, routing configurations, and security policies are all embedded in the client configuration to provide a seamless connection experience.
|
| 335 |
+
|
| 336 |
+
### Client Software Installation
|
| 337 |
+
|
| 338 |
+
VPN clients require appropriate OpenVPN client software to establish connections using the generated configurations. The choice of client software depends on the operating system and specific requirements of the deployment environment.
|
| 339 |
+
|
| 340 |
+
For desktop operating systems including Windows, macOS, and Linux, the official OpenVPN client software provides comprehensive functionality and broad compatibility. The client software supports both GUI and command-line operation modes, enabling integration with various management and automation systems.
|
| 341 |
+
|
| 342 |
+
Mobile devices including iOS and Android require specialized client applications that are available through the respective app stores. The OpenVPN Connect application provides official support for OpenVPN configurations and includes features optimized for mobile device operation.
|
| 343 |
+
|
| 344 |
+
Enterprise environments may require specialized client software that integrates with existing management systems or provides additional security features. Many commercial VPN client solutions support OpenVPN protocol compatibility and can be used with the generated configurations.
|
| 345 |
+
|
| 346 |
+
### Connection Establishment
|
| 347 |
+
|
| 348 |
+
The connection establishment process involves several phases that ensure proper authentication, network configuration, and policy application. The process begins when the client software attempts to establish a connection using the provided configuration file.
|
| 349 |
+
|
| 350 |
+
Initial connection attempts involve DNS resolution of the server address and establishment of the underlying network connection. The client software validates the server certificate and establishes an encrypted tunnel for subsequent communication.
|
| 351 |
+
|
| 352 |
+
Authentication occurs through the exchange of client certificates and validation of client credentials. The server verifies the client certificate against the configured certificate authority and checks for any revocation or expiration conditions.
|
| 353 |
+
|
| 354 |
+
Upon successful authentication, the server assigns network configuration parameters to the client including IP address assignment, DNS server configuration, and routing table updates. The client software applies these configurations to the local network interface and establishes the VPN tunnel.
|
| 355 |
+
|
| 356 |
+
### Traffic Flow and Monitoring
|
| 357 |
+
|
| 358 |
+
Once the VPN connection is established, all client traffic flows through the encrypted tunnel to the VPN server. The server processes this traffic through the integrated ISP stack components, applying appropriate NAT, firewall, and routing policies.
|
| 359 |
+
|
| 360 |
+
The system maintains comprehensive monitoring of client connections including bandwidth utilization, connection duration, and traffic patterns. This information is available through the API endpoints and can be used for capacity planning and troubleshooting.
|
| 361 |
+
|
| 362 |
+
Quality of service policies can be applied to VPN traffic to ensure appropriate bandwidth allocation and priority handling. The system supports various QoS mechanisms including traffic shaping, priority queuing, and bandwidth limiting.
|
| 363 |
+
|
| 364 |
+
Connection monitoring includes real-time detection of connection failures and automatic reconnection attempts. The system provides detailed logging of connection events and can generate alerts for significant events or threshold violations.
|
| 365 |
+
|
| 366 |
+
### Troubleshooting Common Issues
|
| 367 |
+
|
| 368 |
+
Client connection issues can arise from various sources including network connectivity problems, configuration errors, certificate issues, or server-side problems. The system provides comprehensive logging and diagnostic capabilities to assist with troubleshooting.
|
| 369 |
+
|
| 370 |
+
Network connectivity issues are often the most common source of connection problems. Clients should verify basic internet connectivity and ensure that the VPN server address is reachable. Firewall configurations on both client and server sides should be reviewed to ensure that necessary ports are accessible.
|
| 371 |
+
|
| 372 |
+
Configuration errors can prevent successful connection establishment or cause unexpected behavior after connection. The system validates configuration files during generation, but network environment changes or client software incompatibilities can cause issues.
|
| 373 |
+
|
| 374 |
+
Certificate problems including expiration, revocation, or validation errors can prevent authentication. The system provides detailed error messages for certificate-related issues and includes tools for certificate validation and troubleshooting.
|
| 375 |
+
|
| 376 |
+
## Security Considerations
|
| 377 |
+
|
| 378 |
+
### Certificate Management
|
| 379 |
+
|
| 380 |
+
The current implementation includes sample certificates and keys for demonstration and testing purposes. These certificates should never be used in production environments as they provide no security and are publicly available.
|
| 381 |
+
|
| 382 |
+
Production deployments require the generation of proper Public Key Infrastructure (PKI) certificates using tools like Easy-RSA or commercial certificate authorities. The certificate generation process should include proper key length selection, appropriate validity periods, and secure key storage practices.
|
| 383 |
+
|
| 384 |
+
Certificate revocation capabilities should be implemented for production environments to handle compromised certificates or terminated client access. The system should support Certificate Revocation Lists (CRL) or Online Certificate Status Protocol (OCSP) for real-time certificate validation.
|
| 385 |
+
|
| 386 |
+
Regular certificate rotation and renewal processes should be established to maintain security over time. Automated certificate management tools can help reduce the operational burden of certificate lifecycle management while ensuring consistent security practices.
|
| 387 |
+
|
| 388 |
+
### Network Security
|
| 389 |
+
|
| 390 |
+
Network security for the Virtual ISP Stack involves multiple layers of protection including host-level security, application-level security, and network-level security controls. Each layer provides specific protections and should be configured appropriately for the deployment environment.
|
| 391 |
+
|
| 392 |
+
Host-level security includes operating system hardening, regular security updates, and appropriate access controls. The system should run with minimal privileges and use dedicated user accounts for service operation. File system permissions should be configured to prevent unauthorized access to configuration files and certificates.
|
| 393 |
+
|
| 394 |
+
Application-level security includes input validation, secure coding practices, and proper error handling. The system implements various security controls including parameter validation, SQL injection prevention, and secure session management.
|
| 395 |
+
|
| 396 |
+
Network-level security includes firewall configuration, intrusion detection, and network segmentation. The system should be deployed behind appropriate firewall protection and may benefit from network intrusion detection systems for advanced threat detection.
|
| 397 |
+
|
| 398 |
+
### Access Control and Authentication
|
| 399 |
+
|
| 400 |
+
The current implementation does not include authentication mechanisms, making it suitable only for development and testing environments. Production deployments should implement appropriate authentication and authorization controls based on specific requirements.
|
| 401 |
+
|
| 402 |
+
Authentication mechanisms can include username/password authentication, certificate-based authentication, or integration with existing identity management systems. The choice of authentication method should consider security requirements, user experience, and operational complexity.
|
| 403 |
+
|
| 404 |
+
Authorization controls should implement role-based access control (RBAC) or attribute-based access control (ABAC) to ensure that users have appropriate permissions for their roles. The system should support fine-grained permissions for different API endpoints and operations.
|
| 405 |
+
|
| 406 |
+
Audit logging should be implemented to track all administrative actions and access attempts. The audit logs should be stored securely and regularly reviewed for suspicious activity or policy violations.
|
| 407 |
+
|
| 408 |
+
### Data Protection
|
| 409 |
+
|
| 410 |
+
Data protection for the Virtual ISP Stack includes protection of configuration data, client information, and operational logs. All sensitive data should be encrypted both in transit and at rest using appropriate encryption algorithms and key management practices.
|
| 411 |
+
|
| 412 |
+
Configuration files containing certificates and keys should be stored with restricted file system permissions and may benefit from additional encryption. Database encryption should be considered for environments storing sensitive client information or operational data.
|
| 413 |
+
|
| 414 |
+
Network traffic encryption is provided by the OpenVPN protocol, but additional encryption may be appropriate for management traffic and API communications. SSL/TLS encryption should be used for all web-based management interfaces and API access.
|
| 415 |
+
|
| 416 |
+
Data retention policies should be established to ensure that sensitive information is not retained longer than necessary. Regular data purging and secure deletion practices should be implemented to minimize the risk of data exposure.
|
| 417 |
+
|
| 418 |
+
## Troubleshooting
|
| 419 |
+
|
| 420 |
+
### Common Deployment Issues
|
| 421 |
+
|
| 422 |
+
Deployment issues can arise from various sources including missing dependencies, configuration errors, network connectivity problems, or resource constraints. The system provides comprehensive error reporting and logging to assist with troubleshooting.
|
| 423 |
+
|
| 424 |
+
Dependency issues are often encountered during initial deployment, particularly in environments with restricted internet access or custom Python configurations. The requirements.txt file specifies all necessary dependencies with version constraints, but some environments may require additional configuration.
|
| 425 |
+
|
| 426 |
+
Configuration errors can prevent proper system initialization or cause unexpected behavior during operation. The system validates configuration parameters during startup and provides detailed error messages for invalid or conflicting settings.
|
| 427 |
+
|
| 428 |
+
Network connectivity issues can prevent proper operation of networking components or external service integration. The system includes network connectivity testing capabilities and provides detailed error reporting for network-related problems.
|
| 429 |
+
|
| 430 |
+
Resource constraints including insufficient memory, disk space, or CPU capacity can cause performance problems or service failures. The system includes resource monitoring capabilities and can provide warnings when resource utilization approaches critical levels.
|
| 431 |
+
|
| 432 |
+
### Performance Troubleshooting
|
| 433 |
+
|
| 434 |
+
Performance optimization for the Virtual ISP Stack involves tuning various system parameters and configuration settings to achieve optimal throughput and responsiveness. The system includes several configurable parameters that can be adjusted based on specific deployment requirements.
|
| 435 |
+
|
| 436 |
+
Network buffer sizes and connection limits can be adjusted to optimize throughput for high-traffic environments. The system supports configuration of various networking parameters including TCP window sizes, connection timeouts, and queue depths.
|
| 437 |
+
|
| 438 |
+
Database performance can be optimized through appropriate indexing, query optimization, and connection pooling configuration. The system supports various database backends and can be configured for optimal performance based on the specific database platform.
|
| 439 |
+
|
| 440 |
+
Memory utilization can be optimized through garbage collection tuning, object pooling, and cache configuration. The system includes memory monitoring capabilities and can provide recommendations for memory optimization based on usage patterns.
|
| 441 |
+
|
| 442 |
+
CPU utilization can be optimized through thread pool configuration, process scheduling, and algorithm selection. The system supports multi-threaded operation and can be configured to take advantage of multi-core systems for improved performance.
|
| 443 |
+
|
| 444 |
+
### Monitoring and Alerting
|
| 445 |
+
|
| 446 |
+
Comprehensive monitoring and alerting capabilities are essential for production deployments of the Virtual ISP Stack. The system includes various monitoring endpoints and logging capabilities that can be integrated with external monitoring systems.
|
| 447 |
+
|
| 448 |
+
Health check endpoints provide real-time status information about all system components and can be used for automated monitoring and alerting. The endpoints return detailed status information including component health, performance metrics, and error conditions.
|
| 449 |
+
|
| 450 |
+
Performance metrics are available through various API endpoints and include information about throughput, response times, resource utilization, and error rates. These metrics can be exported to external monitoring systems for historical analysis and trend identification.
|
| 451 |
+
|
| 452 |
+
Log aggregation and analysis capabilities enable comprehensive troubleshooting and security monitoring. The system generates detailed logs for all operations and can be configured to export logs to external systems for centralized analysis.
|
| 453 |
+
|
| 454 |
+
Alerting configuration should include thresholds for critical metrics including service availability, performance degradation, and security events. The system can be integrated with various alerting platforms to provide timely notification of critical conditions.
|
| 455 |
+
|
| 456 |
+
## Performance Optimization
|
| 457 |
+
|
| 458 |
+
### System Tuning
|
| 459 |
+
|
| 460 |
+
System-level performance tuning involves optimizing the underlying operating system and hardware configuration to support optimal performance of the Virtual ISP Stack. This includes kernel parameter tuning, network stack optimization, and resource allocation adjustments.
|
| 461 |
+
|
| 462 |
+
Network stack tuning includes adjustments to TCP buffer sizes, connection tracking limits, and network queue configurations. These parameters can significantly impact throughput and latency, particularly in high-traffic environments.
|
| 463 |
+
|
| 464 |
+
Memory management tuning includes adjustments to virtual memory parameters, cache sizes, and memory allocation strategies. Proper memory configuration can prevent performance degradation due to excessive swapping or memory fragmentation.
|
| 465 |
+
|
| 466 |
+
CPU scheduling optimization includes process priority adjustments, CPU affinity configuration, and interrupt handling optimization. These adjustments can improve responsiveness and reduce latency for critical system components.
|
| 467 |
+
|
| 468 |
+
### Application Optimization
|
| 469 |
+
|
| 470 |
+
Application-level optimization involves tuning the Virtual ISP Stack configuration parameters and implementation details to achieve optimal performance for specific deployment scenarios.
|
| 471 |
+
|
| 472 |
+
Database optimization includes query optimization, index configuration, and connection pooling tuning. Proper database configuration can significantly improve response times and reduce resource utilization.
|
| 473 |
+
|
| 474 |
+
Caching strategies can improve performance by reducing redundant computations and database queries. The system supports various caching mechanisms including in-memory caches and distributed caching systems.
|
| 475 |
+
|
| 476 |
+
Concurrency optimization includes thread pool configuration, asynchronous processing implementation, and lock contention reduction. Proper concurrency management can improve throughput and responsiveness in multi-user environments.
|
| 477 |
+
|
| 478 |
+
### Scalability Planning
|
| 479 |
+
|
| 480 |
+
Scalability planning involves designing the deployment architecture to support growth in user base, traffic volume, and feature complexity. The Virtual ISP Stack supports various scalability approaches including vertical scaling, horizontal scaling, and hybrid approaches.
|
| 481 |
+
|
| 482 |
+
Vertical scaling involves increasing the resources available to a single instance of the system including CPU, memory, and storage capacity. This approach is often the simplest to implement but has limitations in terms of maximum achievable scale.
|
| 483 |
+
|
| 484 |
+
Horizontal scaling involves deploying multiple instances of the system and distributing load across the instances. This approach can achieve higher scale but requires careful consideration of data consistency and state management.
|
| 485 |
+
|
| 486 |
+
Load balancing strategies are essential for horizontal scaling and include various approaches such as round-robin distribution, weighted distribution, and session-aware distribution. The choice of load balancing strategy should consider the specific characteristics of the application workload.
|
| 487 |
+
|
| 488 |
+
## Monitoring and Maintenance
|
| 489 |
+
|
| 490 |
+
### Operational Monitoring
|
| 491 |
+
|
| 492 |
+
Operational monitoring of the Virtual ISP Stack involves continuous observation of system health, performance metrics, and security indicators. Effective monitoring enables proactive identification of issues before they impact users and provides data for capacity planning and optimization.
|
| 493 |
+
|
| 494 |
+
System health monitoring includes tracking of component availability, resource utilization, and error rates. The system provides comprehensive health check endpoints that can be integrated with external monitoring platforms for automated alerting and reporting.
|
| 495 |
+
|
| 496 |
+
Performance monitoring includes tracking of response times, throughput, and resource efficiency. The system generates detailed performance metrics that can be used for trend analysis, capacity planning, and performance optimization.
|
| 497 |
+
|
| 498 |
+
Security monitoring includes tracking of authentication events, access patterns, and potential security threats. The system generates comprehensive audit logs that can be analyzed for security incidents and compliance reporting.
|
| 499 |
+
|
| 500 |
+
### Preventive Maintenance
|
| 501 |
+
|
| 502 |
+
Preventive maintenance activities are essential for maintaining optimal performance and security of the Virtual ISP Stack over time. Regular maintenance activities include software updates, configuration reviews, and performance optimization.
|
| 503 |
+
|
| 504 |
+
Software update management includes tracking of security patches, feature updates, and dependency updates. The system should be regularly updated to address security vulnerabilities and take advantage of performance improvements.
|
| 505 |
+
|
| 506 |
+
Configuration management includes regular review of system configuration parameters, security settings, and operational policies. Configuration drift should be monitored and corrected to maintain consistent system behavior.
|
| 507 |
+
|
| 508 |
+
Performance optimization should be performed regularly based on monitoring data and changing usage patterns. This includes database maintenance, cache optimization, and resource allocation adjustments.
|
| 509 |
+
|
| 510 |
+
### Backup and Recovery
|
| 511 |
+
|
| 512 |
+
Backup and recovery procedures are critical for protecting against data loss and ensuring business continuity. The Virtual ISP Stack includes various data types that require different backup strategies and recovery procedures.
|
| 513 |
+
|
| 514 |
+
Configuration backup includes system configuration files, certificates, and operational policies. These files should be backed up regularly and stored securely to enable rapid system recovery in case of failure.
|
| 515 |
+
|
| 516 |
+
Database backup includes client information, operational logs, and system state data. Database backups should be performed regularly and tested to ensure successful recovery capability.
|
| 517 |
+
|
| 518 |
+
Recovery procedures should be documented and tested regularly to ensure that they can be executed successfully under stress conditions. Recovery testing should include both partial recovery scenarios and complete system reconstruction.
|
| 519 |
+
|
| 520 |
+
---
|
| 521 |
+
|
| 522 |
+
**References:**
|
| 523 |
+
|
| 524 |
+
[1] OpenVPN Community. "OpenVPN Documentation." https://openvpn.net/community-resources/
|
| 525 |
+
[2] Flask Development Team. "Flask Documentation." https://flask.palletsprojects.com/
|
| 526 |
+
[3] HuggingFace. "Spaces Documentation." https://huggingface.co/docs/hub/spaces
|
| 527 |
+
[4] Docker Inc. "Docker Documentation." https://docs.docker.com/
|
| 528 |
+
[5] Python Software Foundation. "Python Documentation." https://docs.python.org/3/
|
| 529 |
+
|
Dockerfile
ADDED
|
@@ -0,0 +1,52 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
# Virtual ISP Stack with OpenVPN Integration
# Dockerfile for containerized deployment

FROM python:3.11-slim

# Set working directory
WORKDIR /app

# Install system dependencies.
# curl is required by the HEALTHCHECK below — it is NOT part of the slim base
# image, so it must be installed here or the health check always fails.
RUN apt-get update && apt-get install -y --no-install-recommends \
    openvpn \
    iptables \
    iproute2 \
    net-tools \
    procps \
    curl \
    build-essential \
    python3-dev \
    && rm -rf /var/lib/apt/lists/*

# OpenVPN server configuration and PKI material.
# NOTE(review): these are the repository's sample certificates/keys — they are
# public and provide no security; replace with a real PKI before production use.
COPY openvpn/server.conf /etc/openvpn/server/server.conf
COPY openvpn/ca.crt /etc/openvpn/server/ca.crt
COPY openvpn/server.crt /etc/openvpn/server/server.crt
COPY openvpn/server.key /etc/openvpn/server/server.key
COPY openvpn/dh.pem /etc/openvpn/server/dh.pem

# Copy requirements first so the dependency layer is cached across code changes
COPY requirements.txt .
RUN pip install --no-cache-dir -r requirements.txt

# Copy application files
COPY . .

# Runtime directories: client VPN configs, OpenVPN logs, SQLite database
RUN mkdir -p /tmp/vpn_client_configs /var/log/openvpn database

# Set environment variables
ENV FLASK_APP=app.py \
    FLASK_ENV=production \
    PORT=7860

# Expose port (HuggingFace Spaces default)
EXPOSE 7860

# Health check against the application's /health endpoint
# (assumes app.py serves GET /health — TODO confirm against the Flask app)
HEALTHCHECK --interval=30s --timeout=10s --start-period=5s --retries=3 \
    CMD curl -f http://localhost:7860/health || exit 1

# Run the application
CMD ["python", "app.py"]
__init__.py
ADDED
|
File without changes
|
__pycache__/__init__.cpython-311.pyc
ADDED
|
Binary file (173 Bytes). View file
|
|
|
__pycache__/app.cpython-311.pyc
ADDED
|
Binary file (7.82 kB). View file
|
|
|
__pycache__/main.cpython-311.pyc
ADDED
|
Binary file (3.57 kB). View file
|
|
|
__pycache__/main_isp.cpython-311.pyc
ADDED
|
Binary file (10.8 kB). View file
|
|
|
app.py
ADDED
|
@@ -0,0 +1,190 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#!/usr/bin/env python3
"""
Virtual ISP Stack with OpenVPN Integration
HuggingFace Spaces Entry Point

This application provides a complete Virtual ISP stack with OpenVPN server
integration, allowing users to manage VPN connections, generate client
configurations, and monitor network traffic through a RESTful API.
"""

import os
import sys
import logging

# Make sibling packages (models/, routes/) importable when run as a script.
sys.path.insert(0, os.path.dirname(__file__))

from flask import Flask, send_from_directory, jsonify
from flask_cors import CORS
from models.user import db
from routes.user import user_bp
from routes.isp_api import init_engines, isp_api

# Configure logging for the whole process.
logging.basicConfig(
    level=logging.INFO,
    format='%(asctime)s - %(name)s - %(levelname)s - %(message)s'
)
logger = logging.getLogger(__name__)

# Create Flask application; static assets live next to this file.
app = Flask(__name__, static_folder=os.path.join(os.path.dirname(__file__), 'static'))

# Enable CORS for all routes.
# NOTE(review): origins="*" lets any site call this API — acceptable for a
# public demo Space, but restrict before exposing sensitive endpoints.
CORS(app, origins="*")

# Configuration.
# NOTE(review): the fallback secret is a published placeholder; always set
# SECRET_KEY in the environment for any real deployment.
app.config['SECRET_KEY'] = os.environ.get('SECRET_KEY', 'vpn-isp-stack-secret-key-change-in-production')

# Database configuration.
database_path = os.path.join(os.path.dirname(__file__), 'database', 'app.db')
# Fix: SQLite does not create missing parent directories, so a fresh checkout
# without database/ would fail on the first connection. Create it up front.
os.makedirs(os.path.dirname(database_path), exist_ok=True)
app.config['SQLALCHEMY_DATABASE_URI'] = f"sqlite:///{database_path}"
app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False

# Initialize database
db.init_app(app)

# Register blueprints (both mounted under /api)
app.register_blueprint(user_bp, url_prefix='/api')
app.register_blueprint(isp_api, url_prefix='/api')

# Engine configuration consumed by init_engines() below.
app.config.update({
    "dhcp": {
        "network": "10.0.0.0/24",
        "range_start": "10.0.0.10",
        "range_end": "10.0.0.100",
        "lease_time": 3600,
        "gateway": "10.0.0.1",
        "dns_servers": ["8.8.8.8", "8.8.4.4"]
    },
    "nat": {
        "port_range_start": 10000,
        "port_range_end": 65535,
        "session_timeout": 300
    },
    "firewall": {
        "default_policy": "ACCEPT",
        "log_blocked": True
    },
    "tcp": {
        "initial_window": 65535,
        "max_retries": 3,
        "timeout": 30
    },
    "openvpn": {
        "server_ip": "10.8.0.1",
        "server_port": 1194,
        "network": "10.8.0.0/24"
    },
    "logger": {
        "log_level": "INFO",
        "log_file": "/tmp/virtual_isp.log"
    }
})

# Initialize database tables at import time (HF Spaces imports this module).
with app.app_context():
    try:
        db.create_all()
        logger.info("Database tables created successfully")
    except Exception as e:
        logger.error(f"Error creating database tables: {e}")

# Initialize networking engines; startup continues even if this fails so the
# health endpoint and static UI stay reachable for diagnosis.
try:
    init_engines(app.config)
    logger.info("All engines initialized successfully")
except Exception as e:
    logger.error(f"Error initializing engines: {e}")

@app.route('/')
def index():
    """Main index page — serves the SPA entry point."""
    return serve_static('')

@app.route('/health')
def health_check():
    """Health check endpoint for monitoring."""
    return jsonify({
        'status': 'healthy',
        'service': 'Virtual ISP Stack with OpenVPN',
        'version': '1.0.0'
    })

@app.route('/api')
def api_info():
    """API information endpoint — a human-browsable endpoint directory."""
    return jsonify({
        'service': 'Virtual ISP Stack API',
        'version': '1.0.0',
        'endpoints': {
            'openvpn': {
                'status': '/api/openvpn/status',
                'start': '/api/openvpn/start',
                'stop': '/api/openvpn/stop',
                'clients': '/api/openvpn/clients',
                'config': '/api/openvpn/config/<client_name>',
                'stats': '/api/openvpn/stats',
                'configs': '/api/openvpn/configs'
            },
            'dhcp': {
                'leases': '/api/dhcp/leases'
            },
            'nat': {
                'sessions': '/api/nat/sessions',
                'stats': '/api/nat/stats'
            },
            'firewall': {
                'rules': '/api/firewall/rules',
                'logs': '/api/firewall/logs',
                'stats': '/api/firewall/stats'
            }
        }
    })

@app.route('/<path:path>')
def serve_static(path):
    """Serve static files, falling back to index.html (SPA-style routing)."""
    static_folder_path = app.static_folder
    if static_folder_path is None:
        return jsonify({'error': 'Static folder not configured'}), 404

    if path != "" and os.path.exists(os.path.join(static_folder_path, path)):
        return send_from_directory(static_folder_path, path)
    else:
        index_path = os.path.join(static_folder_path, 'index.html')
        if os.path.exists(index_path):
            return send_from_directory(static_folder_path, 'index.html')
        else:
            # No front-end bundle present — return a minimal JSON landing page.
            return jsonify({
                'message': 'Virtual ISP Stack with OpenVPN Integration',
                'status': 'running',
                'api_docs': '/api'
            })

@app.errorhandler(404)
def not_found(error):
    """Handle 404 errors with a JSON body instead of Flask's HTML page."""
    return jsonify({'error': 'Endpoint not found', 'api_docs': '/api'}), 404

@app.errorhandler(500)
def internal_error(error):
    """Handle 500 errors with a JSON body."""
    return jsonify({'error': 'Internal server error'}), 500

if __name__ == '__main__':
    # Get port from environment variable (HuggingFace Spaces uses PORT)
    port = int(os.environ.get('PORT', 7860))

    logger.info(f"Starting Virtual ISP Stack with OpenVPN on port {port}")

    # Run the dev server; threaded=True so static + API requests overlap.
    app.run(
        host='0.0.0.0',
        port=port,
        debug=False,
        threaded=True
    )
|
| 190 |
+
|
config.json
ADDED
|
@@ -0,0 +1,98 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
{
|
| 2 |
+
"dhcp": {
|
| 3 |
+
"network": "10.0.0.0/24",
|
| 4 |
+
"range_start": "10.0.0.10",
|
| 5 |
+
"range_end": "10.0.0.100",
|
| 6 |
+
"lease_time": 3600,
|
| 7 |
+
"gateway": "10.0.0.1",
|
| 8 |
+
"dns_servers": [
|
| 9 |
+
"8.8.8.8",
|
| 10 |
+
"8.8.4.4"
|
| 11 |
+
]
|
| 12 |
+
},
|
| 13 |
+
"nat": {
|
| 14 |
+
"port_range_start": 10000,
|
| 15 |
+
"port_range_end": 65535,
|
| 16 |
+
"session_timeout": 300,
|
| 17 |
+
"host_ip": "0.0.0.0"
|
| 18 |
+
},
|
| 19 |
+
"firewall": {
|
| 20 |
+
"default_policy": "ACCEPT",
|
| 21 |
+
"log_blocked": true,
|
| 22 |
+
"log_accepted": false,
|
| 23 |
+
"max_log_entries": 10000,
|
| 24 |
+
"rules": [
|
| 25 |
+
{
|
| 26 |
+
"rule_id": "allow_dhcp",
|
| 27 |
+
"priority": 1,
|
| 28 |
+
"action": "ACCEPT",
|
| 29 |
+
"direction": "BOTH",
|
| 30 |
+
"dest_port": "67,68",
|
| 31 |
+
"protocol": "UDP",
|
| 32 |
+
"description": "Allow DHCP traffic",
|
| 33 |
+
"enabled": true
|
| 34 |
+
},
|
| 35 |
+
{
|
| 36 |
+
"rule_id": "allow_dns",
|
| 37 |
+
"priority": 2,
|
| 38 |
+
"action": "ACCEPT",
|
| 39 |
+
"direction": "BOTH",
|
| 40 |
+
"dest_port": "53",
|
| 41 |
+
"protocol": "UDP",
|
| 42 |
+
"description": "Allow DNS traffic",
|
| 43 |
+
"enabled": true
|
| 44 |
+
}
|
| 45 |
+
]
|
| 46 |
+
},
|
| 47 |
+
"tcp": {
|
| 48 |
+
"initial_window": 65535,
|
| 49 |
+
"max_retries": 3,
|
| 50 |
+
"timeout": 300,
|
| 51 |
+
"time_wait_timeout": 120,
|
| 52 |
+
"mss": 1460
|
| 53 |
+
},
|
| 54 |
+
"router": {
|
| 55 |
+
"router_id": "virtual-isp-router",
|
| 56 |
+
"default_gateway": "10.0.0.1",
|
| 57 |
+
"interfaces": [
|
| 58 |
+
{
|
| 59 |
+
"name": "virtual0",
|
| 60 |
+
"ip_address": "10.0.0.1",
|
| 61 |
+
"netmask": "255.255.255.0",
|
| 62 |
+
"enabled": true,
|
| 63 |
+
"mtu": 1500
|
| 64 |
+
}
|
| 65 |
+
],
|
| 66 |
+
"static_routes": []
|
| 67 |
+
},
|
| 68 |
+
"socket_translator": {
|
| 69 |
+
"connect_timeout": 10,
|
| 70 |
+
"read_timeout": 30,
|
| 71 |
+
"max_connections": 1000,
|
| 72 |
+
"buffer_size": 8192
|
| 73 |
+
},
|
| 74 |
+
"packet_bridge": {
|
| 75 |
+
"websocket_host": "0.0.0.0",
|
| 76 |
+
"websocket_port": 8765,
|
| 77 |
+
"tcp_host": "0.0.0.0",
|
| 78 |
+
"tcp_port": 8766,
|
| 79 |
+
"max_clients": 100,
|
| 80 |
+
"client_timeout": 300
|
| 81 |
+
},
|
| 82 |
+
"session_tracker": {
|
| 83 |
+
"max_sessions": 10000,
|
| 84 |
+
"session_timeout": 3600,
|
| 85 |
+
"cleanup_interval": 300,
|
| 86 |
+
"metrics_retention": 86400
|
| 87 |
+
},
|
| 88 |
+
"logger": {
|
| 89 |
+
"log_level": "INFO",
|
| 90 |
+
"log_to_file": true,
|
| 91 |
+
"log_file_path": "/tmp/virtual_isp.log",
|
| 92 |
+
"log_file_max_size": 10485760,
|
| 93 |
+
"log_file_backup_count": 5,
|
| 94 |
+
"log_to_console": true,
|
| 95 |
+
"structured_logging": true,
|
| 96 |
+
"max_memory_logs": 10000
|
| 97 |
+
}
|
| 98 |
+
}
|
core/__init__.py
ADDED
|
@@ -0,0 +1,2 @@
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Core networking modules for the virtual ISP stack
|
| 2 |
+
|
core/__pycache__/__init__.cpython-311.pyc
ADDED
|
Binary file (156 Bytes). View file
|
|
|
core/__pycache__/dhcp_server.cpython-311.pyc
ADDED
|
Binary file (21.2 kB). View file
|
|
|
core/__pycache__/firewall.cpython-311.pyc
ADDED
|
Binary file (27.4 kB). View file
|
|
|
core/__pycache__/ip_parser.cpython-311.pyc
ADDED
|
Binary file (23.1 kB). View file
|
|
|
core/__pycache__/logger.cpython-311.pyc
ADDED
|
Binary file (29.4 kB). View file
|
|
|
core/__pycache__/nat_engine.cpython-311.pyc
ADDED
|
Binary file (29.3 kB). View file
|
|
|
core/__pycache__/openvpn_manager.cpython-311.pyc
ADDED
|
Binary file (30.9 kB). View file
|
|
|
core/__pycache__/packet_bridge.cpython-311.pyc
ADDED
|
Binary file (34.3 kB). View file
|
|
|
core/__pycache__/session_tracker.cpython-311.pyc
ADDED
|
Binary file (33.9 kB). View file
|
|
|
core/__pycache__/socket_translator.cpython-311.pyc
ADDED
|
Binary file (32.8 kB). View file
|
|
|
core/__pycache__/tcp_engine.cpython-311.pyc
ADDED
|
Binary file (33.1 kB). View file
|
|
|
core/__pycache__/virtual_router.cpython-311.pyc
ADDED
|
Binary file (30.7 kB). View file
|
|
|
core/dhcp_server.py
ADDED
|
@@ -0,0 +1,391 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""
|
| 2 |
+
DHCP Server Module
|
| 3 |
+
|
| 4 |
+
Implements a user-space DHCP server that handles:
|
| 5 |
+
- DHCP DISCOVER → OFFER → REQUEST → ACK sequence
|
| 6 |
+
- IP lease management
|
| 7 |
+
- Lease renewals and expiration
|
| 8 |
+
"""
|
| 9 |
+
|
| 10 |
+
import struct
|
| 11 |
+
import time
|
| 12 |
+
import socket
|
| 13 |
+
import threading
|
| 14 |
+
from typing import Dict, Optional, Tuple
|
| 15 |
+
from dataclasses import dataclass
|
| 16 |
+
from enum import Enum
|
| 17 |
+
|
| 18 |
+
|
| 19 |
+
class DHCPMessageType(Enum):
    """DHCP message types carried in option 53 (RFC 2132 §9.6)."""
    DISCOVER = 1
    OFFER = 2
    REQUEST = 3
    DECLINE = 4
    ACK = 5
    NAK = 6
    RELEASE = 7
    INFORM = 8
|
| 28 |
+
|
| 29 |
+
|
| 30 |
+
@dataclass
class DHCPLease:
    """A single DHCP address binding for one client MAC.

    Expiry is computed from ``lease_start`` (epoch seconds) plus
    ``lease_time`` (duration in seconds).
    """
    mac_address: str
    ip_address: str
    lease_time: int
    lease_start: float
    state: str = 'BOUND'

    @property
    def is_expired(self) -> bool:
        """True once the lease lifetime has fully elapsed."""
        expiry = self.lease_start + self.lease_time
        return time.time() > expiry

    @property
    def remaining_time(self) -> int:
        """Whole seconds left on the lease, clamped at zero."""
        expiry = self.lease_start + self.lease_time
        return max(0, int(expiry - time.time()))
|
| 47 |
+
|
| 48 |
+
|
| 49 |
+
class DHCPPacket:
|
| 50 |
+
"""DHCP packet parser and builder"""
|
| 51 |
+
|
| 52 |
+
def __init__(self):
|
| 53 |
+
self.op = 0 # Message op code / message type
|
| 54 |
+
self.htype = 1 # Hardware address type (Ethernet = 1)
|
| 55 |
+
self.hlen = 6 # Hardware address length
|
| 56 |
+
self.hops = 0 # Hops
|
| 57 |
+
self.xid = 0 # Transaction ID
|
| 58 |
+
self.secs = 0 # Seconds elapsed
|
| 59 |
+
self.flags = 0 # Flags
|
| 60 |
+
self.ciaddr = '0.0.0.0' # Client IP address
|
| 61 |
+
self.yiaddr = '0.0.0.0' # Your IP address
|
| 62 |
+
self.siaddr = '0.0.0.0' # Server IP address
|
| 63 |
+
self.giaddr = '0.0.0.0' # Gateway IP address
|
| 64 |
+
self.chaddr = b'\x00' * 16 # Client hardware address
|
| 65 |
+
self.sname = b'\x00' * 64 # Server name
|
| 66 |
+
self.file = b'\x00' * 128 # Boot file name
|
| 67 |
+
self.options = {} # DHCP options
|
| 68 |
+
|
| 69 |
+
@classmethod
|
| 70 |
+
def parse(cls, data: bytes) -> 'DHCPPacket':
|
| 71 |
+
"""Parse DHCP packet from raw bytes"""
|
| 72 |
+
packet = cls()
|
| 73 |
+
|
| 74 |
+
# Parse fixed fields (first 236 bytes)
|
| 75 |
+
if len(data) < 236:
|
| 76 |
+
raise ValueError("DHCP packet too short")
|
| 77 |
+
|
| 78 |
+
fields = struct.unpack('!BBBBIHH4s4s4s4s16s64s128s', data[:236])
|
| 79 |
+
packet.op = fields[0]
|
| 80 |
+
packet.htype = fields[1]
|
| 81 |
+
packet.hlen = fields[2]
|
| 82 |
+
packet.hops = fields[3]
|
| 83 |
+
packet.xid = fields[4]
|
| 84 |
+
packet.secs = fields[5]
|
| 85 |
+
packet.flags = fields[6]
|
| 86 |
+
packet.ciaddr = socket.inet_ntoa(fields[7])
|
| 87 |
+
packet.yiaddr = socket.inet_ntoa(fields[8])
|
| 88 |
+
packet.siaddr = socket.inet_ntoa(fields[9])
|
| 89 |
+
packet.giaddr = socket.inet_ntoa(fields[10])
|
| 90 |
+
packet.chaddr = fields[11]
|
| 91 |
+
packet.sname = fields[12]
|
| 92 |
+
packet.file = fields[13]
|
| 93 |
+
|
| 94 |
+
# Parse options (after magic cookie)
|
| 95 |
+
options_data = data[236:]
|
| 96 |
+
if len(options_data) >= 4:
|
| 97 |
+
magic = struct.unpack('!I', options_data[:4])[0]
|
| 98 |
+
if magic == 0x63825363: # DHCP magic cookie
|
| 99 |
+
packet.options = packet._parse_options(options_data[4:])
|
| 100 |
+
|
| 101 |
+
return packet
|
| 102 |
+
|
| 103 |
+
def _parse_options(self, data: bytes) -> Dict[int, bytes]:
|
| 104 |
+
"""Parse DHCP options"""
|
| 105 |
+
options = {}
|
| 106 |
+
i = 0
|
| 107 |
+
|
| 108 |
+
while i < len(data):
|
| 109 |
+
if data[i] == 255: # End option
|
| 110 |
+
break
|
| 111 |
+
elif data[i] == 0: # Pad option
|
| 112 |
+
i += 1
|
| 113 |
+
continue
|
| 114 |
+
|
| 115 |
+
option_type = data[i]
|
| 116 |
+
if i + 1 >= len(data):
|
| 117 |
+
break
|
| 118 |
+
|
| 119 |
+
option_length = data[i + 1]
|
| 120 |
+
if i + 2 + option_length > len(data):
|
| 121 |
+
break
|
| 122 |
+
|
| 123 |
+
option_data = data[i + 2:i + 2 + option_length]
|
| 124 |
+
options[option_type] = option_data
|
| 125 |
+
i += 2 + option_length
|
| 126 |
+
|
| 127 |
+
return options
|
| 128 |
+
|
| 129 |
+
def build(self) -> bytes:
|
| 130 |
+
"""Build DHCP packet as bytes"""
|
| 131 |
+
# Build fixed fields
|
| 132 |
+
packet_data = struct.pack(
|
| 133 |
+
'!BBBBIHH4s4s4s4s16s64s128s',
|
| 134 |
+
self.op, self.htype, self.hlen, self.hops,
|
| 135 |
+
self.xid, self.secs, self.flags,
|
| 136 |
+
socket.inet_aton(self.ciaddr),
|
| 137 |
+
socket.inet_aton(self.yiaddr),
|
| 138 |
+
socket.inet_aton(self.siaddr),
|
| 139 |
+
socket.inet_aton(self.giaddr),
|
| 140 |
+
self.chaddr, self.sname, self.file
|
| 141 |
+
)
|
| 142 |
+
|
| 143 |
+
# Add magic cookie
|
| 144 |
+
packet_data += struct.pack('!I', 0x63825363)
|
| 145 |
+
|
| 146 |
+
# Add options
|
| 147 |
+
for option_type, option_data in self.options.items():
|
| 148 |
+
packet_data += struct.pack('!BB', option_type, len(option_data))
|
| 149 |
+
packet_data += option_data
|
| 150 |
+
|
| 151 |
+
# Add end option
|
| 152 |
+
packet_data += b'\xff'
|
| 153 |
+
|
| 154 |
+
# Pad to minimum size
|
| 155 |
+
while len(packet_data) < 300:
|
| 156 |
+
packet_data += b'\x00'
|
| 157 |
+
|
| 158 |
+
return packet_data
|
| 159 |
+
|
| 160 |
+
def get_mac_address(self) -> str:
|
| 161 |
+
"""Get client MAC address as string"""
|
| 162 |
+
return ':'.join(f'{b:02x}' for b in self.chaddr[:6])
|
| 163 |
+
|
| 164 |
+
def get_message_type(self) -> Optional[DHCPMessageType]:
|
| 165 |
+
"""Get DHCP message type from options"""
|
| 166 |
+
if 53 in self.options and len(self.options[53]) == 1:
|
| 167 |
+
msg_type = self.options[53][0]
|
| 168 |
+
try:
|
| 169 |
+
return DHCPMessageType(msg_type)
|
| 170 |
+
except ValueError:
|
| 171 |
+
return None
|
| 172 |
+
return None
|
| 173 |
+
|
| 174 |
+
|
| 175 |
+
class DHCPServer:
    """User-space DHCP server implementation.

    Hands out leases from a configured range and answers the
    DISCOVER -> OFFER -> REQUEST -> ACK sequence. Config keys used:
    ``network``, ``range_start``, ``range_end``, ``lease_time``,
    ``gateway``, ``dns_servers``.
    """

    def __init__(self, config: Dict):
        self.config = config
        # MAC -> lease; always read/written under self.lock.
        self.leases: Dict[str, "DHCPLease"] = {}
        self.ip_pool = self._build_ip_pool()
        self.running = False
        self.server_thread = None
        self.lock = threading.Lock()

    @staticmethod
    def _ip_to_int(ip: str) -> int:
        """Convert a dotted-quad string to its 32-bit integer value."""
        return struct.unpack('!I', socket.inet_aton(ip))[0]

    def _build_ip_pool(self) -> set:
        """Build the set of assignable IPs from range_start..range_end inclusive."""
        start_int = self._ip_to_int(self.config['range_start'])
        end_int = self._ip_to_int(self.config['range_end'])

        pool = set()
        for ip_int in range(start_int, end_int + 1):
            pool.add(socket.inet_ntoa(struct.pack('!I', ip_int)))
        return pool

    def _get_available_ip(self) -> Optional[str]:
        """Return the numerically lowest unassigned IP, or None if exhausted."""
        with self.lock:
            self._cleanup_expired_leases()

            used_ips = {lease.ip_address for lease in self.leases.values()}
            available_ips = self.ip_pool - used_ips

            if available_ips:
                # BUG FIX: min() over dotted-quad *strings* is lexicographic
                # ('10.0.0.100' < '10.0.0.9'), so compare numerically instead.
                return min(available_ips, key=self._ip_to_int)
            return None

    def _cleanup_expired_leases(self):
        """Drop expired leases. Caller must hold self.lock."""
        expired_macs = [
            mac for mac, lease in self.leases.items()
            if lease.is_expired
        ]
        for mac in expired_macs:
            del self.leases[mac]

    def _create_dhcp_offer(self, discover_packet: "DHCPPacket") -> Optional["DHCPPacket"]:
        """Create a DHCP OFFER for a DISCOVER; None if the pool is exhausted."""
        mac_address = discover_packet.get_mac_address()

        # Prefer the client's existing, still-valid lease so repeated
        # DISCOVERs keep getting the same address.
        with self.lock:
            current = self.leases.get(mac_address)
            existing_ip = current.ip_address if current and not current.is_expired else None

        if existing_ip is not None:
            offered_ip = existing_ip
        else:
            offered_ip = self._get_available_ip()
            if not offered_ip:
                return None  # No available IPs

        offer = DHCPPacket()
        offer.op = 2  # BOOTREPLY
        offer.htype = discover_packet.htype
        offer.hlen = discover_packet.hlen
        offer.xid = discover_packet.xid
        offer.yiaddr = offered_ip
        offer.siaddr = self.config['gateway']
        offer.chaddr = discover_packet.chaddr

        self._set_reply_options(offer, DHCPMessageType.OFFER)
        return offer

    def _create_dhcp_ack(self, request_packet: "DHCPPacket") -> "DHCPPacket":
        """Create a DHCP ACK (or NAK on invalid request) and record the lease."""
        mac_address = request_packet.get_mac_address()
        requested_ip = request_packet.ciaddr

        # A client in SELECTING state leaves ciaddr empty and puts the
        # requested address in option 50.
        if requested_ip == '0.0.0.0' and 50 in request_packet.options:
            requested_ip = socket.inet_ntoa(request_packet.options[50])

        if not self._validate_request(mac_address, requested_ip):
            return self._create_dhcp_nak(request_packet)

        lease = DHCPLease(
            mac_address=mac_address,
            ip_address=requested_ip,
            lease_time=self.config['lease_time'],
            lease_start=time.time()
        )

        with self.lock:
            self.leases[mac_address] = lease

        ack = DHCPPacket()
        ack.op = 2  # BOOTREPLY
        ack.htype = request_packet.htype
        ack.hlen = request_packet.hlen
        ack.xid = request_packet.xid
        ack.yiaddr = requested_ip
        ack.siaddr = self.config['gateway']
        ack.chaddr = request_packet.chaddr

        self._set_reply_options(ack, DHCPMessageType.ACK)
        return ack

    def _set_reply_options(self, reply: "DHCPPacket", msg_type: "DHCPMessageType"):
        """Fill the standard option set shared by OFFER and ACK replies."""
        reply.options[53] = bytes([msg_type.value])                      # Message type
        # TODO confirm: subnet mask is hard-coded /24; derive from
        # config['network'] if non-/24 pools are ever configured.
        reply.options[1] = socket.inet_aton('255.255.255.0')             # Subnet mask
        reply.options[3] = socket.inet_aton(self.config['gateway'])      # Router
        reply.options[6] = b''.join(
            socket.inet_aton(dns) for dns in self.config['dns_servers'])  # DNS
        reply.options[51] = struct.pack('!I', self.config['lease_time'])  # Lease time
        reply.options[54] = socket.inet_aton(self.config['gateway'])      # Server identifier

    def _create_dhcp_nak(self, request_packet: "DHCPPacket") -> "DHCPPacket":
        """Create a DHCP NAK response refusing the client's request."""
        nak = DHCPPacket()
        nak.op = 2  # BOOTREPLY
        nak.htype = request_packet.htype
        nak.hlen = request_packet.hlen
        nak.xid = request_packet.xid
        nak.chaddr = request_packet.chaddr

        nak.options[53] = bytes([DHCPMessageType.NAK.value])          # Message type
        nak.options[54] = socket.inet_aton(self.config['gateway'])    # Server identifier
        return nak

    def _validate_request(self, mac_address: str, requested_ip: str) -> bool:
        """Accept a request only if the IP is in-pool and not leased to another MAC."""
        if requested_ip not in self.ip_pool:
            return False

        with self.lock:
            for mac, lease in self.leases.items():
                if lease.ip_address == requested_ip:
                    if mac != mac_address and not lease.is_expired:
                        return False  # IP already assigned to different MAC

        return True

    def process_packet(self, packet_data: bytes, client_addr: Tuple[str, int]) -> Optional[bytes]:
        """Process an incoming DHCP datagram; return reply bytes or None."""
        try:
            packet = DHCPPacket.parse(packet_data)
            message_type = packet.get_message_type()

            if message_type == DHCPMessageType.DISCOVER:
                response = self._create_dhcp_offer(packet)
            elif message_type == DHCPMessageType.REQUEST:
                response = self._create_dhcp_ack(packet)
            elif message_type == DHCPMessageType.RELEASE:
                # RELEASE frees the client's lease; no reply is sent.
                mac_address = packet.get_mac_address()
                with self.lock:
                    if mac_address in self.leases:
                        del self.leases[mac_address]
                return None
            else:
                return None

            if response:
                return response.build()

        except Exception as e:
            # Best-effort: a malformed packet must not kill the server loop.
            print(f"Error processing DHCP packet: {e}")

        return None

    def get_leases(self) -> Dict[str, Dict]:
        """Return a snapshot of current (non-expired) leases keyed by MAC."""
        with self.lock:
            self._cleanup_expired_leases()
            return {
                mac: {
                    'ip_address': lease.ip_address,
                    'lease_time': lease.lease_time,
                    'lease_start': lease.lease_start,
                    'remaining_time': lease.remaining_time,
                    'state': lease.state
                }
                for mac, lease in self.leases.items()
            }

    def release_lease(self, mac_address: str) -> bool:
        """Manually release a lease; True if one existed for the MAC."""
        with self.lock:
            if mac_address in self.leases:
                del self.leases[mac_address]
                return True
        return False

    def start(self):
        """Start DHCP server (placeholder for integration with packet bridge)."""
        self.running = True
        print(f"DHCP server started - Pool: {self.config['range_start']} - {self.config['range_end']}")

    def stop(self):
        """Stop DHCP server."""
        self.running = False
        print("DHCP server stopped")
|
| 391 |
+
|
core/firewall.py
ADDED
|
@@ -0,0 +1,523 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""
|
| 2 |
+
Firewall Module
|
| 3 |
+
|
| 4 |
+
Implements packet filtering and access control:
|
| 5 |
+
- Rule-based packet filtering (allow/block by IP, port, protocol)
|
| 6 |
+
- Ordered rule processing
|
| 7 |
+
- Logging and statistics
|
| 8 |
+
- Dynamic rule management via API
|
| 9 |
+
"""
|
| 10 |
+
|
| 11 |
+
import time
|
| 12 |
+
import threading
|
| 13 |
+
import ipaddress
|
| 14 |
+
import re
|
| 15 |
+
from typing import Dict, List, Optional, Tuple, Any
|
| 16 |
+
from dataclasses import dataclass
|
| 17 |
+
from enum import Enum
|
| 18 |
+
|
| 19 |
+
from .ip_parser import ParsedPacket, TCPHeader, UDPHeader
|
| 20 |
+
|
| 21 |
+
|
| 22 |
+
class FirewallAction(Enum):
    """Verdict applied to a matched packet (iptables-style names)."""
    ACCEPT = "ACCEPT"
    DROP = "DROP"
    REJECT = "REJECT"
|
| 26 |
+
|
| 27 |
+
|
| 28 |
+
class FirewallDirection(Enum):
    """Traffic direction a firewall rule applies to."""
    INBOUND = "INBOUND"
    OUTBOUND = "OUTBOUND"
    BOTH = "BOTH"  # rule matches regardless of direction
|
| 32 |
+
|
| 33 |
+
|
| 34 |
+
@dataclass
class FirewallRule:
    """A single firewall rule: match criteria plus a verdict.

    Rules with a lower ``priority`` number are evaluated earlier; a
    ``None`` match field means "match anything" for that criterion.
    """
    rule_id: str
    priority: int  # lower number = evaluated first
    action: FirewallAction
    direction: FirewallDirection

    # Match criteria
    source_ip: Optional[str] = None    # exact IP or CIDR block
    dest_ip: Optional[str] = None      # exact IP or CIDR block
    source_port: Optional[str] = None  # "80", "80-90", or "80,443"
    dest_port: Optional[str] = None    # same formats as source_port
    protocol: Optional[str] = None     # TCP, UDP, ICMP, or None for any

    # Bookkeeping
    description: str = ""
    enabled: bool = True
    created_time: float = 0
    hit_count: int = 0
    last_hit: Optional[float] = None

    def __post_init__(self):
        # A zero creation time means "stamp with the current time".
        if self.created_time == 0:
            self.created_time = time.time()

    def record_hit(self):
        """Bump the hit counter and remember when the rule last matched."""
        self.hit_count += 1
        self.last_hit = time.time()

    def to_dict(self) -> Dict:
        """Serialize the rule; enum fields become their string values."""
        result: Dict = {
            'rule_id': self.rule_id,
            'priority': self.priority,
            'action': self.action.value,
            'direction': self.direction.value,
        }
        for name in ('source_ip', 'dest_ip', 'source_port', 'dest_port',
                     'protocol', 'description', 'enabled', 'created_time',
                     'hit_count', 'last_hit'):
            result[name] = getattr(self, name)
        return result
|
| 83 |
+
|
| 84 |
+
|
| 85 |
+
@dataclass
class FirewallLogEntry:
    """One firewall decision, captured for the bounded in-memory log."""
    timestamp: float
    action: str
    rule_id: Optional[str]
    source_ip: str
    dest_ip: str
    source_port: int
    dest_port: int
    protocol: str
    packet_size: int
    reason: str = ""

    def to_dict(self) -> Dict:
        """Serialize the entry; dictionary keys mirror the field names."""
        field_order = ('timestamp', 'action', 'rule_id', 'source_ip',
                       'dest_ip', 'source_port', 'dest_port', 'protocol',
                       'packet_size', 'reason')
        return {name: getattr(self, name) for name in field_order}
|
| 113 |
+
|
| 114 |
+
|
| 115 |
+
class FirewallEngine:
    """Rule-based packet filter with ordered evaluation and logging.

    Packets are checked against rules in ascending ``priority`` order;
    the first enabled rule that matches decides the verdict. When no
    rule matches, the configured default policy applies. All shared
    state (rules, logs, statistics) is mutated under ``self.lock`` so a
    single engine can be shared across threads.

    Fix over the previous revision: statistics counters are now
    incremented while holding the lock (they were unguarded in
    ``process_packet`` and ``reset_stats``, racing with ``get_stats``).
    """

    def __init__(self, config: Dict):
        """Create an engine from a configuration dictionary.

        Recognized keys: ``default_policy`` ("ACCEPT"/"DROP"/"REJECT"),
        ``log_blocked``, ``log_accepted``, ``max_log_entries`` and an
        optional ``rules`` list of rule-configuration dictionaries.
        """
        self.config = config
        self.rules: Dict[str, FirewallRule] = {}
        self.logs: List[FirewallLogEntry] = []
        self.lock = threading.Lock()

        # Configuration
        self.default_policy = FirewallAction(config.get('default_policy', 'ACCEPT'))
        self.log_blocked = config.get('log_blocked', True)
        self.log_accepted = config.get('log_accepted', False)
        self.max_log_entries = config.get('max_log_entries', 10000)

        # Statistics — mutated only while holding self.lock.
        self.stats = {
            'packets_processed': 0,
            'packets_accepted': 0,
            'packets_dropped': 0,
            'packets_rejected': 0,
            'rules_hit': 0,
            'default_policy_hits': 0
        }

        # Load initial rules from configuration.
        for rule_config in config.get('rules', []):
            self._add_rule_from_config(rule_config)

    def _add_rule_from_config(self, rule_config: Dict):
        """Build a FirewallRule from a config dict and register it."""
        rule = FirewallRule(
            rule_id=rule_config['rule_id'],
            priority=rule_config.get('priority', 100),
            action=FirewallAction(rule_config['action']),
            direction=FirewallDirection(rule_config.get('direction', 'BOTH')),
            source_ip=rule_config.get('source_ip'),
            dest_ip=rule_config.get('dest_ip'),
            source_port=rule_config.get('source_port'),
            dest_port=rule_config.get('dest_port'),
            protocol=rule_config.get('protocol'),
            description=rule_config.get('description', ''),
            enabled=rule_config.get('enabled', True)
        )
        with self.lock:
            self.rules[rule.rule_id] = rule

    def _match_ip(self, ip: str, pattern: str) -> bool:
        """Match an IP against an exact address or a CIDR block.

        Malformed patterns or addresses never match.
        """
        try:
            if '/' in pattern:
                # CIDR notation
                network = ipaddress.ip_network(pattern, strict=False)
                return ipaddress.ip_address(ip) in network
            # Exact IP match
            return ip == pattern
        except ValueError:  # AddressValueError is a ValueError subclass
            return False

    def _match_port(self, port: int, pattern: str) -> bool:
        """Match a port against "80", a range "80-90", or a list "80,443".

        Malformed patterns never match.
        """
        try:
            if ',' in pattern:
                # Comma-separated list of ports
                return port in {int(p.strip()) for p in pattern.split(',')}
            if '-' in pattern:
                # Inclusive port range
                start, end = map(int, pattern.split('-', 1))
                return start <= port <= end
            # Single port
            return port == int(pattern)
        except (ValueError, TypeError):
            return False

    def _match_protocol(self, protocol: str, pattern: Optional[str]) -> bool:
        """Case-insensitive protocol match; a None pattern matches anything."""
        if pattern is None:
            return True
        return protocol.upper() == pattern.upper()

    @staticmethod
    def _extract_transport_info(packet: ParsedPacket) -> Tuple[str, int, int]:
        """Return (protocol_name, source_port, dest_port) for a packet.

        Packets without a TCP/UDP transport header report ('OTHER', 0, 0).
        Shared by rule evaluation and logging (was duplicated inline).
        """
        header = packet.transport_header
        if isinstance(header, TCPHeader):
            return 'TCP', header.source_port, header.dest_port
        if isinstance(header, UDPHeader):
            return 'UDP', header.source_port, header.dest_port
        return 'OTHER', 0, 0

    def _evaluate_rule(self, rule: FirewallRule, packet: ParsedPacket,
                       direction: FirewallDirection) -> bool:
        """True when an enabled rule matches the packet and direction."""
        if not rule.enabled:
            return False

        # Direction must match unless the rule applies to BOTH.
        if rule.direction not in (FirewallDirection.BOTH, direction):
            return False

        # Source / destination address criteria.
        if rule.source_ip and not self._match_ip(packet.ip_header.source_ip, rule.source_ip):
            return False
        if rule.dest_ip and not self._match_ip(packet.ip_header.dest_ip, rule.dest_ip):
            return False

        # Transport-layer criteria.
        protocol, source_port, dest_port = self._extract_transport_info(packet)
        if not self._match_protocol(protocol, rule.protocol):
            return False
        if rule.source_port and not self._match_port(source_port, rule.source_port):
            return False
        if rule.dest_port and not self._match_port(dest_port, rule.dest_port):
            return False

        return True

    def _log_packet(self, action: str, packet: ParsedPacket,
                    rule_id: Optional[str] = None, reason: str = ""):
        """Append a log entry if logging is enabled for this action."""
        if not (self.log_blocked or self.log_accepted):
            return
        if action == 'ACCEPT' and not self.log_accepted:
            return
        if action in ('DROP', 'REJECT') and not self.log_blocked:
            return

        protocol, source_port, dest_port = self._extract_transport_info(packet)

        log_entry = FirewallLogEntry(
            timestamp=time.time(),
            action=action,
            rule_id=rule_id,
            source_ip=packet.ip_header.source_ip,
            dest_ip=packet.ip_header.dest_ip,
            source_port=source_port,
            dest_port=dest_port,
            protocol=protocol,
            packet_size=len(packet.raw_packet),
            reason=reason
        )

        with self.lock:
            self.logs.append(log_entry)
            # Keep the log bounded; drop the oldest entries first.
            if len(self.logs) > self.max_log_entries:
                self.logs = self.logs[-self.max_log_entries:]

    def _record_verdict(self, action: FirewallAction):
        """Increment the per-verdict packet counter under the lock."""
        key = {
            FirewallAction.ACCEPT: 'packets_accepted',
            FirewallAction.DROP: 'packets_dropped',
            FirewallAction.REJECT: 'packets_rejected',
        }[action]
        with self.lock:
            self.stats[key] += 1

    def process_packet(self, packet: ParsedPacket,
                       direction: FirewallDirection) -> FirewallAction:
        """Run a packet through the rule chain and return the verdict.

        Rules are evaluated in ascending priority order; the first match
        wins. Falls back to the default policy when nothing matches.
        """
        with self.lock:
            self.stats['packets_processed'] += 1
            # Snapshot the rule set so evaluation happens outside the lock
            # (avoids deadlock with _log_packet, which also takes the lock).
            sorted_rules = sorted(self.rules.values(), key=lambda r: r.priority)

        for rule in sorted_rules:
            if self._evaluate_rule(rule, packet, direction):
                rule.record_hit()
                with self.lock:
                    self.stats['rules_hit'] += 1
                self._log_packet(rule.action.value, packet, rule.rule_id,
                                 f"Matched rule: {rule.description}")
                self._record_verdict(rule.action)
                return rule.action

        # No rule matched — apply the default policy.
        with self.lock:
            self.stats['default_policy_hits'] += 1
        self._log_packet(self.default_policy.value, packet, None, "Default policy")
        self._record_verdict(self.default_policy)
        return self.default_policy

    def add_rule(self, rule: FirewallRule) -> bool:
        """Add a rule; returns False if the rule_id already exists."""
        with self.lock:
            if rule.rule_id in self.rules:
                return False
            self.rules[rule.rule_id] = rule
            return True

    def remove_rule(self, rule_id: str) -> bool:
        """Remove a rule by id; returns False if it does not exist."""
        with self.lock:
            if rule_id in self.rules:
                del self.rules[rule_id]
                return True
            return False

    def update_rule(self, rule_id: str, **kwargs) -> bool:
        """Update attributes of an existing rule.

        'action' and 'direction' string values are coerced to their
        enums; unknown attribute names are silently ignored. Returns
        False when the rule does not exist.
        """
        with self.lock:
            if rule_id not in self.rules:
                return False

            rule = self.rules[rule_id]
            for key, value in kwargs.items():
                if hasattr(rule, key):
                    if key == 'action':
                        value = FirewallAction(value)
                    elif key == 'direction':
                        value = FirewallDirection(value)
                    setattr(rule, key, value)
            return True

    def enable_rule(self, rule_id: str) -> bool:
        """Enable a rule by id."""
        return self.update_rule(rule_id, enabled=True)

    def disable_rule(self, rule_id: str) -> bool:
        """Disable a rule by id."""
        return self.update_rule(rule_id, enabled=False)

    def get_rules(self) -> List[Dict]:
        """All rules as dictionaries, sorted by priority."""
        with self.lock:
            return [rule.to_dict()
                    for rule in sorted(self.rules.values(), key=lambda r: r.priority)]

    def get_rule(self, rule_id: str) -> Optional[Dict]:
        """One rule as a dictionary, or None if unknown."""
        with self.lock:
            rule = self.rules.get(rule_id)
            return rule.to_dict() if rule else None

    def get_logs(self, limit: int = 100, filter_action: Optional[str] = None) -> List[Dict]:
        """The most recent log entries, optionally filtered by action."""
        with self.lock:
            logs = self.logs.copy()

        if filter_action:
            logs = [log for log in logs if log.action == filter_action.upper()]

        return [log.to_dict() for log in logs[-limit:]]

    def clear_logs(self):
        """Discard all log entries."""
        with self.lock:
            self.logs.clear()

    def get_stats(self) -> Dict:
        """Snapshot of counters plus rule/log totals and the policy name."""
        with self.lock:
            stats = self.stats.copy()
            stats['total_rules'] = len(self.rules)
            stats['enabled_rules'] = sum(1 for rule in self.rules.values() if rule.enabled)
            stats['log_entries'] = len(self.logs)
            stats['default_policy'] = self.default_policy.value
        return stats

    def reset_stats(self):
        """Zero all counters and per-rule hit bookkeeping (thread-safe)."""
        with self.lock:
            for key in self.stats:
                self.stats[key] = 0
            for rule in self.rules.values():
                rule.hit_count = 0
                rule.last_hit = None

    def set_default_policy(self, policy: str):
        """Change the default policy ('accept'/'drop'/'reject', any case)."""
        self.default_policy = FirewallAction(policy.upper())

    def export_rules(self) -> List[Dict]:
        """Rules in a form suitable for backup or re-import."""
        return self.get_rules()

    def import_rules(self, rules_config: List[Dict], replace: bool = False):
        """Load rules from config dicts, optionally replacing existing ones."""
        if replace:
            with self.lock:
                self.rules.clear()

        for rule_config in rules_config:
            self._add_rule_from_config(rule_config)
|
| 450 |
+
|
| 451 |
+
|
| 452 |
+
class FirewallRuleBuilder:
    """Fluent builder for FirewallRule objects.

    Every setter returns the builder so calls can be chained, e.g.::

        rule = FirewallRuleBuilder("r1").set_action("DROP").set_dest_port("22").build()
    """

    def __init__(self, rule_id: str):
        self.rule_id = rule_id
        # Defaults mirror FirewallRule's own field defaults.
        self.priority = 100
        self.action = FirewallAction.ACCEPT
        self.direction = FirewallDirection.BOTH
        self.source_ip = self.dest_ip = None
        self.source_port = self.dest_port = None
        self.protocol = None
        self.description = ""
        self.enabled = True

    def set_priority(self, priority: int) -> 'FirewallRuleBuilder':
        """Lower numbers are evaluated first."""
        self.priority = priority
        return self

    def set_action(self, action: str) -> 'FirewallRuleBuilder':
        """Accepts 'accept'/'drop'/'reject' in any letter case."""
        self.action = FirewallAction(action.upper())
        return self

    def set_direction(self, direction: str) -> 'FirewallRuleBuilder':
        """Accepts 'inbound'/'outbound'/'both' in any letter case."""
        self.direction = FirewallDirection(direction.upper())
        return self

    def set_source_ip(self, ip: str) -> 'FirewallRuleBuilder':
        """Exact IP or CIDR block to match as the source."""
        self.source_ip = ip
        return self

    def set_dest_ip(self, ip: str) -> 'FirewallRuleBuilder':
        """Exact IP or CIDR block to match as the destination."""
        self.dest_ip = ip
        return self

    def set_source_port(self, port: str) -> 'FirewallRuleBuilder':
        """Port spec: "80", "80-90", or "80,443"."""
        self.source_port = port
        return self

    def set_dest_port(self, port: str) -> 'FirewallRuleBuilder':
        """Port spec: "80", "80-90", or "80,443"."""
        self.dest_port = port
        return self

    def set_protocol(self, protocol: str) -> 'FirewallRuleBuilder':
        """Protocol name; stored upper-cased."""
        self.protocol = protocol.upper()
        return self

    def set_description(self, description: str) -> 'FirewallRuleBuilder':
        self.description = description
        return self

    def set_enabled(self, enabled: bool) -> 'FirewallRuleBuilder':
        self.enabled = enabled
        return self

    def build(self) -> FirewallRule:
        """Materialize the configured FirewallRule."""
        kwargs = {name: getattr(self, name) for name in (
            'rule_id', 'priority', 'action', 'direction', 'source_ip',
            'dest_ip', 'source_port', 'dest_port', 'protocol',
            'description', 'enabled')}
        return FirewallRule(**kwargs)
|
| 523 |
+
|
core/ip_parser.py
ADDED
|
@@ -0,0 +1,546 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""
|
| 2 |
+
IP Parser/Assembler Module
|
| 3 |
+
|
| 4 |
+
Handles IPv4 packet parsing and construction:
|
| 5 |
+
- Parse IPv4, UDP, and TCP headers
|
| 6 |
+
- Calculate and verify checksums
|
| 7 |
+
- Handle packet fragmentation and reassembly
|
| 8 |
+
- Support various IP options
|
| 9 |
+
"""
|
| 10 |
+
|
| 11 |
+
import struct
|
| 12 |
+
import socket
|
| 13 |
+
from typing import Dict, List, Optional, Tuple
|
| 14 |
+
from dataclasses import dataclass
|
| 15 |
+
from enum import Enum
|
| 16 |
+
|
| 17 |
+
|
| 18 |
+
class IPProtocol(Enum):
    """IANA protocol numbers for the transport layers this parser handles."""
    ICMP = 1
    TCP = 6
    UDP = 17
|
| 22 |
+
|
| 23 |
+
|
| 24 |
+
@dataclass
class IPv4Header:
    """Decoded IPv4 header fields."""
    version: int = 4
    ihl: int = 5              # header length, counted in 32-bit words
    tos: int = 0              # Type of Service
    total_length: int = 0
    identification: int = 0
    flags: int = 0            # 3 bits: reserved / Don't Fragment / More Fragments
    fragment_offset: int = 0  # 13-bit field
    ttl: int = 64             # Time to Live
    protocol: int = 0
    header_checksum: int = 0
    source_ip: str = '0.0.0.0'
    dest_ip: str = '0.0.0.0'
    options: bytes = b''

    @property
    def header_length(self) -> int:
        """Header length in bytes (four bytes per IHL word)."""
        return 4 * self.ihl

    @property
    def dont_fragment(self) -> bool:
        """True when the Don't Fragment bit (0x2) is set."""
        return (self.flags & 0x2) != 0

    @property
    def more_fragments(self) -> bool:
        """True when the More Fragments bit (0x1) is set."""
        return (self.flags & 0x1) != 0

    @property
    def is_fragment(self) -> bool:
        """True for any fragment: MF set, or a non-zero fragment offset."""
        return self.fragment_offset > 0 or self.more_fragments
|
| 60 |
+
|
| 61 |
+
|
| 62 |
+
@dataclass
class TCPHeader:
    """Decoded TCP header fields."""
    source_port: int = 0
    dest_port: int = 0
    seq_num: int = 0
    ack_num: int = 0
    data_offset: int = 5  # header length, counted in 32-bit words
    reserved: int = 0
    flags: int = 0        # 9 bits: NS CWR ECE URG ACK PSH RST SYN FIN
    window_size: int = 65535
    checksum: int = 0
    urgent_pointer: int = 0
    options: bytes = b''

    # Bit masks for the 9-bit flag field (class-level constant, not a field).
    _FLAG_BITS = {
        'fin': 0x01, 'syn': 0x02, 'rst': 0x04, 'psh': 0x08,
        'ack': 0x10, 'urg': 0x20, 'ece': 0x40, 'cwr': 0x80, 'ns': 0x100,
    }

    @property
    def header_length(self) -> int:
        """Header length in bytes (four bytes per data-offset word)."""
        return 4 * self.data_offset

    def _flag(self, name: str) -> bool:
        """True when the named flag bit is currently set."""
        return (self.flags & self._FLAG_BITS[name]) != 0

    # Individual flag accessors.
    @property
    def fin(self) -> bool:
        return self._flag('fin')

    @property
    def syn(self) -> bool:
        return self._flag('syn')

    @property
    def rst(self) -> bool:
        return self._flag('rst')

    @property
    def psh(self) -> bool:
        return self._flag('psh')

    @property
    def ack(self) -> bool:
        return self._flag('ack')

    @property
    def urg(self) -> bool:
        return self._flag('urg')

    def set_flag(self, flag_name: str, value: bool = True):
        """Set or clear a flag by name; unknown names are silently ignored."""
        bit = self._FLAG_BITS.get(flag_name.lower())
        if bit is None:
            return
        if value:
            self.flags |= bit
        else:
            self.flags &= ~bit
|
| 120 |
+
|
| 121 |
+
|
| 122 |
+
@dataclass
class UDPHeader:
    """Decoded UDP header fields."""
    source_port: int = 0
    dest_port: int = 0
    length: int = 8   # header plus payload length, in bytes
    checksum: int = 0

    @property
    def header_length(self) -> int:
        """UDP headers are always exactly 8 bytes."""
        return 8
|
| 134 |
+
|
| 135 |
+
|
| 136 |
+
@dataclass
class ParsedPacket:
    """A fully parsed packet: IP header, optional transport header, payload."""
    ip_header: IPv4Header
    transport_header: Optional[object] = None  # TCPHeader or UDPHeader when protocol is TCP/UDP
    payload: bytes = b''       # bytes remaining after the transport (or IP) header
    raw_packet: bytes = b''    # the original, unparsed packet bytes
|
| 143 |
+
|
| 144 |
+
|
| 145 |
+
class IPParser:
|
| 146 |
+
"""IPv4 packet parser and assembler"""
|
| 147 |
+
|
| 148 |
+
@staticmethod
|
| 149 |
+
def calculate_checksum(data: bytes) -> int:
|
| 150 |
+
"""Calculate Internet checksum"""
|
| 151 |
+
# Pad data to even length
|
| 152 |
+
if len(data) % 2:
|
| 153 |
+
data += b'\x00'
|
| 154 |
+
|
| 155 |
+
checksum = 0
|
| 156 |
+
for i in range(0, len(data), 2):
|
| 157 |
+
word = (data[i] << 8) + data[i + 1]
|
| 158 |
+
checksum += word
|
| 159 |
+
|
| 160 |
+
# Add carry bits
|
| 161 |
+
while checksum >> 16:
|
| 162 |
+
checksum = (checksum & 0xFFFF) + (checksum >> 16)
|
| 163 |
+
|
| 164 |
+
# One's complement
|
| 165 |
+
return (~checksum) & 0xFFFF
|
| 166 |
+
|
| 167 |
+
@staticmethod
|
| 168 |
+
def verify_checksum(data: bytes, checksum: int) -> bool:
|
| 169 |
+
"""Verify Internet checksum"""
|
| 170 |
+
calculated = IPParser.calculate_checksum(data)
|
| 171 |
+
return calculated == checksum or (calculated + checksum) == 0xFFFF
|
| 172 |
+
|
| 173 |
+
@classmethod
|
| 174 |
+
def parse_ipv4_header(cls, data: bytes) -> Tuple[IPv4Header, int]:
|
| 175 |
+
"""Parse IPv4 header from raw bytes"""
|
| 176 |
+
if len(data) < 20:
|
| 177 |
+
raise ValueError("IPv4 header too short")
|
| 178 |
+
|
| 179 |
+
# Parse fixed part of header
|
| 180 |
+
header_data = struct.unpack('!BBHHHBBH4s4s', data[:20])
|
| 181 |
+
|
| 182 |
+
header = IPv4Header()
|
| 183 |
+
version_ihl = header_data[0]
|
| 184 |
+
header.version = (version_ihl >> 4) & 0xF
|
| 185 |
+
header.ihl = version_ihl & 0xF
|
| 186 |
+
header.tos = header_data[1]
|
| 187 |
+
header.total_length = header_data[2]
|
| 188 |
+
header.identification = header_data[3]
|
| 189 |
+
flags_fragment = header_data[4]
|
| 190 |
+
header.flags = (flags_fragment >> 13) & 0x7
|
| 191 |
+
header.fragment_offset = flags_fragment & 0x1FFF
|
| 192 |
+
header.ttl = header_data[5]
|
| 193 |
+
header.protocol = header_data[6]
|
| 194 |
+
header.header_checksum = header_data[7]
|
| 195 |
+
header.source_ip = socket.inet_ntoa(header_data[8])
|
| 196 |
+
header.dest_ip = socket.inet_ntoa(header_data[9])
|
| 197 |
+
|
| 198 |
+
# Validate version
|
| 199 |
+
if header.version != 4:
|
| 200 |
+
raise ValueError(f"Unsupported IP version: {header.version}")
|
| 201 |
+
|
| 202 |
+
# Parse options if present
|
| 203 |
+
options_length = header.header_length - 20
|
| 204 |
+
if options_length > 0:
|
| 205 |
+
if len(data) < 20 + options_length:
|
| 206 |
+
raise ValueError("IPv4 options truncated")
|
| 207 |
+
header.options = data[20:20 + options_length]
|
| 208 |
+
|
| 209 |
+
return header, header.header_length
|
| 210 |
+
|
| 211 |
+
@classmethod
|
| 212 |
+
def parse_tcp_header(cls, data: bytes) -> Tuple[TCPHeader, int]:
|
| 213 |
+
"""Parse TCP header from raw bytes"""
|
| 214 |
+
if len(data) < 20:
|
| 215 |
+
raise ValueError("TCP header too short")
|
| 216 |
+
|
| 217 |
+
# Parse fixed part of header
|
| 218 |
+
header_data = struct.unpack('!HHIIBBHHH', data[:20])
|
| 219 |
+
|
| 220 |
+
header = TCPHeader()
|
| 221 |
+
header.source_port = header_data[0]
|
| 222 |
+
header.dest_port = header_data[1]
|
| 223 |
+
header.seq_num = header_data[2]
|
| 224 |
+
header.ack_num = header_data[3]
|
| 225 |
+
offset_reserved = header_data[4]
|
| 226 |
+
header.data_offset = (offset_reserved >> 4) & 0xF
|
| 227 |
+
header.reserved = (offset_reserved >> 1) & 0x7
|
| 228 |
+
header.flags = ((offset_reserved & 0x1) << 8) | header_data[5]
|
| 229 |
+
header.window_size = header_data[6]
|
| 230 |
+
header.checksum = header_data[7]
|
| 231 |
+
header.urgent_pointer = header_data[8]
|
| 232 |
+
|
| 233 |
+
# Parse options if present
|
| 234 |
+
options_length = header.header_length - 20
|
| 235 |
+
if options_length > 0:
|
| 236 |
+
if len(data) < 20 + options_length:
|
| 237 |
+
raise ValueError("TCP options truncated")
|
| 238 |
+
header.options = data[20:20 + options_length]
|
| 239 |
+
|
| 240 |
+
return header, header.header_length
|
| 241 |
+
|
| 242 |
+
@classmethod
|
| 243 |
+
def parse_udp_header(cls, data: bytes) -> Tuple[UDPHeader, int]:
|
| 244 |
+
"""Parse UDP header from raw bytes"""
|
| 245 |
+
if len(data) < 8:
|
| 246 |
+
raise ValueError("UDP header too short")
|
| 247 |
+
|
| 248 |
+
header_data = struct.unpack('!HHHH', data[:8])
|
| 249 |
+
|
| 250 |
+
header = UDPHeader()
|
| 251 |
+
header.source_port = header_data[0]
|
| 252 |
+
header.dest_port = header_data[1]
|
| 253 |
+
header.length = header_data[2]
|
| 254 |
+
header.checksum = header_data[3]
|
| 255 |
+
|
| 256 |
+
return header, 8
|
| 257 |
+
|
| 258 |
+
@classmethod
|
| 259 |
+
def parse_packet(cls, data: bytes) -> ParsedPacket:
|
| 260 |
+
"""Parse complete packet"""
|
| 261 |
+
packet = ParsedPacket(raw_packet=data)
|
| 262 |
+
|
| 263 |
+
# Parse IP header
|
| 264 |
+
packet.ip_header, ip_header_len = cls.parse_ipv4_header(data)
|
| 265 |
+
|
| 266 |
+
# Extract payload after IP header
|
| 267 |
+
ip_payload = data[ip_header_len:packet.ip_header.total_length]
|
| 268 |
+
|
| 269 |
+
# Parse transport layer header
|
| 270 |
+
if packet.ip_header.protocol == IPProtocol.TCP.value:
|
| 271 |
+
packet.transport_header, transport_header_len = cls.parse_tcp_header(ip_payload)
|
| 272 |
+
packet.payload = ip_payload[transport_header_len:]
|
| 273 |
+
elif packet.ip_header.protocol == IPProtocol.UDP.value:
|
| 274 |
+
packet.transport_header, transport_header_len = cls.parse_udp_header(ip_payload)
|
| 275 |
+
packet.payload = ip_payload[transport_header_len:]
|
| 276 |
+
else:
|
| 277 |
+
# Unsupported protocol, treat as raw payload
|
| 278 |
+
packet.payload = ip_payload
|
| 279 |
+
|
| 280 |
+
return packet
|
| 281 |
+
|
| 282 |
+
    @classmethod
    def build_ipv4_header(cls, header: IPv4Header) -> bytes:
        """Serialize an IPv4Header to wire-format bytes.

        Mutates header.ihl to cover 20 fixed bytes plus options, rounded
        up to a 32-bit boundary.  Options are zero-padded to that
        boundary and the header checksum is computed over the final
        header bytes with the checksum field zeroed.
        """
        # Calculate header length including options
        header.ihl = (20 + len(header.options) + 3) // 4  # Round up to 32-bit boundary

        # Build header without checksum.  Version and IHL share one byte;
        # flags (3 bits) and fragment offset (13 bits) share a 16-bit word.
        version_ihl = (header.version << 4) | header.ihl
        flags_fragment = (header.flags << 13) | header.fragment_offset

        header_data = struct.pack(
            '!BBHHHBBH4s4s',
            version_ihl, header.tos, header.total_length,
            header.identification, flags_fragment,
            header.ttl, header.protocol, 0,  # Checksum = 0 for calculation
            socket.inet_aton(header.source_ip),
            socket.inet_aton(header.dest_ip)
        )

        # Add options and padding
        if header.options:
            header_data += header.options
            # Pad to 32-bit boundary so the header length matches ihl * 4
            padding_needed = (header.ihl * 4) - len(header_data)
            if padding_needed > 0:
                header_data += b'\x00' * padding_needed

        # Calculate and insert checksum (the checksum field occupies
        # bytes 10-11 of the IPv4 header)
        checksum = cls.calculate_checksum(header_data)
        header_data = header_data[:10] + struct.pack('!H', checksum) + header_data[12:]

        return header_data
|
| 314 |
+
|
| 315 |
+
    @classmethod
    def build_tcp_header(cls, header: TCPHeader, source_ip: str, dest_ip: str, payload: bytes) -> bytes:
        """Build TCP header as bytes with checksum.

        Mutates header.data_offset to cover 20 fixed bytes plus options,
        rounded up to a 32-bit boundary.  source_ip/dest_ip and payload
        are required because the TCP checksum covers an IPv4
        pseudo-header and the segment payload, not only the header.
        """
        # Calculate header length including options
        header.data_offset = (20 + len(header.options) + 3) // 4  # Round up to 32-bit boundary

        # Build header without checksum.  Data offset, reserved bits and
        # flags share one 16-bit word: offset in the top 4 bits, reserved
        # in the next 3, flags in the low 9.
        offset_reserved_flags = (header.data_offset << 12) | (header.reserved << 9) | header.flags

        header_data = struct.pack(
            '!HHIIHHH',
            header.source_port, header.dest_port,
            header.seq_num, header.ack_num,
            offset_reserved_flags, header.window_size,
            0, header.urgent_pointer  # Checksum = 0 for calculation
        )

        # Add options and padding
        if header.options:
            header_data += header.options
            # Pad to 32-bit boundary so length matches data_offset * 4
            padding_needed = (header.data_offset * 4) - len(header_data)
            if padding_needed > 0:
                header_data += b'\x00' * padding_needed

        # Calculate TCP checksum with pseudo-header: source address,
        # destination address, zero byte, protocol number, TCP length
        pseudo_header = struct.pack(
            '!4s4sBBH',
            socket.inet_aton(source_ip),
            socket.inet_aton(dest_ip),
            0, IPProtocol.TCP.value,
            len(header_data) + len(payload)
        )

        checksum_data = pseudo_header + header_data + payload
        checksum = cls.calculate_checksum(checksum_data)

        # Insert checksum (the checksum field occupies bytes 16-17)
        header_data = header_data[:16] + struct.pack('!H', checksum) + header_data[18:]

        return header_data
|
| 356 |
+
|
| 357 |
+
@classmethod
|
| 358 |
+
def build_udp_header(cls, header: UDPHeader, source_ip: str, dest_ip: str, payload: bytes) -> bytes:
|
| 359 |
+
"""Build UDP header as bytes with checksum"""
|
| 360 |
+
header.length = 8 + len(payload)
|
| 361 |
+
|
| 362 |
+
# Build header without checksum
|
| 363 |
+
header_data = struct.pack(
|
| 364 |
+
'!HHHH',
|
| 365 |
+
header.source_port, header.dest_port,
|
| 366 |
+
header.length, 0 # Checksum = 0 for calculation
|
| 367 |
+
)
|
| 368 |
+
|
| 369 |
+
# Calculate UDP checksum with pseudo-header (optional for IPv4)
|
| 370 |
+
if header.checksum != 0: # If checksum is required
|
| 371 |
+
pseudo_header = struct.pack(
|
| 372 |
+
'!4s4sBBH',
|
| 373 |
+
socket.inet_aton(source_ip),
|
| 374 |
+
socket.inet_aton(dest_ip),
|
| 375 |
+
0, IPProtocol.UDP.value,
|
| 376 |
+
header.length
|
| 377 |
+
)
|
| 378 |
+
|
| 379 |
+
checksum_data = pseudo_header + header_data + payload
|
| 380 |
+
checksum = cls.calculate_checksum(checksum_data)
|
| 381 |
+
|
| 382 |
+
# Insert checksum
|
| 383 |
+
header_data = header_data[:6] + struct.pack('!H', checksum) + header_data[8:]
|
| 384 |
+
|
| 385 |
+
return header_data
|
| 386 |
+
|
| 387 |
+
@classmethod
|
| 388 |
+
def build_packet(cls, ip_header: IPv4Header, transport_header: Optional[object] = None, payload: bytes = b'') -> bytes:
|
| 389 |
+
"""Build complete packet"""
|
| 390 |
+
transport_data = b''
|
| 391 |
+
|
| 392 |
+
# Build transport header
|
| 393 |
+
if transport_header:
|
| 394 |
+
if isinstance(transport_header, TCPHeader):
|
| 395 |
+
transport_data = cls.build_tcp_header(
|
| 396 |
+
transport_header, ip_header.source_ip, ip_header.dest_ip, payload
|
| 397 |
+
)
|
| 398 |
+
elif isinstance(transport_header, UDPHeader):
|
| 399 |
+
transport_data = cls.build_udp_header(
|
| 400 |
+
transport_header, ip_header.source_ip, ip_header.dest_ip, payload
|
| 401 |
+
)
|
| 402 |
+
|
| 403 |
+
# Update IP header total length
|
| 404 |
+
ip_header.total_length = ip_header.header_length + len(transport_data) + len(payload)
|
| 405 |
+
|
| 406 |
+
# Build IP header
|
| 407 |
+
ip_data = cls.build_ipv4_header(ip_header)
|
| 408 |
+
|
| 409 |
+
# Combine all parts
|
| 410 |
+
return ip_data + transport_data + payload
|
| 411 |
+
|
| 412 |
+
|
| 413 |
+
class PacketFragmenter:
    """Handle IPv4 packet fragmentation and reassembly.

    Fragmentation splits an oversized packet into MTU-sized pieces whose
    payload lengths are multiples of 8 bytes (required by the IPv4
    fragment-offset encoding).  Reassembly buffers fragments per
    (source, destination, identification) key until a contiguous byte
    range ending in a fragment with the More Fragments flag clear is
    available.
    """

    def __init__(self, mtu: int = 1500):
        self.mtu = mtu
        # (src_ip, dst_ip, ip_id) -> [(offset_bytes, payload, more_fragments)]
        self.fragments: Dict[Tuple[str, str, int], List[Tuple[int, bytes, bool]]] = {}

    def fragment_packet(self, packet: bytes, mtu: int = None) -> List[bytes]:
        """Fragment a packet if it exceeds the MTU.

        Returns [packet] unchanged when it already fits.  Raises
        ValueError when fragmentation is needed but the Don't Fragment
        flag is set.
        """
        if mtu is None:
            mtu = self.mtu

        if len(packet) <= mtu:
            return [packet]

        # Parse original packet
        parsed = IPParser.parse_packet(packet)
        ip_header = parsed.ip_header

        # Don't fragment if DF flag is set
        if ip_header.dont_fragment:
            raise ValueError("Packet too large and Don't Fragment flag is set")

        fragments = []
        payload_mtu = mtu - ip_header.header_length
        payload_mtu = (payload_mtu // 8) * 8  # Offsets are encoded in 8-byte units

        # Everything after the IP header (including any transport header)
        # is fragmented as opaque payload
        payload_start = ip_header.header_length
        payload = packet[payload_start:]

        offset = 0
        while offset < len(payload):
            fragment_payload = payload[offset:offset + payload_mtu]

            # Fresh IP header for this fragment; the identification field
            # is preserved so the receiver can correlate the pieces
            frag_header = IPv4Header(
                version=ip_header.version,
                ihl=ip_header.ihl,
                tos=ip_header.tos,
                identification=ip_header.identification,
                ttl=ip_header.ttl,
                protocol=ip_header.protocol,
                source_ip=ip_header.source_ip,
                dest_ip=ip_header.dest_ip,
                options=ip_header.options
            )

            # Offset is relative to the original datagram, in 8-byte units
            frag_header.fragment_offset = (ip_header.fragment_offset * 8 + offset) // 8
            frag_header.flags = ip_header.flags

            # Set More Fragments on every piece except the last
            if offset + len(fragment_payload) < len(payload):
                frag_header.flags |= 0x1  # More Fragments
            else:
                frag_header.flags &= ~0x1  # Clear More Fragments

            fragments.append(IPParser.build_packet(frag_header, payload=fragment_payload))
            offset += len(fragment_payload)

        return fragments

    def reassemble_packet(self, packet: bytes) -> Optional[bytes]:
        """Reassemble a fragmented packet.

        Returns the packet unchanged if it is not a fragment, the
        reassembled datagram once all fragments have been received, or
        None while fragments are still missing.

        Fix over the previous version: the More Fragments flag is now
        recorded per buffered fragment, so reassembly succeeds regardless
        of the order in which the final fragment arrives (previously only
        the currently received packet was inspected for the last-fragment
        flag, so out-of-order delivery never completed).
        """
        parsed = IPParser.parse_packet(packet)
        ip_header = parsed.ip_header

        # Pass non-fragmented traffic through untouched
        if not ip_header.is_fragment:
            return packet

        # Buffer this fragment under its reassembly key
        key = (ip_header.source_ip, ip_header.dest_ip, ip_header.identification)
        fragment_data = packet[ip_header.header_length:]
        self.fragments.setdefault(key, []).append(
            (ip_header.fragment_offset * 8, fragment_data, ip_header.more_fragments)
        )

        fragments = sorted(self.fragments[key])

        # Without the final fragment (More Fragments clear) the total
        # datagram size is unknown -- keep waiting.
        if all(more for _offset, _data, more in fragments):
            return None

        # Require a contiguous byte range starting at offset 0
        expected_offset = 0
        complete_payload = b''
        for offset, data, _more in fragments:
            if offset != expected_offset:
                return None  # Missing fragment
            complete_payload += data
            expected_offset += len(data)

        # Rebuild a single unfragmented header
        complete_header = IPv4Header(
            version=ip_header.version,
            ihl=ip_header.ihl,
            tos=ip_header.tos,
            identification=ip_header.identification,
            flags=ip_header.flags & ~0x1,  # Clear More Fragments
            fragment_offset=0,
            ttl=ip_header.ttl,
            protocol=ip_header.protocol,
            source_ip=ip_header.source_ip,
            dest_ip=ip_header.dest_ip,
            options=ip_header.options
        )

        complete_packet = IPParser.build_packet(complete_header, payload=complete_payload)

        # Reassembly done; drop the buffered fragments
        del self.fragments[key]

        return complete_packet
|
| 546 |
+
|
core/logger.py
ADDED
|
@@ -0,0 +1,555 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""
|
| 2 |
+
Logger Module
|
| 3 |
+
|
| 4 |
+
Centralized logging system for the virtual ISP stack:
|
| 5 |
+
- Structured logging with multiple levels
|
| 6 |
+
- Log aggregation and filtering
|
| 7 |
+
- Real-time log streaming
|
| 8 |
+
- Log persistence and rotation
|
| 9 |
+
"""
|
| 10 |
+
|
| 11 |
+
import logging
|
| 12 |
+
import logging.handlers
|
| 13 |
+
import time
|
| 14 |
+
import threading
|
| 15 |
+
import json
|
| 16 |
+
import os
|
| 17 |
+
from typing import Dict, List, Optional, Any, Callable
|
| 18 |
+
from dataclasses import dataclass, asdict
|
| 19 |
+
from enum import Enum
|
| 20 |
+
from collections import deque
|
| 21 |
+
import queue
|
| 22 |
+
|
| 23 |
+
|
| 24 |
+
class LogLevel(Enum):
    """Log severity levels; values mirror the stdlib ``logging`` level names."""

    DEBUG = "DEBUG"
    INFO = "INFO"
    WARNING = "WARNING"
    ERROR = "ERROR"
    CRITICAL = "CRITICAL"
|
| 30 |
+
|
| 31 |
+
|
| 32 |
+
class LogCategory(Enum):
    """Functional subsystem a log entry originates from."""

    SYSTEM = "SYSTEM"
    DHCP = "DHCP"
    NAT = "NAT"
    FIREWALL = "FIREWALL"
    TCP = "TCP"
    ROUTER = "ROUTER"
    BRIDGE = "BRIDGE"
    SOCKET = "SOCKET"
    SESSION = "SESSION"
    SECURITY = "SECURITY"
    PERFORMANCE = "PERFORMANCE"
|
| 44 |
+
|
| 45 |
+
|
| 46 |
+
@dataclass
class LogEntry:
    """Structured log entry.

    A timestamp of 0 means "now" and is replaced at construction time;
    a metadata of None is normalized to an empty dict so callers can
    always treat it as a mapping.
    """
    timestamp: float
    level: str       # LogLevel value string, e.g. "INFO"
    category: str    # LogCategory value string, e.g. "NAT"
    module: str
    message: str
    session_id: Optional[str] = None
    client_id: Optional[str] = None
    source_ip: Optional[str] = None
    dest_ip: Optional[str] = None
    protocol: Optional[str] = None
    # Fixed annotation: the default really is None (filled in below), so
    # the field type must be Optional[Dict[str, Any]], not Dict[str, Any].
    metadata: Optional[Dict[str, Any]] = None

    def __post_init__(self):
        # Zero timestamp is the "fill in for me" sentinel
        if self.timestamp == 0:
            self.timestamp = time.time()
        # Normalize missing metadata to an empty dict
        if self.metadata is None:
            self.metadata = {}

    def to_dict(self) -> Dict:
        """Convert to dictionary"""
        return asdict(self)

    def to_json(self) -> str:
        """Convert to JSON string"""
        return json.dumps(self.to_dict(), default=str)
|
| 74 |
+
|
| 75 |
+
|
| 76 |
+
class LogFilter:
    """Criteria object used to select a subset of log entries.

    Every criterion defaults to None (= "don't care"); matches() accepts
    an entry only when all configured criteria are satisfied.
    """

    def __init__(self):
        self.level_filter: Optional[LogLevel] = None       # minimum severity
        self.category_filter: Optional[LogCategory] = None
        self.module_filter: Optional[str] = None           # substring, case-insensitive
        self.session_filter: Optional[str] = None          # exact session id
        self.client_filter: Optional[str] = None           # exact client id
        self.ip_filter: Optional[str] = None               # matches either endpoint
        self.text_filter: Optional[str] = None             # substring of message
        self.time_range: Optional[tuple] = None            # (start_ts, end_ts) inclusive

    def matches(self, entry: LogEntry) -> bool:
        """Return True when *entry* satisfies every configured criterion."""
        # Minimum severity: compare numeric stdlib logging levels
        if self.level_filter:
            if getattr(logging, entry.level) < getattr(logging, self.level_filter.value):
                return False

        # Exact category
        if self.category_filter and entry.category != self.category_filter.value:
            return False

        # Case-insensitive module substring
        if self.module_filter and self.module_filter.lower() not in entry.module.lower():
            return False

        # Exact session / client ids
        if self.session_filter and entry.session_id != self.session_filter:
            return False
        if self.client_filter and entry.client_id != self.client_filter:
            return False

        # IP matches either the source or the destination
        if self.ip_filter and self.ip_filter not in (entry.source_ip, entry.dest_ip):
            return False

        # Case-insensitive message substring
        if self.text_filter and self.text_filter.lower() not in entry.message.lower():
            return False

        # Inclusive timestamp window
        if self.time_range:
            lo, hi = self.time_range
            if not (lo <= entry.timestamp <= hi):
                return False

        return True
|
| 131 |
+
|
| 132 |
+
|
| 133 |
+
class LogSubscriber:
    """Delivers matching log entries to a callback for real-time streaming.

    A subscriber whose callback raises is permanently deactivated; the
    owning logger is expected to prune inactive subscribers.
    """

    def __init__(self, subscriber_id: str, callback: Callable[[LogEntry], None],
                 log_filter: Optional[LogFilter] = None):
        self.subscriber_id = subscriber_id
        self.callback = callback
        self.filter = log_filter or LogFilter()   # no filter = accept everything
        self.created_time = time.time()
        self.message_count = 0
        self.last_message_time = None
        self.is_active = True

    def send_log(self, entry: LogEntry) -> bool:
        """Deliver *entry* if the subscriber is active and the filter matches.

        Returns True only when the entry was actually delivered; a
        non-matching entry or an inactive subscriber yields False.
        """
        if not self.is_active:
            return False
        if not self.filter.matches(entry):
            return False

        try:
            self.callback(entry)
        except Exception as e:
            # A failing callback deactivates the subscriber for good
            print(f"Error sending log to subscriber {self.subscriber_id}: {e}")
            self.is_active = False
            return False

        self.message_count += 1
        self.last_message_time = time.time()
        return True
|
| 163 |
+
|
| 164 |
+
|
| 165 |
+
class VirtualISPLogger:
|
| 166 |
+
"""Centralized logger for Virtual ISP stack"""
|
| 167 |
+
|
| 168 |
+
def __init__(self, config: Dict):
|
| 169 |
+
self.config = config
|
| 170 |
+
self.log_entries: deque = deque(maxlen=config.get('max_memory_logs', 10000))
|
| 171 |
+
self.subscribers: Dict[str, LogSubscriber] = {}
|
| 172 |
+
self.lock = threading.Lock()
|
| 173 |
+
|
| 174 |
+
# Configuration
|
| 175 |
+
self.log_level = LogLevel(config.get('log_level', 'INFO'))
|
| 176 |
+
self.log_to_file = config.get('log_to_file', True)
|
| 177 |
+
self.log_file_path = config.get('log_file_path', '/tmp/virtual_isp.log')
|
| 178 |
+
self.log_file_max_size = config.get('log_file_max_size', 10 * 1024 * 1024) # 10MB
|
| 179 |
+
self.log_file_backup_count = config.get('log_file_backup_count', 5)
|
| 180 |
+
self.log_to_console = config.get('log_to_console', True)
|
| 181 |
+
self.structured_logging = config.get('structured_logging', True)
|
| 182 |
+
|
| 183 |
+
# Statistics
|
| 184 |
+
self.stats = {
|
| 185 |
+
'total_logs': 0,
|
| 186 |
+
'logs_by_level': {level.value: 0 for level in LogLevel},
|
| 187 |
+
'logs_by_category': {cat.value: 0 for cat in LogCategory},
|
| 188 |
+
'active_subscribers': 0,
|
| 189 |
+
'file_logs_written': 0,
|
| 190 |
+
'console_logs_written': 0,
|
| 191 |
+
'dropped_logs': 0
|
| 192 |
+
}
|
| 193 |
+
|
| 194 |
+
# Setup logging
|
| 195 |
+
self._setup_logging()
|
| 196 |
+
|
| 197 |
+
# Background processing
|
| 198 |
+
self.running = False
|
| 199 |
+
self.log_queue = queue.Queue()
|
| 200 |
+
self.processing_thread = None
|
| 201 |
+
|
| 202 |
+
def _setup_logging(self):
|
| 203 |
+
"""Setup Python logging infrastructure"""
|
| 204 |
+
# Create logger
|
| 205 |
+
self.logger = logging.getLogger('virtual_isp')
|
| 206 |
+
self.logger.setLevel(getattr(logging, self.log_level.value))
|
| 207 |
+
|
| 208 |
+
# Remove existing handlers
|
| 209 |
+
for handler in self.logger.handlers[:]:
|
| 210 |
+
self.logger.removeHandler(handler)
|
| 211 |
+
|
| 212 |
+
# Console handler
|
| 213 |
+
if self.log_to_console:
|
| 214 |
+
console_handler = logging.StreamHandler()
|
| 215 |
+
if self.structured_logging:
|
| 216 |
+
console_formatter = logging.Formatter(
|
| 217 |
+
'%(asctime)s - %(name)s - %(levelname)s - %(message)s'
|
| 218 |
+
)
|
| 219 |
+
else:
|
| 220 |
+
console_formatter = logging.Formatter('%(message)s')
|
| 221 |
+
console_handler.setFormatter(console_formatter)
|
| 222 |
+
self.logger.addHandler(console_handler)
|
| 223 |
+
|
| 224 |
+
# File handler with rotation
|
| 225 |
+
if self.log_to_file:
|
| 226 |
+
# Ensure log directory exists
|
| 227 |
+
log_dir = os.path.dirname(self.log_file_path)
|
| 228 |
+
if log_dir and not os.path.exists(log_dir):
|
| 229 |
+
os.makedirs(log_dir, exist_ok=True)
|
| 230 |
+
|
| 231 |
+
file_handler = logging.handlers.RotatingFileHandler(
|
| 232 |
+
self.log_file_path,
|
| 233 |
+
maxBytes=self.log_file_max_size,
|
| 234 |
+
backupCount=self.log_file_backup_count
|
| 235 |
+
)
|
| 236 |
+
|
| 237 |
+
if self.structured_logging:
|
| 238 |
+
file_formatter = logging.Formatter(
|
| 239 |
+
'%(asctime)s - %(name)s - %(levelname)s - %(message)s'
|
| 240 |
+
)
|
| 241 |
+
else:
|
| 242 |
+
file_formatter = logging.Formatter('%(message)s')
|
| 243 |
+
|
| 244 |
+
file_handler.setFormatter(file_formatter)
|
| 245 |
+
self.logger.addHandler(file_handler)
|
| 246 |
+
|
| 247 |
+
def _process_log_queue(self):
|
| 248 |
+
"""Background thread to process log queue"""
|
| 249 |
+
while self.running:
|
| 250 |
+
try:
|
| 251 |
+
# Get log entry from queue (with timeout)
|
| 252 |
+
try:
|
| 253 |
+
entry = self.log_queue.get(timeout=1.0)
|
| 254 |
+
except queue.Empty:
|
| 255 |
+
continue
|
| 256 |
+
|
| 257 |
+
# Store in memory
|
| 258 |
+
with self.lock:
|
| 259 |
+
self.log_entries.append(entry)
|
| 260 |
+
|
| 261 |
+
# Send to subscribers
|
| 262 |
+
inactive_subscribers = []
|
| 263 |
+
with self.lock:
|
| 264 |
+
for subscriber_id, subscriber in self.subscribers.items():
|
| 265 |
+
if not subscriber.send_log(entry):
|
| 266 |
+
inactive_subscribers.append(subscriber_id)
|
| 267 |
+
|
| 268 |
+
# Remove inactive subscribers
|
| 269 |
+
for subscriber_id in inactive_subscribers:
|
| 270 |
+
self.remove_subscriber(subscriber_id)
|
| 271 |
+
|
| 272 |
+
# Update statistics
|
| 273 |
+
self.stats['total_logs'] += 1
|
| 274 |
+
self.stats['logs_by_level'][entry.level] += 1
|
| 275 |
+
self.stats['logs_by_category'][entry.category] += 1
|
| 276 |
+
|
| 277 |
+
# Mark task as done
|
| 278 |
+
self.log_queue.task_done()
|
| 279 |
+
|
| 280 |
+
except Exception as e:
|
| 281 |
+
print(f"Error processing log queue: {e}")
|
| 282 |
+
time.sleep(1)
|
| 283 |
+
|
| 284 |
+
def log(self, level: LogLevel, category: LogCategory, module: str, message: str,
|
| 285 |
+
session_id: Optional[str] = None, client_id: Optional[str] = None,
|
| 286 |
+
source_ip: Optional[str] = None, dest_ip: Optional[str] = None,
|
| 287 |
+
protocol: Optional[str] = None, **metadata):
|
| 288 |
+
"""Log a message"""
|
| 289 |
+
# Check if we should log this level
|
| 290 |
+
level_value = getattr(logging, level.value)
|
| 291 |
+
min_level_value = getattr(logging, self.log_level.value)
|
| 292 |
+
if level_value < min_level_value:
|
| 293 |
+
return
|
| 294 |
+
|
| 295 |
+
# Create log entry
|
| 296 |
+
entry = LogEntry(
|
| 297 |
+
timestamp=time.time(),
|
| 298 |
+
level=level.value,
|
| 299 |
+
category=category.value,
|
| 300 |
+
module=module,
|
| 301 |
+
message=message,
|
| 302 |
+
session_id=session_id,
|
| 303 |
+
client_id=client_id,
|
| 304 |
+
source_ip=source_ip,
|
| 305 |
+
dest_ip=dest_ip,
|
| 306 |
+
protocol=protocol,
|
| 307 |
+
metadata=metadata
|
| 308 |
+
)
|
| 309 |
+
|
| 310 |
+
# Add to queue for background processing
|
| 311 |
+
try:
|
| 312 |
+
self.log_queue.put_nowait(entry)
|
| 313 |
+
except queue.Full:
|
| 314 |
+
self.stats['dropped_logs'] += 1
|
| 315 |
+
|
| 316 |
+
# Also log through Python logging system
|
| 317 |
+
if self.structured_logging:
|
| 318 |
+
log_data = entry.to_dict()
|
| 319 |
+
log_message = f"{message} | {json.dumps(log_data, default=str)}"
|
| 320 |
+
else:
|
| 321 |
+
log_message = message
|
| 322 |
+
|
| 323 |
+
# Log to Python logger
|
| 324 |
+
python_logger_level = getattr(logging, level.value)
|
| 325 |
+
self.logger.log(python_logger_level, log_message)
|
| 326 |
+
|
| 327 |
+
# Update console/file stats
|
| 328 |
+
if self.log_to_console:
|
| 329 |
+
self.stats['console_logs_written'] += 1
|
| 330 |
+
if self.log_to_file:
|
| 331 |
+
self.stats['file_logs_written'] += 1
|
| 332 |
+
|
| 333 |
+
def debug(self, category: LogCategory, module: str, message: str, **kwargs):
|
| 334 |
+
"""Log debug message"""
|
| 335 |
+
self.log(LogLevel.DEBUG, category, module, message, **kwargs)
|
| 336 |
+
|
| 337 |
+
def info(self, category: LogCategory, module: str, message: str, **kwargs):
|
| 338 |
+
"""Log info message"""
|
| 339 |
+
self.log(LogLevel.INFO, category, module, message, **kwargs)
|
| 340 |
+
|
| 341 |
+
def warning(self, category: LogCategory, module: str, message: str, **kwargs):
|
| 342 |
+
"""Log warning message"""
|
| 343 |
+
self.log(LogLevel.WARNING, category, module, message, **kwargs)
|
| 344 |
+
|
| 345 |
+
def error(self, category: LogCategory, module: str, message: str, **kwargs):
|
| 346 |
+
"""Log error message"""
|
| 347 |
+
self.log(LogLevel.ERROR, category, module, message, **kwargs)
|
| 348 |
+
|
| 349 |
+
def critical(self, category: LogCategory, module: str, message: str, **kwargs):
|
| 350 |
+
"""Log critical message"""
|
| 351 |
+
self.log(LogLevel.CRITICAL, category, module, message, **kwargs)
|
| 352 |
+
|
| 353 |
+
def add_subscriber(self, subscriber_id: str, callback: Callable[[LogEntry], None],
|
| 354 |
+
log_filter: Optional[LogFilter] = None) -> bool:
|
| 355 |
+
"""Add log subscriber for real-time streaming"""
|
| 356 |
+
with self.lock:
|
| 357 |
+
if subscriber_id in self.subscribers:
|
| 358 |
+
return False
|
| 359 |
+
|
| 360 |
+
subscriber = LogSubscriber(subscriber_id, callback, log_filter)
|
| 361 |
+
self.subscribers[subscriber_id] = subscriber
|
| 362 |
+
self.stats['active_subscribers'] = len(self.subscribers)
|
| 363 |
+
|
| 364 |
+
return True
|
| 365 |
+
|
| 366 |
+
def remove_subscriber(self, subscriber_id: str) -> bool:
|
| 367 |
+
"""Remove log subscriber"""
|
| 368 |
+
with self.lock:
|
| 369 |
+
if subscriber_id in self.subscribers:
|
| 370 |
+
del self.subscribers[subscriber_id]
|
| 371 |
+
self.stats['active_subscribers'] = len(self.subscribers)
|
| 372 |
+
return True
|
| 373 |
+
return False
|
| 374 |
+
|
| 375 |
+
def get_logs(self, limit: int = 100, offset: int = 0,
|
| 376 |
+
log_filter: Optional[LogFilter] = None) -> List[Dict]:
|
| 377 |
+
"""Get logs with filtering and pagination"""
|
| 378 |
+
with self.lock:
|
| 379 |
+
# Convert deque to list for easier manipulation
|
| 380 |
+
all_logs = list(self.log_entries)
|
| 381 |
+
|
| 382 |
+
# Apply filter
|
| 383 |
+
if log_filter:
|
| 384 |
+
filtered_logs = [entry for entry in all_logs if log_filter.matches(entry)]
|
| 385 |
+
else:
|
| 386 |
+
filtered_logs = all_logs
|
| 387 |
+
|
| 388 |
+
# Sort by timestamp (newest first)
|
| 389 |
+
filtered_logs.sort(key=lambda x: x.timestamp, reverse=True)
|
| 390 |
+
|
| 391 |
+
# Apply pagination
|
| 392 |
+
paginated_logs = filtered_logs[offset:offset + limit]
|
| 393 |
+
|
| 394 |
+
return [entry.to_dict() for entry in paginated_logs]
|
| 395 |
+
|
| 396 |
+
def search_logs(self, query: str, limit: int = 100) -> List[Dict]:
|
| 397 |
+
"""Search logs by text query"""
|
| 398 |
+
log_filter = LogFilter()
|
| 399 |
+
log_filter.text_filter = query
|
| 400 |
+
|
| 401 |
+
return self.get_logs(limit=limit, log_filter=log_filter)
|
| 402 |
+
|
| 403 |
+
def get_logs_by_session(self, session_id: str, limit: int = 100) -> List[Dict]:
|
| 404 |
+
"""Get logs for specific session"""
|
| 405 |
+
log_filter = LogFilter()
|
| 406 |
+
log_filter.session_filter = session_id
|
| 407 |
+
|
| 408 |
+
return self.get_logs(limit=limit, log_filter=log_filter)
|
| 409 |
+
|
| 410 |
+
def get_logs_by_client(self, client_id: str, limit: int = 100) -> List[Dict]:
|
| 411 |
+
"""Get logs for specific client"""
|
| 412 |
+
log_filter = LogFilter()
|
| 413 |
+
log_filter.client_filter = client_id
|
| 414 |
+
|
| 415 |
+
return self.get_logs(limit=limit, log_filter=log_filter)
|
| 416 |
+
|
| 417 |
+
def get_logs_by_ip(self, ip_address: str, limit: int = 100) -> List[Dict]:
|
| 418 |
+
"""Get logs for specific IP address"""
|
| 419 |
+
log_filter = LogFilter()
|
| 420 |
+
log_filter.ip_filter = ip_address
|
| 421 |
+
|
| 422 |
+
return self.get_logs(limit=limit, log_filter=log_filter)
|
| 423 |
+
|
| 424 |
+
def get_recent_errors(self, limit: int = 50) -> List[Dict]:
|
| 425 |
+
"""Get recent error and critical logs"""
|
| 426 |
+
log_filter = LogFilter()
|
| 427 |
+
log_filter.level_filter = LogLevel.ERROR
|
| 428 |
+
|
| 429 |
+
return self.get_logs(limit=limit, log_filter=log_filter)
|
| 430 |
+
|
| 431 |
+
def clear_logs(self):
|
| 432 |
+
"""Clear all logs from memory"""
|
| 433 |
+
with self.lock:
|
| 434 |
+
self.log_entries.clear()
|
| 435 |
+
|
| 436 |
+
def get_stats(self) -> Dict:
|
| 437 |
+
"""Get logging statistics"""
|
| 438 |
+
with self.lock:
|
| 439 |
+
stats = self.stats.copy()
|
| 440 |
+
stats['memory_logs_count'] = len(self.log_entries)
|
| 441 |
+
stats['active_subscribers'] = len(self.subscribers)
|
| 442 |
+
stats['queue_size'] = self.log_queue.qsize()
|
| 443 |
+
|
| 444 |
+
return stats
|
| 445 |
+
|
| 446 |
+
def reset_stats(self):
|
| 447 |
+
"""Reset logging statistics"""
|
| 448 |
+
self.stats = {
|
| 449 |
+
'total_logs': 0,
|
| 450 |
+
'logs_by_level': {level.value: 0 for level in LogLevel},
|
| 451 |
+
'logs_by_category': {cat.value: 0 for cat in LogCategory},
|
| 452 |
+
'active_subscribers': len(self.subscribers),
|
| 453 |
+
'file_logs_written': 0,
|
| 454 |
+
'console_logs_written': 0,
|
| 455 |
+
'dropped_logs': 0
|
| 456 |
+
}
|
| 457 |
+
|
| 458 |
+
def export_logs(self, format: str = 'json', log_filter: Optional[LogFilter] = None) -> str:
    """Serialize up to 10000 filtered log entries as 'json' or 'csv'.

    Raises:
        ValueError: for any other format string.
    """
    entries = self.get_logs(limit=10000, log_filter=log_filter)

    if format == 'json':
        return json.dumps(entries, indent=2, default=str)

    if format == 'csv':
        import csv
        import io

        buffer = io.StringIO()
        if entries:
            # Column order comes from the first entry's keys.
            writer = csv.DictWriter(buffer, fieldnames=entries[0].keys())
            writer.writeheader()
            writer.writerows(entries)
        return buffer.getvalue()

    raise ValueError(f"Unsupported export format: {format}")
def set_log_level(self, level: LogLevel):
    """Apply *level* to both this logger and the underlying stdlib logger."""
    stdlib_level = getattr(logging, level.value)
    self.log_level = level
    self.logger.setLevel(stdlib_level)
def start(self):
    """Mark the logger live and launch the daemon queue-processing thread."""
    self.running = True
    worker = threading.Thread(target=self._process_log_queue, daemon=True)
    self.processing_thread = worker
    worker.start()
    self.info(LogCategory.SYSTEM, 'logger', 'Virtual ISP Logger started')
def stop(self):
    """Stop logger.

    Shutdown order matters: the stop message is queued first so it is
    still processed, then the run flag is cleared, the queue drained,
    the worker joined, and finally all subscribers dropped.
    """
    self.info(LogCategory.SYSTEM, 'logger', 'Virtual ISP Logger stopping')

    self.running = False

    # Wait for queue to be processed
    # NOTE(review): join() blocks until every queued item is task_done();
    # if the worker thread exits before fully draining the queue this can
    # hang — confirm _process_log_queue drains after `running` goes False.
    self.log_queue.join()

    # Wait for processing thread
    if self.processing_thread:
        self.processing_thread.join()

    # Remove all subscribers
    with self.lock:
        self.subscribers.clear()

    print("Virtual ISP Logger stopped")
# Global logger instance
# Module-level singleton: populated by init_logger() and consumed by
# get_logger() and the log_* convenience functions below.
_global_logger: Optional[VirtualISPLogger] = None
def get_logger() -> Optional[VirtualISPLogger]:
    """Return the module-wide logger singleton (None before init_logger())."""
    return _global_logger
def init_logger(config: Dict) -> VirtualISPLogger:
    """Create the module-wide logger singleton from *config* and return it."""
    global _global_logger
    instance = VirtualISPLogger(config)
    _global_logger = instance
    return instance
def log_debug(category: LogCategory, module: str, message: str, **kwargs):
    """Module-level DEBUG shortcut; silently a no-op before init_logger()."""
    target = _global_logger
    if target:
        target.debug(category, module, message, **kwargs)
def log_info(category: LogCategory, module: str, message: str, **kwargs):
    """Module-level INFO shortcut; silently a no-op before init_logger()."""
    target = _global_logger
    if target:
        target.info(category, module, message, **kwargs)
def log_warning(category: LogCategory, module: str, message: str, **kwargs):
    """Module-level WARNING shortcut; silently a no-op before init_logger()."""
    target = _global_logger
    if target:
        target.warning(category, module, message, **kwargs)
def log_error(category: LogCategory, module: str, message: str, **kwargs):
    """Module-level ERROR shortcut; silently a no-op before init_logger()."""
    target = _global_logger
    if target:
        target.error(category, module, message, **kwargs)
def log_critical(category: LogCategory, module: str, message: str, **kwargs):
    """Module-level CRITICAL shortcut; silently a no-op before init_logger()."""
    target = _global_logger
    if target:
        target.critical(category, module, message, **kwargs)
core/nat_engine.py
ADDED
|
@@ -0,0 +1,516 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""
|
| 2 |
+
NAT Engine Module
|
| 3 |
+
|
| 4 |
+
Implements Network Address Translation:
|
| 5 |
+
- Map (virtualIP, virtualPort) to (hostIP, hostPort)
|
| 6 |
+
- Maintain connection tracking table
|
| 7 |
+
- Handle port allocation and deallocation
|
| 8 |
+
- Support connection state tracking
|
| 9 |
+
"""
|
| 10 |
+
|
| 11 |
+
import time
|
| 12 |
+
import threading
|
| 13 |
+
import socket
|
| 14 |
+
import random
|
| 15 |
+
from typing import Dict, Optional, Tuple, Set
|
| 16 |
+
from dataclasses import dataclass
|
| 17 |
+
from enum import Enum
|
| 18 |
+
|
| 19 |
+
from .ip_parser import IPProtocol
|
| 20 |
+
|
| 21 |
+
|
| 22 |
+
class NATType(Enum):
    """Direction of address translation performed by a session."""
    SNAT = "SNAT"  # Source NAT: rewrite the sender's address (outbound traffic)
    DNAT = "DNAT"  # Destination NAT: rewrite the receiver's address (port forwarding)
@dataclass
class NATSession:
    """One tracked translation: a virtual endpoint mapped to a host endpoint
    for traffic toward a real (external) peer."""
    # Virtual (internal) endpoint
    virtual_ip: str
    virtual_port: int

    # Real (external) endpoint
    real_ip: str
    real_port: int

    # Host (translated) endpoint
    host_ip: str
    host_port: int

    # Session metadata
    protocol: str  # 'TCP' or 'UDP'
    nat_type: NATType
    created_time: float
    last_activity: float
    bytes_in: int = 0
    bytes_out: int = 0
    packets_in: int = 0
    packets_out: int = 0

    @property
    def session_id(self) -> str:
        """Unique identifier built from both endpoints plus the protocol."""
        virt = f"{self.virtual_ip}:{self.virtual_port}"
        real = f"{self.real_ip}:{self.real_port}"
        return f"{virt}-{real}-{self.protocol}"

    @property
    def is_expired(self) -> bool:
        """True once idle time exceeds the protocol timeout (TCP 300s, else 60s)."""
        idle_limit = 300 if self.protocol == 'TCP' else 60
        return (time.time() - self.last_activity) > idle_limit

    @property
    def duration(self) -> float:
        """Seconds elapsed since the session was created."""
        return time.time() - self.created_time

    def update_activity(self, bytes_transferred: int = 0, direction: str = 'out'):
        """Refresh the idle timer and account one packet of *bytes_transferred*
        in the given direction ('out' or anything else for inbound)."""
        self.last_activity = time.time()

        if direction == 'out':
            self.packets_out += 1
            self.bytes_out += bytes_transferred
        else:
            self.packets_in += 1
            self.bytes_in += bytes_transferred
class PortPool:
    """Thread-safe pool of NAT ports in the inclusive range [start_port, end_port]."""

    def __init__(self, start_port: int = 10000, end_port: int = 65535):
        self.start_port = start_port
        self.end_port = end_port
        # Free ports; allocate_port removes, release_port re-adds.
        self.available_ports: Set[int] = set(range(start_port, end_port + 1))
        # Reverse map of in-use ports to the owning session id.
        self.allocated_ports: Dict[int, str] = {}
        self.lock = threading.Lock()

    def allocate_port(self, session_id: str) -> Optional[int]:
        """Reserve a free port for *session_id*; None when the pool is exhausted."""
        with self.lock:
            if not self.available_ports:
                return None

            # Pick at random to spread allocations across the range.
            chosen = random.choice(list(self.available_ports))
            self.available_ports.discard(chosen)
            self.allocated_ports[chosen] = session_id
            return chosen

    def release_port(self, port: int) -> bool:
        """Return *port* to the pool; False when it was not allocated."""
        with self.lock:
            if port not in self.allocated_ports:
                return False
            self.allocated_ports.pop(port)
            # Only re-add ports that actually belong to this pool's range.
            if self.start_port <= port <= self.end_port:
                self.available_ports.add(port)
            return True

    def get_session_for_port(self, port: int) -> Optional[str]:
        """Session id currently holding *port*, or None."""
        with self.lock:
            return self.allocated_ports.get(port)

    def get_stats(self) -> Dict:
        """Pool size, free/used counts and utilization ratio in [0, 1]."""
        with self.lock:
            size = self.end_port - self.start_port + 1
            used = len(self.allocated_ports)
            return {
                'total_ports': size,
                'available_ports': len(self.available_ports),
                'allocated_ports': used,
                'utilization': used / size,
            }
class NATEngine:
    """Network Address Translation engine (SNAT side).

    Tracks NATSession objects in three indexes — by session id, by virtual
    (internal) endpoint and by host (translated) endpoint — and allocates
    host-side ports from a PortPool.  A background daemon thread reaps idle
    sessions.  All public methods are safe to call from multiple threads.
    """

    def __init__(self, config: Dict):
        self.config = config
        self.sessions: Dict[str, NATSession] = {}  # session_id -> session
        self.virtual_to_session: Dict[Tuple[str, int, str], str] = {}  # (vip, vport, proto) -> session_id
        self.host_to_session: Dict[Tuple[str, int, str], str] = {}  # (hip, hport, proto) -> session_id
        # BUGFIX: this must be a re-entrant lock.  Public methods such as
        # close_session() and close_session_by_virtual() call
        # _remove_session() while already holding the lock, and
        # _remove_session() acquires it again; with a plain threading.Lock
        # the second acquire deadlocks the calling thread.
        self.lock = threading.RLock()

        # Port pool for the host side of outbound connections
        self.port_pool = PortPool(
            config.get('port_range_start', 10000),
            config.get('port_range_end', 65535)
        )

        # Host IP used as the translated source address
        self.host_ip = config.get('host_ip', self._get_default_host_ip())

        # Session timeout (seconds).
        # NOTE(review): NATSession.is_expired uses fixed per-protocol
        # timeouts (300s TCP / 60s UDP) and ignores this setting — confirm
        # which behavior is intended.
        self.session_timeout = config.get('session_timeout', 300)

        # Counters exposed via get_stats()
        self.stats = {
            'total_sessions': 0,
            'active_sessions': 0,
            'expired_sessions': 0,
            'port_exhaustion_events': 0,
            'bytes_translated': 0,
            'packets_translated': 0
        }

        # Background cleanup thread state
        self.running = False
        self.cleanup_thread = None

    def _get_default_host_ip(self) -> str:
        """Best-effort detection of the host's outbound IP address.

        A connected UDP socket is used only to query routing (no packets
        are actually sent); falls back to loopback on any failure.
        """
        try:
            with socket.socket(socket.AF_INET, socket.SOCK_DGRAM) as s:
                s.connect(('8.8.8.8', 80))
                return s.getsockname()[0]
        except Exception:
            return '127.0.0.1'

    def _cleanup_expired_sessions(self):
        """Remove every session whose idle timeout has elapsed."""
        expired = []
        with self.lock:
            # Collect first; mutating self.sessions while iterating it
            # would raise RuntimeError.
            for session_id, session in self.sessions.items():
                if session.is_expired:
                    expired.append(session_id)

        for session_id in expired:
            self._remove_session(session_id)
            self.stats['expired_sessions'] += 1

    def _remove_session(self, session_id: str):
        """Drop a session, its two index entries and its allocated host port."""
        with self.lock:
            if session_id not in self.sessions:
                return

            session = self.sessions[session_id]

            # Remove from both endpoint indexes (tolerate already-missing keys)
            self.virtual_to_session.pop(
                (session.virtual_ip, session.virtual_port, session.protocol), None)
            self.host_to_session.pop(
                (session.host_ip, session.host_port, session.protocol), None)

            # Return the host port to the pool
            self.port_pool.release_port(session.host_port)

            # Remove session
            del self.sessions[session_id]

            self.stats['active_sessions'] = len(self.sessions)

    def create_outbound_session(self, virtual_ip: str, virtual_port: int,
                                real_ip: str, real_port: int, protocol: str) -> Optional[NATSession]:
        """Create a SNAT session for an outbound connection.

        Returns None (and counts a port-exhaustion event) when no host
        port is available.
        """
        session_id = f"{virtual_ip}:{virtual_port}-{real_ip}:{real_port}-{protocol}"
        host_port = self.port_pool.allocate_port(session_id)

        if host_port is None:
            self.stats['port_exhaustion_events'] += 1
            return None

        now = time.time()
        session = NATSession(
            virtual_ip=virtual_ip,
            virtual_port=virtual_port,
            real_ip=real_ip,
            real_port=real_port,
            host_ip=self.host_ip,
            host_port=host_port,
            protocol=protocol,
            nat_type=NATType.SNAT,
            created_time=now,
            last_activity=now
        )

        with self.lock:
            self.sessions[session_id] = session

            # Register in both endpoint indexes
            self.virtual_to_session[(virtual_ip, virtual_port, protocol)] = session_id
            self.host_to_session[(self.host_ip, host_port, protocol)] = session_id

            self.stats['total_sessions'] += 1
            self.stats['active_sessions'] = len(self.sessions)

        return session

    def translate_outbound(self, virtual_ip: str, virtual_port: int,
                           real_ip: str, real_port: int, protocol: str) -> Optional[Tuple[str, int]]:
        """Translate an outbound packet (virtual -> host).

        Reuses an existing session or creates one on demand; the whole
        lookup-or-create is done under the (re-entrant) lock so two
        threads cannot create duplicate sessions for the same endpoint.
        """
        virtual_key = (virtual_ip, virtual_port, protocol)

        with self.lock:
            session_id = self.virtual_to_session.get(virtual_key)

            if session_id:
                session = self.sessions[session_id]
                session.update_activity(direction='out')
                return (session.host_ip, session.host_port)

            # No session yet — create one (re-acquires the RLock safely)
            session = self.create_outbound_session(virtual_ip, virtual_port,
                                                   real_ip, real_port, protocol)
            if session:
                return (session.host_ip, session.host_port)

        return None

    def translate_inbound(self, host_ip: str, host_port: int, protocol: str) -> Optional[Tuple[str, int]]:
        """Translate an inbound packet (host -> virtual); None when unknown."""
        host_key = (host_ip, host_port, protocol)

        with self.lock:
            session_id = self.host_to_session.get(host_key)

            if session_id and session_id in self.sessions:
                session = self.sessions[session_id]
                session.update_activity(direction='in')
                return (session.virtual_ip, session.virtual_port)

        return None

    def get_session_by_virtual(self, virtual_ip: str, virtual_port: int, protocol: str) -> Optional[NATSession]:
        """Look up a session by its virtual (internal) endpoint."""
        virtual_key = (virtual_ip, virtual_port, protocol)

        with self.lock:
            session_id = self.virtual_to_session.get(virtual_key)
            if session_id and session_id in self.sessions:
                return self.sessions[session_id]

        return None

    def get_session_by_host(self, host_ip: str, host_port: int, protocol: str) -> Optional[NATSession]:
        """Look up a session by its host (translated) endpoint."""
        host_key = (host_ip, host_port, protocol)

        with self.lock:
            session_id = self.host_to_session.get(host_key)
            if session_id and session_id in self.sessions:
                return self.sessions[session_id]

        return None

    def close_session(self, session_id: str) -> bool:
        """Manually close a session; False when the id is unknown.

        Previously deadlocked: this held self.lock while _remove_session()
        tried to acquire it again — fixed by using an RLock.
        """
        with self.lock:
            if session_id in self.sessions:
                self._remove_session(session_id)
                return True
            return False

    def close_session_by_virtual(self, virtual_ip: str, virtual_port: int, protocol: str) -> bool:
        """Close the session registered for the given virtual endpoint."""
        virtual_key = (virtual_ip, virtual_port, protocol)

        with self.lock:
            session_id = self.virtual_to_session.get(virtual_key)
            if session_id:
                self._remove_session(session_id)
                return True
            return False

    def get_sessions(self) -> Dict[str, Dict]:
        """Snapshot of every active session as plain dictionaries."""
        with self.lock:
            return {
                session_id: {
                    'virtual_ip': session.virtual_ip,
                    'virtual_port': session.virtual_port,
                    'real_ip': session.real_ip,
                    'real_port': session.real_port,
                    'host_ip': session.host_ip,
                    'host_port': session.host_port,
                    'protocol': session.protocol,
                    'nat_type': session.nat_type.value,
                    'created_time': session.created_time,
                    'last_activity': session.last_activity,
                    'duration': session.duration,
                    'bytes_in': session.bytes_in,
                    'bytes_out': session.bytes_out,
                    'packets_in': session.packets_in,
                    'packets_out': session.packets_out,
                    'is_expired': session.is_expired
                }
                for session_id, session in self.sessions.items()
            }

    def get_stats(self) -> Dict:
        """NAT counters merged with the port pool's statistics."""
        port_stats = self.port_pool.get_stats()

        with self.lock:
            current_stats = self.stats.copy()
            current_stats['active_sessions'] = len(self.sessions)
            current_stats.update(port_stats)

        return current_stats

    def update_packet_stats(self, bytes_count: int):
        """Account one translated packet of *bytes_count* bytes.

        Counters are best-effort: updated without the lock, so concurrent
        callers may under-count slightly.
        """
        self.stats['bytes_translated'] += bytes_count
        self.stats['packets_translated'] += 1

    def _cleanup_loop(self):
        """Background loop: reap expired sessions every 30 seconds."""
        while self.running:
            try:
                self._cleanup_expired_sessions()
                time.sleep(30)  # Cleanup every 30 seconds
            except Exception as e:
                # Keep the reaper alive; back off briefly after a failure.
                print(f"NAT cleanup error: {e}")
                time.sleep(5)

    def start(self):
        """Start the NAT engine and its cleanup daemon thread."""
        self.running = True
        self.cleanup_thread = threading.Thread(target=self._cleanup_loop, daemon=True)
        self.cleanup_thread.start()
        print(f"NAT engine started - Host IP: {self.host_ip}, Port range: {self.port_pool.start_port}-{self.port_pool.end_port}")

    def stop(self):
        """Stop the engine and tear down every session.

        NOTE(review): join() can block up to one cleanup interval (30s)
        because _cleanup_loop only rechecks `running` between sleeps.
        """
        self.running = False
        if self.cleanup_thread:
            self.cleanup_thread.join()

        # Close all sessions
        with self.lock:
            session_ids = list(self.sessions.keys())
            for session_id in session_ids:
                self._remove_session(session_id)

        print("NAT engine stopped")

    def reset_stats(self):
        """Reset counters; active_sessions is re-read from current state."""
        self.stats = {
            'total_sessions': 0,
            'active_sessions': len(self.sessions),
            'expired_sessions': 0,
            'port_exhaustion_events': 0,
            'bytes_translated': 0,
            'packets_translated': 0
        }
class NATRule:
    """A single DNAT (port-forwarding) rule: external port -> internal endpoint."""

    def __init__(self, external_port: int, internal_ip: str, internal_port: int,
                 protocol: str = 'TCP', enabled: bool = True):
        self.external_port = external_port
        self.internal_ip = internal_ip
        self.internal_port = internal_port
        self.protocol = protocol.upper()  # normalized so matches() is case-insensitive
        self.enabled = enabled
        self.created_time = time.time()
        self.hit_count = 0
        self.last_hit = None

    def matches(self, port: int, protocol: str) -> bool:
        """True when the rule is enabled and both port and protocol agree."""
        if not self.enabled:
            return False
        return port == self.external_port and protocol.upper() == self.protocol

    def record_hit(self):
        """Bump the hit counter and stamp the time of the match."""
        self.hit_count += 1
        self.last_hit = time.time()

    def to_dict(self) -> Dict:
        """Serializable snapshot of the rule and its counters."""
        return {
            'external_port': self.external_port,
            'internal_ip': self.internal_ip,
            'internal_port': self.internal_port,
            'protocol': self.protocol,
            'enabled': self.enabled,
            'created_time': self.created_time,
            'hit_count': self.hit_count,
            'last_hit': self.last_hit,
        }
class DNATEngine:
    """Destination-NAT engine: maps inbound external ports to internal endpoints."""

    def __init__(self):
        self.rules = {}  # rule_id -> NATRule
        self.lock = threading.Lock()

    def add_rule(self, rule_id: str, external_port: int, internal_ip: str,
                 internal_port: int, protocol: str = 'TCP') -> bool:
        """Register a forwarding rule; False when *rule_id* already exists."""
        with self.lock:
            if rule_id in self.rules:
                return False
            self.rules[rule_id] = NATRule(external_port, internal_ip,
                                          internal_port, protocol)
            return True

    def remove_rule(self, rule_id: str) -> bool:
        """Delete a rule; False when the id is unknown."""
        with self.lock:
            return self.rules.pop(rule_id, None) is not None

    def enable_rule(self, rule_id: str) -> bool:
        """Turn a rule on; False when the id is unknown."""
        return self._set_enabled(rule_id, True)

    def disable_rule(self, rule_id: str) -> bool:
        """Turn a rule off; False when the id is unknown."""
        return self._set_enabled(rule_id, False)

    def _set_enabled(self, rule_id: str, flag: bool) -> bool:
        # Shared toggle behind enable_rule/disable_rule.
        with self.lock:
            rule = self.rules.get(rule_id)
            if rule is None:
                return False
            rule.enabled = flag
            return True

    def translate_inbound_dnat(self, external_port: int, protocol: str) -> Optional[Tuple[str, int]]:
        """First matching enabled rule wins; returns (internal_ip, internal_port) or None."""
        with self.lock:
            for candidate in self.rules.values():
                if candidate.matches(external_port, protocol):
                    candidate.record_hit()
                    return (candidate.internal_ip, candidate.internal_port)
            return None

    def get_rules(self) -> Dict[str, Dict]:
        """Snapshot of every rule as plain dictionaries."""
        with self.lock:
            return {rid: rule.to_dict() for rid, rule in self.rules.items()}

    def clear_rules(self):
        """Drop every rule."""
        with self.lock:
            self.rules.clear()
core/openvpn_manager.py
ADDED
|
@@ -0,0 +1,608 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""
|
| 2 |
+
OpenVPN Manager Module
|
| 3 |
+
|
| 4 |
+
Manages OpenVPN server integration with the Virtual ISP Stack
|
| 5 |
+
"""
|
| 6 |
+
|
| 7 |
+
import os
|
| 8 |
+
import json
|
| 9 |
+
import subprocess
|
| 10 |
+
import threading
|
| 11 |
+
import time
|
| 12 |
+
import logging
|
| 13 |
+
from typing import Dict, List, Optional, Any
|
| 14 |
+
from dataclasses import dataclass, asdict
|
| 15 |
+
import ipaddress
|
| 16 |
+
|
| 17 |
+
# Module-level logger following the stdlib one-logger-per-module convention.
logger = logging.getLogger(__name__)
@dataclass
class VPNClient:
    """State of one connected VPN client, as parsed from server status output."""
    client_id: str        # unique identifier for this client connection
    common_name: str      # certificate common name
    ip_address: str       # address assigned within the VPN network
    connected_at: float   # epoch timestamp of connection
    bytes_received: int = 0
    bytes_sent: int = 0
    status: str = "connected"
@dataclass
class VPNServerStatus:
    """Aggregate snapshot of the VPN server's runtime state."""
    is_running: bool
    connected_clients: int
    total_bytes_received: int
    total_bytes_sent: int
    uptime: float          # seconds since the server started
    server_ip: str
    server_port: int
class OpenVPNManager:
    """Manages OpenVPN server and client connections.

    Responsibilities:
    - start/stop the system ``openvpn`` daemon via subprocess (requires sudo)
    - poll the OpenVPN status log to track connected clients
    - generate, store and retrieve per-client ``.ovpn`` configuration files
    - optionally integrate with the ISP stack (DHCP, NAT, firewall, router)

    NOTE(review): client certificates/keys embedded in generated configs are
    development placeholders, not cryptographically valid material — see
    generate_client_config().
    """

    def __init__(self, config: Dict[str, Any]):
        self.config = config
        self.server_config_path = "/etc/openvpn/server/server.conf"
        self.status_log_path = "/var/log/openvpn/openvpn-status.log"
        # Connected clients keyed by certificate common name.
        self.clients: Dict[str, VPNClient] = {}
        self.server_process = None
        self.is_running = False
        self.start_time = None  # epoch seconds when the daemon started, or None

        # VPN network configuration
        self.vpn_network = ipaddress.IPv4Network("10.8.0.0/24")
        self.vpn_server_ip = "10.8.0.1"
        self.vpn_port = 1194

        # Integration with ISP stack (populated via set_isp_components)
        self.dhcp_server = None
        self.nat_engine = None
        self.firewall = None
        self.router = None

        # Status monitoring thread
        self.monitor_thread = None
        self.monitor_running = False

        # Client configuration storage
        self.config_storage_path = "/tmp/vpn_client_configs"
        os.makedirs(self.config_storage_path, exist_ok=True)

    def _client_config_path(self, client_name: str) -> str:
        """Return the on-disk path for a client's .ovpn file.

        The name is reduced to its basename so a caller-supplied value such
        as "../../etc/cron.d/x" cannot escape the storage directory
        (client names may originate from the HTTP API).
        """
        return os.path.join(self.config_storage_path,
                            f"{os.path.basename(client_name)}.ovpn")

    def set_isp_components(self, dhcp_server=None, nat_engine=None, firewall=None, router=None):
        """Set references to ISP stack components for integration.

        All parameters are optional; a None leaves that integration disabled.
        """
        self.dhcp_server = dhcp_server
        self.nat_engine = nat_engine
        self.firewall = firewall
        self.router = router

    def start_server(self) -> bool:
        """Start the OpenVPN server.

        Returns True when the daemon is running (including when it already
        was), False on any failure. On a fresh start this also launches the
        status monitor thread and installs VPN firewall/NAT hooks.
        """
        try:
            if self.is_running:
                logger.warning("OpenVPN server is already running")
                return True

            # Ensure configuration exists
            if not os.path.exists(self.server_config_path):
                logger.error(f"OpenVPN server configuration not found: {self.server_config_path}")
                return False

            # Create log directory
            os.makedirs("/var/log/openvpn", exist_ok=True)

            # Start OpenVPN as a daemon. "--status <file> 10" makes the
            # daemon rewrite the status log every 10 seconds, which is the
            # file _update_client_status() parses.
            cmd = [
                "sudo", "openvpn",
                "--config", self.server_config_path,
                "--daemon", "openvpn-server",
                "--log", "/var/log/openvpn/openvpn.log",
                "--status", self.status_log_path, "10"
            ]

            result = subprocess.run(cmd, capture_output=True, text=True)

            if result.returncode == 0:
                self.is_running = True
                self.start_time = time.time()
                logger.info("OpenVPN server started successfully")

                # Start monitoring thread
                self.start_monitoring()

                # Configure firewall rules for VPN
                self._configure_vpn_firewall()

                # Configure NAT for VPN traffic
                self._configure_vpn_nat()

                return True
            else:
                logger.error(f"Failed to start OpenVPN server: {result.stderr}")
                return False

        except Exception as e:
            logger.error(f"Error starting OpenVPN server: {e}")
            return False

    def stop_server(self) -> bool:
        """Stop the OpenVPN server.

        Returns True when the server ends up stopped (including when it was
        never running); False only on unexpected errors.
        """
        try:
            if not self.is_running:
                logger.warning("OpenVPN server is not running")
                return True

            # Stop monitoring first so the monitor thread does not keep
            # parsing the status log of a dying daemon.
            self.stop_monitoring()

            # Kill the OpenVPN process. pkill's exit status is deliberately
            # ignored: non-zero only means no matching process was found.
            subprocess.run(["sudo", "pkill", "-f", "openvpn.*server"],
                           capture_output=True, text=True)

            self.is_running = False
            self.start_time = None
            self.clients.clear()

            logger.info("OpenVPN server stopped")
            return True

        except Exception as e:
            logger.error(f"Error stopping OpenVPN server: {e}")
            return False

    def start_monitoring(self):
        """Start the client monitoring thread (idempotent)."""
        if self.monitor_thread and self.monitor_thread.is_alive():
            return

        self.monitor_running = True
        # Daemon thread: must not keep the interpreter alive on shutdown.
        self.monitor_thread = threading.Thread(target=self._monitor_clients, daemon=True)
        self.monitor_thread.start()
        logger.info("Started OpenVPN client monitoring")

    def stop_monitoring(self):
        """Stop the client monitoring thread, waiting up to 5s for it."""
        self.monitor_running = False
        if self.monitor_thread:
            self.monitor_thread.join(timeout=5)
        logger.info("Stopped OpenVPN client monitoring")

    def _monitor_clients(self):
        """Monitor loop: refresh client state until monitor_running is cleared."""
        while self.monitor_running:
            try:
                self._update_client_status()
                time.sleep(10)  # Update every 10 seconds
            except Exception as e:
                logger.error(f"Error monitoring VPN clients: {e}")
                time.sleep(30)  # Back off on repeated errors

    def _update_client_status(self):
        """Update client state from the OpenVPN status log.

        The status log has three sections: "OpenVPN CLIENT LIST" (CSV rows of
        common name / real address / byte counters), "ROUTING TABLE" (virtual
        address per common name) and "GLOBAL STATS". The client list is parsed
        first, then the routing table fills in each client's VPN IP.
        """
        try:
            if not os.path.exists(self.status_log_path):
                return

            with open(self.status_log_path, 'r') as f:
                content = f.read()

            lines = content.split('\n')
            client_section = False
            routing_section = False

            current_clients = {}

            for line in lines:
                line = line.strip()

                # Section markers toggle the parser state.
                if line.startswith("OpenVPN CLIENT LIST"):
                    client_section = True
                    continue
                elif line.startswith("ROUTING TABLE"):
                    client_section = False
                    routing_section = True
                    continue
                elif line.startswith("GLOBAL STATS"):
                    routing_section = False
                    continue

                if client_section and line and not line.startswith("Updated,"):
                    # Client row: Common Name,Real Address,Bytes Received,Bytes Sent,Connected Since
                    parts = line.split(',')
                    if len(parts) >= 5:
                        common_name = parts[0]
                        real_address = parts[1]
                        # isdigit() guards against the CSV header row.
                        bytes_received = int(parts[2]) if parts[2].isdigit() else 0
                        bytes_sent = int(parts[3]) if parts[3].isdigit() else 0
                        connected_since = parts[4]

                        # VPN IP is unknown until the routing table is parsed.
                        vpn_ip = "unknown"

                        client = VPNClient(
                            client_id=common_name,
                            common_name=common_name,
                            ip_address=vpn_ip,
                            connected_at=time.time(),  # parse time, not real connect time
                            bytes_received=bytes_received,
                            bytes_sent=bytes_sent
                        )
                        current_clients[common_name] = client

                elif routing_section and line and not line.startswith("Virtual Address,"):
                    # Routing row: Virtual Address,Common Name,Real Address,Last Ref
                    parts = line.split(',')
                    if len(parts) >= 2:
                        vpn_ip = parts[0]
                        common_name = parts[1]

                        if common_name in current_clients:
                            current_clients[common_name].ip_address = vpn_ip

            # Replace the tracked set wholesale; clients absent from the log
            # are thereby dropped.
            self.clients = current_clients

            # Integrate with DHCP server if available
            if self.dhcp_server:
                self._sync_with_dhcp()

        except Exception as e:
            logger.error(f"Error updating client status: {e}")

    def _sync_with_dhcp(self):
        """Register VPN clients with the DHCP server as static leases.

        Lets the rest of the ISP stack track VPN clients by IP. Uses
        hasattr() because the DHCP component's interface is duck-typed.
        """
        try:
            for client in self.clients.values():
                if client.ip_address != "unknown":
                    if hasattr(self.dhcp_server, 'register_static_lease'):
                        self.dhcp_server.register_static_lease(
                            client.common_name,
                            client.ip_address,
                            "VPN Client"
                        )
        except Exception as e:
            logger.error(f"Error syncing with DHCP: {e}")

    def _configure_vpn_firewall(self):
        """Install firewall rules allowing OpenVPN and VPN-subnet traffic.

        No-op when no firewall component is attached.
        """
        try:
            if not self.firewall:
                return

            vpn_rules = [
                {
                    "rule_id": "allow_openvpn",
                    "priority": 10,
                    "action": "ACCEPT",
                    "direction": "BOTH",
                    "dest_port": str(self.vpn_port),
                    "protocol": "UDP",
                    "description": "Allow OpenVPN traffic",
                    "enabled": True
                },
                {
                    "rule_id": "allow_vpn_network",
                    "priority": 11,
                    "action": "ACCEPT",
                    "direction": "BOTH",
                    "source_network": str(self.vpn_network),
                    "description": "Allow VPN client network traffic",
                    "enabled": True
                }
            ]

            for rule in vpn_rules:
                # Duck-typed firewall interface; skip silently if absent.
                if hasattr(self.firewall, 'add_rule'):
                    self.firewall.add_rule(rule)

            logger.info("Configured firewall rules for VPN")

        except Exception as e:
            logger.error(f"Error configuring VPN firewall: {e}")

    def _configure_vpn_nat(self):
        """Configure NAT for VPN traffic (intentionally a no-op).

        NAT is handled by the hosting environment (e.g. HuggingFace Spaces)
        or the underlying network; direct iptables calls were removed.
        """
        try:
            logger.info("Skipping direct iptables NAT configuration as per instructions.")

        except Exception as e:
            logger.error(f"Error configuring VPN NAT: {e}")

    def get_server_status(self) -> VPNServerStatus:
        """Return a point-in-time snapshot of server state and traffic totals."""
        total_bytes_received = sum(client.bytes_received for client in self.clients.values())
        total_bytes_sent = sum(client.bytes_sent for client in self.clients.values())
        uptime = time.time() - self.start_time if self.start_time else 0

        return VPNServerStatus(
            is_running=self.is_running,
            connected_clients=len(self.clients),
            total_bytes_received=total_bytes_received,
            total_bytes_sent=total_bytes_sent,
            uptime=uptime,
            server_ip=self.vpn_server_ip,
            server_port=self.vpn_port
        )

    def get_connected_clients(self) -> List[Dict[str, Any]]:
        """Return connected clients as plain dictionaries (JSON-friendly)."""
        return [asdict(client) for client in self.clients.values()]

    def disconnect_client(self, client_id: str) -> bool:
        """Disconnect a specific client.

        Returns False when the client is unknown. NOTE(review): this only
        drops the client from local tracking; actually killing the session
        would require the OpenVPN management interface.
        """
        try:
            if client_id not in self.clients:
                return False

            logger.info(f"Disconnecting client: {client_id}")

            # Remove from clients dict
            del self.clients[client_id]
            return True

        except Exception as e:
            logger.error(f"Error disconnecting client {client_id}: {e}")
            return False

    def generate_client_config(self, client_name: str, server_ip: str) -> str:
        """Generate a client .ovpn configuration with embedded certificates.

        Returns the configuration text, or "" on error. The CA certificate is
        read from disk when available; the client certificate and key below
        are development placeholders (not valid PEM/ASN.1 material) — in
        production they must be generated per client by a real PKI.
        """
        try:
            # Read real CA certificate
            ca_cert_path = "/etc/openvpn/server/ca.crt"
            try:
                with open(ca_cert_path, 'r') as f:
                    ca_cert = f.read()
            except FileNotFoundError:
                # Fallback to embedded certificate for development
                ca_cert = """-----BEGIN CERTIFICATE-----
MIIDMzCCAhugAwIBAgIUNO765P4t/yD/PnIFTMVs0Q32TJYwDQYJKoZIhvcNAQEL
BQAwDjEMMAoGA1UEAwwDeWVzMB4XDTI1MDgwMjAxMjkzNVoXDTM1MDczMTAxMjkz
NVowDjEMMAoGA1UEAwwDeWVzMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKC
AQEAtwhMGXouHnHBRd2RhdrW8sOMgqt4wDXZC0J+4UMjOX6Y7t2O1Sgw/sWhwFPk
QF/cMoQIvsucklPogcnzzGtv9zDkAXyVyCC27UYbg8JfWZK3ZMrt6dfEmYf4KKXm
D6PLn9guxzBB63dhEWx/7fd6H9C/rK/u0rOh15DQRnfEI468cmXS5uNg8ke/73+y
Gzb6q7ZOFByBAwM0hW0lStBaIIcxouFrIK8B72O8H+6t10K1GvgiBhKvM3cc8dpN
y4qvRoN/o+eXarZG7G9dfm9OFgdd9LoXPTTbO+ftFPKOq4F41PnMd2Zcyk7P3GCr
3oK7NbISxZ5efLpy45lgSpqKBwIDAQABo4GIMIGFMB0GA1UdDgQWBBQIi0Er30cV
Qzi+U/LPV4Lf3yvGIzBJBgNVHSMEQjBAgBQIi0Er30cVQzi+U/LPV4Lf3yvGI6ES
pBAwDjEMMAoGA1UEAwwDeWVzghQ07vrk/i3/IP8+cgVMxWzRDfZMljAMBgNVHRME
BTADAQH/MAsGA1UdDwQEAwIBBjANBgkqhkiG9w0BAQsFAAOCAQEAHzfSFbi1G7WC
vMSOqSv4/jlBExnz/AlLUBHhgDomIdLK8Pb3tyCD5IYkmi0NT5x6DORcOV2ow1JZ
o4BL7OVV+fhz3VKXEpG+s3gq5j2m+raqLtu6QKBGg7SIUZ4MLjggvAcPjsK+n8sK
86sAUFVTccBxJlKBShAUPSNihyWwxB4PQFvwhefNQSoID1kAB2Fzf1beMX6Gp6Lj
ldI6e63lpYtIbp4+2F5SxJ/hGTUx+nWbOAHPvhBfhN6sEu9G1C5KPR0cm+xxOpZ9
lA7y4Dea7pyVybR/b7lFquE3TReXCoLx79UNNSv8erIlsy1jh9yXDnTCk8SN1dpO
YwJ9U0AHXA==
-----END CERTIFICATE-----"""

            # Sample client certificate (in production, generate unique per client)
            client_cert = f"""-----BEGIN CERTIFICATE-----
MIIDSzCCAjOgAwIBAgIU{client_name}1234567890abcdefghijklmnopqrstuvwxyz
ABCDEFGHIJKLMNOPQRSTUVWXYZ1234567890abcdefghijklmnopqrstuvwxyz1234567
890abcdefghijklmnopqrstuvwxyz1234567890abcdefghijklmnopqrstuvwxyz12345
67890abcdefghijklmnopqrstuvwxyz1234567890abcdefghijklmnopqrstuvwxyz123
4567890abcdefghijklmnopqrstuvwxyz1234567890abcdefghijklmnopqrstuvwxyz1
234567890abcdefghijklmnopqrstuvwxyz1234567890abcdefghijklmnopqrstuvwxy
z1234567890abcdefghijklmnopqrstuvwxyz1234567890abcdefghijklmnopqrstuv
wxyz1234567890abcdefghijklmnopqrstuvwxyz1234567890abcdefghijklmnopqrs
tuvwxyz1234567890abcdefghijklmnopqrstuvwxyz1234567890abcdefghijklmno
pqrstuvwxyz1234567890abcdefghijklmnopqrstuvwxyz1234567890abcdefghij
klmnopqrstuvwxyz1234567890abcdefghijklmnopqrstuvwxyz1234567890abcd
efghijklmnopqrstuvwxyz1234567890abcdefghijklmnopqrstuvwxyz1234567
890abcdefghijklmnopqrstuvwxyz1234567890abcdefghijklmnopqrstuvwxy
z1234567890abcdefghijklmnopqrstuvwxyz1234567890abcdefghijklmnop
qrstuvwxyz1234567890abcdefghijklmnopqrstuvwxyz1234567890abcde
fghijklmnopqrstuvwxyz1234567890abcdefghijklmnopqrstuvwxyz12
34567890abcdefghijklmnopqrstuvwxyz1234567890abcdefghijk
lmnopqrstuvwxyz1234567890abcdefghijklmnopqrstuvwxyz12
34567890abcdefghijklmnopqrstuvwxyz1234567890abc
defghijklmnopqrstuvwxyz1234567890abcdefg
hijklmnopqrstuvwxyz1234567890
abcdefghijklmnopqr
stuvwxyz12345
67890abc
def
-----END CERTIFICATE-----"""

            # Sample client private key (in production, generate unique per client)
            client_key = f"""-----BEGIN PRIVATE KEY-----
MIIEvQIBADANBgkqhkiG9w0BAQEFAASCBKcwggSjAgEAAoIBAQC{client_name}1234567
890abcdefghijklmnopqrstuvwxyz1234567890abcdefghijklmnopqrstuvwxyz123456
7890abcdefghijklmnopqrstuvwxyz1234567890abcdefghijklmnopqrstuvwxyz12345
67890abcdefghijklmnopqrstuvwxyz1234567890abcdefghijklmnopqrstuvwxyz1234
567890abcdefghijklmnopqrstuvwxyz1234567890abcdefghijklmnopqrstuvwxyz123
4567890abcdefghijklmnopqrstuvwxyz1234567890abcdefghijklmnopqrstuvwxyz12
34567890abcdefghijklmnopqrstuvwxyz1234567890abcdefghijklmnopqrstuvwxyz1
234567890abcdefghijklmnopqrstuvwxyz1234567890abcdefghijklmnopqrstuvwxy
z1234567890abcdefghijklmnopqrstuvwxyz1234567890abcdefghijklmnopqrstuv
wxyz1234567890abcdefghijklmnopqrstuvwxyz1234567890abcdefghijklmnopqrs
tuvwxyz1234567890abcdefghijklmnopqrstuvwxyz1234567890abcdefghijklmno
pqrstuvwxyz1234567890abcdefghijklmnopqrstuvwxyz1234567890abcdefghij
klmnopqrstuvwxyz1234567890abcdefghijklmnopqrstuvwxyz1234567890abcd
efghijklmnopqrstuvwxyz1234567890abcdefghijklmnopqrstuvwxyz1234567
890abcdefghijklmnopqrstuvwxyz1234567890abcdefghijklmnopqrstuvwxy
z1234567890abcdefghijklmnopqrstuvwxyz1234567890abcdefghijklmnop
qrstuvwxyz1234567890abcdefghijklmnopqrstuvwxyz1234567890abcde
fghijklmnopqrstuvwxyz1234567890abcdefghijklmnopqrstuvwxyz12
34567890abcdefghijklmnopqrstuvwxyz1234567890abcdefghijk
lmnopqrstuvwxyz1234567890abcdefghijklmnopqrstuvwxyz12
34567890abcdefghijklmnopqrstuvwxyz1234567890abc
defghijklmnopqrstuvwxyz1234567890abcdefg
hijklmnopqrstuvwxyz1234567890
abcdefghijklmnopqr
stuvwxyz12345
67890abc
defghijk
lmnopqr
stuv
-----END PRIVATE KEY-----"""

            # Generate complete client configuration
            client_config = f"""# OpenVPN Client Configuration for {client_name}
# Generated by Virtual ISP Stack
# Server: {server_ip}:{self.vpn_port}

client
dev tun
proto udp
remote {server_ip} {self.vpn_port}
resolv-retry infinite
nobind
persist-key
persist-tun
cipher AES-256-CBC
auth SHA256
verb 3
key-direction 1
redirect-gateway def1 bypass-dhcp
dhcp-option DNS 8.8.8.8
dhcp-option DNS 8.8.4.4
remote-cert-tls server

# Embedded CA Certificate
<ca>
{ca_cert}
</ca>

# Embedded Client Certificate
<cert>
{client_cert}
</cert>

# Embedded Client Private Key
<key>
{client_key}
</key>

# TLS Authentication Key (optional, for extra security)
# <tls-auth>
# -----BEGIN OpenVPN Static key V1-----
# [TLS-AUTH-KEY-CONTENT-WOULD-GO-HERE]
# -----END OpenVPN Static key V1-----
# </tls-auth>
"""

            logger.info(f"Generated client configuration for {client_name}")
            return client_config

        except Exception as e:
            logger.error(f"Error generating client config: {e}")
            return ""

    def save_client_config(self, client_name: str, config_content: str) -> bool:
        """Save client configuration to storage. Returns True on success."""
        try:
            # Sanitized path: prevents client_name-based directory traversal.
            config_file_path = self._client_config_path(client_name)
            with open(config_file_path, 'w') as f:
                f.write(config_content)

            logger.info(f"Saved client configuration for {client_name}")
            return True

        except Exception as e:
            logger.error(f"Error saving client config for {client_name}: {e}")
            return False

    def load_client_config(self, client_name: str) -> str:
        """Load a client configuration from storage; "" when missing."""
        try:
            # Sanitized path: prevents client_name-based directory traversal.
            config_file_path = self._client_config_path(client_name)
            if not os.path.exists(config_file_path):
                return ""

            with open(config_file_path, 'r') as f:
                config_content = f.read()

            logger.info(f"Loaded client configuration for {client_name}")
            return config_content

        except Exception as e:
            logger.error(f"Error loading client config for {client_name}: {e}")
            return ""

    def list_client_configs(self) -> List[str]:
        """List names of all stored client configurations."""
        try:
            config_files = []
            if os.path.exists(self.config_storage_path):
                for filename in os.listdir(self.config_storage_path):
                    if filename.endswith('.ovpn'):
                        client_name = filename[:-5]  # Remove .ovpn extension
                        config_files.append(client_name)

            return config_files

        except Exception as e:
            logger.error(f"Error listing client configs: {e}")
            return []

    def delete_client_config(self, client_name: str) -> bool:
        """Delete a stored client configuration. Returns True when removed."""
        try:
            # Sanitized path: prevents client_name-based directory traversal.
            config_file_path = self._client_config_path(client_name)
            if os.path.exists(config_file_path):
                os.remove(config_file_path)
                logger.info(f"Deleted client configuration for {client_name}")
                return True
            else:
                logger.warning(f"Client configuration for {client_name} not found")
                return False

        except Exception as e:
            logger.error(f"Error deleting client config for {client_name}: {e}")
            return False

    def generate_and_save_client_config(self, client_name: str, server_ip: str) -> str:
        """Generate a client configuration, persist it, and return its text.

        Returns "" when either generation or saving fails.
        """
        try:
            config_content = self.generate_client_config(client_name, server_ip)
            if config_content:
                if self.save_client_config(client_name, config_content):
                    return config_content
            return ""

        except Exception as e:
            logger.error(f"Error generating and saving client config for {client_name}: {e}")
            return ""

    def get_statistics(self) -> Dict[str, Any]:
        """Return server status, client list, network config and integration flags."""
        status = self.get_server_status()

        return {
            "server_status": asdict(status),
            "connected_clients": self.get_connected_clients(),
            "network_config": {
                "vpn_network": str(self.vpn_network),
                "server_ip": self.vpn_server_ip,
                "server_port": self.vpn_port
            },
            "integration_status": {
                "dhcp_integrated": self.dhcp_server is not None,
                "nat_integrated": self.nat_engine is not None,
                "firewall_integrated": self.firewall is not None,
                "router_integrated": self.router is not None
            }
        }
|
| 595 |
+
|
| 596 |
+
# Global OpenVPN manager instance
|
| 597 |
+
openvpn_manager = None
|
| 598 |
+
|
| 599 |
+
def initialize_openvpn_manager(config: Dict[str, Any]) -> OpenVPNManager:
    """Initialize the OpenVPN manager.

    Creates a new OpenVPNManager and installs it as the module-level
    singleton returned by get_openvpn_manager(). Calling this again
    replaces the previous instance (the old one is not stopped).
    """
    global openvpn_manager
    openvpn_manager = OpenVPNManager(config)
    return openvpn_manager
|
| 604 |
+
|
| 605 |
+
def get_openvpn_manager() -> Optional[OpenVPNManager]:
    """Get the global OpenVPN manager instance.

    Returns None if initialize_openvpn_manager() has not been called yet.
    """
    return openvpn_manager
|
| 608 |
+
|
core/packet_bridge.py
ADDED
|
@@ -0,0 +1,664 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""
|
| 2 |
+
Packet Bridge Module
|
| 3 |
+
|
| 4 |
+
Handles communication with virtual clients:
|
| 5 |
+
- Accept packet streams over WebSocket/TCP
|
| 6 |
+
- Deliver response packets back to clients
|
| 7 |
+
- Frame processing (Ethernet → IPv4)
|
| 8 |
+
- Connection management
|
| 9 |
+
"""
|
| 10 |
+
|
| 11 |
+
import asyncio
|
| 12 |
+
import websockets
|
| 13 |
+
import socket
|
| 14 |
+
import threading
|
| 15 |
+
import time
|
| 16 |
+
import struct
|
| 17 |
+
from typing import Dict, List, Optional, Callable, Set, Any, Tuple
|
| 18 |
+
from dataclasses import dataclass
|
| 19 |
+
from enum import Enum
|
| 20 |
+
import json
|
| 21 |
+
import logging
|
| 22 |
+
|
| 23 |
+
from .ip_parser import IPParser, ParsedPacket
|
| 24 |
+
|
| 25 |
+
|
| 26 |
+
class BridgeType(Enum):
    """Transport a client uses to reach the packet bridge."""
    WEBSOCKET = "WEBSOCKET"
    TCP_SOCKET = "TCP_SOCKET"
    UDP_SOCKET = "UDP_SOCKET"
|
| 30 |
+
|
| 31 |
+
|
| 32 |
+
@dataclass
class ClientConnection:
    """Represents a client connection to the bridge.

    Exactly one of `websocket` or `socket` is expected to be set,
    depending on `bridge_type` — TODO confirm against the connection
    handlers elsewhere in this file.
    """
    client_id: str               # bridge-assigned identifier for this client
    bridge_type: BridgeType      # transport in use (WebSocket / TCP / UDP)
    remote_address: str          # peer IP address
    remote_port: int             # peer port
    websocket: Optional[Any] = None  # WebSocket connection
    # NOTE(review): this field shadows the `socket` module inside the class
    # namespace — hence the string annotation.
    socket: Optional['socket.socket'] = None  # TCP/UDP socket
    connected_time: float = 0    # epoch seconds; 0 means "set in __post_init__"
    last_activity: float = 0     # epoch seconds; 0 means "set in __post_init__"
    packets_received: int = 0
    packets_sent: int = 0
    bytes_received: int = 0
    bytes_sent: int = 0
    is_active: bool = True

    def __post_init__(self):
        # A zero timestamp means the caller did not supply one: default to now.
        if self.connected_time == 0:
            self.connected_time = time.time()
        if self.last_activity == 0:
            self.last_activity = time.time()

    def update_activity(self, packet_count: int = 1, byte_count: int = 0, direction: str = 'received'):
        """Update connection activity.

        Bumps last_activity and adds packet/byte counts to the received
        counters when direction == 'received', otherwise to the sent ones.
        """
        self.last_activity = time.time()

        if direction == 'received':
            self.packets_received += packet_count
            self.bytes_received += byte_count
        else:
            self.packets_sent += packet_count
            self.bytes_sent += byte_count

    def to_dict(self) -> Dict:
        """Convert to a plain dictionary (JSON-friendly).

        `duration` is computed on the fly from connected_time.
        """
        return {
            'client_id': self.client_id,
            'bridge_type': self.bridge_type.value,
            'remote_address': self.remote_address,
            'remote_port': self.remote_port,
            'connected_time': self.connected_time,
            'last_activity': self.last_activity,
            'packets_received': self.packets_received,
            'packets_sent': self.packets_sent,
            'bytes_received': self.bytes_received,
            'bytes_sent': self.bytes_sent,
            'is_active': self.is_active,
            'duration': time.time() - self.connected_time
        }
|
| 82 |
+
|
| 83 |
+
|
| 84 |
+
class EthernetFrame:
    """Minimal Ethernet II frame codec.

    Holds destination/source MAC, EtherType and raw payload; can decode a
    frame from bytes (`parse`) and re-encode it (`build`).
    """

    def __init__(self):
        # Defaults describe an empty IPv4 frame with zeroed addresses.
        self.dest_mac = b'\x00' * 6
        self.src_mac = b'\x00' * 6
        self.ethertype = 0x0800  # IPv4
        self.payload = b''

    @classmethod
    def parse(cls, data: bytes) -> Optional['EthernetFrame']:
        """Decode an Ethernet frame; None when data is shorter than a header."""
        if len(data) < 14:  # 6 + 6 + 2 byte header is mandatory
            return None

        decoded = cls()
        # One unpack call covers the whole fixed-size header.
        decoded.dest_mac, decoded.src_mac, decoded.ethertype = struct.unpack_from('!6s6sH', data)
        decoded.payload = data[14:]
        return decoded

    def build(self) -> bytes:
        """Serialise this frame back into raw bytes."""
        return b''.join((
            self.dest_mac,
            self.src_mac,
            struct.pack('!H', self.ethertype),
            self.payload,
        ))

    def is_ipv4(self) -> bool:
        """True when the EtherType marks an IPv4 payload."""
        return self.ethertype == 0x0800

    def is_arp(self) -> bool:
        """True when the EtherType marks an ARP payload."""
        return self.ethertype == 0x0806
|
| 119 |
+
|
| 120 |
+
|
| 121 |
+
class PacketBridge:
|
| 122 |
+
"""Packet bridge implementation"""
|
| 123 |
+
|
| 124 |
+
    def __init__(self, config: Dict):
        """Initialize the bridge from a config dict; servers start in start()."""
        self.config = config
        # client_id -> ClientConnection, guarded by self.lock
        self.clients: Dict[str, ClientConnection] = {}
        # Consulted in order; the first handler returning bytes produces the reply.
        self.packet_handlers: List[Callable[[ParsedPacket, str], Optional[bytes]]] = []
        # Guards self.clients (touched from the asyncio loop and per-client threads).
        self.lock = threading.Lock()

        # Configuration
        self.websocket_host = config.get('websocket_host', '0.0.0.0')
        self.websocket_port = config.get('websocket_port', 8765)
        self.tcp_host = config.get('tcp_host', '0.0.0.0')
        self.tcp_port = config.get('tcp_port', 8766)
        self.max_clients = config.get('max_clients', 100)
        self.client_timeout = config.get('client_timeout', 300)  # seconds of idle

        # WebSocket server
        self.websocket_server = None
        self.tcp_server_socket = None

        # Background tasks
        self.running = False
        self.websocket_task = None
        self.tcp_task = None
        self.cleanup_task = None

        # Statistics
        self.stats = {
            'total_clients': 0,
            'active_clients': 0,
            'packets_processed': 0,
            'packets_forwarded': 0,
            'packets_dropped': 0,
            'bytes_processed': 0,
            'websocket_connections': 0,
            'tcp_connections': 0,
            'connection_errors': 0
        }

        # Event loop (created in start(); shared with the WebSocket thread and
        # used for run_coroutine_threadsafe sends from other threads)
        self.loop = None
|
| 163 |
+
|
| 164 |
+
def add_packet_handler(self, handler: Callable[[ParsedPacket, str], Optional[bytes]]):
|
| 165 |
+
"""Add packet handler function"""
|
| 166 |
+
self.packet_handlers.append(handler)
|
| 167 |
+
|
| 168 |
+
def remove_packet_handler(self, handler: Callable[[ParsedPacket, str], Optional[bytes]]):
|
| 169 |
+
"""Remove packet handler function"""
|
| 170 |
+
if handler in self.packet_handlers:
|
| 171 |
+
self.packet_handlers.remove(handler)
|
| 172 |
+
|
| 173 |
+
def _generate_client_id(self, remote_address: str, remote_port: int) -> str:
|
| 174 |
+
"""Generate unique client ID"""
|
| 175 |
+
timestamp = int(time.time() * 1000)
|
| 176 |
+
return f"client_{remote_address}_{remote_port}_{timestamp}"
|
| 177 |
+
|
| 178 |
+
    def _process_ethernet_frame(self, frame_data: bytes, client_id: str) -> Optional[bytes]:
        """Process an Ethernet frame from a client and return a response frame.

        Returns the serialized response Ethernet frame when a handler produced
        a reply, otherwise None. Non-IPv4 or unparseable frames are ignored.
        """
        try:
            # Parse Ethernet frame
            frame = EthernetFrame.parse(frame_data)
            if not frame or not frame.is_ipv4():
                # NOTE(review): frames rejected here are not counted in
                # 'packets_dropped' -- confirm whether that is intended.
                return None

            # Parse IP packet (project-level parser; payload format assumed IPv4)
            packet = IPParser.parse_packet(frame.payload)
            self.stats['packets_processed'] += 1
            self.stats['bytes_processed'] += len(frame_data)

            # Process through packet handlers; first non-None response wins.
            response_packet = None
            for handler in self.packet_handlers:
                try:
                    response = handler(packet, client_id)
                    if response:
                        response_packet = response
                        break
                except Exception as e:
                    # A failing handler must not break the others.
                    logging.error(f"Packet handler error: {e}")

            if response_packet:
                # Wrap response in an Ethernet frame, swapping src/dst MACs so
                # the reply flows back toward the original sender.
                response_frame = EthernetFrame()
                response_frame.dest_mac = frame.src_mac
                response_frame.src_mac = frame.dest_mac
                response_frame.ethertype = 0x0800
                response_frame.payload = response_packet

                self.stats['packets_forwarded'] += 1
                return response_frame.build()
            else:
                self.stats['packets_dropped'] += 1
                return None

        except Exception as e:
            logging.error(f"Error processing Ethernet frame: {e}")
            self.stats['packets_dropped'] += 1
            return None
|
| 220 |
+
|
| 221 |
+
    async def _handle_websocket_client(self, websocket, path):
        """Serve one WebSocket client for its whole lifetime.

        Binary messages are treated as raw Ethernet frames; text messages are
        JSON control messages (ping/stats/config).
        NOTE(review): the two-argument (websocket, path) handler signature is
        the legacy `websockets` API -- confirm the installed version expects it.
        """
        client_address = websocket.remote_address
        client_id = self._generate_client_id(client_address[0], client_address[1])

        # Create client connection record
        client = ClientConnection(
            client_id=client_id,
            bridge_type=BridgeType.WEBSOCKET,
            remote_address=client_address[0],
            remote_port=client_address[1],
            websocket=websocket
        )

        with self.lock:
            if len(self.clients) >= self.max_clients:
                # 1013 = "Try Again Later"
                # NOTE(review): awaiting while holding a threading.Lock blocks
                # other threads for the duration of the close handshake.
                await websocket.close(code=1013, reason="Too many clients")
                return

            self.clients[client_id] = client

        self.stats['total_clients'] += 1
        self.stats['active_clients'] = len(self.clients)
        self.stats['websocket_connections'] += 1

        logging.info(f"WebSocket client connected: {client_id} from {client_address}")

        try:
            async for message in websocket:
                if isinstance(message, bytes):
                    # Binary message - treat as Ethernet frame
                    client.update_activity(1, len(message), 'received')

                    response = self._process_ethernet_frame(message, client_id)
                    if response:
                        await websocket.send(response)
                        client.update_activity(1, len(response), 'sent')

                elif isinstance(message, str):
                    # Text message - treat as JSON control message
                    try:
                        control_msg = json.loads(message)
                        await self._handle_control_message(client, control_msg)
                    except json.JSONDecodeError:
                        logging.warning(f"Invalid control message from {client_id}: {message}")

        except websockets.exceptions.ConnectionClosed:
            logging.info(f"WebSocket client disconnected: {client_id}")
        except Exception as e:
            logging.error(f"WebSocket client error: {e}")
            self.stats['connection_errors'] += 1

        finally:
            # Clean up client on any exit path
            with self.lock:
                if client_id in self.clients:
                    self.clients[client_id].is_active = False
                    del self.clients[client_id]

                self.stats['active_clients'] = len(self.clients)
|
| 281 |
+
|
| 282 |
+
    async def _handle_control_message(self, client: ClientConnection, message: Dict):
        """Dispatch a JSON control message from a WebSocket client.

        Supported types: 'ping' -> 'pong', 'stats' -> client + bridge stats,
        'config' -> acknowledged. Unknown types are silently ignored.
        """
        msg_type = message.get('type')

        if msg_type == 'ping':
            # Respond to ping with the current server timestamp
            response = {'type': 'pong', 'timestamp': time.time()}
            await client.websocket.send(json.dumps(response))

        elif msg_type == 'stats':
            # Send per-client and bridge-wide statistics
            response = {
                'type': 'stats',
                'client_stats': client.to_dict(),
                'bridge_stats': self.get_stats()
            }
            await client.websocket.send(json.dumps(response))

        elif msg_type == 'config':
            # Handle configuration updates (currently acknowledged but not applied)
            config_data = message.get('data', {})
            # Process configuration updates here
            response = {'type': 'config_ack', 'status': 'ok'}
            await client.websocket.send(json.dumps(response))
|
| 306 |
+
|
| 307 |
+
def _handle_tcp_client(self, client_socket: socket.socket, client_address: Tuple[str, int]):
|
| 308 |
+
"""Handle TCP client connection"""
|
| 309 |
+
client_id = self._generate_client_id(client_address[0], client_address[1])
|
| 310 |
+
|
| 311 |
+
# Create client connection
|
| 312 |
+
client = ClientConnection(
|
| 313 |
+
client_id=client_id,
|
| 314 |
+
bridge_type=BridgeType.TCP_SOCKET,
|
| 315 |
+
remote_address=client_address[0],
|
| 316 |
+
remote_port=client_address[1],
|
| 317 |
+
socket=client_socket
|
| 318 |
+
)
|
| 319 |
+
|
| 320 |
+
with self.lock:
|
| 321 |
+
if len(self.clients) >= self.max_clients:
|
| 322 |
+
client_socket.close()
|
| 323 |
+
return
|
| 324 |
+
|
| 325 |
+
self.clients[client_id] = client
|
| 326 |
+
|
| 327 |
+
self.stats['total_clients'] += 1
|
| 328 |
+
self.stats['active_clients'] = len(self.clients)
|
| 329 |
+
self.stats['tcp_connections'] += 1
|
| 330 |
+
|
| 331 |
+
logging.info(f"TCP client connected: {client_id} from {client_address}")
|
| 332 |
+
|
| 333 |
+
try:
|
| 334 |
+
client_socket.settimeout(self.client_timeout)
|
| 335 |
+
|
| 336 |
+
while client.is_active:
|
| 337 |
+
try:
|
| 338 |
+
# Read frame length (4 bytes)
|
| 339 |
+
length_data = client_socket.recv(4)
|
| 340 |
+
if not length_data:
|
| 341 |
+
break
|
| 342 |
+
|
| 343 |
+
frame_length = struct.unpack('!I', length_data)[0]
|
| 344 |
+
if frame_length > 65536: # Sanity check
|
| 345 |
+
break
|
| 346 |
+
|
| 347 |
+
# Read frame data
|
| 348 |
+
frame_data = b''
|
| 349 |
+
while len(frame_data) < frame_length:
|
| 350 |
+
chunk = client_socket.recv(frame_length - len(frame_data))
|
| 351 |
+
if not chunk:
|
| 352 |
+
break
|
| 353 |
+
frame_data += chunk
|
| 354 |
+
|
| 355 |
+
if len(frame_data) != frame_length:
|
| 356 |
+
break
|
| 357 |
+
|
| 358 |
+
client.update_activity(1, len(frame_data), 'received')
|
| 359 |
+
|
| 360 |
+
# Process frame
|
| 361 |
+
response = self._process_ethernet_frame(frame_data, client_id)
|
| 362 |
+
if response:
|
| 363 |
+
# Send response with length prefix
|
| 364 |
+
response_length = struct.pack('!I', len(response))
|
| 365 |
+
client_socket.send(response_length + response)
|
| 366 |
+
client.update_activity(1, len(response), 'sent')
|
| 367 |
+
|
| 368 |
+
except socket.timeout:
|
| 369 |
+
continue
|
| 370 |
+
except Exception as e:
|
| 371 |
+
logging.error(f"TCP client error: {e}")
|
| 372 |
+
break
|
| 373 |
+
|
| 374 |
+
except Exception as e:
|
| 375 |
+
logging.error(f"TCP client handler error: {e}")
|
| 376 |
+
self.stats['connection_errors'] += 1
|
| 377 |
+
|
| 378 |
+
finally:
|
| 379 |
+
# Clean up client
|
| 380 |
+
try:
|
| 381 |
+
client_socket.close()
|
| 382 |
+
except:
|
| 383 |
+
pass
|
| 384 |
+
|
| 385 |
+
with self.lock:
|
| 386 |
+
if client_id in self.clients:
|
| 387 |
+
self.clients[client_id].is_active = False
|
| 388 |
+
del self.clients[client_id]
|
| 389 |
+
|
| 390 |
+
self.stats['active_clients'] = len(self.clients)
|
| 391 |
+
logging.info(f"TCP client disconnected: {client_id}")
|
| 392 |
+
|
| 393 |
+
    def _tcp_server_loop(self):
        """Accept loop for the TCP bridge (runs in its own daemon thread).

        Each accepted client is served by a dedicated daemon thread running
        _handle_tcp_client.
        """
        try:
            self.tcp_server_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
            self.tcp_server_socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
            self.tcp_server_socket.bind((self.tcp_host, self.tcp_port))
            self.tcp_server_socket.listen(10)

            logging.info(f"TCP bridge server listening on {self.tcp_host}:{self.tcp_port}")

            while self.running:
                try:
                    client_socket, client_address = self.tcp_server_socket.accept()

                    # Handle client in separate thread
                    client_thread = threading.Thread(
                        target=self._handle_tcp_client,
                        args=(client_socket, client_address),
                        daemon=True
                    )
                    client_thread.start()

                except socket.error as e:
                    # accept() raises when stop() closes the listening socket;
                    # only treat it as an error while we are meant to be running.
                    if self.running:
                        logging.error(f"TCP server error: {e}")
                        time.sleep(1)

        except Exception as e:
            logging.error(f"TCP server loop error: {e}")

        finally:
            if self.tcp_server_socket:
                self.tcp_server_socket.close()
|
| 426 |
+
|
| 427 |
+
    def _cleanup_loop(self):
        """Periodically disconnect clients idle longer than client_timeout.

        Runs in a daemon thread; one sweep every 30 seconds.
        """
        while self.running:
            try:
                current_time = time.time()
                expired_clients = []

                with self.lock:
                    for client_id, client in self.clients.items():
                        # Mark inactive clients for removal
                        if current_time - client.last_activity > self.client_timeout:
                            expired_clients.append(client_id)

                # Clean up expired clients (re-acquire the lock per client so
                # other threads can make progress between closures)
                for client_id in expired_clients:
                    with self.lock:
                        if client_id in self.clients:
                            client = self.clients[client_id]
                            client.is_active = False

                            # Close connections
                            if client.websocket:
                                try:
                                    # Close must run on the WebSocket thread's loop
                                    asyncio.run_coroutine_threadsafe(
                                        client.websocket.close(),
                                        self.loop
                                    )
                                except:
                                    pass

                            if client.socket:
                                try:
                                    client.socket.close()
                                except:
                                    pass

                            del self.clients[client_id]
                            logging.info(f"Cleaned up expired client: {client_id}")

                    self.stats['active_clients'] = len(self.clients)

                time.sleep(30)  # Cleanup every 30 seconds

            except Exception as e:
                logging.error(f"Cleanup loop error: {e}")
                time.sleep(5)
|
| 473 |
+
|
| 474 |
+
def send_packet_to_client(self, client_id: str, packet_data: bytes) -> bool:
|
| 475 |
+
"""Send packet to specific client"""
|
| 476 |
+
with self.lock:
|
| 477 |
+
client = self.clients.get(client_id)
|
| 478 |
+
|
| 479 |
+
if not client or not client.is_active:
|
| 480 |
+
return False
|
| 481 |
+
|
| 482 |
+
try:
|
| 483 |
+
if client.bridge_type == BridgeType.WEBSOCKET:
|
| 484 |
+
# Send via WebSocket
|
| 485 |
+
if client.websocket:
|
| 486 |
+
asyncio.run_coroutine_threadsafe(
|
| 487 |
+
client.websocket.send(packet_data),
|
| 488 |
+
self.loop
|
| 489 |
+
)
|
| 490 |
+
client.update_activity(1, len(packet_data), 'sent')
|
| 491 |
+
return True
|
| 492 |
+
|
| 493 |
+
elif client.bridge_type == BridgeType.TCP_SOCKET:
|
| 494 |
+
# Send via TCP socket with length prefix
|
| 495 |
+
if client.socket:
|
| 496 |
+
length_prefix = struct.pack('!I', len(packet_data))
|
| 497 |
+
client.socket.send(length_prefix + packet_data)
|
| 498 |
+
client.update_activity(1, len(packet_data), 'sent')
|
| 499 |
+
return True
|
| 500 |
+
|
| 501 |
+
except Exception as e:
|
| 502 |
+
logging.error(f"Failed to send packet to client {client_id}: {e}")
|
| 503 |
+
# Mark client as inactive
|
| 504 |
+
client.is_active = False
|
| 505 |
+
|
| 506 |
+
return False
|
| 507 |
+
|
| 508 |
+
def broadcast_packet(self, packet_data: bytes, exclude_client: Optional[str] = None) -> int:
|
| 509 |
+
"""Broadcast packet to all clients"""
|
| 510 |
+
sent_count = 0
|
| 511 |
+
|
| 512 |
+
with self.lock:
|
| 513 |
+
client_ids = list(self.clients.keys())
|
| 514 |
+
|
| 515 |
+
for client_id in client_ids:
|
| 516 |
+
if client_id != exclude_client:
|
| 517 |
+
if self.send_packet_to_client(client_id, packet_data):
|
| 518 |
+
sent_count += 1
|
| 519 |
+
|
| 520 |
+
return sent_count
|
| 521 |
+
|
| 522 |
+
def get_clients(self) -> Dict[str, Dict]:
|
| 523 |
+
"""Get all connected clients"""
|
| 524 |
+
with self.lock:
|
| 525 |
+
return {
|
| 526 |
+
client_id: client.to_dict()
|
| 527 |
+
for client_id, client in self.clients.items()
|
| 528 |
+
}
|
| 529 |
+
|
| 530 |
+
def get_client(self, client_id: str) -> Optional[Dict]:
|
| 531 |
+
"""Get specific client"""
|
| 532 |
+
with self.lock:
|
| 533 |
+
client = self.clients.get(client_id)
|
| 534 |
+
return client.to_dict() if client else None
|
| 535 |
+
|
| 536 |
+
    def disconnect_client(self, client_id: str) -> bool:
        """Forcibly disconnect one client; returns False when unknown."""
        with self.lock:
            client = self.clients.get(client_id)
            if not client:
                return False

            client.is_active = False

            # Close connection
            if client.websocket:
                try:
                    # Close is scheduled on the WebSocket thread's event loop
                    asyncio.run_coroutine_threadsafe(
                        client.websocket.close(),
                        self.loop
                    )
                except:
                    pass

            if client.socket:
                try:
                    client.socket.close()
                except:
                    pass

            del self.clients[client_id]
            self.stats['active_clients'] = len(self.clients)

        return True
|
| 565 |
+
|
| 566 |
+
def get_stats(self) -> Dict:
|
| 567 |
+
"""Get bridge statistics"""
|
| 568 |
+
with self.lock:
|
| 569 |
+
stats = self.stats.copy()
|
| 570 |
+
stats['active_clients'] = len(self.clients)
|
| 571 |
+
|
| 572 |
+
return stats
|
| 573 |
+
|
| 574 |
+
def reset_stats(self):
|
| 575 |
+
"""Reset bridge statistics"""
|
| 576 |
+
self.stats = {
|
| 577 |
+
'total_clients': 0,
|
| 578 |
+
'active_clients': len(self.clients),
|
| 579 |
+
'packets_processed': 0,
|
| 580 |
+
'packets_forwarded': 0,
|
| 581 |
+
'packets_dropped': 0,
|
| 582 |
+
'bytes_processed': 0,
|
| 583 |
+
'websocket_connections': 0,
|
| 584 |
+
'tcp_connections': 0,
|
| 585 |
+
'connection_errors': 0
|
| 586 |
+
}
|
| 587 |
+
|
| 588 |
+
    async def start_websocket_server(self):
        """Start the WebSocket server and block until it is closed by stop()."""
        try:
            self.websocket_server = await websockets.serve(
                self._handle_websocket_client,
                self.websocket_host,
                self.websocket_port,
                max_size=1024*1024,  # 1MB max message size
                ping_interval=30,
                ping_timeout=10
            )

            logging.info(f"WebSocket bridge server started on {self.websocket_host}:{self.websocket_port}")

            # Keep server running until stop() calls websocket_server.close()
            await self.websocket_server.wait_closed()

        except Exception as e:
            logging.error(f"WebSocket server error: {e}")
|
| 607 |
+
|
| 608 |
+
    def start(self):
        """Start the bridge: WebSocket, TCP and cleanup workers (all daemon threads)."""
        self.running = True

        # Event loop used by the WebSocket server and for scheduling sends
        # from other threads via run_coroutine_threadsafe.
        self.loop = asyncio.new_event_loop()
        # NOTE(review): set_event_loop is called here and again inside the
        # WebSocket thread; the binding on this (calling) thread appears unused.
        asyncio.set_event_loop(self.loop)

        # Start WebSocket server in a separate thread
        websocket_thread = threading.Thread(target=self._run_websocket_server_in_thread, daemon=True)
        websocket_thread.start()

        # Start TCP server in separate thread
        tcp_thread = threading.Thread(target=self._tcp_server_loop, daemon=True)
        tcp_thread.start()

        # Start cleanup thread
        cleanup_thread = threading.Thread(target=self._cleanup_loop, daemon=True)
        cleanup_thread.start()

        logging.info("Packet bridge started")
|
| 629 |
+
|
| 630 |
+
|
| 631 |
+
|
| 632 |
+
    def stop(self):
        """Stop the servers, disconnect all clients and halt the event loop."""
        self.running = False

        # Close WebSocket server (unblocks wait_closed in start_websocket_server)
        if self.websocket_server:
            self.websocket_server.close()

        # Close TCP server (unblocks the accept() in _tcp_server_loop)
        if self.tcp_server_socket:
            self.tcp_server_socket.close()

        # Disconnect all clients; snapshot ids first because
        # disconnect_client acquires the (non-reentrant) lock itself
        with self.lock:
            client_ids = list(self.clients.keys())

        for client_id in client_ids:
            self.disconnect_client(client_id)

        # Stop event loop
        if self.loop and not self.loop.is_closed():
            self.loop.call_soon_threadsafe(self.loop.stop)

        logging.info("Packet bridge stopped")
|
| 656 |
+
|
| 657 |
+
|
| 658 |
+
|
| 659 |
+
    def _run_websocket_server_in_thread(self):
        """Run the WebSocket server in a separate thread with its own event loop."""
        # Bind the shared loop to this thread and drive it; run_until_complete
        # returns only when start_websocket_server() finishes (server closed).
        asyncio.set_event_loop(self.loop)
        self.loop.run_until_complete(self.start_websocket_server())
|
| 663 |
+
|
| 664 |
+
|
core/session_tracker.py
ADDED
|
@@ -0,0 +1,602 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""
|
| 2 |
+
Session Tracker Module
|
| 3 |
+
|
| 4 |
+
Manages and tracks all network sessions across the virtual ISP stack:
|
| 5 |
+
- Unified session management across all modules
|
| 6 |
+
- Session lifecycle tracking
|
| 7 |
+
- Performance metrics and analytics
|
| 8 |
+
- Session correlation and debugging
|
| 9 |
+
"""
|
| 10 |
+
|
| 11 |
+
import time
|
| 12 |
+
import threading
|
| 13 |
+
import uuid
|
| 14 |
+
from typing import Dict, List, Optional, Set, Any, Tuple
|
| 15 |
+
from dataclasses import dataclass, field
|
| 16 |
+
from enum import Enum
|
| 17 |
+
import json
|
| 18 |
+
|
| 19 |
+
from .dhcp_server import DHCPLease
|
| 20 |
+
from .nat_engine import NATSession
|
| 21 |
+
from .tcp_engine import TCPConnection
|
| 22 |
+
from .socket_translator import SocketConnection
|
| 23 |
+
|
| 24 |
+
|
| 25 |
+
class SessionType(Enum):
    """Kinds of sessions tracked across the virtual ISP stack."""
    DHCP_LEASE = "DHCP_LEASE"
    NAT_SESSION = "NAT_SESSION"
    TCP_CONNECTION = "TCP_CONNECTION"
    SOCKET_CONNECTION = "SOCKET_CONNECTION"
    BRIDGE_CLIENT = "BRIDGE_CLIENT"
|
| 31 |
+
|
| 32 |
+
|
| 33 |
+
class SessionState(Enum):
    """Lifecycle states a tracked session moves through."""
    INITIALIZING = "INITIALIZING"
    ACTIVE = "ACTIVE"
    IDLE = "IDLE"
    CLOSING = "CLOSING"
    CLOSED = "CLOSED"
    ERROR = "ERROR"
|
| 40 |
+
|
| 41 |
+
|
| 42 |
+
@dataclass
class SessionMetrics:
    """Per-session traffic counters and RTT statistics."""
    bytes_in: int = 0
    bytes_out: int = 0
    packets_in: int = 0
    packets_out: int = 0
    errors: int = 0
    retransmits: int = 0
    rtt_samples: List[float] = field(default_factory=list)

    @property
    def total_bytes(self) -> int:
        """Combined inbound and outbound byte count."""
        return self.bytes_in + self.bytes_out

    @property
    def total_packets(self) -> int:
        """Combined inbound and outbound packet count."""
        return self.packets_in + self.packets_out

    @property
    def average_rtt(self) -> float:
        """Mean of the retained RTT samples (0.0 when none recorded)."""
        if not self.rtt_samples:
            return 0.0
        return sum(self.rtt_samples) / len(self.rtt_samples)

    def update_bytes(self, bytes_in: int = 0, bytes_out: int = 0):
        """Accumulate byte counters."""
        self.bytes_in += bytes_in
        self.bytes_out += bytes_out

    def update_packets(self, packets_in: int = 0, packets_out: int = 0):
        """Accumulate packet counters."""
        self.packets_in += packets_in
        self.packets_out += packets_out

    def add_rtt_sample(self, rtt: float):
        """Record an RTT sample, retaining only the most recent 100."""
        self.rtt_samples.append(rtt)
        if len(self.rtt_samples) > 100:
            self.rtt_samples = self.rtt_samples[-100:]

    def to_dict(self) -> Dict:
        """Serialize the counters (including derived values) to a plain dict."""
        return {
            'bytes_in': self.bytes_in,
            'bytes_out': self.bytes_out,
            'packets_in': self.packets_in,
            'packets_out': self.packets_out,
            'total_bytes': self.total_bytes,
            'total_packets': self.total_packets,
            'errors': self.errors,
            'retransmits': self.retransmits,
            'average_rtt': self.average_rtt,
            'rtt_samples_count': len(self.rtt_samples)
        }
|
| 96 |
+
|
| 97 |
+
|
| 98 |
+
@dataclass
class UnifiedSession:
    """Unified session representation shared by all stack modules."""
    session_id: str
    session_type: SessionType
    state: SessionState
    created_time: float
    last_activity: float

    # Session identifiers (virtual = inside the ISP stack, real = outside)
    virtual_ip: Optional[str] = None
    virtual_port: Optional[int] = None
    real_ip: Optional[str] = None
    real_port: Optional[int] = None
    protocol: Optional[str] = None

    # Related sessions (for correlation)
    related_sessions: Set[str] = field(default_factory=set)
    parent_session: Optional[str] = None
    child_sessions: Set[str] = field(default_factory=set)

    # Metrics
    metrics: SessionMetrics = field(default_factory=SessionMetrics)

    # Additional data
    metadata: Dict[str, Any] = field(default_factory=dict)

    def __post_init__(self):
        # Fill in id/timestamps when callers pass falsy placeholders
        # ('' for session_id, 0 for the time fields).
        if not self.session_id:
            self.session_id = str(uuid.uuid4())
        if self.created_time == 0:
            self.created_time = time.time()
        if self.last_activity == 0:
            self.last_activity = time.time()

    def update_activity(self):
        """Update last activity timestamp."""
        self.last_activity = time.time()

    def add_related_session(self, session_id: str):
        """Record a correlated (sibling) session id."""
        self.related_sessions.add(session_id)

    def add_child_session(self, session_id: str):
        """Record a child session id."""
        self.child_sessions.add(session_id)

    def set_parent_session(self, session_id: str):
        """Record the parent session id."""
        self.parent_session = session_id

    @property
    def duration(self) -> float:
        """Get session duration in seconds."""
        return time.time() - self.created_time

    @property
    def idle_time(self) -> float:
        """Get idle time in seconds."""
        return time.time() - self.last_activity

    def to_dict(self) -> Dict:
        """Serialize the session (including derived fields) to a plain dict."""
        return {
            'session_id': self.session_id,
            'session_type': self.session_type.value,
            'state': self.state.value,
            'created_time': self.created_time,
            'last_activity': self.last_activity,
            'duration': self.duration,
            'idle_time': self.idle_time,
            'virtual_ip': self.virtual_ip,
            'virtual_port': self.virtual_port,
            'real_ip': self.real_ip,
            'real_port': self.real_port,
            'protocol': self.protocol,
            'related_sessions': list(self.related_sessions),
            'parent_session': self.parent_session,
            'child_sessions': list(self.child_sessions),
            'metrics': self.metrics.to_dict(),
            'metadata': self.metadata
        }
|
| 180 |
+
|
| 181 |
+
|
| 182 |
+
class SessionTracker:
|
| 183 |
+
"""Unified session tracker"""
|
| 184 |
+
|
| 185 |
+
    def __init__(self, config: Dict):
        """Initialize the tracker; the cleanup thread is started separately."""
        self.config = config
        # session_id -> UnifiedSession, guarded by self.lock
        self.sessions: Dict[str, UnifiedSession] = {}
        self.session_index: Dict[Tuple[str, str], Set[str]] = {}  # (type, key) -> session_ids
        self.lock = threading.Lock()

        # Configuration
        self.max_sessions = config.get('max_sessions', 10000)
        self.session_timeout = config.get('session_timeout', 3600)  # seconds
        self.cleanup_interval = config.get('cleanup_interval', 300)  # seconds
        self.metrics_retention = config.get('metrics_retention', 86400)  # 24 hours

        # Statistics
        self.stats = {
            'total_sessions': 0,
            'active_sessions': 0,
            'expired_sessions': 0,
            'session_types': {t.value: 0 for t in SessionType},
            'session_states': {s.value: 0 for s in SessionState},
            'cleanup_runs': 0,
            'correlations_created': 0
        }

        # Background tasks
        self.running = False
        self.cleanup_thread = None
|
| 211 |
+
|
| 212 |
+
def _generate_session_key(self, session_type: SessionType, **kwargs) -> str:
|
| 213 |
+
"""Generate session key for indexing"""
|
| 214 |
+
if session_type == SessionType.DHCP_LEASE:
|
| 215 |
+
return f"dhcp_{kwargs.get('mac_address', 'unknown')}"
|
| 216 |
+
elif session_type == SessionType.NAT_SESSION:
|
| 217 |
+
return f"nat_{kwargs.get('virtual_ip', '')}_{kwargs.get('virtual_port', 0)}_{kwargs.get('protocol', '')}"
|
| 218 |
+
elif session_type == SessionType.TCP_CONNECTION:
|
| 219 |
+
return f"tcp_{kwargs.get('local_ip', '')}_{kwargs.get('local_port', 0)}_{kwargs.get('remote_ip', '')}_{kwargs.get('remote_port', 0)}"
|
| 220 |
+
elif session_type == SessionType.SOCKET_CONNECTION:
|
| 221 |
+
return f"socket_{kwargs.get('connection_id', 'unknown')}"
|
| 222 |
+
elif session_type == SessionType.BRIDGE_CLIENT:
|
| 223 |
+
return f"bridge_{kwargs.get('client_id', 'unknown')}"
|
| 224 |
+
else:
|
| 225 |
+
return f"unknown_{time.time()}"
|
| 226 |
+
|
| 227 |
+
def _add_to_index(self, session: UnifiedSession):
    """Add *session* to the secondary search index.

    The index maps (dimension, value) tuples to sets of session ids so
    that find_sessions() can avoid a full scan for common criteria.
    Caller must hold self.lock.

    Improvement: the original repeated the same check-then-create block
    four times; dict.setdefault() expresses it once per key.
    """
    # Collect every index bucket this session belongs to.
    keys = [(session.session_type.value, 'all')]
    if session.virtual_ip:
        keys.append(('virtual_ip', session.virtual_ip))
    if session.real_ip:
        keys.append(('real_ip', session.real_ip))
    if session.protocol:
        keys.append(('protocol', session.protocol))

    for key in keys:
        self.session_index.setdefault(key, set()).add(session.session_id)
|
| 254 |
+
|
| 255 |
+
def _remove_from_index(self, session: UnifiedSession):
    """Remove *session* from every index bucket it appears in.

    Bug fix: the original left empty sets in self.session_index forever,
    so the index grew without bound as keys (IPs, protocols) churned.
    Buckets that become empty are now deleted.  Caller must hold
    self.lock.
    """
    empty_keys = []
    for key, session_set in self.session_index.items():
        session_set.discard(session.session_id)
        if not session_set:
            empty_keys.append(key)
    # Delete after iteration; mutating a dict while iterating it raises.
    for key in empty_keys:
        del self.session_index[key]
|
| 259 |
+
|
| 260 |
+
def create_session(self, session_type: SessionType, **kwargs) -> Optional[str]:
    """Create a new session and return its id, or None when at capacity.

    Accepts the UnifiedSession fields (virtual_ip, virtual_port, real_ip,
    real_port, protocol, metadata) plus an optional explicit session_id.

    Bug fix: the original called self._cleanup_expired_sessions() while
    holding self.lock; that helper removed sessions via remove_session(),
    which re-acquires the same non-reentrant threading.Lock and
    deadlocks.  The expiry sweep is now done inline under the
    already-held lock, without re-locking.

    The return annotation is Optional[str]: the original was annotated
    -> str but already returned None when full.
    """
    with self.lock:
        if len(self.sessions) >= self.max_sessions:
            # Inline expiry sweep.  NOTE: do NOT call
            # _cleanup_expired_sessions() / remove_session() here --
            # both re-acquire self.lock, which we already hold.
            now = time.time()
            expired_ids = [
                s.session_id for s in self.sessions.values()
                if (s.state == SessionState.CLOSED
                    and now - s.last_activity > self.cleanup_interval)
                or (s.state != SessionState.CLOSED
                    and now - s.last_activity > self.session_timeout)
            ]
            for sid in expired_ids:
                expired = self.sessions.pop(sid)
                self._remove_from_index(expired)
                self.stats['session_types'][expired.session_type.value] -= 1
                self.stats['session_states'][expired.state.value] -= 1
                self.stats['expired_sessions'] += 1
            if len(self.sessions) >= self.max_sessions:
                return None  # still full after sweeping

        session = UnifiedSession(
            session_id=kwargs.get('session_id', str(uuid.uuid4())),
            session_type=session_type,
            state=SessionState.INITIALIZING,
            virtual_ip=kwargs.get('virtual_ip'),
            virtual_port=kwargs.get('virtual_port'),
            real_ip=kwargs.get('real_ip'),
            real_port=kwargs.get('real_port'),
            protocol=kwargs.get('protocol'),
            metadata=kwargs.get('metadata', {})
        )

        self.sessions[session.session_id] = session
        self._add_to_index(session)

        self.stats['total_sessions'] += 1
        self.stats['active_sessions'] = len(self.sessions)
        self.stats['session_types'][session_type.value] += 1
        self.stats['session_states'][SessionState.INITIALIZING.value] += 1

        return session.session_id
|
| 294 |
+
|
| 295 |
+
def update_session(self, session_id: str, **kwargs) -> bool:
|
| 296 |
+
"""Update session"""
|
| 297 |
+
with self.lock:
|
| 298 |
+
session = self.sessions.get(session_id)
|
| 299 |
+
if not session:
|
| 300 |
+
return False
|
| 301 |
+
|
| 302 |
+
# Update fields
|
| 303 |
+
old_state = session.state
|
| 304 |
+
|
| 305 |
+
for key, value in kwargs.items():
|
| 306 |
+
if hasattr(session, key):
|
| 307 |
+
setattr(session, key, value)
|
| 308 |
+
|
| 309 |
+
session.update_activity()
|
| 310 |
+
|
| 311 |
+
# Update state statistics
|
| 312 |
+
if 'state' in kwargs and kwargs['state'] != old_state:
|
| 313 |
+
self.stats['session_states'][old_state.value] -= 1
|
| 314 |
+
self.stats['session_states'][kwargs['state'].value] += 1
|
| 315 |
+
|
| 316 |
+
return True
|
| 317 |
+
|
| 318 |
+
def close_session(self, session_id: str, reason: str = "") -> bool:
    """Mark a session CLOSED; it is purged later by the cleanup sweep.

    Returns False when the id is unknown.  An optional *reason* is
    recorded in the session metadata.
    """
    with self.lock:
        session = self.sessions.get(session_id)
        if session is None:
            return False

        previous_state = session.state
        session.state = SessionState.CLOSED
        session.update_activity()

        if reason:
            session.metadata['close_reason'] = reason

        # Keep the per-state counters in sync with the transition.
        self.stats['session_states'][previous_state.value] -= 1
        self.stats['session_states'][SessionState.CLOSED.value] += 1

        return True
|
| 337 |
+
|
| 338 |
+
def remove_session(self, session_id: str) -> bool:
    """Delete a session outright, updating index and counters.

    Returns False when the id is unknown.
    """
    with self.lock:
        # pop() combines the lookup and the delete of the original.
        session = self.sessions.pop(session_id, None)
        if session is None:
            return False

        self._remove_from_index(session)

        self.stats['active_sessions'] = len(self.sessions)
        self.stats['session_types'][session.session_type.value] -= 1
        self.stats['session_states'][session.state.value] -= 1

        return True
|
| 357 |
+
|
| 358 |
+
def get_session(self, session_id: str) -> Optional[UnifiedSession]:
    """Return the session with *session_id*, or None if it does not exist."""
    with self.lock:
        return self.sessions.get(session_id)
|
| 362 |
+
|
| 363 |
+
def find_sessions(self, **criteria) -> List[UnifiedSession]:
    """Return all sessions matching every given criterion.

    Uses the secondary index for session_type / virtual_ip / real_ip /
    protocol criteria, then filters the candidates attribute by
    attribute.  Sessions lacking a criterion attribute never match.

    Bug fix: enum-valued attributes (session_type, state) can now be
    matched by their string value as well as by the enum member.  The
    original accepted a string for the index lookup but then compared
    enum != str in the filter, which never matched, so e.g.
    find_sessions(session_type='nat_session') always returned [].
    """
    with self.lock:
        # Narrow the candidate set via the index when possible.
        if 'session_type' in criteria:
            st = criteria['session_type']
            type_key = (st.value if isinstance(st, SessionType) else st, 'all')
            candidate_ids = self.session_index.get(type_key, set())
        elif 'virtual_ip' in criteria:
            candidate_ids = self.session_index.get(
                ('virtual_ip', criteria['virtual_ip']), set())
        elif 'real_ip' in criteria:
            candidate_ids = self.session_index.get(
                ('real_ip', criteria['real_ip']), set())
        elif 'protocol' in criteria:
            candidate_ids = self.session_index.get(
                ('protocol', criteria['protocol']), set())
        else:
            candidate_ids = set(self.sessions.keys())

        def _matches(session) -> bool:
            for key, value in criteria.items():
                if not hasattr(session, key):
                    return False
                actual = getattr(session, key)
                if (isinstance(actual, (SessionType, SessionState))
                        and isinstance(value, str)):
                    # Allow matching an enum attribute by its .value.
                    if actual.value != value:
                        return False
                elif actual != value:
                    return False
            return True

        return [
            session
            for session in (self.sessions.get(sid) for sid in candidate_ids)
            if session is not None and _matches(session)
        ]
|
| 409 |
+
|
| 410 |
+
def correlate_sessions(self, session_id1: str, session_id2: str, relationship: str = 'related') -> bool:
    """Link two sessions either as parent/child or as mutual relations.

    Returns False when either id is unknown.  Any *relationship* other
    than 'parent_child' is treated as a symmetric relation.
    """
    with self.lock:
        first = self.sessions.get(session_id1)
        second = self.sessions.get(session_id2)
        if first is None or second is None:
            return False

        if relationship == 'parent_child':
            first.add_child_session(session_id2)
            second.set_parent_session(session_id1)
        else:
            # Symmetric: each side records the other.
            first.add_related_session(session_id2)
            second.add_related_session(session_id1)

        self.stats['correlations_created'] += 1
        return True
|
| 428 |
+
|
| 429 |
+
def update_metrics(self, session_id: str, **metrics) -> bool:
    """Fold traffic / latency / error counters into a session's metrics.

    Recognised keys: bytes_in, bytes_out, packets_in, packets_out, rtt,
    errors, retransmits.  Returns False when the id is unknown.
    """
    with self.lock:
        session = self.sessions.get(session_id)
        if session is None:
            return False

        session.update_activity()
        m = session.metrics

        if 'bytes_in' in metrics or 'bytes_out' in metrics:
            m.update_bytes(metrics.get('bytes_in', 0),
                           metrics.get('bytes_out', 0))

        if 'packets_in' in metrics or 'packets_out' in metrics:
            m.update_packets(metrics.get('packets_in', 0),
                             metrics.get('packets_out', 0))

        if 'rtt' in metrics:
            m.add_rtt_sample(metrics['rtt'])
        if 'errors' in metrics:
            m.errors += metrics['errors']
        if 'retransmits' in metrics:
            m.retransmits += metrics['retransmits']

        return True
|
| 461 |
+
|
| 462 |
+
def _cleanup_expired_sessions(self):
    """Purge timed-out sessions.  Caller must hold self.lock.

    CLOSED sessions are removed once idle longer than cleanup_interval;
    any other session once idle longer than session_timeout.

    Bug fix: the original called self.remove_session() per expired id;
    remove_session() re-acquires self.lock, a plain (non-reentrant)
    threading.Lock, and every call site of this method already holds
    that lock -- so the first expiry deadlocked the process.  Removal
    is now done inline without re-locking.
    """
    now = time.time()
    expired = []
    for session_id, session in self.sessions.items():
        idle = now - session.last_activity
        if session.state == SessionState.CLOSED:
            if idle > self.cleanup_interval:
                expired.append(session_id)
        elif idle > self.session_timeout:
            expired.append(session_id)

    # Remove after iterating; deleting during iteration raises.
    for session_id in expired:
        session = self.sessions.pop(session_id)
        self._remove_from_index(session)
        self.stats['active_sessions'] = len(self.sessions)
        self.stats['session_types'][session.session_type.value] -= 1
        self.stats['session_states'][session.state.value] -= 1
        self.stats['expired_sessions'] += 1
|
| 480 |
+
|
| 481 |
+
def _cleanup_loop(self):
    """Background loop: expire stale sessions every cleanup_interval seconds.

    Runs until self.running is cleared by stop().  On unexpected errors
    it logs and backs off for 60 seconds instead of dying.

    NOTE(review): _cleanup_expired_sessions() is invoked here with
    self.lock already held; it must therefore never re-acquire the
    (non-reentrant) lock itself -- confirm its implementation honours
    that contract.
    """
    while self.running:
        try:
            with self.lock:
                self._cleanup_expired_sessions()
                self.stats['cleanup_runs'] += 1

            # Sleep outside the lock so other threads are not blocked.
            time.sleep(self.cleanup_interval)

        except Exception as e:
            print(f"Session tracker cleanup error: {e}")
            time.sleep(60)
|
| 494 |
+
|
| 495 |
+
def get_sessions(self, limit: int = 100, offset: int = 0, **filters) -> List[Dict]:
    """Return up to *limit* session dicts (newest activity first), skipping *offset*.

    Extra kwargs are passed to find_sessions() as filter criteria.

    Bug fix: the original invoked self.find_sessions() while already
    holding self.lock; find_sessions() acquires the same non-reentrant
    lock, so every filtered query deadlocked.  find_sessions() now runs
    unlocked (it takes the lock itself); the unfiltered path snapshots
    the session table under the lock.  Sorting and pagination operate
    on the snapshot and need no lock.
    """
    if filters:
        sessions = self.find_sessions(**filters)
    else:
        with self.lock:
            sessions = list(self.sessions.values())

    # Most recently active first.
    sessions.sort(key=lambda s: s.last_activity, reverse=True)

    page = sessions[offset:offset + limit]
    return [session.to_dict() for session in page]
|
| 510 |
+
|
| 511 |
+
def get_session_summary(self) -> Dict:
    """Aggregate live sessions by type, state, protocol and activity age."""
    with self.lock:
        by_type: Dict = {}
        by_state: Dict = {}
        by_protocol: Dict = {}
        age_buckets = {'last_hour': 0, 'last_day': 0, 'older': 0}

        now = time.time()
        for session in self.sessions.values():
            type_name = session.session_type.value
            by_type[type_name] = by_type.get(type_name, 0) + 1

            state_name = session.state.value
            by_state[state_name] = by_state.get(state_name, 0) + 1

            if session.protocol:
                by_protocol[session.protocol] = by_protocol.get(session.protocol, 0) + 1

            # Bucket by time since last activity.
            age = now - session.last_activity
            if age < 3600:
                age_buckets['last_hour'] += 1
            elif age < 86400:
                age_buckets['last_day'] += 1
            else:
                age_buckets['older'] += 1

        return {
            'total_sessions': len(self.sessions),
            'by_type': by_type,
            'by_state': by_state,
            'by_protocol': by_protocol,
            'active_sessions_by_age': age_buckets,
        }
|
| 552 |
+
|
| 553 |
+
def get_stats(self) -> Dict:
    """Return a snapshot copy of the running counters (shallow copy)."""
    with self.lock:
        snapshot = dict(self.stats)
        snapshot['active_sessions'] = len(self.sessions)
    return snapshot
|
| 560 |
+
|
| 561 |
+
def reset_stats(self):
    """Reset counters, re-deriving per-type/state counts from live sessions.

    Bug fix: the original replaced self.stats outside the lock and then
    locked only the recount loop, so a concurrent reader could observe
    zeroed per-type/per-state counters mid-reset.  The stats dict is now
    built locally and swapped in atomically under self.lock.
    """
    with self.lock:
        stats = {
            'total_sessions': len(self.sessions),
            'active_sessions': len(self.sessions),
            'expired_sessions': 0,
            'session_types': {t.value: 0 for t in SessionType},
            'session_states': {s.value: 0 for s in SessionState},
            'cleanup_runs': 0,
            'correlations_created': 0,
        }
        # Recount what is currently live.
        for session in self.sessions.values():
            stats['session_types'][session.session_type.value] += 1
            stats['session_states'][session.state.value] += 1
        self.stats = stats
|
| 578 |
+
|
| 579 |
+
def export_sessions(self, format: str = 'json') -> str:
    """Serialise all sessions; only the 'json' format is supported.

    Raises ValueError for any other *format*.
    """
    with self.lock:
        payload = [session.to_dict() for session in self.sessions.values()]

    if format != 'json':
        raise ValueError(f"Unsupported export format: {format}")
    return json.dumps(payload, indent=2, default=str)
|
| 588 |
+
|
| 589 |
+
def start(self):
    """Spawn the daemon cleanup thread and mark the tracker as running."""
    self.running = True
    worker = threading.Thread(target=self._cleanup_loop, daemon=True)
    self.cleanup_thread = worker
    worker.start()
    print("Session tracker started")
|
| 595 |
+
|
| 596 |
+
def stop(self):
    """Signal the cleanup thread to exit and wait for it to finish."""
    self.running = False
    thread = self.cleanup_thread
    if thread:
        thread.join()
    print("Session tracker stopped")
|
| 602 |
+
|
core/socket_translator.py
ADDED
|
@@ -0,0 +1,653 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""
|
| 2 |
+
Socket Translator Module
|
| 3 |
+
|
| 4 |
+
Bridges virtual connections to real host sockets:
|
| 5 |
+
- Map virtual connections to host sockets/HTTP clients
|
| 6 |
+
- Bidirectional data streaming
|
| 7 |
+
- Connection lifecycle management
|
| 8 |
+
- Protocol translation (TCP/UDP to host sockets)
|
| 9 |
+
"""
|
| 10 |
+
|
| 11 |
+
import socket
|
| 12 |
+
import threading
|
| 13 |
+
import time
|
| 14 |
+
import asyncio
|
| 15 |
+
import aiohttp
|
| 16 |
+
import ssl
|
| 17 |
+
from typing import Dict, Optional, Callable, Tuple, Any
|
| 18 |
+
from dataclasses import dataclass
|
| 19 |
+
from enum import Enum
|
| 20 |
+
import urllib.parse
|
| 21 |
+
import json
|
| 22 |
+
|
| 23 |
+
from .tcp_engine import TCPConnection
|
| 24 |
+
|
| 25 |
+
|
| 26 |
+
class ConnectionType(Enum):
    """How a virtual connection is bridged to the host network."""
    TCP_SOCKET = "TCP_SOCKET"      # raw host TCP socket
    UDP_SOCKET = "UDP_SOCKET"      # UDP socket (declared; creation path not visible here -- confirm)
    HTTP_CLIENT = "HTTP_CLIENT"    # translated into an HTTP client request
    HTTPS_CLIENT = "HTTPS_CLIENT"  # translated into an HTTPS client request
|
| 31 |
+
|
| 32 |
+
|
| 33 |
+
@dataclass
class SocketConnection:
    """State for one virtual-to-host bridged connection."""
    connection_id: str
    connection_type: ConnectionType
    virtual_connection: Optional[TCPConnection]
    host_socket: Optional[socket.socket]
    remote_host: str
    remote_port: int
    created_time: float
    last_activity: float
    bytes_sent: int = 0
    bytes_received: int = 0
    is_connected: bool = False
    error_count: int = 0

    def update_activity(self, bytes_transferred: int = 0, direction: str = 'sent'):
        """Refresh the activity timestamp and account transferred bytes.

        *direction* is 'sent' for outbound bytes; anything else counts
        as received.
        """
        self.last_activity = time.time()
        if direction == 'sent':
            self.bytes_sent = self.bytes_sent + bytes_transferred
        else:
            self.bytes_received = self.bytes_received + bytes_transferred

    def to_dict(self) -> Dict:
        """Return a serialisable snapshot of this connection's state."""
        snapshot = {
            'connection_id': self.connection_id,
            'connection_type': self.connection_type.value,
            'remote_host': self.remote_host,
            'remote_port': self.remote_port,
            'created_time': self.created_time,
            'last_activity': self.last_activity,
            'bytes_sent': self.bytes_sent,
            'bytes_received': self.bytes_received,
            'is_connected': self.is_connected,
            'error_count': self.error_count,
        }
        # Age of the connection, computed at snapshot time.
        snapshot['duration'] = time.time() - self.created_time
        return snapshot
|
| 72 |
+
|
| 73 |
+
|
| 74 |
+
class HTTPRequest:
    """Minimal parsed representation of an HTTP/1.1 request."""

    def __init__(self, method: str = 'GET', path: str = '/', headers: Dict[str, str] = None, body: bytes = b''):
        self.method = method.upper()
        self.path = path
        self.headers = headers or {}
        self.body = body
        self.version = 'HTTP/1.1'

    @classmethod
    def parse(cls, data: bytes) -> Optional['HTTPRequest']:
        """Parse a raw request; returns None on malformed input.

        Header names are lower-cased.

        Bug fix: when the blank line separating headers from body is
        missing (truncated request), the original left body_start at 1
        and returned the header lines themselves as the body; the body
        is now empty in that case.
        """
        try:
            lines = data.decode('utf-8', errors='ignore').split('\r\n')
            if not lines:
                return None

            request_line = lines[0].split(' ')
            if len(request_line) < 3:
                return None
            method, path = request_line[0], request_line[1]

            headers = {}
            body_start = len(lines)  # fixed: no blank line => no body
            for i, line in enumerate(lines[1:], 1):
                if line == '':
                    body_start = i + 1
                    break
                if ':' in line:
                    key, value = line.split(':', 1)
                    headers[key.strip().lower()] = value.strip()

            body = '\r\n'.join(lines[body_start:]).encode('utf-8')
            return cls(method, path, headers, body)

        except Exception:
            return None

    def to_bytes(self) -> bytes:
        """Serialise back to wire format, adding default headers.

        Defaults (host, user-agent, content-length) are applied to a
        local copy.  Bug fix: the original wrote them into self.headers,
        mutating the request (and any caller-owned dict) as a side
        effect of serialisation.
        """
        headers = dict(self.headers)
        headers.setdefault('host', 'localhost')
        headers.setdefault('user-agent', 'VirtualISP/1.0')
        if self.body and 'content-length' not in headers:
            headers['content-length'] = str(len(self.body))

        head = f"{self.method} {self.path} {self.version}\r\n"
        head += ''.join(f"{key}: {value}\r\n" for key, value in headers.items())
        return (head + '\r\n').encode('utf-8') + self.body
|
| 139 |
+
|
| 140 |
+
|
| 141 |
+
class HTTPResponse:
    """Minimal parsed representation of an HTTP/1.1 response."""

    def __init__(self, status_code: int = 200, reason: str = 'OK', headers: Dict[str, str] = None, body: bytes = b''):
        self.status_code = status_code
        self.reason = reason
        self.headers = headers or {}
        self.body = body
        self.version = 'HTTP/1.1'

    @classmethod
    def parse(cls, data: bytes) -> Optional['HTTPResponse']:
        """Parse a raw response; returns None on malformed input.

        Header names are lower-cased.

        Bug fix: when the blank line terminating the headers is missing,
        the original treated the header lines as the body; the body is
        now empty in that case (same defect as HTTPRequest.parse).
        """
        try:
            lines = data.decode('utf-8', errors='ignore').split('\r\n')
            if not lines:
                return None

            status_line = lines[0].split(' ', 2)
            if len(status_line) < 3:
                return None
            status_code, reason = int(status_line[1]), status_line[2]

            headers = {}
            body_start = len(lines)  # fixed: no blank line => no body
            for i, line in enumerate(lines[1:], 1):
                if line == '':
                    body_start = i + 1
                    break
                if ':' in line:
                    key, value = line.split(':', 1)
                    headers[key.strip().lower()] = value.strip()

            body = '\r\n'.join(lines[body_start:]).encode('utf-8')
            return cls(status_code, reason, headers, body)

        except Exception:
            return None

    def to_bytes(self) -> bytes:
        """Serialise back to wire format, adding default headers.

        Bug fix: defaults (content-length, server) are applied to a
        local copy instead of mutating self.headers as a side effect.
        """
        headers = dict(self.headers)
        if self.body and 'content-length' not in headers:
            headers['content-length'] = str(len(self.body))
        headers.setdefault('server', 'VirtualISP/1.0')

        head = f"{self.version} {self.status_code} {self.reason}\r\n"
        head += ''.join(f"{key}: {value}\r\n" for key, value in headers.items())
        return (head + '\r\n').encode('utf-8') + self.body
|
| 204 |
+
|
| 205 |
+
|
| 206 |
+
class SocketTranslator:
|
| 207 |
+
"""Socket translator implementation"""
|
| 208 |
+
|
| 209 |
+
def __init__(self, config: Dict):
    """Create a translator; *config* supplies optional tuning knobs."""
    self.config = config
    self.connections: Dict[str, SocketConnection] = {}
    self.lock = threading.Lock()

    # Tuning knobs (seconds / counts) with conservative defaults.
    self.connect_timeout = config.get('connect_timeout', 10)
    self.read_timeout = config.get('read_timeout', 30)
    self.max_connections = config.get('max_connections', 1000)
    self.buffer_size = config.get('buffer_size', 8192)

    # Shared aiohttp client session and its event loop, created lazily.
    self.http_session = None
    self.loop = None

    # Running counters.
    self.stats = dict.fromkeys(
        ('total_connections', 'active_connections', 'failed_connections',
         'bytes_transferred', 'http_requests', 'tcp_connections',
         'udp_connections'),
        0,
    )

    # Background maintenance state.
    self.running = False
    self.cleanup_thread = None
|
| 238 |
+
|
| 239 |
+
async def _init_http_session(self):
    """Lazily build the shared aiohttp client session.

    Connection pooling and DNS caching are configured on the connector;
    timeouts come from the translator's config.
    """
    self.http_session = aiohttp.ClientSession(
        connector=aiohttp.TCPConnector(
            limit=100,
            limit_per_host=10,
            ttl_dns_cache=300,
            use_dns_cache=True,
        ),
        timeout=aiohttp.ClientTimeout(
            total=self.read_timeout,
            connect=self.connect_timeout,
        ),
        headers={'User-Agent': 'VirtualISP/1.0'},
    )
|
| 258 |
+
|
| 259 |
+
def _is_http_request(self, data: bytes) -> bool:
    """Heuristic: does *data* begin with an HTTP request line?

    Bug fix: the original used a bare ``except:``, which also swallows
    KeyboardInterrupt/SystemExit; narrowed to Exception.  Also uses the
    tuple form of str.startswith instead of a per-method scan.
    """
    try:
        first_line = data.split(b'\r\n', 1)[0].decode('utf-8', errors='ignore')
    except Exception:
        return False
    methods = ('GET ', 'POST ', 'PUT ', 'DELETE ', 'HEAD ',
               'OPTIONS ', 'PATCH ', 'TRACE ')
    return first_line.startswith(methods)
|
| 267 |
+
|
| 268 |
+
def _determine_connection_type(self, remote_host: str, remote_port: int, data: bytes = b'') -> ConnectionType:
    """Pick the bridging strategy from the port number and payload sniffing."""
    looks_like_http = bool(data) and self._is_http_request(data)
    if remote_port == 80 or looks_like_http:
        return ConnectionType.HTTP_CLIENT
    if remote_port == 443:
        return ConnectionType.HTTPS_CLIENT
    return ConnectionType.TCP_SOCKET
|
| 277 |
+
|
| 278 |
+
def create_connection(self, virtual_conn: TCPConnection, remote_host: str, remote_port: int,
                      initial_data: bytes = b'') -> Optional[SocketConnection]:
    """Bridge *virtual_conn* to remote_host:remote_port.

    Returns the SocketConnection on success, None when the connection
    limit is reached or the underlying connect fails.

    Bug fix: statistics counters are now updated while holding
    self.lock; the original mutated self.stats unlocked, racing with
    other threads, and left 'active_connections' stale after a failed
    connection was removed from the table.
    """
    connection_id = f"{virtual_conn.connection_id}->{remote_host}:{remote_port}"

    with self.lock:
        if len(self.connections) >= self.max_connections:
            return None

    conn_type = self._determine_connection_type(remote_host, remote_port, initial_data)

    socket_conn = SocketConnection(
        connection_id=connection_id,
        connection_type=conn_type,
        virtual_connection=virtual_conn,
        host_socket=None,
        remote_host=remote_host,
        remote_port=remote_port,
        created_time=time.time(),
        last_activity=time.time()
    )

    # Register before connecting so concurrent lookups can see it.
    with self.lock:
        self.connections[connection_id] = socket_conn

    is_http = conn_type in (ConnectionType.HTTP_CLIENT, ConnectionType.HTTPS_CLIENT)
    if is_http:
        success = self._create_http_connection(socket_conn, initial_data)
    else:
        success = self._create_tcp_connection(socket_conn, initial_data)

    with self.lock:
        if success:
            self.stats['total_connections'] += 1
            if is_http:
                self.stats['http_requests'] += 1
            else:
                self.stats['tcp_connections'] += 1
        else:
            self.stats['failed_connections'] += 1
            self.connections.pop(connection_id, None)
        self.stats['active_connections'] = len(self.connections)

    return socket_conn if success else None
|
| 328 |
+
|
| 329 |
+
def _create_tcp_connection(self, socket_conn: SocketConnection, initial_data: bytes) -> bool:
    """Open a real TCP socket to the remote peer and start the reader thread.

    Bug fixes: uses sendall() -- plain send() may transmit only part of
    the buffer -- and closes the socket on failure; the original leaked
    the file descriptor whenever connect()/send() raised.
    """
    sock = None
    try:
        sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        # Short timeout for the handshake, longer one once connected.
        sock.settimeout(self.connect_timeout)
        sock.connect((socket_conn.remote_host, socket_conn.remote_port))
        sock.settimeout(self.read_timeout)

        socket_conn.host_socket = sock
        socket_conn.is_connected = True

        if initial_data:
            sock.sendall(initial_data)  # sendall: guarantee the full buffer goes out
            socket_conn.update_activity(len(initial_data), 'sent')

        # Reader thread streams remote data back to the virtual side.
        threading.Thread(
            target=self._tcp_receive_loop,
            args=(socket_conn,),
            daemon=True,
        ).start()

        return True

    except Exception as e:
        print(f"Failed to create TCP connection to {socket_conn.remote_host}:{socket_conn.remote_port}: {e}")
        socket_conn.error_count += 1
        if sock is not None:
            try:
                sock.close()
            except OSError:
                pass
            socket_conn.host_socket = None
            socket_conn.is_connected = False
        return False
|
| 362 |
+
|
| 363 |
+
    def _create_http_connection(self, socket_conn: SocketConnection, initial_data: bytes) -> bool:
        """Parse *initial_data* as an HTTP request and dispatch it upstream.

        Prefers the shared asyncio loop (aiohttp path in _handle_http_request);
        falls back to a blocking urllib request when no loop is available.

        Returns False when the bytes do not parse as an HTTP request or the
        sync fallback fails; True when the request was handled or scheduled.
        """
        try:
            # Parse HTTP request
            http_request = HTTPRequest.parse(initial_data)
            if not http_request:
                return False

            # Set host header
            # NOTE(review): assumes HTTPRequest stores header names
            # lower-cased ('host') — verify against HTTPRequest.parse.
            http_request.headers['host'] = socket_conn.remote_host

            # Start async HTTP request.  Fire-and-forget: the coroutine
            # delivers the response via the virtual connection's
            # on_data_received callback.
            if self.loop and not self.loop.is_closed():
                asyncio.run_coroutine_threadsafe(
                    self._handle_http_request(socket_conn, http_request),
                    self.loop
                )
            else:
                # Fallback to sync HTTP handling
                return self._handle_http_request_sync(socket_conn, http_request)

            return True

        except Exception as e:
            print(f"Failed to create HTTP connection to {socket_conn.remote_host}:{socket_conn.remote_port}: {e}")
            socket_conn.error_count += 1
            return False
|
| 390 |
+
|
| 391 |
+
    async def _handle_http_request(self, socket_conn: SocketConnection, http_request: HTTPRequest):
        """Proxy one HTTP request upstream via aiohttp (runs on self.loop).

        The serialized response — or a synthesized 500 on failure — is
        pushed back through the virtual connection's on_data_received
        callback; there is no return value.
        """
        try:
            # Lazily create the shared aiohttp session on first use.
            if not self.http_session:
                await self._init_http_session()

            # Build URL
            scheme = 'https' if socket_conn.connection_type == ConnectionType.HTTPS_CLIENT else 'http'
            url = f"{scheme}://{socket_conn.remote_host}:{socket_conn.remote_port}{http_request.path}"

            # Make request
            async with self.http_session.request(
                method=http_request.method,
                url=url,
                headers=http_request.headers,
                data=http_request.body
            ) as response:
                # Read the full body before the response context closes.
                response_body = await response.read()

                # Re-serialize into our own HTTPResponse representation.
                http_response = HTTPResponse(
                    status_code=response.status,
                    reason=response.reason or 'OK',
                    headers=dict(response.headers),
                    body=response_body
                )

                # Send response back to virtual connection
                response_data = http_response.to_bytes()
                if socket_conn.virtual_connection and socket_conn.virtual_connection.on_data_received:
                    socket_conn.virtual_connection.on_data_received(response_data)

                socket_conn.update_activity(len(response_data), 'received')
                self.stats['bytes_transferred'] += len(response_data)

        except Exception as e:
            print(f"HTTP request failed: {e}")
            socket_conn.error_count += 1

            # Surface the failure to the client as a 500 instead of silence.
            error_response = HTTPResponse(
                status_code=500,
                reason='Internal Server Error',
                body=f"Error: {str(e)}".encode('utf-8')
            )

            response_data = error_response.to_bytes()
            if socket_conn.virtual_connection and socket_conn.virtual_connection.on_data_received:
                socket_conn.virtual_connection.on_data_received(response_data)
|
| 441 |
+
|
| 442 |
+
def _handle_http_request_sync(self, socket_conn: SocketConnection, http_request: HTTPRequest) -> bool:
|
| 443 |
+
"""Handle HTTP request synchronously (fallback)"""
|
| 444 |
+
try:
|
| 445 |
+
# Use urllib for sync HTTP requests
|
| 446 |
+
scheme = 'https' if socket_conn.connection_type == ConnectionType.HTTPS_CLIENT else 'http'
|
| 447 |
+
url = f"{scheme}://{socket_conn.remote_host}:{socket_conn.remote_port}{http_request.path}"
|
| 448 |
+
|
| 449 |
+
import urllib.request
|
| 450 |
+
import urllib.error
|
| 451 |
+
|
| 452 |
+
# Create request
|
| 453 |
+
req = urllib.request.Request(
|
| 454 |
+
url,
|
| 455 |
+
data=http_request.body if http_request.body else None,
|
| 456 |
+
headers=http_request.headers,
|
| 457 |
+
method=http_request.method
|
| 458 |
+
)
|
| 459 |
+
|
| 460 |
+
# Make request
|
| 461 |
+
with urllib.request.urlopen(req, timeout=self.read_timeout) as response:
|
| 462 |
+
response_body = response.read()
|
| 463 |
+
|
| 464 |
+
# Create HTTP response
|
| 465 |
+
http_response = HTTPResponse(
|
| 466 |
+
status_code=response.getcode(),
|
| 467 |
+
reason='OK',
|
| 468 |
+
headers=dict(response.headers),
|
| 469 |
+
body=response_body
|
| 470 |
+
)
|
| 471 |
+
|
| 472 |
+
# Send response back to virtual connection
|
| 473 |
+
response_data = http_response.to_bytes()
|
| 474 |
+
if socket_conn.virtual_connection and socket_conn.virtual_connection.on_data_received:
|
| 475 |
+
socket_conn.virtual_connection.on_data_received(response_data)
|
| 476 |
+
|
| 477 |
+
socket_conn.update_activity(len(response_data), 'received')
|
| 478 |
+
self.stats['bytes_transferred'] += len(response_data)
|
| 479 |
+
|
| 480 |
+
return True
|
| 481 |
+
|
| 482 |
+
except Exception as e:
|
| 483 |
+
print(f"Sync HTTP request failed: {e}")
|
| 484 |
+
socket_conn.error_count += 1
|
| 485 |
+
return False
|
| 486 |
+
|
| 487 |
+
    def _tcp_receive_loop(self, socket_conn: SocketConnection):
        """Background loop (daemon thread) pumping bytes from the host
        socket to the virtual connection until EOF, error, or disconnect.

        Always closes and untracks the connection on exit (finally block).
        """
        sock = socket_conn.host_socket
        if not sock:
            return

        try:
            while socket_conn.is_connected:
                try:
                    data = sock.recv(self.buffer_size)
                    if not data:
                        # Empty read => remote side closed the connection.
                        break

                    # Forward data to virtual connection
                    if socket_conn.virtual_connection and socket_conn.virtual_connection.on_data_received:
                        socket_conn.virtual_connection.on_data_received(data)

                    socket_conn.update_activity(len(data), 'received')
                    self.stats['bytes_transferred'] += len(data)

                except socket.timeout:
                    # Periodic read timeout lets us re-check is_connected.
                    continue
                except Exception as e:
                    print(f"TCP receive error: {e}")
                    break

        finally:
            self._close_connection(socket_conn.connection_id)
|
| 515 |
+
|
| 516 |
+
def send_data(self, connection_id: str, data: bytes) -> bool:
|
| 517 |
+
"""Send data through socket connection"""
|
| 518 |
+
with self.lock:
|
| 519 |
+
socket_conn = self.connections.get(connection_id)
|
| 520 |
+
|
| 521 |
+
if not socket_conn or not socket_conn.is_connected:
|
| 522 |
+
return False
|
| 523 |
+
|
| 524 |
+
try:
|
| 525 |
+
if socket_conn.connection_type in [ConnectionType.HTTP_CLIENT, ConnectionType.HTTPS_CLIENT]:
|
| 526 |
+
# For HTTP connections, treat as new request
|
| 527 |
+
return self._create_http_connection(socket_conn, data)
|
| 528 |
+
else:
|
| 529 |
+
# TCP connection
|
| 530 |
+
if socket_conn.host_socket:
|
| 531 |
+
socket_conn.host_socket.send(data)
|
| 532 |
+
socket_conn.update_activity(len(data), 'sent')
|
| 533 |
+
self.stats['bytes_transferred'] += len(data)
|
| 534 |
+
return True
|
| 535 |
+
|
| 536 |
+
except Exception as e:
|
| 537 |
+
print(f"Failed to send data: {e}")
|
| 538 |
+
socket_conn.error_count += 1
|
| 539 |
+
self._close_connection(connection_id)
|
| 540 |
+
|
| 541 |
+
return False
|
| 542 |
+
|
| 543 |
+
def _close_connection(self, connection_id: str):
|
| 544 |
+
"""Close socket connection"""
|
| 545 |
+
with self.lock:
|
| 546 |
+
socket_conn = self.connections.get(connection_id)
|
| 547 |
+
if not socket_conn:
|
| 548 |
+
return
|
| 549 |
+
|
| 550 |
+
# Close socket
|
| 551 |
+
if socket_conn.host_socket:
|
| 552 |
+
try:
|
| 553 |
+
socket_conn.host_socket.close()
|
| 554 |
+
except:
|
| 555 |
+
pass
|
| 556 |
+
|
| 557 |
+
socket_conn.is_connected = False
|
| 558 |
+
|
| 559 |
+
# Remove from connections
|
| 560 |
+
del self.connections[connection_id]
|
| 561 |
+
|
| 562 |
+
self.stats['active_connections'] = len(self.connections)
|
| 563 |
+
|
| 564 |
+
def close_connection(self, connection_id: str) -> bool:
|
| 565 |
+
"""Manually close connection"""
|
| 566 |
+
self._close_connection(connection_id)
|
| 567 |
+
return True
|
| 568 |
+
|
| 569 |
+
def get_connection(self, connection_id: str) -> Optional[SocketConnection]:
|
| 570 |
+
"""Get socket connection"""
|
| 571 |
+
with self.lock:
|
| 572 |
+
return self.connections.get(connection_id)
|
| 573 |
+
|
| 574 |
+
def get_connections(self) -> Dict[str, Dict]:
|
| 575 |
+
"""Get all socket connections"""
|
| 576 |
+
with self.lock:
|
| 577 |
+
return {
|
| 578 |
+
conn_id: conn.to_dict()
|
| 579 |
+
for conn_id, conn in self.connections.items()
|
| 580 |
+
}
|
| 581 |
+
|
| 582 |
+
def get_stats(self) -> Dict:
|
| 583 |
+
"""Get socket translator statistics"""
|
| 584 |
+
with self.lock:
|
| 585 |
+
stats = self.stats.copy()
|
| 586 |
+
stats['active_connections'] = len(self.connections)
|
| 587 |
+
|
| 588 |
+
return stats
|
| 589 |
+
|
| 590 |
+
    def _cleanup_loop(self):
        """Background loop: every 30s, close connections idle for longer
        than 2x the read timeout.  Runs until self.running is cleared."""
        while self.running:
            try:
                current_time = time.time()
                expired_connections = []

                # Collect expired ids under the lock ...
                with self.lock:
                    for conn_id, conn in self.connections.items():
                        # Close connections that have been inactive too long
                        if current_time - conn.last_activity > self.read_timeout * 2:
                            expired_connections.append(conn_id)

                # ... but close them outside it: _close_connection()
                # re-acquires self.lock, which would deadlock a
                # non-reentrant threading.Lock.
                for conn_id in expired_connections:
                    self._close_connection(conn_id)

                time.sleep(30)  # Cleanup every 30 seconds

            except Exception as e:
                print(f"Socket translator cleanup error: {e}")
                time.sleep(5)  # brief backoff before retrying after an error
|
| 611 |
+
|
| 612 |
+
def start(self):
|
| 613 |
+
"""Start socket translator"""
|
| 614 |
+
self.running = True
|
| 615 |
+
|
| 616 |
+
# Start event loop for async HTTP
|
| 617 |
+
try:
|
| 618 |
+
self.loop = asyncio.new_event_loop()
|
| 619 |
+
asyncio.set_event_loop(self.loop)
|
| 620 |
+
|
| 621 |
+
# Start cleanup thread
|
| 622 |
+
self.cleanup_thread = threading.Thread(target=self._cleanup_loop, daemon=True)
|
| 623 |
+
self.cleanup_thread.start()
|
| 624 |
+
|
| 625 |
+
print("Socket translator started")
|
| 626 |
+
except Exception as e:
|
| 627 |
+
print(f"Failed to start socket translator: {e}")
|
| 628 |
+
|
| 629 |
+
def stop(self):
|
| 630 |
+
"""Stop socket translator"""
|
| 631 |
+
self.running = False
|
| 632 |
+
|
| 633 |
+
# Close all connections
|
| 634 |
+
with self.lock:
|
| 635 |
+
connection_ids = list(self.connections.keys())
|
| 636 |
+
|
| 637 |
+
for conn_id in connection_ids:
|
| 638 |
+
self._close_connection(conn_id)
|
| 639 |
+
|
| 640 |
+
# Close HTTP session
|
| 641 |
+
if self.http_session:
|
| 642 |
+
asyncio.run_coroutine_threadsafe(self.http_session.close(), self.loop)
|
| 643 |
+
|
| 644 |
+
# Close event loop
|
| 645 |
+
if self.loop and not self.loop.is_closed():
|
| 646 |
+
self.loop.call_soon_threadsafe(self.loop.stop)
|
| 647 |
+
|
| 648 |
+
# Wait for cleanup thread
|
| 649 |
+
if self.cleanup_thread:
|
| 650 |
+
self.cleanup_thread.join()
|
| 651 |
+
|
| 652 |
+
print("Socket translator stopped")
|
| 653 |
+
|
core/tcp_engine.py
ADDED
|
@@ -0,0 +1,716 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""
|
| 2 |
+
TCP Engine Module
|
| 3 |
+
|
| 4 |
+
Implements a complete TCP state machine in user-space:
|
| 5 |
+
- Full TCP state machine (SYN, SYN-ACK, ESTABLISHED, FIN, RST)
|
| 6 |
+
- Sequence and acknowledgment number tracking
|
| 7 |
+
- Sliding window implementation
|
| 8 |
+
- Retransmission and timeout handling
|
| 9 |
+
- Congestion control
|
| 10 |
+
"""
|
| 11 |
+
|
| 12 |
+
import time
|
| 13 |
+
import threading
|
| 14 |
+
import random
|
| 15 |
+
from typing import Dict, List, Optional, Tuple, Callable
|
| 16 |
+
from dataclasses import dataclass, field
|
| 17 |
+
from enum import Enum
|
| 18 |
+
from collections import deque
|
| 19 |
+
|
| 20 |
+
from .ip_parser import TCPHeader, IPv4Header, IPParser
|
| 21 |
+
|
| 22 |
+
|
| 23 |
+
# TCP connection lifecycle states (RFC 793 §3.2).  Each member's value
# equals its name, so `state.value` is directly log/JSON friendly.
TCPState = Enum(
    "TCPState",
    {
        name: name
        for name in (
            "CLOSED",
            "LISTEN",
            "SYN_SENT",
            "SYN_RECEIVED",
            "ESTABLISHED",
            "FIN_WAIT_1",
            "FIN_WAIT_2",
            "CLOSE_WAIT",
            "CLOSING",
            "LAST_ACK",
            "TIME_WAIT",
        )
    },
)
| 35 |
+
|
| 36 |
+
|
| 37 |
+
@dataclass
class TCPSegment:
    """A single TCP segment, tracked for (re)transmission."""
    seq_num: int            # starting sequence number
    ack_num: int            # acknowledgment number carried by the segment
    flags: int              # raw TCP flag bits (FIN=0x01, SYN=0x02, ...)
    window: int             # advertised receive window
    data: bytes             # payload bytes
    timestamp: float = field(default_factory=time.time)  # last (re)send time
    retransmit_count: int = 0

    @property
    def data_length(self) -> int:
        """Number of payload bytes carried by this segment."""
        return len(self.data)

    @property
    def seq_end(self) -> int:
        """First sequence number after this segment.

        Per RFC 793, SYN and FIN each occupy one slot of sequence space
        in addition to the payload bytes.
        """
        syn_bit = (self.flags >> 1) & 1
        fin_bit = self.flags & 1
        return self.seq_num + len(self.data) + syn_bit + fin_bit
|
| 63 |
+
|
| 64 |
+
|
| 65 |
+
@dataclass
class TCPConnection:
    """Mutable per-connection TCP state (one four-tuple).

    Holds sequence tracking, window/congestion state, send/receive
    buffers, retransmission bookkeeping, and user callbacks.
    """
    # Connection identification (our endpoint first, then the peer's)
    local_ip: str
    local_port: int
    remote_ip: str
    remote_port: int

    # Current position in the TCP state machine
    state: TCPState = TCPState.CLOSED

    # Sequence numbers: local_seq = next byte we will send (SND.NXT);
    # local_ack = next byte we expect from the peer (RCV.NXT)
    local_seq: int = 0
    local_ack: int = 0
    remote_seq: int = 0
    remote_ack: int = 0
    initial_seq: int = 0  # our ISN, kept for relative-sequence debugging

    # Window management (bytes); window_scale is the RFC 1323 shift count
    local_window: int = 65535
    remote_window: int = 65535
    window_scale: int = 0

    # Buffers: outbound payloads, in-order received data, and data that
    # arrived ahead of local_ack (keyed by sequence number)
    send_buffer: deque = field(default_factory=deque)
    recv_buffer: deque = field(default_factory=deque)
    out_of_order_buffer: Dict[int, bytes] = field(default_factory=dict)

    # Retransmission state (RFC 6298 estimators); unacked_segments is
    # keyed by each segment's starting sequence number
    unacked_segments: Dict[int, TCPSegment] = field(default_factory=dict)
    retransmit_timer: Optional[float] = None
    rto: float = 1.0  # Retransmission timeout (seconds)
    srtt: float = 0.0  # Smoothed round-trip time
    rttvar: float = 0.0  # Round-trip time variation

    # Congestion control (cwnd counted in MSS units, see effective_window)
    cwnd: int = 1  # Congestion window (in MSS units)
    ssthresh: int = 65535  # Slow start threshold
    mss: int = 1460  # Maximum segment size

    # Timers
    last_activity: float = field(default_factory=time.time)
    time_wait_start: Optional[float] = None  # entry time into TIME_WAIT

    # Callbacks fired by the engine; both optional
    on_data_received: Optional[Callable[[bytes], None]] = None
    on_connection_closed: Optional[Callable[[], None]] = None

    @property
    def connection_id(self) -> str:
        """Unique 'lip:lport-rip:rport' identifier for this connection."""
        return f"{self.local_ip}:{self.local_port}-{self.remote_ip}:{self.remote_port}"

    @property
    def is_established(self) -> bool:
        """True once the three-way handshake has completed."""
        return self.state == TCPState.ESTABLISHED

    @property
    def can_send_data(self) -> bool:
        """True while we may still transmit (peer may already have FINed)."""
        return self.state in [TCPState.ESTABLISHED, TCPState.CLOSE_WAIT]

    @property
    def effective_window(self) -> int:
        """Usable send window in bytes: min(peer's advertised window, cwnd*mss)."""
        return min(self.remote_window, self.cwnd * self.mss)
|
| 133 |
+
|
| 134 |
+
|
| 135 |
+
class TCPEngine:
|
| 136 |
+
"""TCP state machine implementation"""
|
| 137 |
+
|
| 138 |
+
def __init__(self, config: Dict):
|
| 139 |
+
self.config = config
|
| 140 |
+
self.connections: Dict[str, TCPConnection] = {}
|
| 141 |
+
self.listening_ports: Dict[int, Callable] = {} # port -> accept callback
|
| 142 |
+
self.lock = threading.Lock()
|
| 143 |
+
self.running = False
|
| 144 |
+
self.timer_thread = None
|
| 145 |
+
|
| 146 |
+
# Default configuration
|
| 147 |
+
self.default_mss = config.get('mss', 1460)
|
| 148 |
+
self.default_window = config.get('initial_window', 65535)
|
| 149 |
+
self.max_retries = config.get('max_retries', 3)
|
| 150 |
+
self.connection_timeout = config.get('timeout', 300)
|
| 151 |
+
self.time_wait_timeout = config.get('time_wait_timeout', 120)
|
| 152 |
+
|
| 153 |
+
def _generate_isn(self) -> int:
|
| 154 |
+
"""Generate Initial Sequence Number"""
|
| 155 |
+
return random.randint(0, 0xFFFFFFFF)
|
| 156 |
+
|
| 157 |
+
def _get_connection_key(self, local_ip: str, local_port: int, remote_ip: str, remote_port: int) -> str:
|
| 158 |
+
"""Get connection key"""
|
| 159 |
+
return f"{local_ip}:{local_port}-{remote_ip}:{remote_port}"
|
| 160 |
+
|
| 161 |
+
def _create_tcp_segment(self, conn: TCPConnection, flags: int, data: bytes = b'') -> TCPSegment:
|
| 162 |
+
"""Create TCP segment"""
|
| 163 |
+
segment = TCPSegment(
|
| 164 |
+
seq_num=conn.local_seq,
|
| 165 |
+
ack_num=conn.local_ack,
|
| 166 |
+
flags=flags,
|
| 167 |
+
window=conn.local_window,
|
| 168 |
+
data=data
|
| 169 |
+
)
|
| 170 |
+
return segment
|
| 171 |
+
|
| 172 |
+
    def _build_tcp_packet(self, conn: TCPConnection, segment: TCPSegment) -> bytes:
        """Serialize *segment* into a complete IPv4+TCP packet for *conn*.

        Lengths and checksums are assumed to be filled in by
        IPParser.build_packet — TODO confirm against ip_parser.
        """
        # Create IP header
        ip_header = IPv4Header(
            protocol=6,  # TCP
            source_ip=conn.local_ip,
            dest_ip=conn.remote_ip,
            ttl=64
        )

        # Create TCP header
        tcp_header = TCPHeader(
            source_port=conn.local_port,
            dest_port=conn.remote_port,
            seq_num=segment.seq_num,
            ack_num=segment.ack_num,
            flags=segment.flags,
            window_size=segment.window
        )

        # Build packet
        return IPParser.build_packet(ip_header, tcp_header, segment.data)
|
| 194 |
+
|
| 195 |
+
def _update_rto(self, conn: TCPConnection, rtt: float):
|
| 196 |
+
"""Update retransmission timeout using RFC 6298"""
|
| 197 |
+
if conn.srtt == 0:
|
| 198 |
+
# First RTT measurement
|
| 199 |
+
conn.srtt = rtt
|
| 200 |
+
conn.rttvar = rtt / 2
|
| 201 |
+
else:
|
| 202 |
+
# Subsequent measurements
|
| 203 |
+
alpha = 0.125
|
| 204 |
+
beta = 0.25
|
| 205 |
+
conn.rttvar = (1 - beta) * conn.rttvar + beta * abs(conn.srtt - rtt)
|
| 206 |
+
conn.srtt = (1 - alpha) * conn.srtt + alpha * rtt
|
| 207 |
+
|
| 208 |
+
# Calculate RTO
|
| 209 |
+
conn.rto = max(1.0, conn.srtt + 4 * conn.rttvar)
|
| 210 |
+
conn.rto = min(conn.rto, 60.0) # Cap at 60 seconds
|
| 211 |
+
|
| 212 |
+
def _update_congestion_window(self, conn: TCPConnection, acked_bytes: int):
|
| 213 |
+
"""Update congestion window (simplified congestion control)"""
|
| 214 |
+
if conn.cwnd < conn.ssthresh:
|
| 215 |
+
# Slow start
|
| 216 |
+
conn.cwnd += 1
|
| 217 |
+
else:
|
| 218 |
+
# Congestion avoidance
|
| 219 |
+
conn.cwnd += max(1, conn.mss * conn.mss // conn.cwnd)
|
| 220 |
+
|
| 221 |
+
    def _handle_retransmission(self, conn: TCPConnection):
        """Retransmit timed-out unacked segments with exponential backoff;
        reset the connection once any segment exceeds max_retries."""
        current_time = time.time()

        # Find segments that need retransmission (collect first so we don't
        # mutate conn state while iterating the dict).
        to_retransmit = []
        for seq_num, segment in conn.unacked_segments.items():
            if current_time - segment.timestamp > conn.rto:
                if segment.retransmit_count < self.max_retries:
                    to_retransmit.append(segment)
                else:
                    # Max retries exceeded, close connection
                    self._close_connection(conn, reset=True)
                    return

        # Retransmit segments
        for segment in to_retransmit:
            segment.retransmit_count += 1
            segment.timestamp = current_time

            # Exponential backoff (RFC 6298 §5.5), capped at 60 seconds.
            conn.rto = min(conn.rto * 2, 60.0)

            # Loss signal: halve ssthresh and fall back to slow start
            # (cwnd/ssthresh are in MSS units).
            conn.ssthresh = max(conn.cwnd // 2, 2)
            conn.cwnd = 1

            # Send retransmitted segment
            packet = self._build_tcp_packet(conn, segment)
            self._send_packet(packet)
|
| 251 |
+
|
| 252 |
+
    def _send_packet(self, packet: bytes):
        """Transmit a raw IP packet.

        Deliberate stub: the engine is transport-agnostic, and every
        outbound segment funnels through here.  The integration layer
        (packet bridge) is expected to supply the real egress path.
        """
        # This will be connected to the packet bridge
        pass
|
| 256 |
+
|
| 257 |
+
    def _close_connection(self, conn: TCPConnection, reset: bool = False):
        """Close *conn* abortively (RST) or gracefully (FIN).

        reset=True sends a RST and drops straight to CLOSED; otherwise an
        ESTABLISHED connection sends FIN and enters FIN_WAIT_1 (the rest
        of the close handshake is driven by _process_segment).  Once in
        CLOSED, the user callback fires and the connection is untracked.
        """
        if reset:
            # Send RST
            segment = self._create_tcp_segment(conn, 0x04)  # RST flag
            packet = self._build_tcp_packet(conn, segment)
            self._send_packet(packet)
            conn.state = TCPState.CLOSED
        else:
            # Normal close
            if conn.state == TCPState.ESTABLISHED:
                # Send FIN
                # NOTE(review): the FIN is not added to unacked_segments,
                # so a lost FIN is never retransmitted — confirm intended.
                segment = self._create_tcp_segment(conn, 0x01)  # FIN flag
                packet = self._build_tcp_packet(conn, segment)
                self._send_packet(packet)
                conn.local_seq += 1  # FIN consumes one sequence number
                conn.state = TCPState.FIN_WAIT_1

        # Cleanup if closed
        if conn.state == TCPState.CLOSED:
            if conn.on_connection_closed:
                conn.on_connection_closed()

            with self.lock:
                if conn.connection_id in self.connections:
                    del self.connections[conn.connection_id]
|
| 283 |
+
|
| 284 |
+
def listen(self, port: int, accept_callback: Callable):
|
| 285 |
+
"""Listen on port for incoming connections"""
|
| 286 |
+
with self.lock:
|
| 287 |
+
self.listening_ports[port] = accept_callback
|
| 288 |
+
|
| 289 |
+
    def connect(self, local_ip: str, local_port: int, remote_ip: str, remote_port: int) -> Optional[TCPConnection]:
        """Active open: create the connection, send SYN, and return it in
        SYN_SENT state.  The handshake completes when the peer's SYN-ACK
        is handled by _process_segment/_handle_syn_sent."""
        conn_key = self._get_connection_key(local_ip, local_port, remote_ip, remote_port)

        # Create connection
        conn = TCPConnection(
            local_ip=local_ip,
            local_port=local_port,
            remote_ip=remote_ip,
            remote_port=remote_port,
            state=TCPState.SYN_SENT,
            local_seq=self._generate_isn(),
            mss=self.default_mss,
            local_window=self.default_window
        )
        conn.initial_seq = conn.local_seq

        with self.lock:
            self.connections[conn_key] = conn

        # Send SYN
        segment = self._create_tcp_segment(conn, 0x02)  # SYN flag
        packet = self._build_tcp_packet(conn, segment)
        self._send_packet(packet)

        # Track unacked segment (keyed by the SYN's sequence number) so the
        # SYN can be retransmitted until the SYN-ACK arrives.
        conn.unacked_segments[conn.local_seq] = segment
        conn.local_seq += 1  # SYN consumes one sequence number
        conn.retransmit_timer = time.time()

        return conn
|
| 320 |
+
|
| 321 |
+
def send_data(self, conn: TCPConnection, data: bytes) -> bool:
|
| 322 |
+
"""Send data on established connection"""
|
| 323 |
+
if not conn.can_send_data:
|
| 324 |
+
return False
|
| 325 |
+
|
| 326 |
+
# Add to send buffer
|
| 327 |
+
conn.send_buffer.append(data)
|
| 328 |
+
|
| 329 |
+
# Try to send immediately
|
| 330 |
+
self._try_send_data(conn)
|
| 331 |
+
|
| 332 |
+
return True
|
| 333 |
+
|
| 334 |
+
def _try_send_data(self, conn: TCPConnection):
|
| 335 |
+
"""Try to send buffered data"""
|
| 336 |
+
while conn.send_buffer and len(conn.unacked_segments) * conn.mss < conn.effective_window:
|
| 337 |
+
data = conn.send_buffer.popleft()
|
| 338 |
+
|
| 339 |
+
# Split data if larger than MSS
|
| 340 |
+
while data:
|
| 341 |
+
chunk = data[:conn.mss]
|
| 342 |
+
data = data[conn.mss:]
|
| 343 |
+
|
| 344 |
+
# Create and send segment
|
| 345 |
+
segment = self._create_tcp_segment(conn, 0x18, chunk) # PSH+ACK flags
|
| 346 |
+
packet = self._build_tcp_packet(conn, segment)
|
| 347 |
+
self._send_packet(packet)
|
| 348 |
+
|
| 349 |
+
# Track unacked segment
|
| 350 |
+
conn.unacked_segments[conn.local_seq] = segment
|
| 351 |
+
conn.local_seq += len(chunk)
|
| 352 |
+
|
| 353 |
+
if not data:
|
| 354 |
+
break
|
| 355 |
+
|
| 356 |
+
    def process_packet(self, packet_data: bytes) -> bool:
        """Entry point for raw inbound IP packets.

        Parses the packet, routes it to an existing connection (the
        connection key is built dest->source, since the packet's
        destination is *our* local endpoint), performs a passive open for
        SYNs to listening ports, and RSTs anything unmatched.

        Returns True if the segment was handled, False otherwise.
        """
        try:
            # Parse packet
            parsed = IPParser.parse_packet(packet_data)
            if not isinstance(parsed.transport_header, TCPHeader):
                return False  # not TCP — ignore

            ip_header = parsed.ip_header
            tcp_header = parsed.transport_header
            payload = parsed.payload

            # Find or create connection
            conn_key = self._get_connection_key(
                ip_header.dest_ip, tcp_header.dest_port,
                ip_header.source_ip, tcp_header.source_port
            )

            with self.lock:
                conn = self.connections.get(conn_key)

                # Handle new connection (SYN without ACK to a listening port)
                if not conn and tcp_header.syn and not tcp_header.ack:
                    if tcp_header.dest_port in self.listening_ports:
                        conn = self._handle_new_connection(ip_header, tcp_header)
                        if conn:
                            self.connections[conn_key] = conn

            if not conn:
                # Send RST for unknown connection
                self._send_rst(ip_header, tcp_header)
                return False

            # Process segment
            return self._process_segment(conn, tcp_header, payload)

        except Exception as e:
            print(f"Error processing TCP packet: {e}")
            return False
|
| 395 |
+
|
| 396 |
+
    def _handle_new_connection(self, ip_header: IPv4Header, tcp_header: TCPHeader) -> Optional[TCPConnection]:
        """Passive open: answer a SYN to a listening port with SYN-ACK.

        Returns the new connection in SYN_RECEIVED state, or None when
        nobody listens on the destination port.  The accept callback is
        invoked immediately, before the handshake completes.
        """
        accept_callback = self.listening_ports.get(tcp_header.dest_port)
        if not accept_callback:
            return None

        # Create connection; ACK the peer's SYN (their ISN + 1).
        conn = TCPConnection(
            local_ip=ip_header.dest_ip,
            local_port=tcp_header.dest_port,
            remote_ip=ip_header.source_ip,
            remote_port=tcp_header.source_port,
            state=TCPState.SYN_RECEIVED,
            local_seq=self._generate_isn(),
            remote_seq=tcp_header.seq_num,
            local_ack=tcp_header.seq_num + 1,
            mss=self.default_mss,
            local_window=self.default_window
        )
        conn.initial_seq = conn.local_seq

        # Send SYN-ACK
        segment = self._create_tcp_segment(conn, 0x12)  # SYN+ACK flags
        packet = self._build_tcp_packet(conn, segment)
        self._send_packet(packet)

        # Track unacked segment so the SYN-ACK can be retransmitted.
        conn.unacked_segments[conn.local_seq] = segment
        conn.local_seq += 1  # SYN consumes one sequence number
        conn.retransmit_timer = time.time()

        # Call accept callback
        accept_callback(conn)

        return conn
|
| 431 |
+
|
| 432 |
+
def _process_segment(self, conn: TCPConnection, tcp_header: TCPHeader, payload: bytes) -> bool:
|
| 433 |
+
"""Process TCP segment based on connection state"""
|
| 434 |
+
conn.last_activity = time.time()
|
| 435 |
+
|
| 436 |
+
# Handle RST
|
| 437 |
+
if tcp_header.rst:
|
| 438 |
+
conn.state = TCPState.CLOSED
|
| 439 |
+
self._close_connection(conn)
|
| 440 |
+
return True
|
| 441 |
+
|
| 442 |
+
# State machine
|
| 443 |
+
if conn.state == TCPState.SYN_SENT:
|
| 444 |
+
return self._handle_syn_sent(conn, tcp_header, payload)
|
| 445 |
+
elif conn.state == TCPState.SYN_RECEIVED:
|
| 446 |
+
return self._handle_syn_received(conn, tcp_header, payload)
|
| 447 |
+
elif conn.state == TCPState.ESTABLISHED:
|
| 448 |
+
return self._handle_established(conn, tcp_header, payload)
|
| 449 |
+
elif conn.state == TCPState.FIN_WAIT_1:
|
| 450 |
+
return self._handle_fin_wait_1(conn, tcp_header, payload)
|
| 451 |
+
elif conn.state == TCPState.FIN_WAIT_2:
|
| 452 |
+
return self._handle_fin_wait_2(conn, tcp_header, payload)
|
| 453 |
+
elif conn.state == TCPState.CLOSE_WAIT:
|
| 454 |
+
return self._handle_close_wait(conn, tcp_header, payload)
|
| 455 |
+
elif conn.state == TCPState.CLOSING:
|
| 456 |
+
return self._handle_closing(conn, tcp_header, payload)
|
| 457 |
+
elif conn.state == TCPState.LAST_ACK:
|
| 458 |
+
return self._handle_last_ack(conn, tcp_header, payload)
|
| 459 |
+
elif conn.state == TCPState.TIME_WAIT:
|
| 460 |
+
return self._handle_time_wait(conn, tcp_header, payload)
|
| 461 |
+
|
| 462 |
+
return False
|
| 463 |
+
|
| 464 |
+
def _handle_syn_sent(self, conn: TCPConnection, tcp_header: TCPHeader, payload: bytes) -> bool:
|
| 465 |
+
"""Handle segment in SYN_SENT state"""
|
| 466 |
+
if tcp_header.syn and tcp_header.ack:
|
| 467 |
+
# SYN-ACK received
|
| 468 |
+
if tcp_header.ack_num == conn.local_seq:
|
| 469 |
+
conn.remote_seq = tcp_header.seq_num
|
| 470 |
+
conn.local_ack = tcp_header.seq_num + 1
|
| 471 |
+
conn.remote_window = tcp_header.window_size
|
| 472 |
+
|
| 473 |
+
# Remove SYN from unacked segments
|
| 474 |
+
if conn.local_seq - 1 in conn.unacked_segments:
|
| 475 |
+
del conn.unacked_segments[conn.local_seq - 1]
|
| 476 |
+
|
| 477 |
+
# Send ACK
|
| 478 |
+
segment = self._create_tcp_segment(conn, 0x10) # ACK flag
|
| 479 |
+
packet = self._build_tcp_packet(conn, segment)
|
| 480 |
+
self._send_packet(packet)
|
| 481 |
+
|
| 482 |
+
conn.state = TCPState.ESTABLISHED
|
| 483 |
+
return True
|
| 484 |
+
|
| 485 |
+
return False
|
| 486 |
+
|
| 487 |
+
def _handle_syn_received(self, conn: TCPConnection, tcp_header: TCPHeader, payload: bytes) -> bool:
|
| 488 |
+
"""Handle segment in SYN_RECEIVED state"""
|
| 489 |
+
if tcp_header.ack and tcp_header.ack_num == conn.local_seq:
|
| 490 |
+
# ACK for our SYN-ACK
|
| 491 |
+
conn.remote_window = tcp_header.window_size
|
| 492 |
+
|
| 493 |
+
# Remove SYN-ACK from unacked segments
|
| 494 |
+
if conn.local_seq - 1 in conn.unacked_segments:
|
| 495 |
+
del conn.unacked_segments[conn.local_seq - 1]
|
| 496 |
+
|
| 497 |
+
conn.state = TCPState.ESTABLISHED
|
| 498 |
+
return True
|
| 499 |
+
|
| 500 |
+
return False
|
| 501 |
+
|
| 502 |
+
    def _handle_established(self, conn: TCPConnection, tcp_header: TCPHeader, payload: bytes) -> bool:
        """Handle segment in ESTABLISHED state.

        Processes, in order: the segment's ACK field, any in-order payload
        (delivered to the application and acknowledged), and a FIN flag
        (acknowledged, transitioning to CLOSE_WAIT).  Always returns True.
        """
        # Handle ACK
        if tcp_header.ack:
            self._process_ack(conn, tcp_header.ack_num)

        # Handle data.  Only exactly in-order data (seq == local_ack) is
        # accepted; out-of-order segments are silently dropped with no
        # duplicate ACK, so recovery relies on peer retransmission.
        if payload and tcp_header.seq_num == conn.local_ack:
            conn.local_ack += len(payload)

            # Deliver data to the application callback, if registered.
            if conn.on_data_received:
                conn.on_data_received(payload)

            # Send ACK
            segment = self._create_tcp_segment(conn, 0x10)  # ACK flag
            packet = self._build_tcp_packet(conn, segment)
            self._send_packet(packet)

        # Handle FIN (peer-initiated close).  The FIN consumes one sequence
        # number; we ACK it and enter CLOSE_WAIT, where the application is
        # expected to close its side.
        # NOTE(review): the FIN's seq number is not validated against
        # local_ack here — confirm out-of-order FINs are acceptable.
        if tcp_header.fin:
            conn.local_ack += 1

            # Send ACK
            segment = self._create_tcp_segment(conn, 0x10)  # ACK flag
            packet = self._build_tcp_packet(conn, segment)
            self._send_packet(packet)

            conn.state = TCPState.CLOSE_WAIT

        return True
| 533 |
+
|
| 534 |
+
    def _handle_fin_wait_1(self, conn: TCPConnection, tcp_header: TCPHeader, payload: bytes) -> bool:
        """Handle segment in FIN_WAIT_1 state.

        We have sent a FIN and are waiting for it to be ACKed.  An ACK that
        clears all unacked segments moves us to FIN_WAIT_2; a FIN from the
        peer is acknowledged and, depending on whether our own FIN was
        already ACKed in this same segment, moves us to TIME_WAIT
        (simultaneous ACK+FIN) or CLOSING (simultaneous close).
        """
        if tcp_header.ack:
            self._process_ack(conn, tcp_header.ack_num)
            if not conn.unacked_segments:  # Our FIN was ACKed
                conn.state = TCPState.FIN_WAIT_2

        if tcp_header.fin:
            # The peer's FIN consumes one sequence number.
            conn.local_ack += 1

            # Send ACK
            segment = self._create_tcp_segment(conn, 0x10)  # ACK flag
            packet = self._build_tcp_packet(conn, segment)
            self._send_packet(packet)

            # Ordering matters: the ACK branch above may already have moved
            # us to FIN_WAIT_2, in which case this FIN completes the close.
            if conn.state == TCPState.FIN_WAIT_2:
                conn.state = TCPState.TIME_WAIT
                conn.time_wait_start = time.time()
            else:
                conn.state = TCPState.CLOSING

        return True
| 556 |
+
|
| 557 |
+
def _handle_fin_wait_2(self, conn: TCPConnection, tcp_header: TCPHeader, payload: bytes) -> bool:
|
| 558 |
+
"""Handle segment in FIN_WAIT_2 state"""
|
| 559 |
+
if tcp_header.fin:
|
| 560 |
+
conn.local_ack += 1
|
| 561 |
+
|
| 562 |
+
# Send ACK
|
| 563 |
+
segment = self._create_tcp_segment(conn, 0x10) # ACK flag
|
| 564 |
+
packet = self._build_tcp_packet(conn, segment)
|
| 565 |
+
self._send_packet(packet)
|
| 566 |
+
|
| 567 |
+
conn.state = TCPState.TIME_WAIT
|
| 568 |
+
conn.time_wait_start = time.time()
|
| 569 |
+
|
| 570 |
+
return True
|
| 571 |
+
|
| 572 |
+
def _handle_close_wait(self, conn: TCPConnection, tcp_header: TCPHeader, payload: bytes) -> bool:
|
| 573 |
+
"""Handle segment in CLOSE_WAIT state"""
|
| 574 |
+
# Application should close the connection
|
| 575 |
+
return True
|
| 576 |
+
|
| 577 |
+
def _handle_closing(self, conn: TCPConnection, tcp_header: TCPHeader, payload: bytes) -> bool:
|
| 578 |
+
"""Handle segment in CLOSING state"""
|
| 579 |
+
if tcp_header.ack:
|
| 580 |
+
self._process_ack(conn, tcp_header.ack_num)
|
| 581 |
+
if not conn.unacked_segments: # Our FIN was ACKed
|
| 582 |
+
conn.state = TCPState.TIME_WAIT
|
| 583 |
+
conn.time_wait_start = time.time()
|
| 584 |
+
|
| 585 |
+
return True
|
| 586 |
+
|
| 587 |
+
def _handle_last_ack(self, conn: TCPConnection, tcp_header: TCPHeader, payload: bytes) -> bool:
|
| 588 |
+
"""Handle segment in LAST_ACK state"""
|
| 589 |
+
if tcp_header.ack:
|
| 590 |
+
self._process_ack(conn, tcp_header.ack_num)
|
| 591 |
+
if not conn.unacked_segments: # Our FIN was ACKed
|
| 592 |
+
conn.state = TCPState.CLOSED
|
| 593 |
+
self._close_connection(conn)
|
| 594 |
+
|
| 595 |
+
return True
|
| 596 |
+
|
| 597 |
+
def _handle_time_wait(self, conn: TCPConnection, tcp_header: TCPHeader, payload: bytes) -> bool:
|
| 598 |
+
"""Handle segment in TIME_WAIT state"""
|
| 599 |
+
# Just acknowledge any segments
|
| 600 |
+
if tcp_header.seq_num == conn.local_ack:
|
| 601 |
+
segment = self._create_tcp_segment(conn, 0x10) # ACK flag
|
| 602 |
+
packet = self._build_tcp_packet(conn, segment)
|
| 603 |
+
self._send_packet(packet)
|
| 604 |
+
|
| 605 |
+
return True
|
| 606 |
+
|
| 607 |
+
    def _process_ack(self, conn: TCPConnection, ack_num: int):
        """Process ACK and remove acknowledged segments.

        Discards every tracked segment whose sequence number precedes
        ack_num, feeds the oldest acked segment's RTT sample into the RTO
        estimator, grows the congestion window by the acked byte count,
        and finally tries to transmit any newly allowed data.
        """
        acked_segments = []
        acked_bytes = 0

        # NOTE(review): `seq_num < ack_num` does not handle 32-bit sequence
        # number wraparound — confirm sequence numbers are unwrapped ints.
        # list() snapshot allows deletion while iterating.
        for seq_num, segment in list(conn.unacked_segments.items()):
            if seq_num < ack_num:
                acked_segments.append((seq_num, segment))
                acked_bytes += segment.data_length
                del conn.unacked_segments[seq_num]

        # Update RTT and congestion window
        if acked_segments:
            # Use first acked segment for RTT calculation
            rtt = time.time() - acked_segments[0][1].timestamp
            self._update_rto(conn, rtt)
            self._update_congestion_window(conn, acked_bytes)

        # Try to send more data now that window space may have opened.
        self._try_send_data(conn)
| 627 |
+
|
| 628 |
+
def _send_rst(self, ip_header: IPv4Header, tcp_header: TCPHeader):
|
| 629 |
+
"""Send RST for unknown connection"""
|
| 630 |
+
# Create RST response
|
| 631 |
+
rst_ip = IPv4Header(
|
| 632 |
+
protocol=6,
|
| 633 |
+
source_ip=ip_header.dest_ip,
|
| 634 |
+
dest_ip=ip_header.source_ip,
|
| 635 |
+
ttl=64
|
| 636 |
+
)
|
| 637 |
+
|
| 638 |
+
rst_tcp = TCPHeader(
|
| 639 |
+
source_port=tcp_header.dest_port,
|
| 640 |
+
dest_port=tcp_header.source_port,
|
| 641 |
+
seq_num=tcp_header.ack_num if tcp_header.ack else 0,
|
| 642 |
+
ack_num=tcp_header.seq_num + 1 if tcp_header.syn else tcp_header.seq_num,
|
| 643 |
+
flags=0x14 if tcp_header.ack else 0x04 # RST+ACK or RST
|
| 644 |
+
)
|
| 645 |
+
|
| 646 |
+
packet = IPParser.build_packet(rst_ip, rst_tcp)
|
| 647 |
+
self._send_packet(packet)
|
| 648 |
+
|
| 649 |
+
def _timer_loop(self):
|
| 650 |
+
"""Timer loop for handling timeouts"""
|
| 651 |
+
while self.running:
|
| 652 |
+
current_time = time.time()
|
| 653 |
+
|
| 654 |
+
with self.lock:
|
| 655 |
+
connections_to_check = list(self.connections.values())
|
| 656 |
+
|
| 657 |
+
for conn in connections_to_check:
|
| 658 |
+
# Handle retransmissions
|
| 659 |
+
if conn.unacked_segments:
|
| 660 |
+
self._handle_retransmission(conn)
|
| 661 |
+
|
| 662 |
+
# Handle connection timeout
|
| 663 |
+
if current_time - conn.last_activity > self.connection_timeout:
|
| 664 |
+
self._close_connection(conn, reset=True)
|
| 665 |
+
|
| 666 |
+
# Handle TIME_WAIT timeout
|
| 667 |
+
if (conn.state == TCPState.TIME_WAIT and
|
| 668 |
+
conn.time_wait_start and
|
| 669 |
+
current_time - conn.time_wait_start > self.time_wait_timeout):
|
| 670 |
+
conn.state = TCPState.CLOSED
|
| 671 |
+
self._close_connection(conn)
|
| 672 |
+
|
| 673 |
+
time.sleep(1) # Check every second
|
| 674 |
+
|
| 675 |
+
def start(self):
|
| 676 |
+
"""Start TCP engine"""
|
| 677 |
+
self.running = True
|
| 678 |
+
self.timer_thread = threading.Thread(target=self._timer_loop, daemon=True)
|
| 679 |
+
self.timer_thread.start()
|
| 680 |
+
print("TCP engine started")
|
| 681 |
+
|
| 682 |
+
def stop(self):
|
| 683 |
+
"""Stop TCP engine"""
|
| 684 |
+
self.running = False
|
| 685 |
+
if self.timer_thread:
|
| 686 |
+
self.timer_thread.join()
|
| 687 |
+
|
| 688 |
+
# Close all connections
|
| 689 |
+
with self.lock:
|
| 690 |
+
for conn in list(self.connections.values()):
|
| 691 |
+
self._close_connection(conn, reset=True)
|
| 692 |
+
|
| 693 |
+
print("TCP engine stopped")
|
| 694 |
+
|
| 695 |
+
def get_connections(self) -> Dict[str, Dict]:
|
| 696 |
+
"""Get current connections"""
|
| 697 |
+
with self.lock:
|
| 698 |
+
return {
|
| 699 |
+
conn_id: {
|
| 700 |
+
'local_ip': conn.local_ip,
|
| 701 |
+
'local_port': conn.local_port,
|
| 702 |
+
'remote_ip': conn.remote_ip,
|
| 703 |
+
'remote_port': conn.remote_port,
|
| 704 |
+
'state': conn.state.value,
|
| 705 |
+
'local_seq': conn.local_seq,
|
| 706 |
+
'local_ack': conn.local_ack,
|
| 707 |
+
'remote_seq': conn.remote_seq,
|
| 708 |
+
'remote_ack': conn.remote_ack,
|
| 709 |
+
'window_size': conn.local_window,
|
| 710 |
+
'cwnd': conn.cwnd,
|
| 711 |
+
'unacked_segments': len(conn.unacked_segments),
|
| 712 |
+
'last_activity': conn.last_activity
|
| 713 |
+
}
|
| 714 |
+
for conn_id, conn in self.connections.items()
|
| 715 |
+
}
|
| 716 |
+
|
core/virtual_router.py
ADDED
|
@@ -0,0 +1,565 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""
|
| 2 |
+
Virtual Router Module
|
| 3 |
+
|
| 4 |
+
Implements packet routing between virtual clients and external internet:
|
| 5 |
+
- Maintain routing table for virtual network
|
| 6 |
+
- Forward packets based on destination IP
|
| 7 |
+
- Handle internal vs external routing decisions
|
| 8 |
+
- Support static route configuration
|
| 9 |
+
"""
|
| 10 |
+
|
| 11 |
+
import ipaddress
|
| 12 |
+
import time
|
| 13 |
+
import threading
|
| 14 |
+
from typing import Dict, List, Optional, Tuple, Set
|
| 15 |
+
from dataclasses import dataclass
|
| 16 |
+
from enum import Enum
|
| 17 |
+
|
| 18 |
+
from .ip_parser import ParsedPacket, IPv4Header
|
| 19 |
+
|
| 20 |
+
|
| 21 |
+
class RouteType(Enum):
    """Classification of how a routing-table entry was created."""
    DIRECT = "DIRECT"    # Directly connected network (derived from an interface)
    STATIC = "STATIC"    # Administratively configured static route
    DEFAULT = "DEFAULT"  # Default (catch-all) route
| 26 |
+
|
| 27 |
+
@dataclass
class RouteEntry:
    """Represents a routing table entry.

    Usage statistics (``last_used``, ``use_count``) are updated via
    :meth:`record_use` when the router selects this route.
    """
    destination: str            # Network in CIDR notation (e.g., "10.0.0.0/24")
    gateway: Optional[str]      # Next hop IP (None for direct routes)
    interface: str              # Interface name or identifier
    metric: int                 # Route metric (lower is preferred)
    route_type: RouteType
    # Improvement: default of 0 means "stamp at construction time" (see
    # __post_init__); previously callers were forced to pass it explicitly.
    created_time: float = 0.0
    last_used: Optional[float] = None
    use_count: int = 0

    def __post_init__(self):
        # A zero/unset timestamp is replaced with "now".
        if self.created_time == 0:
            self.created_time = time.time()

    def record_use(self):
        """Record route usage."""
        self.use_count += 1
        self.last_used = time.time()

    def matches_destination(self, ip: str) -> bool:
        """Check if this route matches the destination IP."""
        try:
            network = ipaddress.ip_network(self.destination, strict=False)
            return ipaddress.ip_address(ip) in network
        except ValueError:
            # Covers malformed destinations and IPs; AddressValueError is
            # a ValueError subclass, so one clause suffices.
            return False

    def to_dict(self) -> Dict:
        """Convert route to dictionary (enum serialized as its value)."""
        return {
            'destination': self.destination,
            'gateway': self.gateway,
            'interface': self.interface,
            'metric': self.metric,
            'route_type': self.route_type.value,
            'created_time': self.created_time,
            'last_used': self.last_used,
            'use_count': self.use_count
        }
| 68 |
+
|
| 69 |
+
|
| 70 |
+
@dataclass
class Interface:
    """Represents a network interface.

    If ``network`` is omitted it is derived from ``ip_address``/``netmask``
    in ``__post_init__`` (falling back to "0.0.0.0/0" on invalid input).
    """
    name: str
    ip_address: str
    netmask: str
    # Improvement: previously annotated as a required `str` even though
    # callers passed None and relied on __post_init__ to compute it.
    network: Optional[str] = None  # Network in CIDR notation
    enabled: bool = True
    mtu: int = 1500
    created_time: float = 0

    def __post_init__(self):
        if self.created_time == 0:
            self.created_time = time.time()

        # Calculate network if not provided
        if not self.network:
            try:
                interface_network = ipaddress.ip_interface(f"{self.ip_address}/{self.netmask}")
                self.network = str(interface_network.network)
            except ValueError:
                # Invalid address/netmask combination — match everything.
                self.network = "0.0.0.0/0"

    def is_local_address(self, ip: str) -> bool:
        """Check if IP address belongs to this interface's network."""
        try:
            network = ipaddress.ip_network(self.network, strict=False)
            return ipaddress.ip_address(ip) in network
        except ValueError:
            return False

    def to_dict(self) -> Dict:
        """Convert interface to dictionary."""
        return {
            'name': self.name,
            'ip_address': self.ip_address,
            'netmask': self.netmask,
            'network': self.network,
            'enabled': self.enabled,
            'mtu': self.mtu,
            'created_time': self.created_time
        }
| 112 |
+
|
| 113 |
+
|
| 114 |
+
class VirtualRouter:
|
| 115 |
+
"""Virtual router implementation"""
|
| 116 |
+
|
| 117 |
+
def __init__(self, config: Dict):
|
| 118 |
+
self.config = config
|
| 119 |
+
self.routing_table: List[RouteEntry] = []
|
| 120 |
+
self.interfaces: Dict[str, Interface] = {}
|
| 121 |
+
self.arp_table: Dict[str, str] = {} # IP -> MAC mapping
|
| 122 |
+
self.lock = threading.Lock()
|
| 123 |
+
|
| 124 |
+
# Router configuration
|
| 125 |
+
self.router_id = config.get('router_id', 'virtual-router-1')
|
| 126 |
+
self.default_gateway = config.get('default_gateway')
|
| 127 |
+
|
| 128 |
+
# Statistics
|
| 129 |
+
self.stats = {
|
| 130 |
+
'packets_routed': 0,
|
| 131 |
+
'packets_dropped': 0,
|
| 132 |
+
'route_lookups': 0,
|
| 133 |
+
'arp_requests': 0,
|
| 134 |
+
'arp_replies': 0,
|
| 135 |
+
'routing_errors': 0
|
| 136 |
+
}
|
| 137 |
+
|
| 138 |
+
# Initialize interfaces and routes
|
| 139 |
+
self._initialize_interfaces()
|
| 140 |
+
self._initialize_routes()
|
| 141 |
+
|
| 142 |
+
def _initialize_interfaces(self):
|
| 143 |
+
"""Initialize network interfaces from configuration"""
|
| 144 |
+
interfaces_config = self.config.get('interfaces', [])
|
| 145 |
+
|
| 146 |
+
for iface_config in interfaces_config:
|
| 147 |
+
interface = Interface(
|
| 148 |
+
name=iface_config['name'],
|
| 149 |
+
ip_address=iface_config['ip_address'],
|
| 150 |
+
netmask=iface_config.get('netmask', '255.255.255.0'),
|
| 151 |
+
network=iface_config.get('network'),
|
| 152 |
+
enabled=iface_config.get('enabled', True),
|
| 153 |
+
mtu=iface_config.get('mtu', 1500)
|
| 154 |
+
)
|
| 155 |
+
|
| 156 |
+
with self.lock:
|
| 157 |
+
self.interfaces[interface.name] = interface
|
| 158 |
+
|
| 159 |
+
# Add direct route for interface network
|
| 160 |
+
self.add_route(
|
| 161 |
+
destination=interface.network,
|
| 162 |
+
gateway=None,
|
| 163 |
+
interface=interface.name,
|
| 164 |
+
metric=0,
|
| 165 |
+
route_type=RouteType.DIRECT
|
| 166 |
+
)
|
| 167 |
+
|
| 168 |
+
def _initialize_routes(self):
|
| 169 |
+
"""Initialize static routes from configuration"""
|
| 170 |
+
routes_config = self.config.get('static_routes', [])
|
| 171 |
+
|
| 172 |
+
for route_config in routes_config:
|
| 173 |
+
self.add_route(
|
| 174 |
+
destination=route_config['destination'],
|
| 175 |
+
gateway=route_config.get('gateway'),
|
| 176 |
+
interface=route_config['interface'],
|
| 177 |
+
metric=route_config.get('metric', 10),
|
| 178 |
+
route_type=RouteType.STATIC
|
| 179 |
+
)
|
| 180 |
+
|
| 181 |
+
# Add default route if configured
|
| 182 |
+
if self.default_gateway:
|
| 183 |
+
# Find interface for default gateway
|
| 184 |
+
default_interface = None
|
| 185 |
+
for interface in self.interfaces.values():
|
| 186 |
+
if interface.is_local_address(self.default_gateway):
|
| 187 |
+
default_interface = interface.name
|
| 188 |
+
break
|
| 189 |
+
|
| 190 |
+
if default_interface:
|
| 191 |
+
self.add_route(
|
| 192 |
+
destination="0.0.0.0/0",
|
| 193 |
+
gateway=self.default_gateway,
|
| 194 |
+
interface=default_interface,
|
| 195 |
+
metric=100,
|
| 196 |
+
route_type=RouteType.DEFAULT
|
| 197 |
+
)
|
| 198 |
+
|
| 199 |
+
def add_interface(self, name: str, ip_address: str, netmask: str = "255.255.255.0",
|
| 200 |
+
network: Optional[str] = None, mtu: int = 1500) -> bool:
|
| 201 |
+
"""Add network interface"""
|
| 202 |
+
with self.lock:
|
| 203 |
+
if name in self.interfaces:
|
| 204 |
+
return False
|
| 205 |
+
|
| 206 |
+
interface = Interface(
|
| 207 |
+
name=name,
|
| 208 |
+
ip_address=ip_address,
|
| 209 |
+
netmask=netmask,
|
| 210 |
+
network=network,
|
| 211 |
+
mtu=mtu
|
| 212 |
+
)
|
| 213 |
+
|
| 214 |
+
self.interfaces[name] = interface
|
| 215 |
+
|
| 216 |
+
# Add direct route for interface network
|
| 217 |
+
self.add_route(
|
| 218 |
+
destination=interface.network,
|
| 219 |
+
gateway=None,
|
| 220 |
+
interface=name,
|
| 221 |
+
metric=0,
|
| 222 |
+
route_type=RouteType.DIRECT
|
| 223 |
+
)
|
| 224 |
+
|
| 225 |
+
return True
|
| 226 |
+
|
| 227 |
+
def remove_interface(self, name: str) -> bool:
|
| 228 |
+
"""Remove network interface"""
|
| 229 |
+
with self.lock:
|
| 230 |
+
if name not in self.interfaces:
|
| 231 |
+
return False
|
| 232 |
+
|
| 233 |
+
# Remove interface
|
| 234 |
+
del self.interfaces[name]
|
| 235 |
+
|
| 236 |
+
# Remove routes associated with this interface
|
| 237 |
+
self.routing_table = [
|
| 238 |
+
route for route in self.routing_table
|
| 239 |
+
if route.interface != name
|
| 240 |
+
]
|
| 241 |
+
|
| 242 |
+
return True
|
| 243 |
+
|
| 244 |
+
def enable_interface(self, name: str) -> bool:
|
| 245 |
+
"""Enable network interface"""
|
| 246 |
+
with self.lock:
|
| 247 |
+
if name in self.interfaces:
|
| 248 |
+
self.interfaces[name].enabled = True
|
| 249 |
+
return True
|
| 250 |
+
return False
|
| 251 |
+
|
| 252 |
+
def disable_interface(self, name: str) -> bool:
|
| 253 |
+
"""Disable network interface"""
|
| 254 |
+
with self.lock:
|
| 255 |
+
if name in self.interfaces:
|
| 256 |
+
self.interfaces[name].enabled = False
|
| 257 |
+
return True
|
| 258 |
+
return False
|
| 259 |
+
|
| 260 |
+
def add_route(self, destination: str, gateway: Optional[str], interface: str,
|
| 261 |
+
metric: int = 10, route_type: RouteType = RouteType.STATIC) -> bool:
|
| 262 |
+
"""Add route to routing table"""
|
| 263 |
+
try:
|
| 264 |
+
# Validate destination network
|
| 265 |
+
ipaddress.ip_network(destination, strict=False)
|
| 266 |
+
|
| 267 |
+
# Validate gateway if provided
|
| 268 |
+
if gateway:
|
| 269 |
+
ipaddress.ip_address(gateway)
|
| 270 |
+
|
| 271 |
+
route = RouteEntry(
|
| 272 |
+
destination=destination,
|
| 273 |
+
gateway=gateway,
|
| 274 |
+
interface=interface,
|
| 275 |
+
metric=metric,
|
| 276 |
+
route_type=route_type,
|
| 277 |
+
created_time=time.time()
|
| 278 |
+
)
|
| 279 |
+
|
| 280 |
+
with self.lock:
|
| 281 |
+
# Check if interface exists
|
| 282 |
+
if interface not in self.interfaces:
|
| 283 |
+
return False
|
| 284 |
+
|
| 285 |
+
# Remove existing route with same destination and interface
|
| 286 |
+
self.routing_table = [
|
| 287 |
+
r for r in self.routing_table
|
| 288 |
+
if not (r.destination == destination and r.interface == interface)
|
| 289 |
+
]
|
| 290 |
+
|
| 291 |
+
# Add new route
|
| 292 |
+
self.routing_table.append(route)
|
| 293 |
+
|
| 294 |
+
# Sort by metric (lower metric = higher priority)
|
| 295 |
+
self.routing_table.sort(key=lambda r: (r.metric, r.created_time))
|
| 296 |
+
|
| 297 |
+
return True
|
| 298 |
+
|
| 299 |
+
except (ipaddress.AddressValueError, ValueError):
|
| 300 |
+
return False
|
| 301 |
+
|
| 302 |
+
def remove_route(self, destination: str, interface: str) -> bool:
|
| 303 |
+
"""Remove route from routing table"""
|
| 304 |
+
with self.lock:
|
| 305 |
+
original_count = len(self.routing_table)
|
| 306 |
+
self.routing_table = [
|
| 307 |
+
route for route in self.routing_table
|
| 308 |
+
if not (route.destination == destination and route.interface == interface)
|
| 309 |
+
]
|
| 310 |
+
return len(self.routing_table) < original_count
|
| 311 |
+
|
| 312 |
+
def lookup_route(self, destination_ip: str) -> Optional[RouteEntry]:
|
| 313 |
+
"""Look up route for destination IP"""
|
| 314 |
+
self.stats['route_lookups'] += 1
|
| 315 |
+
|
| 316 |
+
with self.lock:
|
| 317 |
+
# Find all matching routes
|
| 318 |
+
matching_routes = []
|
| 319 |
+
for route in self.routing_table:
|
| 320 |
+
# Skip disabled interfaces
|
| 321 |
+
interface = self.interfaces.get(route.interface)
|
| 322 |
+
if not interface or not interface.enabled:
|
| 323 |
+
continue
|
| 324 |
+
|
| 325 |
+
if route.matches_destination(destination_ip):
|
| 326 |
+
matching_routes.append(route)
|
| 327 |
+
|
| 328 |
+
if not matching_routes:
|
| 329 |
+
self.stats['routing_errors'] += 1
|
| 330 |
+
return None
|
| 331 |
+
|
| 332 |
+
# Sort by specificity (longest prefix match) and then by metric
|
| 333 |
+
def route_priority(route):
|
| 334 |
+
try:
|
| 335 |
+
network = ipaddress.ip_network(route.destination, strict=False)
|
| 336 |
+
return (-network.prefixlen, route.metric, route.created_time)
|
| 337 |
+
except:
|
| 338 |
+
return (0, route.metric, route.created_time)
|
| 339 |
+
|
| 340 |
+
matching_routes.sort(key=route_priority)
|
| 341 |
+
best_route = matching_routes[0]
|
| 342 |
+
best_route.record_use()
|
| 343 |
+
|
| 344 |
+
return best_route
|
| 345 |
+
|
| 346 |
+
def route_packet(self, packet: ParsedPacket) -> Optional[Tuple[str, str]]:
|
| 347 |
+
"""Route packet and return (next_hop_ip, interface)"""
|
| 348 |
+
self.stats['packets_routed'] += 1
|
| 349 |
+
|
| 350 |
+
destination_ip = packet.ip_header.dest_ip
|
| 351 |
+
|
| 352 |
+
# Look up route
|
| 353 |
+
route = self.lookup_route(destination_ip)
|
| 354 |
+
if not route:
|
| 355 |
+
self.stats['packets_dropped'] += 1
|
| 356 |
+
return None
|
| 357 |
+
|
| 358 |
+
# Determine next hop
|
| 359 |
+
if route.gateway:
|
| 360 |
+
next_hop = route.gateway
|
| 361 |
+
else:
|
| 362 |
+
# Direct route - destination is next hop
|
| 363 |
+
next_hop = destination_ip
|
| 364 |
+
|
| 365 |
+
return (next_hop, route.interface)
|
| 366 |
+
|
| 367 |
+
def is_local_destination(self, ip: str) -> bool:
|
| 368 |
+
"""Check if IP is a local destination (belongs to router interfaces)"""
|
| 369 |
+
with self.lock:
|
| 370 |
+
for interface in self.interfaces.values():
|
| 371 |
+
if interface.ip_address == ip:
|
| 372 |
+
return True
|
| 373 |
+
return False
|
| 374 |
+
|
| 375 |
+
def is_local_network(self, ip: str) -> bool:
|
| 376 |
+
"""Check if IP belongs to any local network"""
|
| 377 |
+
with self.lock:
|
| 378 |
+
for interface in self.interfaces.values():
|
| 379 |
+
if interface.is_local_address(ip):
|
| 380 |
+
return True
|
| 381 |
+
return False
|
| 382 |
+
|
| 383 |
+
def get_interface_for_ip(self, ip: str) -> Optional[Interface]:
|
| 384 |
+
"""Get interface that can reach the given IP"""
|
| 385 |
+
with self.lock:
|
| 386 |
+
for interface in self.interfaces.values():
|
| 387 |
+
if interface.enabled and interface.is_local_address(ip):
|
| 388 |
+
return interface
|
| 389 |
+
return None
|
| 390 |
+
|
| 391 |
+
def add_arp_entry(self, ip: str, mac: str):
|
| 392 |
+
"""Add ARP table entry"""
|
| 393 |
+
with self.lock:
|
| 394 |
+
self.arp_table[ip] = mac
|
| 395 |
+
|
| 396 |
+
def get_arp_entry(self, ip: str) -> Optional[str]:
|
| 397 |
+
"""Get MAC address from ARP table"""
|
| 398 |
+
with self.lock:
|
| 399 |
+
return self.arp_table.get(ip)
|
| 400 |
+
|
| 401 |
+
def remove_arp_entry(self, ip: str) -> bool:
|
| 402 |
+
"""Remove ARP table entry"""
|
| 403 |
+
with self.lock:
|
| 404 |
+
if ip in self.arp_table:
|
| 405 |
+
del self.arp_table[ip]
|
| 406 |
+
return True
|
| 407 |
+
return False
|
| 408 |
+
|
| 409 |
+
def clear_arp_table(self):
|
| 410 |
+
"""Clear ARP table"""
|
| 411 |
+
with self.lock:
|
| 412 |
+
self.arp_table.clear()
|
| 413 |
+
|
| 414 |
+
def get_routing_table(self) -> List[Dict]:
|
| 415 |
+
"""Get routing table"""
|
| 416 |
+
with self.lock:
|
| 417 |
+
return [route.to_dict() for route in self.routing_table]
|
| 418 |
+
|
| 419 |
+
def get_interfaces(self) -> Dict[str, Dict]:
|
| 420 |
+
"""Get network interfaces"""
|
| 421 |
+
with self.lock:
|
| 422 |
+
return {
|
| 423 |
+
name: interface.to_dict()
|
| 424 |
+
for name, interface in self.interfaces.items()
|
| 425 |
+
}
|
| 426 |
+
|
| 427 |
+
def get_arp_table(self) -> Dict[str, str]:
|
| 428 |
+
"""Get ARP table"""
|
| 429 |
+
with self.lock:
|
| 430 |
+
return self.arp_table.copy()
|
| 431 |
+
|
| 432 |
+
def get_stats(self) -> Dict:
|
| 433 |
+
"""Get router statistics"""
|
| 434 |
+
with self.lock:
|
| 435 |
+
stats = self.stats.copy()
|
| 436 |
+
stats['total_routes'] = len(self.routing_table)
|
| 437 |
+
stats['total_interfaces'] = len(self.interfaces)
|
| 438 |
+
stats['enabled_interfaces'] = sum(1 for iface in self.interfaces.values() if iface.enabled)
|
| 439 |
+
stats['arp_entries'] = len(self.arp_table)
|
| 440 |
+
|
| 441 |
+
return stats
|
| 442 |
+
|
| 443 |
+
def reset_stats(self):
|
| 444 |
+
"""Reset router statistics"""
|
| 445 |
+
self.stats = {
|
| 446 |
+
'packets_routed': 0,
|
| 447 |
+
'packets_dropped': 0,
|
| 448 |
+
'route_lookups': 0,
|
| 449 |
+
'arp_requests': 0,
|
| 450 |
+
'arp_replies': 0,
|
| 451 |
+
'routing_errors': 0
|
| 452 |
+
}
|
| 453 |
+
|
| 454 |
+
# Reset route usage statistics
|
| 455 |
+
with self.lock:
|
| 456 |
+
for route in self.routing_table:
|
| 457 |
+
route.use_count = 0
|
| 458 |
+
route.last_used = None
|
| 459 |
+
|
| 460 |
+
def flush_routes(self, route_type: Optional[RouteType] = None):
|
| 461 |
+
"""Flush routes of specified type (or all if None)"""
|
| 462 |
+
with self.lock:
|
| 463 |
+
if route_type:
|
| 464 |
+
self.routing_table = [
|
| 465 |
+
route for route in self.routing_table
|
| 466 |
+
if route.route_type != route_type
|
| 467 |
+
]
|
| 468 |
+
else:
|
| 469 |
+
self.routing_table.clear()
|
| 470 |
+
|
| 471 |
+
def export_config(self) -> Dict:
    """Export router configuration as a JSON-serializable dict.

    Fix: the original iterated ``self.interfaces`` and ``self.routing_table``
    without taking the lock, unlike every other accessor on this class; a
    concurrent mutation could corrupt the snapshot mid-iteration. The whole
    read now runs under the lock.
    """
    with self.lock:
        return {
            'router_id': self.router_id,
            'default_gateway': self.default_gateway,
            'interfaces': [
                {
                    'name': iface.name,
                    'ip_address': iface.ip_address,
                    'netmask': iface.netmask,
                    'network': iface.network,
                    'enabled': iface.enabled,
                    'mtu': iface.mtu
                }
                for iface in self.interfaces.values()
            ],
            # Only STATIC routes are exported; connected/dynamic routes are
            # re-derived from the interface definitions on import.
            'static_routes': [
                {
                    'destination': route.destination,
                    'gateway': route.gateway,
                    'interface': route.interface,
                    'metric': route.metric
                }
                for route in self.routing_table
                if route.route_type == RouteType.STATIC
            ]
        }
def import_config(self, config: Dict):
    """Import a router configuration (the shape produced by export_config).

    Clears all runtime state (interfaces, routes, ARP cache), applies the
    new identity settings, then rebuilds interfaces and routes from the
    merged ``self.config``.
    """
    # Clear existing configuration
    with self.lock:
        self.interfaces.clear()
        self.routing_table.clear()
        self.arp_table.clear()

    # Update router settings
    # NOTE(review): everything below runs OUTSIDE the lock, so a concurrent
    # reader can briefly observe the cleared state. Widening the lock here
    # could deadlock if the _initialize_* helpers take it themselves --
    # confirm their locking before changing this.
    self.router_id = config.get('router_id', self.router_id)
    self.default_gateway = config.get('default_gateway', self.default_gateway)

    # Reinitialize from new config (merge, then rebuild derived state).
    self.config.update(config)
    self._initialize_interfaces()
    self._initialize_routes()
class RouterUtils:
    """Stateless helpers for IP-address and network arithmetic."""

    @staticmethod
    def ip_to_int(ip: str) -> int:
        """Convert a dotted IP address string to its integer value."""
        return int(ipaddress.ip_address(ip))

    @staticmethod
    def int_to_ip(ip_int: int) -> str:
        """Convert an integer back to a dotted IP address string."""
        return str(ipaddress.ip_address(ip_int))

    @staticmethod
    def calculate_network(ip: str, netmask: str) -> str:
        """Return the CIDR network containing ip/netmask; '0.0.0.0/0' on bad input."""
        try:
            return str(ipaddress.ip_interface(f"{ip}/{netmask}").network)
        except (ipaddress.AddressValueError, ValueError):
            return "0.0.0.0/0"

    @staticmethod
    def is_private_ip(ip: str) -> bool:
        """Return True when *ip* parses and is a private-range address."""
        try:
            parsed = ipaddress.ip_address(ip)
        except (ipaddress.AddressValueError, ValueError):
            return False
        return parsed.is_private

    @staticmethod
    def is_multicast_ip(ip: str) -> bool:
        """Return True when *ip* parses and is a multicast address."""
        try:
            parsed = ipaddress.ip_address(ip)
        except (ipaddress.AddressValueError, ValueError):
            return False
        return parsed.is_multicast

    @staticmethod
    def validate_cidr(cidr: str) -> bool:
        """Return True when *cidr* is valid CIDR notation (host bits tolerated)."""
        try:
            ipaddress.ip_network(cidr, strict=False)
        except (ipaddress.AddressValueError, ValueError):
            return False
        return True
database/app.db
ADDED
|
Binary file (16.4 kB). View file
|
|
|
main.py
ADDED
|
@@ -0,0 +1,62 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import os
import sys
# DON'T CHANGE THIS !!! -- makes the project root importable so `src.*` resolves
sys.path.insert(0, os.path.dirname(os.path.dirname(__file__)))

from flask import Flask, send_from_directory
from src.models.user import db
from src.routes.user import user_bp
from src.routes.isp_api import init_engines, isp_api

# Flask app serving both the REST API (under /api) and the static frontend.
app = Flask(__name__, static_folder=os.path.join(os.path.dirname(__file__), 'static'))
app.config['SECRET_KEY'] = 'asdf#FGSgvasgf$5$WGT'

app.register_blueprint(user_bp, url_prefix='/api')
app.register_blueprint(isp_api, url_prefix='/api')

# SQLite database lives alongside this module in ./database/app.db
app.config['SQLALCHEMY_DATABASE_URI'] = f"sqlite:///{os.path.join(os.path.dirname(__file__), 'database', 'app.db')}"
app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False
db.init_app(app)

with app.app_context():
    # Create tables for all registered models on startup (idempotent).
    db.create_all()

# Default configuration for engines
app.config["dhcp"] = {
    "network": "10.0.0.0/24",
    "range_start": "10.0.0.10",
    "range_end": "10.0.0.100",
    "lease_time": 3600,
    "gateway": "10.0.0.1",
    "dns_servers": ["8.8.8.8", "8.8.4.4"]
}

# Initialize engines only once, when the Flask app is not in debug mode's reloader process
if not app.debug or os.environ.get('WERKZEUG_RUN_MAIN') == 'true':
    init_engines(app.config)

@app.route('/')
def serve_root():
    # Delegate the site root to the catch-all handler with an empty path.
    return serve('')

@app.route('/<path:path>')
def serve(path):
    """Serve a static file when it exists, else fall back to index.html (SPA routing)."""
    static_folder_path = app.static_folder
    if static_folder_path is None:
        return "Static folder not configured", 404

    if path != "" and os.path.exists(os.path.join(static_folder_path, path)):
        return send_from_directory(static_folder_path, path)
    else:
        index_path = os.path.join(static_folder_path, 'index.html')
        if os.path.exists(index_path):
            return send_from_directory(static_folder_path, 'index.html')
        else:
            return "index.html not found", 404


if __name__ == '__main__':
    app.run(host='0.0.0.0', port=5000, debug=False)
main_isp.py
ADDED
|
@@ -0,0 +1,273 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""
|
| 2 |
+
Main ISP Application
|
| 3 |
+
|
| 4 |
+
Integrates all core modules and provides the main application entry point
|
| 5 |
+
"""
|
| 6 |
+
|
| 7 |
+
import os
|
| 8 |
+
import sys
|
| 9 |
+
import json
|
| 10 |
+
import threading
|
| 11 |
+
import time
|
| 12 |
+
from flask import Flask
|
| 13 |
+
from flask_cors import CORS
|
| 14 |
+
|
| 15 |
+
# Add project root to path
|
| 16 |
+
sys.path.insert(0, os.path.dirname(os.path.dirname(__file__)))
|
| 17 |
+
|
| 18 |
+
# Import routes and core modules
|
| 19 |
+
from src.routes.isp_api import isp_api, init_engines
|
| 20 |
+
|
| 21 |
+
|
| 22 |
+
def load_config():
    """Load the ISP stack configuration.

    Reads ``config.json`` next to this module and deep-merges it over the
    built-in defaults (file values win). When the file is absent, the
    defaults are written out so operators have a template to edit.

    Returns:
        dict: the merged configuration.
    """
    config_file = os.path.join(os.path.dirname(__file__), 'config.json')

    # Built-in defaults; each top-level key configures one subsystem.
    default_config = {
        "dhcp": {
            "network": "10.0.0.0/24",
            "range_start": "10.0.0.10",
            "range_end": "10.0.0.100",
            "lease_time": 3600,
            "gateway": "10.0.0.1",
            "dns_servers": ["8.8.8.8", "8.8.4.4"]
        },
        "nat": {
            "port_range_start": 10000,
            "port_range_end": 65535,
            "session_timeout": 300,
            "host_ip": "0.0.0.0"
        },
        "firewall": {
            "default_policy": "ACCEPT",
            "log_blocked": True,
            "log_accepted": False,
            "max_log_entries": 10000,
            "rules": [
                {
                    "rule_id": "allow_dhcp",
                    "priority": 1,
                    "action": "ACCEPT",
                    "direction": "BOTH",
                    "dest_port": "67,68",
                    "protocol": "UDP",
                    "description": "Allow DHCP traffic",
                    "enabled": True
                },
                {
                    "rule_id": "allow_dns",
                    "priority": 2,
                    "action": "ACCEPT",
                    "direction": "BOTH",
                    "dest_port": "53",
                    "protocol": "UDP",
                    "description": "Allow DNS traffic",
                    "enabled": True
                }
            ]
        },
        "tcp": {
            "initial_window": 65535,
            "max_retries": 3,
            "timeout": 300,
            "time_wait_timeout": 120,
            "mss": 1460
        },
        "router": {
            "router_id": "virtual-isp-router",
            "default_gateway": "10.0.0.1",
            "interfaces": [
                {
                    "name": "virtual0",
                    "ip_address": "10.0.0.1",
                    "netmask": "255.255.255.0",
                    "enabled": True,
                    "mtu": 1500
                }
            ],
            "static_routes": []
        },
        "socket_translator": {
            "connect_timeout": 10,
            "read_timeout": 30,
            "max_connections": 1000,
            "buffer_size": 8192
        },
        "packet_bridge": {
            "websocket_host": "0.0.0.0",
            "websocket_port": 8765,
            "tcp_host": "0.0.0.0",
            "tcp_port": 8766,
            "max_clients": 100,
            "client_timeout": 300
        },
        "session_tracker": {
            "max_sessions": 10000,
            "session_timeout": 3600,
            "cleanup_interval": 300,
            "metrics_retention": 86400
        },
        "logger": {
            "log_level": "INFO",
            "log_to_file": True,
            "log_file_path": "/tmp/virtual_isp.log",
            "log_file_max_size": 10485760,
            "log_file_backup_count": 5,
            "log_to_console": True,
            "structured_logging": True,
            "max_memory_logs": 10000
        },
        "openvpn": {
            "server_config_path": "/etc/openvpn/server/server.conf",
            "ca_cert_path": "/home/ubuntu/openvpn-ca/pki/ca.crt",
            "server_cert_path": "/home/ubuntu/openvpn-ca/pki/issued/server.crt",
            "server_key_path": "/home/ubuntu/openvpn-ca/pki/private/server.key",
            "dh_path": "/home/ubuntu/openvpn-ca/pki/dh.pem",
            "vpn_network": "10.8.0.0/24",
            "vpn_server_ip": "10.8.0.1",
            "vpn_port": 1194,
            "protocol": "udp",
            "auto_start": False,
            "client_to_client": False,
            "push_routes": [
                "redirect-gateway def1 bypass-dhcp",
                "dhcp-option DNS 8.8.8.8",
                "dhcp-option DNS 8.8.4.4"
            ]
        }
    }

    if os.path.exists(config_file):
        try:
            with open(config_file, 'r') as f:
                file_config = json.load(f)

            # Merge with defaults
            def merge_config(default, override):
                # Recursive dict merge: nested dicts merge, any other value
                # in `override` replaces the default wholesale.
                result = default.copy()
                for key, value in override.items():
                    if key in result and isinstance(result[key], dict) and isinstance(value, dict):
                        result[key] = merge_config(result[key], value)
                    else:
                        result[key] = value
                return result

            return merge_config(default_config, file_config)

        except Exception as e:
            # Malformed/unreadable file: warn and fall back rather than abort.
            print(f"Error loading config file: {e}")
            print("Using default configuration")
            return default_config
    else:
        # Save default config so operators get an editable template.
        try:
            with open(config_file, 'w') as f:
                json.dump(default_config, f, indent=2)
            print(f"Created default configuration file: {config_file}")
        except Exception as e:
            print(f"Could not save default config: {e}")

        return default_config
def create_app():
    """Create and configure Flask application.

    Wires up CORS, the merged ISP configuration, the API blueprint, the
    core engines, and a catch-all static/SPA route. Returns the app.
    """
    app = Flask(__name__, static_folder=os.path.join(os.path.dirname(__file__), 'static'))

    # Enable CORS for all routes
    CORS(app, origins="*", allow_headers=["Content-Type", "Authorization"])

    # Load configuration (config.json merged over defaults)
    config = load_config()
    app.config['ISP_CONFIG'] = config

    # Register blueprints
    app.register_blueprint(isp_api, url_prefix='/api')

    # Initialize engines (DHCP/NAT/firewall/... from routes.isp_api)
    init_engines(config)

    # Serve static files; unknown paths fall back to index.html or an
    # inline status page when no frontend build is present.
    @app.route('/', defaults={'path': ''})
    @app.route('/<path:path>')
    def serve_static(path):
        static_folder_path = app.static_folder
        if static_folder_path is None:
            return "Static folder not configured", 404

        if path != "" and os.path.exists(os.path.join(static_folder_path, path)):
            return app.send_static_file(path)
        else:
            index_path = os.path.join(static_folder_path, 'index.html')
            if os.path.exists(index_path):
                return app.send_static_file('index.html')
            else:
                return """
                <!DOCTYPE html>
                <html>
                <head>
                    <title>Virtual ISP Stack</title>
                    <style>
                        body { font-family: Arial, sans-serif; margin: 40px; }
                        .container { max-width: 800px; margin: 0 auto; }
                        .status { background: #f0f0f0; padding: 20px; border-radius: 5px; }
                        .api-link { color: #0066cc; text-decoration: none; }
                        .api-link:hover { text-decoration: underline; }
                    </style>
                </head>
                <body>
                    <div class="container">
                        <h1>Virtual ISP Stack</h1>
                        <div class="status">
                            <h2>System Status</h2>
                            <p>The Virtual ISP Stack is running successfully!</p>
                            <p><strong>API Endpoint:</strong> <a href="/api/status" class="api-link">/api/status</a></p>
                            <p><strong>System Stats:</strong> <a href="/api/stats" class="api-link">/api/stats</a></p>
                        </div>

                        <h2>Available API Endpoints</h2>
                        <ul>
                            <li><a href="/api/config" class="api-link">GET /api/config</a> - System configuration</li>
                            <li><a href="/api/status" class="api-link">GET /api/status</a> - System status</li>
                            <li><a href="/api/stats" class="api-link">GET /api/stats</a> - System statistics</li>
                            <li><a href="/api/dhcp/leases" class="api-link">GET /api/dhcp/leases</a> - DHCP leases</li>
                            <li><a href="/api/nat/sessions" class="api-link">GET /api/nat/sessions</a> - NAT sessions</li>
                            <li><a href="/api/firewall/rules" class="api-link">GET /api/firewall/rules</a> - Firewall rules</li>
                            <li><a href="/api/tcp/connections" class="api-link">GET /api/tcp/connections</a> - TCP connections</li>
                            <li><a href="/api/router/routes" class="api-link">GET /api/router/routes</a> - Routing table</li>
                            <li><a href="/api/bridge/clients" class="api-link">GET /api/bridge/clients</a> - Bridge clients</li>
                            <li><a href="/api/sessions" class="api-link">GET /api/sessions</a> - Session tracking</li>
                            <li><a href="/api/logs" class="api-link">GET /api/logs</a> - System logs</li>
                        </ul>

                        <h2>WebSocket Bridge</h2>
                        <p>WebSocket server running on port 8765 for packet bridge connections.</p>
                        <p>TCP server running on port 8766 for packet bridge connections.</p>
                    </div>
                </body>
                </html>
                """, 200

    return app
+
|
| 254 |
+
def main():
    """Main application entry point"""
    print("Starting Virtual ISP Stack...")

    # Build the fully-configured Flask app (blueprints, engines, routes).
    isp_app = create_app()

    print("Virtual ISP Stack started successfully!")
    print("API available at: http://0.0.0.0:5000/api/")
    print("WebSocket bridge at: ws://0.0.0.0:8765")
    print("TCP bridge at: tcp://0.0.0.0:8766")

    # Blocks here serving requests until interrupted.
    isp_app.run(host='0.0.0.0', port=5000, debug=False, threaded=True)


if __name__ == '__main__':
    main()
models/__pycache__/user.cpython-311.pyc
ADDED
|
Binary file (1.3 kB). View file
|
|
|
models/user.py
ADDED
|
@@ -0,0 +1,18 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from flask_sqlalchemy import SQLAlchemy
|
| 2 |
+
|
| 3 |
+
db = SQLAlchemy()
|
| 4 |
+
|
| 5 |
+
class User(db.Model):
    """SQLAlchemy model for an application user account."""

    # Auto-incrementing integer primary key.
    id = db.Column(db.Integer, primary_key=True)
    # Login name; unique and required (max 80 chars).
    username = db.Column(db.String(80), unique=True, nullable=False)
    # Contact e-mail; unique and required (max 120 chars).
    email = db.Column(db.String(120), unique=True, nullable=False)

    def __repr__(self):
        """Debug representation, e.g. ``<User alice>``."""
        return f'<User {self.username}>'

    def to_dict(self):
        """Serialize the row to a JSON-friendly dict."""
        return {
            'id': self.id,
            'username': self.username,
            'email': self.email
        }
openvpn/ca.crt
ADDED
|
@@ -0,0 +1,20 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
-----BEGIN CERTIFICATE-----
|
| 2 |
+
MIIDMzCCAhugAwIBAgIUNO765P4t/yD/PnIFTMVs0Q32TJYwDQYJKoZIhvcNAQEL
|
| 3 |
+
BQAwDjEMMAoGA1UEAwwDeWVzMB4XDTI1MDgwMjAxMjkzNVoXDTM1MDczMTAxMjkz
|
| 4 |
+
NVowDjEMMAoGA1UEAwwDeWVzMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKC
|
| 5 |
+
AQEAtwhMGXouHnHBRd2RhdrW8sOMgqt4wDXZC0J+4UMjOX6Y7t2O1Sgw/sWhwFPk
|
| 6 |
+
QF/cMoQIvsucklPogcnzzGtv9zDkAXyVyCC27UYbg8JfWZK3ZMrt6dfEmYf4KKXm
|
| 7 |
+
D6PLn9guxzBB63dhEWx/7fd6H9C/rK/u0rOh15DQRnfEI468cmXS5uNg8ke/73+y
|
| 8 |
+
Gzb6q7ZOFByBAwM0hW0lStBaIIcxouFrIK8B72O8H+6t10K1GvgiBhKvM3cc8dpN
|
| 9 |
+
y4qvRoN/o+eXarZG7G9dfm9OFgdd9LoXPTTbO+ftFPKOq4F41PnMd2Zcyk7P3GCr
|
| 10 |
+
3oK7NbISxZ5efLpy45lgSpqKBwIDAQABo4GIMIGFMB0GA1UdDgQWBBQIi0Er30cV
|
| 11 |
+
Qzi+U/LPV4Lf3yvGIzBJBgNVHSMEQjBAgBQIi0Er30cVQzi+U/LPV4Lf3yvGI6ES
|
| 12 |
+
pBAwDjEMMAoGA1UEAwwDeWVzghQ07vrk/i3/IP8+cgVMxWzRDfZMljAMBgNVHRME
|
| 13 |
+
BTADAQH/MAsGA1UdDwQEAwIBBjANBgkqhkiG9w0BAQsFAAOCAQEAHzfSFbi1G7WC
|
| 14 |
+
vMSOqSv4/jlBExnz/AlLUBHhgDomIdLK8Pb3tyCD5IYkmi0NT5x6DORcOV2ow1JZ
|
| 15 |
+
o4BL7OVV+fhz3VKXEpG+s3gq5j2m+raqLtu6QKBGg7SIUZ4MLjggvAcPjsK+n8sK
|
| 16 |
+
86sAUFVTccBxJlKBShAUPSNihyWwxB4PQFvwhefNQSoID1kAB2Fzf1beMX6Gp6Lj
|
| 17 |
+
ldI6e63lpYtIbp4+2F5SxJ/hGTUx+nWbOAHPvhBfhN6sEu9G1C5KPR0cm+xxOpZ9
|
| 18 |
+
lA7y4Dea7pyVybR/b7lFquE3TReXCoLx79UNNSv8erIlsy1jh9yXDnTCk8SN1dpO
|
| 19 |
+
YwJ9U0AHXA==
|
| 20 |
+
-----END CERTIFICATE-----
|
openvpn/dh.pem
ADDED
|
@@ -0,0 +1,8 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
-----BEGIN DH PARAMETERS-----
|
| 2 |
+
MIIBCAKCAQEAlPRBW0tYm271xYHi15JrD3JRlpvdjAm+CZoEq0ElLXvSlIKaNQls
|
| 3 |
+
ITH+KIBBX3pgbFFk03fO9ApF0kSOzycRRCuW970iCkDoFUN9y58EG+BI863FkU1h
|
| 4 |
+
3dx+c59HqdWXkzFK+SmTfKIe12alZFik5G0Xs0hkphCgPaXvWlojorjQoRfKySw3
|
| 5 |
+
VxpybKS83+l3t2ER3Z03IRvWinlnuxVAcymzeSR9hwIMJi3RmYmNmdXNel/WFAo2
|
| 6 |
+
zT5j2f2OZHtnBhvo1V92Rml+5rJksPX4lJMRNwVEnXwqVUyCQOTTiGTUjLOO2gdk
|
| 7 |
+
HLhH5teetBdKL4tFcldeIJSk3e0oWXbURwIBAg==
|
| 8 |
+
-----END DH PARAMETERS-----
|
openvpn/server.conf
ADDED
|
@@ -0,0 +1,21 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
port 1194
|
| 2 |
+
proto udp
|
| 3 |
+
dev tun
|
| 4 |
+
ca /etc/openvpn/server/ca.crt
|
| 5 |
+
cert /etc/openvpn/server/server.crt
|
| 6 |
+
key /etc/openvpn/server/server.key
|
| 7 |
+
dh /etc/openvpn/server/dh.pem
|
| 8 |
+
server 10.8.0.0 255.255.255.0
|
| 9 |
+
ifconfig-pool-persist ipp.txt
|
| 10 |
+
push "redirect-gateway def1 bypass-dhcp"
|
| 11 |
+
push "dhcp-option DNS 8.8.8.8"
|
| 12 |
+
push "dhcp-option DNS 8.8.4.4"
|
| 13 |
+
keepalive 10 120
|
| 14 |
+
cipher AES-256-CBC
|
| 15 |
+
persist-key
|
| 16 |
+
persist-tun
|
| 17 |
+
status openvpn-status.log
|
| 18 |
+
verb 3
|
| 19 |
+
explicit-exit-notify 1
|
| 20 |
+
|
| 21 |
+
|
openvpn/server.crt
ADDED
|
@@ -0,0 +1,86 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
Certificate:
|
| 2 |
+
Data:
|
| 3 |
+
Version: 3 (0x2)
|
| 4 |
+
Serial Number:
|
| 5 |
+
dd:b5:29:c9:70:b2:b3:65:70:ac:0f:57:30:15:b4:2a
|
| 6 |
+
Signature Algorithm: sha256WithRSAEncryption
|
| 7 |
+
Issuer: CN=yes
|
| 8 |
+
Validity
|
| 9 |
+
Not Before: Aug 2 01:29:38 2025 GMT
|
| 10 |
+
Not After : Nov 5 01:29:38 2027 GMT
|
| 11 |
+
Subject: CN=server
|
| 12 |
+
Subject Public Key Info:
|
| 13 |
+
Public Key Algorithm: rsaEncryption
|
| 14 |
+
Public-Key: (2048 bit)
|
| 15 |
+
Modulus:
|
| 16 |
+
00:dd:9e:02:fb:e3:57:cd:51:43:36:6a:2f:30:f5:
|
| 17 |
+
a1:42:5c:16:f1:7b:4b:0a:aa:b1:34:b5:86:51:3e:
|
| 18 |
+
6b:82:2e:59:df:42:21:cf:65:14:ea:8c:93:3c:0a:
|
| 19 |
+
72:a5:2e:0f:64:1a:ec:76:52:18:b2:d3:a0:df:df:
|
| 20 |
+
19:83:7e:39:9e:f5:16:18:36:34:ae:57:cf:2c:89:
|
| 21 |
+
7c:c5:97:e3:8f:d0:83:08:7f:14:0c:74:2c:d2:95:
|
| 22 |
+
09:6e:42:99:a0:28:69:83:68:f4:9c:0e:b5:3e:08:
|
| 23 |
+
8f:d8:06:ec:d5:aa:c8:bc:19:4b:ff:e4:99:50:12:
|
| 24 |
+
67:25:d4:79:94:1f:3d:64:b2:c8:00:ea:97:c2:df:
|
| 25 |
+
b8:1c:dc:69:47:9f:59:df:03:06:5a:32:7a:fa:51:
|
| 26 |
+
96:45:9a:b7:e7:03:ef:9d:3b:94:51:9d:08:69:bb:
|
| 27 |
+
b0:3e:c8:9c:a3:a0:9c:18:aa:e9:88:ec:96:c3:71:
|
| 28 |
+
b1:f6:a7:09:ff:c0:56:b1:24:22:ab:fc:9a:c5:fc:
|
| 29 |
+
fd:67:8e:1a:86:ff:0a:5b:28:46:b4:20:93:05:b6:
|
| 30 |
+
ff:87:93:66:7d:ae:92:c4:0d:20:99:e9:c5:b8:3d:
|
| 31 |
+
41:3a:06:83:49:e5:13:2e:d6:33:94:45:6a:36:84:
|
| 32 |
+
f9:c9:61:fe:98:3a:6e:41:ed:d8:8c:f1:55:3d:6d:
|
| 33 |
+
53:fb
|
| 34 |
+
Exponent: 65537 (0x10001)
|
| 35 |
+
X509v3 extensions:
|
| 36 |
+
X509v3 Basic Constraints:
|
| 37 |
+
CA:FALSE
|
| 38 |
+
X509v3 Subject Key Identifier:
|
| 39 |
+
F4:62:12:72:49:40:C2:8A:46:5A:CB:71:BE:33:58:25:B3:E0:01:AC
|
| 40 |
+
X509v3 Authority Key Identifier:
|
| 41 |
+
keyid:08:8B:41:2B:DF:47:15:43:38:BE:53:F2:CF:57:82:DF:DF:2B:C6:23
|
| 42 |
+
DirName:/CN=yes
|
| 43 |
+
serial:34:EE:FA:E4:FE:2D:FF:20:FF:3E:72:05:4C:C5:6C:D1:0D:F6:4C:96
|
| 44 |
+
X509v3 Extended Key Usage:
|
| 45 |
+
TLS Web Server Authentication
|
| 46 |
+
X509v3 Key Usage:
|
| 47 |
+
Digital Signature, Key Encipherment
|
| 48 |
+
X509v3 Subject Alternative Name:
|
| 49 |
+
DNS:server
|
| 50 |
+
Signature Algorithm: sha256WithRSAEncryption
|
| 51 |
+
Signature Value:
|
| 52 |
+
85:f7:59:01:c2:99:23:c3:9a:99:2a:0a:bc:5d:7d:1c:e8:7c:
|
| 53 |
+
e9:23:a5:87:08:bd:45:1b:a7:a9:b7:3a:06:b6:91:86:ac:61:
|
| 54 |
+
03:ae:cd:65:80:0e:e4:81:dc:38:b3:fe:6d:6f:02:e4:9e:43:
|
| 55 |
+
95:d0:a6:38:30:53:52:14:f1:96:2a:30:69:2f:56:24:65:ba:
|
| 56 |
+
53:c0:b0:22:23:2b:18:37:a1:0c:45:07:cb:ec:a9:71:f7:96:
|
| 57 |
+
2a:d2:18:94:f0:07:18:1f:4c:d2:c5:d5:66:8f:1d:5c:08:8d:
|
| 58 |
+
02:00:d6:0d:df:fd:6e:1e:2a:47:8c:30:fd:5b:46:56:0a:5a:
|
| 59 |
+
d4:6d:d4:99:c8:94:26:36:0b:86:30:dd:cb:3a:2e:a2:f3:80:
|
| 60 |
+
0f:62:80:f8:9d:ec:98:f2:96:20:4f:46:01:ae:9d:35:7f:34:
|
| 61 |
+
21:d7:71:89:b6:7a:ce:94:7e:14:e6:bf:b6:08:44:39:24:db:
|
| 62 |
+
aa:cf:54:46:34:8f:67:6c:72:22:f1:eb:e9:94:7d:73:26:f3:
|
| 63 |
+
2f:72:fe:28:b3:cb:28:c3:4c:14:3d:c3:81:1e:8d:96:96:e5:
|
| 64 |
+
df:af:c4:0a:06:71:16:df:8f:a3:30:50:79:45:95:4c:e8:57:
|
| 65 |
+
ee:ed:38:dd:82:8e:0e:b1:2b:4d:27:2b:6f:bc:c8:1c:91:de:
|
| 66 |
+
2c:55:69:38
|
| 67 |
+
-----BEGIN CERTIFICATE-----
|
| 68 |
+
MIIDWDCCAkCgAwIBAgIRAN21KclwsrNlcKwPVzAVtCowDQYJKoZIhvcNAQELBQAw
|
| 69 |
+
DjEMMAoGA1UEAwwDeWVzMB4XDTI1MDgwMjAxMjkzOFoXDTI3MTEwNTAxMjkzOFow
|
| 70 |
+
ETEPMA0GA1UEAwwGc2VydmVyMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKC
|
| 71 |
+
AQEA3Z4C++NXzVFDNmovMPWhQlwW8XtLCqqxNLWGUT5rgi5Z30Ihz2UU6oyTPApy
|
| 72 |
+
pS4PZBrsdlIYstOg398Zg345nvUWGDY0rlfPLIl8xZfjj9CDCH8UDHQs0pUJbkKZ
|
| 73 |
+
oChpg2j0nA61PgiP2Abs1arIvBlL/+SZUBJnJdR5lB89ZLLIAOqXwt+4HNxpR59Z
|
| 74 |
+
3wMGWjJ6+lGWRZq35wPvnTuUUZ0IabuwPsico6CcGKrpiOyWw3Gx9qcJ/8BWsSQi
|
| 75 |
+
q/yaxfz9Z44ahv8KWyhGtCCTBbb/h5Nmfa6SxA0gmenFuD1BOgaDSeUTLtYzlEVq
|
| 76 |
+
NoT5yWH+mDpuQe3YjPFVPW1T+wIDAQABo4GtMIGqMAkGA1UdEwQCMAAwHQYDVR0O
|
| 77 |
+
BBYEFPRiEnJJQMKKRlrLcb4zWCWz4AGsMEkGA1UdIwRCMECAFAiLQSvfRxVDOL5T
|
| 78 |
+
8s9Xgt/fK8YjoRKkEDAOMQwwCgYDVQQDDAN5ZXOCFDTu+uT+Lf8g/z5yBUzFbNEN
|
| 79 |
+
9kyWMBMGA1UdJQQMMAoGCCsGAQUFBwMBMAsGA1UdDwQEAwIFoDARBgNVHREECjAI
|
| 80 |
+
ggZzZXJ2ZXIwDQYJKoZIhvcNAQELBQADggEBAIX3WQHCmSPDmpkqCrxdfRzofOkj
|
| 81 |
+
pYcIvUUbp6m3Oga2kYasYQOuzWWADuSB3Diz/m1vAuSeQ5XQpjgwU1IU8ZYqMGkv
|
| 82 |
+
ViRlulPAsCIjKxg3oQxFB8vsqXH3lirSGJTwBxgfTNLF1WaPHVwIjQIA1g3f/W4e
|
| 83 |
+
KkeMMP1bRlYKWtRt1JnIlCY2C4Yw3cs6LqLzgA9igPid7JjyliBPRgGunTV/NCHX
|
| 84 |
+
cYm2es6UfhTmv7YIRDkk26rPVEY0j2dsciLx6+mUfXMm8y9y/iizyyjDTBQ9w4Ee
|
| 85 |
+
jZaW5d+vxAoGcRbfj6MwUHlFlUzoV+7tON2Cjg6xK00nK2+8yByR3ixVaTg=
|
| 86 |
+
-----END CERTIFICATE-----
|
openvpn/server.key
ADDED
|
@@ -0,0 +1,28 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
-----BEGIN PRIVATE KEY-----
|
| 2 |
+
MIIEvgIBADANBgkqhkiG9w0BAQEFAASCBKgwggSkAgEAAoIBAQDdngL741fNUUM2
|
| 3 |
+
ai8w9aFCXBbxe0sKqrE0tYZRPmuCLlnfQiHPZRTqjJM8CnKlLg9kGux2Uhiy06Df
|
| 4 |
+
3xmDfjme9RYYNjSuV88siXzFl+OP0IMIfxQMdCzSlQluQpmgKGmDaPScDrU+CI/Y
|
| 5 |
+
BuzVqsi8GUv/5JlQEmcl1HmUHz1kssgA6pfC37gc3GlHn1nfAwZaMnr6UZZFmrfn
|
| 6 |
+
A++dO5RRnQhpu7A+yJyjoJwYqumI7JbDcbH2pwn/wFaxJCKr/JrF/P1njhqG/wpb
|
| 7 |
+
KEa0IJMFtv+Hk2Z9rpLEDSCZ6cW4PUE6BoNJ5RMu1jOURWo2hPnJYf6YOm5B7diM
|
| 8 |
+
8VU9bVP7AgMBAAECggEATtwR0sEYtspSYPQS+9iD/AGZ9m75in+n1Ao+E/3isq28
|
| 9 |
+
tDmrn0moUjgYklZjakzEFEqSVx4qhMPSrKcORKCvb1Vl+dKcF2fOpFn+KK++Pagk
|
| 10 |
+
YGsb3ryeUIbRFsejM/79YNIBrOB89OiGCwiX0QZXLLvRs+qL9Za+1pLPenpNVd2w
|
| 11 |
+
zL+AZ8QkJZdHn1vOZt9vKRlpe8psAt64RHb+LqhYWfeLlpIUjpM5Vu9FFewMGPrw
|
| 12 |
+
n+GVCzK4ylq0pJ9bYwKI5Hw4qnJ3j5bGIumEjYBqqmef1+OTD3r/wyhTGpK9RRAu
|
| 13 |
+
WD9YGJeQx3ybzRL7Wj6k5g0dn+UA82Lh7Y8n9IoSaQKBgQDqP/BU2KapOHgFt2DE
|
| 14 |
+
WHU/+zA7/kfMJMGB5dYy8oXTxUY7WuqX9lja3rC0XuH10JTD6Q21jkTujc0T5/1B
|
| 15 |
+
4KxuX+nQP/T9b4XzVM3pKWVmHUt6wf24sbuTNxOy/Q/wC7eCnkr04CEl0vf3E56N
|
| 16 |
+
JaLG11dbpcn+9RC9FlUhlYY8QwKBgQDyMcz43915YGOQMkGVZFPvKyOy7ol4fFZv
|
| 17 |
+
VRfRoGx9CfHCIOfh9vmlUy6TR4qAQkCnkL730OsxpW3aDTe3qcAcmhiK7u5TfWrE
|
| 18 |
+
cd1WgrkymJ8hyEk6FSV0GMKrccQeEo2T95cKnk6lNXnEdNp5kx7LBQhL36fEtMXS
|
| 19 |
+
FGCcRkNp6QKBgAbm6WLmm0qDIm4wsAY5AQNomEw8OstWDemQ5xXLNYw+1Mns7Nqb
|
| 20 |
+
ZJTWWOiHnyrKAYggNsoxrfBFd1Rt0nV9dDcwVkhPih1pis3XotWK5bTzigTM8Hff
|
| 21 |
+
rMIyrj7o2+5bugV8OoMqk2903t+F0XchM8GeGLHXmbMMb3jSzqFVsYXXAoGBAII1
|
| 22 |
+
Z/99S7LPsXd6rWvFzqJMzRqLx/iw0D92viGDYBAxYnp9+myvvTO27tlbowilleEA
|
| 23 |
+
nsrY1TmRuOd8J7JkXtaBuiQnpJXaXaZTmS3DhhG/n/4nkcbaS5KJJU/LECcizl74
|
| 24 |
+
w4l/5sRHZbnLIRIvmGSJxhYUnjvQ/HGfZvldhSzRAoGBAMVTrxWedC2XeSMwjdhF
|
| 25 |
+
zeDBAp/dTMEnRaS0j3rp+4a4l7Sus1L/p8gBrJtnf/B43bNvQ5cr2jwH7Ql5cF1A
|
| 26 |
+
A7hpZ3C0trNaf6WqslJQhN8j8Cs85S/8rPGM5yAfyzKTMe0ytLUjn+XiQCqCUFcT
|
| 27 |
+
Inqx4ll7r2tlcI3aMlvN2qsd
|
| 28 |
+
-----END PRIVATE KEY-----
|
requirements.txt
ADDED
|
@@ -0,0 +1,34 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Core Flask dependencies
|
| 2 |
+
Flask==3.1.1
|
| 3 |
+
flask-cors==6.0.0
|
| 4 |
+
Flask-SQLAlchemy==3.1.1
|
| 5 |
+
Werkzeug==3.1.3
|
| 6 |
+
|
| 7 |
+
# Database
|
| 8 |
+
SQLAlchemy==2.0.41
|
| 9 |
+
|
| 10 |
+
# Async and networking
|
| 11 |
+
aiohttp==3.12.15
|
| 12 |
+
aiohappyeyeballs==2.6.1
|
| 13 |
+
aiosignal==1.4.0
|
| 14 |
+
websockets==15.0.1
|
| 15 |
+
|
| 16 |
+
# Utilities
|
| 17 |
+
attrs==25.3.0
|
| 18 |
+
blinker==1.9.0
|
| 19 |
+
click==8.2.1
|
| 20 |
+
frozenlist==1.7.0
|
| 21 |
+
greenlet==3.2.3
|
| 22 |
+
idna==3.10
|
| 23 |
+
itsdangerous==2.2.0
|
| 24 |
+
Jinja2==3.1.6
|
| 25 |
+
MarkupSafe==3.0.2
|
| 26 |
+
multidict==6.6.3
|
| 27 |
+
propcache==0.3.2
|
| 28 |
+
typing_extensions==4.14.0
|
| 29 |
+
yarl==1.20.1
|
| 30 |
+
|
| 31 |
+
# Additional dependencies for VPN management
|
| 32 |
+
psutil==5.9.8
|
| 33 |
+
netifaces==0.11.0
|
| 34 |
+
|
routes/__pycache__/isp_api.cpython-311.pyc
ADDED
|
Binary file (44.5 kB). View file
|
|
|
routes/__pycache__/user.cpython-311.pyc
ADDED
|
Binary file (3.4 kB). View file
|
|
|
routes/isp_api.py
ADDED
|
@@ -0,0 +1,1107 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""
|
| 2 |
+
ISP API Routes
|
| 3 |
+
|
| 4 |
+
Flask routes for the Virtual ISP Stack API endpoints
|
| 5 |
+
"""
|
| 6 |
+
|
| 7 |
+
from flask import Blueprint, jsonify, request, Response
|
| 8 |
+
from flask_cors import cross_origin
|
| 9 |
+
import json
|
| 10 |
+
import time
|
| 11 |
+
from typing import Dict, Any
|
| 12 |
+
|
| 13 |
+
# Import core modules
|
| 14 |
+
from core.dhcp_server import DHCPServer
|
| 15 |
+
from core.nat_engine import NATEngine
|
| 16 |
+
from core.firewall import FirewallEngine, FirewallRule, FirewallRuleBuilder, FirewallAction, FirewallDirection
|
| 17 |
+
from core.tcp_engine import TCPEngine
|
| 18 |
+
from core.virtual_router import VirtualRouter
|
| 19 |
+
from core.socket_translator import SocketTranslator
|
| 20 |
+
from core.packet_bridge import PacketBridge
|
| 21 |
+
from core.session_tracker import SessionTracker, SessionType, SessionState
|
| 22 |
+
from core.logger import VirtualISPLogger, LogLevel, LogCategory, LogFilter
|
| 23 |
+
from core.openvpn_manager import OpenVPNManager, initialize_openvpn_manager, get_openvpn_manager
|
| 24 |
+
|
| 25 |
+
# Create blueprint
|
| 26 |
+
isp_api = Blueprint('isp_api', __name__)
|
| 27 |
+
|
| 28 |
+
# Global instances (will be initialized by main app)
|
| 29 |
+
dhcp_server: DHCPServer = None
|
| 30 |
+
nat_engine: NATEngine = None
|
| 31 |
+
firewall_engine: FirewallEngine = None
|
| 32 |
+
tcp_engine: TCPEngine = None
|
| 33 |
+
virtual_router: VirtualRouter = None
|
| 34 |
+
socket_translator: SocketTranslator = None
|
| 35 |
+
packet_bridge: PacketBridge = None
|
| 36 |
+
session_tracker: SessionTracker = None
|
| 37 |
+
logger: VirtualISPLogger = None
|
| 38 |
+
openvpn_manager: OpenVPNManager = None
|
| 39 |
+
|
| 40 |
+
|
| 41 |
+
def init_engines(config: Dict[str, Any]):
    """Initialize all engines with configuration.

    Builds every core component from its sub-section of *config* (each
    ``config.get(..., {})`` tolerates a missing section), wires the OpenVPN
    manager to the ISP components, then starts the runnable engines.
    Order matters: the logger is created and started first so that later
    steps can log; engines are constructed before any is started so that
    cross-references (set_isp_components) see fully-built objects.

    Side effects: rebinds all module-level engine globals and starts
    background services.
    """
    global dhcp_server, nat_engine, firewall_engine, tcp_engine
    global virtual_router, socket_translator, packet_bridge, session_tracker, logger, openvpn_manager

    # Initialize logger first
    logger = VirtualISPLogger(config.get('logger', {}))
    logger.start()

    # Initialize core engines
    dhcp_server = DHCPServer(config.get('dhcp', {}))
    nat_engine = NATEngine(config.get('nat', {}))
    firewall_engine = FirewallEngine(config.get('firewall', {}))
    tcp_engine = TCPEngine(config.get('tcp', {}))
    virtual_router = VirtualRouter(config.get('router', {}))
    socket_translator = SocketTranslator(config.get('socket_translator', {}))
    packet_bridge = PacketBridge(config.get('packet_bridge', {}))
    session_tracker = SessionTracker(config.get('session_tracker', {}))

    # Initialize OpenVPN manager
    openvpn_manager = initialize_openvpn_manager(config.get('openvpn', {}))
    openvpn_manager.set_isp_components(
        dhcp_server=dhcp_server,
        nat_engine=nat_engine,
        firewall=firewall_engine,
        router=virtual_router
    )

    # Start engines
    # NOTE(review): firewall_engine and virtual_router are not started here —
    # presumably they are passive (no background thread); confirm in their modules.
    dhcp_server.start()
    nat_engine.start()
    tcp_engine.start()
    socket_translator.start()
    session_tracker.start()
    packet_bridge.start()

    logger.info(LogCategory.SYSTEM, 'api', 'All engines initialized and started')
|
| 78 |
+
|
| 79 |
+
|
| 80 |
+
# Configuration endpoints
|
| 81 |
+
@isp_api.route('/config', methods=['GET'])
@cross_origin()
def get_config():
    """Return the (currently static) system configuration as JSON."""
    try:
        dhcp_section = {
            'network': '10.0.0.0/24',
            'range_start': '10.0.0.10',
            'range_end': '10.0.0.100',
            'lease_time': 3600,
            'gateway': '10.0.0.1',
            'dns_servers': ['8.8.8.8', '8.8.4.4']
        }
        nat_section = {
            'port_range_start': 10000,
            'port_range_end': 65535,
            'session_timeout': 300
        }
        firewall_section = {
            'default_policy': 'ACCEPT',
            'log_blocked': True
        }
        tcp_section = {
            'initial_window': 65535,
            'max_retries': 3,
            'timeout': 30
        }

        # NOTE: these values are hard-coded snapshots, not read from the live engines.
        payload = {
            'dhcp': dhcp_section,
            'nat': nat_section,
            'firewall': firewall_section,
            'tcp': tcp_section
        }
        return jsonify({'status': 'success', 'config': payload})

    except Exception as exc:
        return jsonify({'status': 'error', 'message': str(exc)}), 500
|
| 121 |
+
|
| 122 |
+
|
| 123 |
+
@isp_api.route('/config', methods=['POST'])
@cross_origin()
def update_config():
    """Update system configuration.

    Accepts a JSON body describing the new configuration. The body is not
    yet applied to the live engines (placeholder implementation); it is only
    logged. Returns 400 when the request carries no JSON body, 200 otherwise.
    """
    try:
        # silent=True: malformed/absent JSON yields None instead of raising,
        # so we can answer with a clean 400 rather than a 500.
        config_data = request.get_json(silent=True)
        if config_data is None:
            return jsonify({
                'status': 'error',
                'message': 'Request body must be valid JSON'
            }), 400

        # Here you would update the actual configuration
        # For now, just return success

        if logger:
            logger.info(LogCategory.SYSTEM, 'api', 'Configuration updated', metadata=config_data)

        return jsonify({
            'status': 'success',
            'message': 'Configuration updated successfully'
        })

    except Exception as e:
        if logger:
            logger.error(LogCategory.SYSTEM, 'api', f'Configuration update failed: {str(e)}')

        return jsonify({
            'status': 'error',
            'message': str(e)
        }), 500
|
| 149 |
+
|
| 150 |
+
|
| 151 |
+
# DHCP endpoints
|
| 152 |
+
@isp_api.route('/dhcp/leases', methods=['GET'])
@cross_origin()
def get_dhcp_leases():
    """Return the DHCP lease table and its size."""
    try:
        if not dhcp_server:
            return jsonify({'status': 'error', 'message': 'DHCP server not initialized'}), 500

        lease_table = dhcp_server.get_leases()
        payload = {'status': 'success', 'leases': lease_table, 'count': len(lease_table)}
        return jsonify(payload)

    except Exception as exc:
        return jsonify({'status': 'error', 'message': str(exc)}), 500
|
| 173 |
+
|
| 174 |
+
|
| 175 |
+
@isp_api.route('/dhcp/leases/<mac_address>', methods=['DELETE'])
@cross_origin()
def release_dhcp_lease(mac_address):
    """Release the DHCP lease bound to *mac_address* (404 if none exists)."""
    try:
        if not dhcp_server:
            return jsonify({'status': 'error', 'message': 'DHCP server not initialized'}), 500

        # Failure path first: unknown MAC -> 404.
        if not dhcp_server.release_lease(mac_address):
            return jsonify({
                'status': 'error',
                'message': f'Lease for {mac_address} not found'
            }), 404

        if logger:
            logger.info(LogCategory.DHCP, 'api', f'Released DHCP lease for {mac_address}')

        return jsonify({
            'status': 'success',
            'message': f'Lease for {mac_address} released'
        })

    except Exception as exc:
        return jsonify({'status': 'error', 'message': str(exc)}), 500
|
| 204 |
+
|
| 205 |
+
|
| 206 |
+
# NAT endpoints
|
| 207 |
+
@isp_api.route('/nat/sessions', methods=['GET'])
@cross_origin()
def get_nat_sessions():
    """Return the NAT session table and its size."""
    try:
        if not nat_engine:
            return jsonify({'status': 'error', 'message': 'NAT engine not initialized'}), 500

        active = nat_engine.get_sessions()
        payload = {'status': 'success', 'sessions': active, 'count': len(active)}
        return jsonify(payload)

    except Exception as exc:
        return jsonify({'status': 'error', 'message': str(exc)}), 500
|
| 228 |
+
|
| 229 |
+
|
| 230 |
+
@isp_api.route('/nat/stats', methods=['GET'])
@cross_origin()
def get_nat_stats():
    """Return aggregate NAT engine statistics."""
    try:
        if not nat_engine:
            return jsonify({'status': 'error', 'message': 'NAT engine not initialized'}), 500

        return jsonify({'status': 'success', 'stats': nat_engine.get_stats()})

    except Exception as exc:
        return jsonify({'status': 'error', 'message': str(exc)}), 500
|
| 250 |
+
|
| 251 |
+
|
| 252 |
+
# Firewall endpoints
|
| 253 |
+
@isp_api.route('/firewall/rules', methods=['GET'])
@cross_origin()
def get_firewall_rules():
    """Return the configured firewall rules and their count."""
    try:
        if not firewall_engine:
            return jsonify({'status': 'error', 'message': 'Firewall engine not initialized'}), 500

        rule_list = firewall_engine.get_rules()
        payload = {'status': 'success', 'rules': rule_list, 'count': len(rule_list)}
        return jsonify(payload)

    except Exception as exc:
        return jsonify({'status': 'error', 'message': str(exc)}), 500
|
| 274 |
+
|
| 275 |
+
|
| 276 |
+
@isp_api.route('/firewall/rules', methods=['POST'])
@cross_origin()
def add_firewall_rule():
    """Add a firewall rule.

    Expects a JSON body with required keys ``rule_id`` and ``action``;
    ``priority``/``direction`` default to 100/'BOTH'; the remaining match
    fields (source_ip, dest_ip, source_port, dest_port, protocol,
    description) are optional.

    Returns 400 for a missing/invalid body or duplicate rule id, 200 on
    success, 500 on unexpected failure.
    """
    try:
        if not firewall_engine:
            return jsonify({'status': 'error', 'message': 'Firewall engine not initialized'}), 500

        rule_data = request.get_json(silent=True)
        # Validate up front: a missing body or missing required key used to
        # surface as a KeyError -> 500; it is really a client error (400).
        if not isinstance(rule_data, dict):
            return jsonify({'status': 'error',
                            'message': 'Request body must be a JSON object'}), 400
        missing = [k for k in ('rule_id', 'action') if k not in rule_data]
        if missing:
            return jsonify({'status': 'error',
                            'message': f"Missing required field(s): {', '.join(missing)}"}), 400

        # Build rule using builder
        builder = FirewallRuleBuilder(rule_data['rule_id'])
        builder.set_priority(rule_data.get('priority', 100))
        builder.set_action(rule_data['action'])
        builder.set_direction(rule_data.get('direction', 'BOTH'))

        # Optional match criteria are applied only when supplied.
        if 'source_ip' in rule_data:
            builder.set_source_ip(rule_data['source_ip'])
        if 'dest_ip' in rule_data:
            builder.set_dest_ip(rule_data['dest_ip'])
        if 'source_port' in rule_data:
            builder.set_source_port(rule_data['source_port'])
        if 'dest_port' in rule_data:
            builder.set_dest_port(rule_data['dest_port'])
        if 'protocol' in rule_data:
            builder.set_protocol(rule_data['protocol'])
        if 'description' in rule_data:
            builder.set_description(rule_data['description'])

        rule = builder.build()
        success = firewall_engine.add_rule(rule)

        if success:
            if logger:
                logger.info(LogCategory.FIREWALL, 'api', f'Added firewall rule: {rule.rule_id}')

            return jsonify({
                'status': 'success',
                'message': f'Rule {rule.rule_id} added successfully'
            })
        else:
            # add_rule() returning False means the id is already taken.
            return jsonify({
                'status': 'error',
                'message': f'Rule {rule.rule_id} already exists'
            }), 400

    except Exception as e:
        return jsonify({
            'status': 'error',
            'message': str(e)
        }), 500
|
| 327 |
+
|
| 328 |
+
|
| 329 |
+
@isp_api.route('/firewall/rules/<rule_id>', methods=['DELETE'])
@cross_origin()
def delete_firewall_rule(rule_id):
    """Delete the firewall rule named *rule_id* (404 when unknown)."""
    try:
        if not firewall_engine:
            return jsonify({'status': 'error', 'message': 'Firewall engine not initialized'}), 500

        # Unknown rule id -> 404; handled first as a guard clause.
        if not firewall_engine.remove_rule(rule_id):
            return jsonify({
                'status': 'error',
                'message': f'Rule {rule_id} not found'
            }), 404

        if logger:
            logger.info(LogCategory.FIREWALL, 'api', f'Deleted firewall rule: {rule_id}')

        return jsonify({
            'status': 'success',
            'message': f'Rule {rule_id} deleted successfully'
        })

    except Exception as exc:
        return jsonify({'status': 'error', 'message': str(exc)}), 500
|
| 358 |
+
|
| 359 |
+
|
| 360 |
+
@isp_api.route('/firewall/logs', methods=['GET'])
@cross_origin()
def get_firewall_logs():
    """Return firewall log entries, optionally filtered by action (query params: limit, action)."""
    try:
        if not firewall_engine:
            return jsonify({'status': 'error', 'message': 'Firewall engine not initialized'}), 500

        max_entries = request.args.get('limit', 100, type=int)
        action_filter = request.args.get('action')

        entries = firewall_engine.get_logs(limit=max_entries, filter_action=action_filter)
        payload = {'status': 'success', 'logs': entries, 'count': len(entries)}
        return jsonify(payload)

    except Exception as exc:
        return jsonify({'status': 'error', 'message': str(exc)}), 500
|
| 384 |
+
|
| 385 |
+
|
| 386 |
+
@isp_api.route('/firewall/stats', methods=['GET'])
@cross_origin()
def get_firewall_stats():
    """Return aggregate firewall statistics."""
    try:
        if not firewall_engine:
            return jsonify({'status': 'error', 'message': 'Firewall engine not initialized'}), 500

        return jsonify({'status': 'success', 'stats': firewall_engine.get_stats()})

    except Exception as exc:
        return jsonify({'status': 'error', 'message': str(exc)}), 500
|
| 406 |
+
|
| 407 |
+
|
| 408 |
+
# TCP connections endpoints
|
| 409 |
+
@isp_api.route('/tcp/connections', methods=['GET'])
@cross_origin()
def get_tcp_connections():
    """Return the tracked TCP connections and their count."""
    try:
        if not tcp_engine:
            return jsonify({'status': 'error', 'message': 'TCP engine not initialized'}), 500

        conn_list = tcp_engine.get_connections()
        payload = {'status': 'success', 'connections': conn_list, 'count': len(conn_list)}
        return jsonify(payload)

    except Exception as exc:
        return jsonify({'status': 'error', 'message': str(exc)}), 500
|
| 430 |
+
|
| 431 |
+
|
| 432 |
+
# Router endpoints
|
| 433 |
+
@isp_api.route('/router/routes', methods=['GET'])
@cross_origin()
def get_routing_table():
    """Return the virtual router's routing table and its size."""
    try:
        if not virtual_router:
            return jsonify({'status': 'error', 'message': 'Virtual router not initialized'}), 500

        route_entries = virtual_router.get_routing_table()
        payload = {'status': 'success', 'routes': route_entries, 'count': len(route_entries)}
        return jsonify(payload)

    except Exception as exc:
        return jsonify({'status': 'error', 'message': str(exc)}), 500
|
| 454 |
+
|
| 455 |
+
|
| 456 |
+
@isp_api.route('/router/interfaces', methods=['GET'])
@cross_origin()
def get_router_interfaces():
    """Return the virtual router's interface list and its size."""
    try:
        if not virtual_router:
            return jsonify({'status': 'error', 'message': 'Virtual router not initialized'}), 500

        iface_list = virtual_router.get_interfaces()
        payload = {'status': 'success', 'interfaces': iface_list, 'count': len(iface_list)}
        return jsonify(payload)

    except Exception as exc:
        return jsonify({'status': 'error', 'message': str(exc)}), 500
|
| 477 |
+
|
| 478 |
+
|
| 479 |
+
@isp_api.route('/router/arp', methods=['GET'])
@cross_origin()
def get_arp_table():
    """Return the virtual router's ARP table and its size."""
    try:
        if not virtual_router:
            return jsonify({'status': 'error', 'message': 'Virtual router not initialized'}), 500

        entries = virtual_router.get_arp_table()
        payload = {'status': 'success', 'arp_table': entries, 'count': len(entries)}
        return jsonify(payload)

    except Exception as exc:
        return jsonify({'status': 'error', 'message': str(exc)}), 500
|
| 500 |
+
|
| 501 |
+
|
| 502 |
+
@isp_api.route('/router/stats', methods=['GET'])
@cross_origin()
def get_router_stats():
    """Return aggregate virtual-router statistics."""
    try:
        if not virtual_router:
            return jsonify({'status': 'error', 'message': 'Virtual router not initialized'}), 500

        return jsonify({'status': 'success', 'stats': virtual_router.get_stats()})

    except Exception as exc:
        return jsonify({'status': 'error', 'message': str(exc)}), 500
|
| 522 |
+
|
| 523 |
+
|
| 524 |
+
# Bridge endpoints
|
| 525 |
+
@isp_api.route('/bridge/clients', methods=['GET'])
@cross_origin()
def get_bridge_clients():
    """Return the packet bridge's client list and its size."""
    try:
        if not packet_bridge:
            return jsonify({'status': 'error', 'message': 'Packet bridge not initialized'}), 500

        client_list = packet_bridge.get_clients()
        payload = {'status': 'success', 'clients': client_list, 'count': len(client_list)}
        return jsonify(payload)

    except Exception as exc:
        return jsonify({'status': 'error', 'message': str(exc)}), 500
|
| 546 |
+
|
| 547 |
+
|
| 548 |
+
@isp_api.route('/bridge/stats', methods=['GET'])
@cross_origin()
def get_bridge_stats():
    """Return aggregate packet-bridge statistics."""
    try:
        if not packet_bridge:
            return jsonify({'status': 'error', 'message': 'Packet bridge not initialized'}), 500

        return jsonify({'status': 'success', 'stats': packet_bridge.get_stats()})

    except Exception as exc:
        return jsonify({'status': 'error', 'message': str(exc)}), 500
|
| 568 |
+
|
| 569 |
+
|
| 570 |
+
# Session tracking endpoints
|
| 571 |
+
@isp_api.route('/sessions', methods=['GET'])
@cross_origin()
def get_sessions():
    """Return tracked sessions, paginated via query params limit/offset."""
    try:
        if not session_tracker:
            return jsonify({'status': 'error', 'message': 'Session tracker not initialized'}), 500

        page_size = request.args.get('limit', 100, type=int)
        page_start = request.args.get('offset', 0, type=int)

        session_list = session_tracker.get_sessions(limit=page_size, offset=page_start)
        payload = {'status': 'success', 'sessions': session_list, 'count': len(session_list)}
        return jsonify(payload)

    except Exception as exc:
        return jsonify({'status': 'error', 'message': str(exc)}), 500
|
| 595 |
+
|
| 596 |
+
|
| 597 |
+
@isp_api.route('/sessions/summary', methods=['GET'])
@cross_origin()
def get_session_summary():
    """Return the session tracker's summary view."""
    try:
        if not session_tracker:
            return jsonify({'status': 'error', 'message': 'Session tracker not initialized'}), 500

        return jsonify({'status': 'success', 'summary': session_tracker.get_session_summary()})

    except Exception as exc:
        return jsonify({'status': 'error', 'message': str(exc)}), 500
|
| 617 |
+
|
| 618 |
+
|
| 619 |
+
# Logging endpoints
|
| 620 |
+
@isp_api.route('/logs', methods=['GET'])
@cross_origin()
def get_logs():
    """Get system logs.

    Query params:
        limit/offset -- pagination (defaults 100/0).
        level, category -- optional enum-name filters, case-insensitive.
        search -- free-text search; when present, level/category/offset are ignored.

    Returns 400 for an unknown level or category name (previously a
    ValueError bubbled up and produced a misleading 500).
    """
    try:
        if not logger:
            return jsonify({'status': 'error', 'message': 'Logger not initialized'}), 500

        limit = request.args.get('limit', 100, type=int)
        offset = request.args.get('offset', 0, type=int)
        level = request.args.get('level')
        category = request.args.get('category')
        search = request.args.get('search')

        if search:
            logs = logger.search_logs(search, limit=limit)
        else:
            log_filter = LogFilter()
            try:
                if level:
                    log_filter.level_filter = LogLevel(level.upper())
                if category:
                    log_filter.category_filter = LogCategory(category.upper())
            except ValueError as e:
                # Bad enum name supplied by the client is their error, not ours.
                return jsonify({'status': 'error', 'message': str(e)}), 400

            logs = logger.get_logs(limit=limit, offset=offset, log_filter=log_filter)

        return jsonify({
            'status': 'success',
            'logs': logs,
            'count': len(logs)
        })

    except Exception as e:
        return jsonify({
            'status': 'error',
            'message': str(e)
        }), 500
|
| 656 |
+
|
| 657 |
+
|
| 658 |
+
@isp_api.route('/logs/errors', methods=['GET'])
@cross_origin()
def get_error_logs():
    """Return the most recent error log entries (query param: limit, default 50)."""
    try:
        if not logger:
            return jsonify({'status': 'error', 'message': 'Logger not initialized'}), 500

        max_entries = request.args.get('limit', 50, type=int)
        recent = logger.get_recent_errors(limit=max_entries)

        payload = {'status': 'success', 'errors': recent, 'count': len(recent)}
        return jsonify(payload)

    except Exception as exc:
        return jsonify({'status': 'error', 'message': str(exc)}), 500
|
| 680 |
+
|
| 681 |
+
|
| 682 |
+
# System status endpoints
|
| 683 |
+
@isp_api.route('/status', methods=['GET'])
@cross_origin()
def get_system_status():
    """Get overall system status.

    Reports per-component liveness flags plus a handful of headline counters.
    ``uptime`` is measured from the first time this endpoint was hit — an
    approximation of process uptime, replacing the previous placeholder
    expression ``time.time() - (time.time() - 3600)`` which always yielded a
    constant 3600.
    """
    try:
        # Lazily record a start reference on the function itself so the fix
        # is self-contained (no new module-level state required).
        start_time = getattr(get_system_status, '_start_time', None)
        if start_time is None:
            start_time = time.time()
            get_system_status._start_time = start_time

        status = {
            'timestamp': time.time(),
            'uptime': time.time() - start_time,  # seconds since first status call
            'components': {
                'dhcp_server': dhcp_server is not None and dhcp_server.running,
                'nat_engine': nat_engine is not None and nat_engine.running,
                'firewall_engine': firewall_engine is not None,
                'tcp_engine': tcp_engine is not None and tcp_engine.running,
                'virtual_router': virtual_router is not None,
                'socket_translator': socket_translator is not None and socket_translator.running,
                'packet_bridge': packet_bridge is not None and packet_bridge.running,
                'session_tracker': session_tracker is not None and session_tracker.running,
                'logger': logger is not None and logger.running
            },
            'stats': {}
        }

        # Collect headline counters from each initialized component.
        if dhcp_server:
            status['stats']['dhcp_leases'] = len(dhcp_server.get_leases())

        if nat_engine:
            nat_stats = nat_engine.get_stats()
            status['stats']['nat_sessions'] = nat_stats.get('active_sessions', 0)

        if firewall_engine:
            fw_stats = firewall_engine.get_stats()
            status['stats']['firewall_rules'] = fw_stats.get('total_rules', 0)

        if tcp_engine:
            tcp_connections = tcp_engine.get_connections()
            status['stats']['tcp_connections'] = len(tcp_connections)

        if packet_bridge:
            bridge_stats = packet_bridge.get_stats()
            status['stats']['bridge_clients'] = bridge_stats.get('active_clients', 0)

        if session_tracker:
            session_stats = session_tracker.get_stats()
            status['stats']['total_sessions'] = session_stats.get('active_sessions', 0)

        return jsonify({
            'status': 'success',
            'system_status': status
        })

    except Exception as e:
        return jsonify({
            'status': 'error',
            'message': str(e)
        }), 500
|
| 739 |
+
|
| 740 |
+
|
| 741 |
+
@isp_api.route('/stats', methods=['GET'])
@cross_origin()
def get_system_stats():
    """Get comprehensive system statistics"""
    try:
        # Build the payload incrementally; any component that has not been
        # initialized contributes an empty dict so the shape stays stable.
        stats = {'timestamp': time.time()}
        stats['dhcp'] = dhcp_server.get_leases() if dhcp_server else {}
        stats['nat'] = nat_engine.get_stats() if nat_engine else {}
        stats['firewall'] = firewall_engine.get_stats() if firewall_engine else {}
        stats['router'] = virtual_router.get_stats() if virtual_router else {}
        stats['bridge'] = packet_bridge.get_stats() if packet_bridge else {}
        stats['sessions'] = session_tracker.get_stats() if session_tracker else {}
        stats['logger'] = logger.get_stats() if logger else {}

        return jsonify({
            'status': 'success',
            'stats': stats
        })
    except Exception as e:
        # Surface any component failure as a JSON 500 rather than an HTML error page.
        return jsonify({'status': 'error', 'message': str(e)}), 500
|
| 767 |
+
|
| 768 |
+
|
| 769 |
+
# Start the packet bridge when this module is imported so it is running before
# any requests arrive. Guard against an uninitialized bridge: the bare call
# previously raised AttributeError at import time when packet_bridge was still
# None, which is the same None-check convention every route handler above uses.
if packet_bridge is not None:
    packet_bridge.start()
|
| 770 |
+
|
| 771 |
+
|
| 772 |
+
|
| 773 |
+
# OpenVPN endpoints
|
| 774 |
+
@isp_api.route('/openvpn/status', methods=['GET'])
@cross_origin()
def get_openvpn_status():
    """Get OpenVPN server status"""
    try:
        # Guard clause: the manager is created elsewhere at startup and may be absent.
        if not openvpn_manager:
            return jsonify({'status': 'error',
                            'message': 'OpenVPN manager not initialized'}), 500

        status = openvpn_manager.get_server_status()
        # Flatten the status object into a plain JSON-serializable dict.
        payload = {
            'is_running': status.is_running,
            'connected_clients': status.connected_clients,
            'total_bytes_received': status.total_bytes_received,
            'total_bytes_sent': status.total_bytes_sent,
            'uptime': status.uptime,
            'server_ip': status.server_ip,
            'server_port': status.server_port,
        }
        return jsonify({'status': 'success', 'openvpn_status': payload})
    except Exception as e:
        return jsonify({'status': 'error', 'message': str(e)}), 500
|
| 805 |
+
|
| 806 |
+
|
| 807 |
+
@isp_api.route('/openvpn/start', methods=['POST'])
@cross_origin()
def start_openvpn_server():
    """Start OpenVPN server"""
    try:
        if not openvpn_manager:
            return jsonify({'status': 'error',
                            'message': 'OpenVPN manager not initialized'}), 500

        # start_server() reports success as a boolean.
        if openvpn_manager.start_server():
            return jsonify({'status': 'success',
                            'message': 'OpenVPN server started successfully'})
        return jsonify({'status': 'error',
                        'message': 'Failed to start OpenVPN server'}), 500
    except Exception as e:
        return jsonify({'status': 'error', 'message': str(e)}), 500
|
| 836 |
+
|
| 837 |
+
|
| 838 |
+
@isp_api.route('/openvpn/stop', methods=['POST'])
@cross_origin()
def stop_openvpn_server():
    """Stop OpenVPN server"""
    try:
        if not openvpn_manager:
            return jsonify({'status': 'error',
                            'message': 'OpenVPN manager not initialized'}), 500

        # stop_server() reports success as a boolean.
        if openvpn_manager.stop_server():
            return jsonify({'status': 'success',
                            'message': 'OpenVPN server stopped successfully'})
        return jsonify({'status': 'error',
                        'message': 'Failed to stop OpenVPN server'}), 500
    except Exception as e:
        return jsonify({'status': 'error', 'message': str(e)}), 500
|
| 867 |
+
|
| 868 |
+
|
| 869 |
+
@isp_api.route('/openvpn/clients', methods=['GET'])
@cross_origin()
def get_openvpn_clients():
    """Get connected OpenVPN clients"""
    try:
        if not openvpn_manager:
            return jsonify({'status': 'error',
                            'message': 'OpenVPN manager not initialized'}), 500

        # Delegate straight to the manager; it returns a JSON-serializable list.
        return jsonify({'status': 'success',
                        'clients': openvpn_manager.get_connected_clients()})
    except Exception as e:
        return jsonify({'status': 'error', 'message': str(e)}), 500
|
| 892 |
+
|
| 893 |
+
|
| 894 |
+
@isp_api.route('/openvpn/clients/<client_id>/disconnect', methods=['POST'])
@cross_origin()
def disconnect_openvpn_client(client_id):
    """Disconnect a specific OpenVPN client"""
    try:
        if not openvpn_manager:
            return jsonify({'status': 'error',
                            'message': 'OpenVPN manager not initialized'}), 500

        # disconnect_client() reports success as a boolean.
        if openvpn_manager.disconnect_client(client_id):
            return jsonify({'status': 'success',
                            'message': f'Client {client_id} disconnected successfully'})
        return jsonify({'status': 'error',
                        'message': f'Failed to disconnect client {client_id}'}), 500
    except Exception as e:
        return jsonify({'status': 'error', 'message': str(e)}), 500
|
| 923 |
+
|
| 924 |
+
|
| 925 |
+
@isp_api.route('/openvpn/config/<client_name>', methods=['GET'])
@cross_origin()
def get_client_config(client_name):
    """Generate client configuration file"""
    try:
        if not openvpn_manager:
            return jsonify({'status': 'error',
                            'message': 'OpenVPN manager not initialized'}), 500

        # Get server IP from request or use default
        server_ip = request.args.get('server_ip', '127.0.0.1')

        config = openvpn_manager.generate_client_config(client_name, server_ip)
        if not config:
            return jsonify({'status': 'error',
                            'message': 'Failed to generate client configuration'}), 500

        # Stream the generated profile back as a downloadable .ovpn attachment.
        # NOTE(review): client_name is embedded unescaped in the header — confirm
        # upstream validation of route parameters.
        headers = {'Content-Disposition': f'attachment; filename={client_name}.ovpn'}
        return Response(config, mimetype='text/plain', headers=headers)
    except Exception as e:
        return jsonify({'status': 'error', 'message': str(e)}), 500
|
| 958 |
+
|
| 959 |
+
|
| 960 |
+
@isp_api.route('/openvpn/stats', methods=['GET'])
@cross_origin()
def get_openvpn_stats():
    """Get comprehensive OpenVPN statistics"""
    try:
        if not openvpn_manager:
            return jsonify({'status': 'error',
                            'message': 'OpenVPN manager not initialized'}), 500

        # The manager aggregates its own statistics; pass them through verbatim.
        return jsonify({'status': 'success',
                        'openvpn_stats': openvpn_manager.get_statistics()})
    except Exception as e:
        return jsonify({'status': 'error', 'message': str(e)}), 500
|
| 983 |
+
|
| 984 |
+
|
| 985 |
+
@isp_api.route('/openvpn/configs', methods=['GET'])
@cross_origin()
def list_client_configs():
    """List all stored client configurations"""
    try:
        if not openvpn_manager:
            return jsonify({'status': 'error',
                            'message': 'OpenVPN manager not initialized'}), 500

        configs = openvpn_manager.list_client_configs()
        # Include a count alongside the list for UI convenience.
        return jsonify({'status': 'success',
                        'configs': configs,
                        'count': len(configs)})
    except Exception as e:
        return jsonify({'status': 'error', 'message': str(e)}), 500
|
| 1009 |
+
|
| 1010 |
+
|
| 1011 |
+
@isp_api.route('/openvpn/configs/<client_name>', methods=['GET'])
@cross_origin()
def get_stored_client_config(client_name):
    """Get stored client configuration"""
    try:
        if not openvpn_manager:
            return jsonify({'status': 'error',
                            'message': 'OpenVPN manager not initialized'}), 500

        config_content = openvpn_manager.load_client_config(client_name)
        # A missing profile is a 404, not a server error.
        if not config_content:
            return jsonify({'status': 'error',
                            'message': f'Configuration for {client_name} not found'}), 404

        headers = {'Content-Disposition': f'attachment; filename={client_name}.ovpn'}
        return Response(config_content, mimetype='text/plain', headers=headers)
    except Exception as e:
        return jsonify({'status': 'error', 'message': str(e)}), 500
|
| 1041 |
+
|
| 1042 |
+
|
| 1043 |
+
@isp_api.route('/openvpn/configs/<client_name>', methods=['DELETE'])
@cross_origin()
def delete_stored_client_config(client_name):
    """Delete stored client configuration"""
    try:
        if not openvpn_manager:
            return jsonify({'status': 'error',
                            'message': 'OpenVPN manager not initialized'}), 500

        # delete_client_config() is False when no stored profile matches.
        if openvpn_manager.delete_client_config(client_name):
            return jsonify({'status': 'success',
                            'message': f'Configuration for {client_name} deleted successfully'})
        return jsonify({'status': 'error',
                        'message': f'Configuration for {client_name} not found'}), 404
    except Exception as e:
        return jsonify({'status': 'error', 'message': str(e)}), 500
|
| 1072 |
+
|
| 1073 |
+
|
| 1074 |
+
@isp_api.route('/openvpn/configs/<client_name>/generate', methods=['POST'])
@cross_origin()
def generate_and_save_client_config(client_name):
    """Generate and save client configuration"""
    try:
        if not openvpn_manager:
            return jsonify({'status': 'error',
                            'message': 'OpenVPN manager not initialized'}), 500

        # Get server IP from request or use default
        server_ip = request.args.get('server_ip', '127.0.0.1')

        # A falsy result means generation (or persistence) failed.
        if openvpn_manager.generate_and_save_client_config(client_name, server_ip):
            return jsonify({'status': 'success',
                            'message': f'Configuration for {client_name} generated and saved successfully'})
        return jsonify({'status': 'error',
                        'message': f'Failed to generate configuration for {client_name}'}), 500
    except Exception as e:
        return jsonify({'status': 'error', 'message': str(e)}), 500
|
| 1106 |
+
|
| 1107 |
+
|
routes/user.py
ADDED
|
@@ -0,0 +1,39 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""CRUD routes for the User model, mounted on the `user` blueprint."""
from flask import Blueprint, request, jsonify
from models.user import User, db

user_bp = Blueprint('user', __name__)


@user_bp.route('/users', methods=['GET'])
def get_users():
    """Return every user as a JSON list of dicts."""
    users = User.query.all()
    return jsonify([user.to_dict() for user in users])


@user_bp.route('/users', methods=['POST'])
def create_user():
    """Create a user from a JSON body containing 'username' and 'email'.

    Returns 400 on a missing/malformed body or missing fields (previously a
    KeyError here surfaced as an opaque 500).
    """
    data = request.get_json(silent=True) or {}
    missing = [field for field in ('username', 'email') if not data.get(field)]
    if missing:
        return jsonify({'error': f"Missing required fields: {', '.join(missing)}"}), 400
    user = User(username=data['username'], email=data['email'])
    db.session.add(user)
    db.session.commit()
    return jsonify(user.to_dict()), 201


@user_bp.route('/users/<int:user_id>', methods=['GET'])
def get_user(user_id):
    """Return a single user, or 404 if the id is unknown."""
    user = User.query.get_or_404(user_id)
    return jsonify(user.to_dict())


@user_bp.route('/users/<int:user_id>', methods=['PUT'])
def update_user(user_id):
    """Partially update a user; absent fields keep their current values."""
    user = User.query.get_or_404(user_id)
    data = request.get_json(silent=True) or {}
    user.username = data.get('username', user.username)
    user.email = data.get('email', user.email)
    db.session.commit()
    return jsonify(user.to_dict())


@user_bp.route('/users/<int:user_id>', methods=['DELETE'])
def delete_user(user_id):
    """Delete a user and return an empty 204 response."""
    user = User.query.get_or_404(user_id)
    db.session.delete(user)
    db.session.commit()
    return '', 204
|
static/app.js
ADDED
|
@@ -0,0 +1,1095 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
/**
|
| 2 |
+
* Virtual ISP Stack Frontend Application
|
| 3 |
+
* Native JavaScript implementation for managing the Virtual ISP Stack
|
| 4 |
+
*/
|
| 5 |
+
|
| 6 |
+
class VirtualISPApp {
|
| 7 |
+
    // Build the app shell — API prefix, default view, and empty chart registry —
    // then kick off async initialization (fire-and-forget; init() handles errors).
    constructor() {
        this.apiBase = '/api';              // prefix for every backend call
        this.currentSection = 'dashboard';  // section shown on first load
        this.refreshInterval = null;        // handle for the auto-refresh timer
        this.charts = {};                   // chart instances keyed by name

        this.init();
    }
|
| 15 |
+
|
| 16 |
+
    /**
     * One-time startup sequence: wire DOM handlers, show the default section,
     * create charts, fetch initial data, then begin periodic refresh.
     * Order matters: listeners and charts must exist before data arrives.
     */
    async init() {
        this.setupEventListeners();
        this.setupNavigation();
        this.setupCharts();
        await this.loadInitialData();
        this.startAutoRefresh();

        // Hide loading overlay
        this.hideLoading();

        console.log('Virtual ISP Stack App initialized');
    }
|
| 28 |
+
|
| 29 |
+
setupEventListeners() {
|
| 30 |
+
// Navigation
|
| 31 |
+
document.querySelectorAll('.nav-item').forEach(item => {
|
| 32 |
+
item.addEventListener('click', (e) => {
|
| 33 |
+
const section = e.currentTarget.dataset.section;
|
| 34 |
+
this.navigateToSection(section);
|
| 35 |
+
});
|
| 36 |
+
});
|
| 37 |
+
|
| 38 |
+
// Tab buttons
|
| 39 |
+
document.querySelectorAll('.tab-btn').forEach(btn => {
|
| 40 |
+
btn.addEventListener('click', (e) => {
|
| 41 |
+
const tab = e.currentTarget.dataset.tab;
|
| 42 |
+
this.switchTab(tab);
|
| 43 |
+
});
|
| 44 |
+
});
|
| 45 |
+
|
| 46 |
+
// Modal close buttons
|
| 47 |
+
document.querySelectorAll('.close').forEach(btn => {
|
| 48 |
+
btn.addEventListener('click', (e) => {
|
| 49 |
+
const modal = e.currentTarget.closest('.modal');
|
| 50 |
+
this.closeModal(modal.id);
|
| 51 |
+
});
|
| 52 |
+
});
|
| 53 |
+
|
| 54 |
+
// Click outside modal to close
|
| 55 |
+
document.querySelectorAll('.modal').forEach(modal => {
|
| 56 |
+
modal.addEventListener('click', (e) => {
|
| 57 |
+
if (e.target === modal) {
|
| 58 |
+
this.closeModal(modal.id);
|
| 59 |
+
}
|
| 60 |
+
});
|
| 61 |
+
});
|
| 62 |
+
|
| 63 |
+
// Form submissions
|
| 64 |
+
document.getElementById('addRuleForm')?.addEventListener('submit', (e) => {
|
| 65 |
+
e.preventDefault();
|
| 66 |
+
this.addFirewallRule();
|
| 67 |
+
});
|
| 68 |
+
}
|
| 69 |
+
|
| 70 |
+
    // Show the default section on first load.
    setupNavigation() {
        // Set initial active section
        this.navigateToSection('dashboard');
    }
|
| 74 |
+
|
| 75 |
+
navigateToSection(section) {
|
| 76 |
+
// Update navigation
|
| 77 |
+
document.querySelectorAll('.nav-item').forEach(item => {
|
| 78 |
+
item.classList.remove('active');
|
| 79 |
+
});
|
| 80 |
+
document.querySelector(`[data-section="${section}"]`).classList.add('active');
|
| 81 |
+
|
| 82 |
+
// Update content
|
| 83 |
+
document.querySelectorAll('.content-section').forEach(sec => {
|
| 84 |
+
sec.classList.remove('active');
|
| 85 |
+
});
|
| 86 |
+
document.getElementById(section).classList.add('active');
|
| 87 |
+
|
| 88 |
+
this.currentSection = section;
|
| 89 |
+
|
| 90 |
+
// Load section-specific data
|
| 91 |
+
this.loadSectionData(section);
|
| 92 |
+
}
|
| 93 |
+
|
| 94 |
+
switchTab(tab) {
|
| 95 |
+
const container = event.target.closest('.router-tabs');
|
| 96 |
+
|
| 97 |
+
// Update tab buttons
|
| 98 |
+
container.querySelectorAll('.tab-btn').forEach(btn => {
|
| 99 |
+
btn.classList.remove('active');
|
| 100 |
+
});
|
| 101 |
+
event.target.classList.add('active');
|
| 102 |
+
|
| 103 |
+
// Update tab content
|
| 104 |
+
container.querySelectorAll('.tab-pane').forEach(pane => {
|
| 105 |
+
pane.classList.remove('active');
|
| 106 |
+
});
|
| 107 |
+
container.querySelector(`#${tab}`).classList.add('active');
|
| 108 |
+
|
| 109 |
+
// Load tab-specific data
|
| 110 |
+
this.loadTabData(tab);
|
| 111 |
+
}
|
| 112 |
+
|
| 113 |
+
async loadInitialData() {
|
| 114 |
+
try {
|
| 115 |
+
await Promise.all([
|
| 116 |
+
this.loadSystemStatus(),
|
| 117 |
+
this.loadDashboardData(),
|
| 118 |
+
this.loadConfiguration()
|
| 119 |
+
]);
|
| 120 |
+
} catch (error) {
|
| 121 |
+
console.error('Error loading initial data:', error);
|
| 122 |
+
this.showToast('Error loading initial data', 'error');
|
| 123 |
+
}
|
| 124 |
+
}
|
| 125 |
+
|
| 126 |
+
async loadSectionData(section) {
|
| 127 |
+
try {
|
| 128 |
+
switch (section) {
|
| 129 |
+
case 'dashboard':
|
| 130 |
+
await this.loadDashboardData();
|
| 131 |
+
break;
|
| 132 |
+
case 'dhcp':
|
| 133 |
+
await this.loadDHCPData();
|
| 134 |
+
break;
|
| 135 |
+
case 'nat':
|
| 136 |
+
await this.loadNATData();
|
| 137 |
+
break;
|
| 138 |
+
case 'firewall':
|
| 139 |
+
await this.loadFirewallData();
|
| 140 |
+
break;
|
| 141 |
+
case 'router':
|
| 142 |
+
await this.loadRouterData();
|
| 143 |
+
break;
|
| 144 |
+
case 'bridge':
|
| 145 |
+
await this.loadBridgeData();
|
| 146 |
+
break;
|
| 147 |
+
case 'sessions':
|
| 148 |
+
await this.loadSessionsData();
|
| 149 |
+
break;
|
| 150 |
+
case 'logs':
|
| 151 |
+
await this.loadLogsData();
|
| 152 |
+
break;
|
| 153 |
+
case 'config':
|
| 154 |
+
await this.loadConfiguration();
|
| 155 |
+
break;
|
| 156 |
+
}
|
| 157 |
+
} catch (error) {
|
| 158 |
+
console.error(`Error loading ${section} data:`, error);
|
| 159 |
+
this.showToast(`Error loading ${section} data`, 'error');
|
| 160 |
+
}
|
| 161 |
+
}
|
| 162 |
+
|
| 163 |
+
async loadTabData(tab) {
|
| 164 |
+
try {
|
| 165 |
+
switch (tab) {
|
| 166 |
+
case 'routes':
|
| 167 |
+
await this.loadRoutingTable();
|
| 168 |
+
break;
|
| 169 |
+
case 'interfaces':
|
| 170 |
+
await this.loadInterfaces();
|
| 171 |
+
break;
|
| 172 |
+
case 'arp':
|
| 173 |
+
await this.loadARPTable();
|
| 174 |
+
break;
|
| 175 |
+
}
|
| 176 |
+
} catch (error) {
|
| 177 |
+
console.error(`Error loading ${tab} data:`, error);
|
| 178 |
+
}
|
| 179 |
+
}
|
| 180 |
+
|
| 181 |
+
// API Methods
|
| 182 |
+
async apiCall(endpoint, options = {}) {
|
| 183 |
+
const url = `${this.apiBase}${endpoint}`;
|
| 184 |
+
const defaultOptions = {
|
| 185 |
+
headers: {
|
| 186 |
+
'Content-Type': 'application/json',
|
| 187 |
+
},
|
| 188 |
+
};
|
| 189 |
+
|
| 190 |
+
const response = await fetch(url, { ...defaultOptions, ...options });
|
| 191 |
+
|
| 192 |
+
if (!response.ok) {
|
| 193 |
+
throw new Error(`API call failed: ${response.status} ${response.statusText}`);
|
| 194 |
+
}
|
| 195 |
+
|
| 196 |
+
return await response.json();
|
| 197 |
+
}
|
| 198 |
+
|
| 199 |
+
// System Status
|
| 200 |
+
async loadSystemStatus() {
|
| 201 |
+
try {
|
| 202 |
+
const response = await this.apiCall('/status');
|
| 203 |
+
this.updateSystemStatus(response.system_status);
|
| 204 |
+
} catch (error) {
|
| 205 |
+
console.error('Error loading system status:', error);
|
| 206 |
+
this.updateSystemStatusOffline();
|
| 207 |
+
}
|
| 208 |
+
}
|
| 209 |
+
|
| 210 |
+
updateSystemStatus(status) {
|
| 211 |
+
const indicator = document.getElementById('systemStatus');
|
| 212 |
+
const components = status.components;
|
| 213 |
+
|
| 214 |
+
// Update header status
|
| 215 |
+
const allOnline = Object.values(components).every(c => c === true);
|
| 216 |
+
indicator.className = `status-indicator ${allOnline ? 'online' : 'offline'}`;
|
| 217 |
+
indicator.querySelector('span').textContent = allOnline ? 'All Systems Online' : 'System Issues';
|
| 218 |
+
|
| 219 |
+
// Update component status
|
| 220 |
+
this.updateComponentStatus(components);
|
| 221 |
+
}
|
| 222 |
+
|
| 223 |
+
    // Force the header indicator into the offline state (used when /status fails).
    updateSystemStatusOffline() {
        const indicator = document.getElementById('systemStatus');
        indicator.className = 'status-indicator offline';
        indicator.querySelector('span').textContent = 'System Offline';
    }
|
| 228 |
+
|
| 229 |
+
    // Rebuild the component list: one row per component with an Online/Offline
    // badge. `components` maps component name -> boolean (see /status handler).
    // NOTE(review): names come from our own backend, so the innerHTML
    // interpolation is assumed safe — confirm if component names ever become
    // user-controlled.
    updateComponentStatus(components) {
        const container = document.getElementById('componentStatus');
        container.innerHTML = '';

        Object.entries(components).forEach(([name, status]) => {
            const item = document.createElement('div');
            item.className = `component-item ${status ? 'online' : 'offline'}`;
            item.innerHTML = `
                <span class="component-name">${this.formatComponentName(name)}</span>
                <span class="component-status-badge ${status ? 'online' : 'offline'}">
                    ${status ? 'Online' : 'Offline'}
                </span>
            `;
            container.appendChild(item);
        });
    }
|
| 245 |
+
|
| 246 |
+
formatComponentName(name) {
|
| 247 |
+
return name.replace(/_/g, ' ').replace(/\b\w/g, l => l.toUpperCase());
|
| 248 |
+
}
|
| 249 |
+
|
| 250 |
+
// Dashboard Data
|
| 251 |
+
async loadDashboardData() {
|
| 252 |
+
try {
|
| 253 |
+
const [statusResponse, statsResponse] = await Promise.all([
|
| 254 |
+
this.apiCall('/status'),
|
| 255 |
+
this.apiCall('/stats')
|
| 256 |
+
]);
|
| 257 |
+
|
| 258 |
+
this.updateDashboardStats(statusResponse.system_status.stats);
|
| 259 |
+
this.updateCharts(statsResponse.stats);
|
| 260 |
+
} catch (error) {
|
| 261 |
+
console.error('Error loading dashboard data:', error);
|
| 262 |
+
}
|
| 263 |
+
}
|
| 264 |
+
|
| 265 |
+
updateDashboardStats(stats) {
|
| 266 |
+
document.getElementById('dhcpLeaseCount').textContent = stats.dhcp_leases || 0;
|
| 267 |
+
document.getElementById('natSessionCount').textContent = stats.nat_sessions || 0;
|
| 268 |
+
document.getElementById('firewallRuleCount').textContent = stats.firewall_rules || 0;
|
| 269 |
+
document.getElementById('bridgeClientCount').textContent = stats.bridge_clients || 0;
|
| 270 |
+
}
|
| 271 |
+
|
| 272 |
+
// DHCP Data
|
| 273 |
+
async loadDHCPData() {
|
| 274 |
+
try {
|
| 275 |
+
const response = await this.apiCall('/dhcp/leases');
|
| 276 |
+
this.updateDHCPTable(response.leases);
|
| 277 |
+
} catch (error) {
|
| 278 |
+
console.error('Error loading DHCP data:', error);
|
| 279 |
+
this.updateDHCPTable([]);
|
| 280 |
+
}
|
| 281 |
+
}
|
| 282 |
+
|
| 283 |
+
    // Rebuild the DHCP lease table: one row per lease with a Release button
    // that calls back into the global `app` instance.
    // Assumes lease_start/lease_time are Unix seconds — TODO confirm against
    // the DHCP API; Date.now() is divided by 1000 to match.
    // NOTE(review): lease fields are interpolated into innerHTML unescaped;
    // values originate from our own backend, but confirm MAC/state strings
    // cannot be attacker-controlled.
    updateDHCPTable(leases) {
        const tbody = document.getElementById('dhcpTableBody');
        tbody.innerHTML = '';

        leases.forEach(lease => {
            const row = document.createElement('tr');
            // Remaining lifetime, clamped so expired leases show 0 instead of negative.
            const remaining = Math.max(0, lease.lease_time - (Date.now() / 1000 - lease.lease_start));

            row.innerHTML = `
                <td>${lease.mac_address}</td>
                <td>${lease.ip_address}</td>
                <td>${this.formatDuration(lease.lease_time)}</td>
                <td>${this.formatDuration(remaining)}</td>
                <td><span class="status-badge status-${lease.state.toLowerCase()}">${lease.state}</span></td>
                <td>
                    <button class="btn btn-danger btn-sm" onclick="app.releaseDHCPLease('${lease.mac_address}')">
                        <i class="fas fa-times"></i> Release
                    </button>
                </td>
            `;
            tbody.appendChild(row);
        });
    }
|
| 306 |
+
|
| 307 |
+
async releaseDHCPLease(macAddress) {
|
| 308 |
+
try {
|
| 309 |
+
await this.apiCall(`/dhcp/leases/${macAddress}`, { method: 'DELETE' });
|
| 310 |
+
this.showToast('DHCP lease released successfully', 'success');
|
| 311 |
+
await this.loadDHCPData();
|
| 312 |
+
} catch (error) {
|
| 313 |
+
console.error('Error releasing DHCP lease:', error);
|
| 314 |
+
this.showToast('Error releasing DHCP lease', 'error');
|
| 315 |
+
}
|
| 316 |
+
}
|
| 317 |
+
|
| 318 |
+
// NAT Data
|
| 319 |
+
async loadNATData() {
|
| 320 |
+
try {
|
| 321 |
+
const [sessionsResponse, statsResponse] = await Promise.all([
|
| 322 |
+
this.apiCall('/nat/sessions'),
|
| 323 |
+
this.apiCall('/nat/stats')
|
| 324 |
+
]);
|
| 325 |
+
|
| 326 |
+
this.updateNATStats(statsResponse.stats);
|
| 327 |
+
this.updateNATTable(sessionsResponse.sessions);
|
| 328 |
+
} catch (error) {
|
| 329 |
+
console.error('Error loading NAT data:', error);
|
| 330 |
+
this.updateNATStats({});
|
| 331 |
+
this.updateNATTable([]);
|
| 332 |
+
}
|
| 333 |
+
}
|
| 334 |
+
|
| 335 |
+
updateNATStats(stats) {
|
| 336 |
+
document.getElementById('natActiveSessions').textContent = stats.active_sessions || 0;
|
| 337 |
+
document.getElementById('natPortUtilization').textContent =
|
| 338 |
+
`${Math.round((stats.ports_used / stats.total_ports) * 100) || 0}%`;
|
| 339 |
+
document.getElementById('natBytesTranslated').textContent =
|
| 340 |
+
this.formatBytes(stats.bytes_translated || 0);
|
| 341 |
+
}
|
| 342 |
+
|
| 343 |
+
updateNATTable(sessions) {
|
| 344 |
+
const tbody = document.getElementById('natTableBody');
|
| 345 |
+
tbody.innerHTML = '';
|
| 346 |
+
|
| 347 |
+
sessions.forEach(session => {
|
| 348 |
+
const row = document.createElement('tr');
|
| 349 |
+
row.innerHTML = `
|
| 350 |
+
<td>${session.virtual_ip}:${session.virtual_port}</td>
|
| 351 |
+
<td>${session.real_ip}:${session.real_port}</td>
|
| 352 |
+
<td>${session.host_ip}:${session.host_port}</td>
|
| 353 |
+
<td>${session.protocol}</td>
|
| 354 |
+
<td>${this.formatDuration(session.duration)}</td>
|
| 355 |
+
<td>${this.formatBytes(session.bytes_in)} / ${this.formatBytes(session.bytes_out)}</td>
|
| 356 |
+
<td>
|
| 357 |
+
<button class="btn btn-danger btn-sm" onclick="app.closeNATSession('${session.session_id}')">
|
| 358 |
+
<i class="fas fa-times"></i> Close
|
| 359 |
+
</button>
|
| 360 |
+
</td>
|
| 361 |
+
`;
|
| 362 |
+
tbody.appendChild(row);
|
| 363 |
+
});
|
| 364 |
+
}
|
| 365 |
+
|
| 366 |
+
// Firewall Data
|
| 367 |
+
async loadFirewallData() {
|
| 368 |
+
try {
|
| 369 |
+
const [rulesResponse, logsResponse, statsResponse] = await Promise.all([
|
| 370 |
+
this.apiCall('/firewall/rules'),
|
| 371 |
+
this.apiCall('/firewall/logs?limit=50'),
|
| 372 |
+
this.apiCall('/firewall/stats')
|
| 373 |
+
]);
|
| 374 |
+
|
| 375 |
+
this.updateFirewallTable(rulesResponse.rules);
|
| 376 |
+
} catch (error) {
|
| 377 |
+
console.error('Error loading firewall data:', error);
|
| 378 |
+
this.updateFirewallTable([]);
|
| 379 |
+
}
|
| 380 |
+
}
|
| 381 |
+
|
| 382 |
+
updateFirewallTable(rules) {
|
| 383 |
+
const tbody = document.getElementById('firewallTableBody');
|
| 384 |
+
tbody.innerHTML = '';
|
| 385 |
+
|
| 386 |
+
rules.forEach(rule => {
|
| 387 |
+
const row = document.createElement('tr');
|
| 388 |
+
row.innerHTML = `
|
| 389 |
+
<td>${rule.priority}</td>
|
| 390 |
+
<td>${rule.rule_id}</td>
|
| 391 |
+
<td><span class="status-badge status-${rule.action.toLowerCase()}">${rule.action}</span></td>
|
| 392 |
+
<td>${rule.direction}</td>
|
| 393 |
+
<td>${rule.source_ip || 'Any'}${rule.source_port ? ':' + rule.source_port : ''}</td>
|
| 394 |
+
<td>${rule.dest_ip || 'Any'}${rule.dest_port ? ':' + rule.dest_port : ''}</td>
|
| 395 |
+
<td>${rule.protocol || 'Any'}</td>
|
| 396 |
+
<td>${rule.hit_count || 0}</td>
|
| 397 |
+
<td><span class="status-badge status-${rule.enabled ? 'active' : 'inactive'}">${rule.enabled ? 'Enabled' : 'Disabled'}</span></td>
|
| 398 |
+
<td>
|
| 399 |
+
<button class="btn btn-danger btn-sm" onclick="app.deleteFirewallRule('${rule.rule_id}')">
|
| 400 |
+
<i class="fas fa-trash"></i> Delete
|
| 401 |
+
</button>
|
| 402 |
+
</td>
|
| 403 |
+
`;
|
| 404 |
+
tbody.appendChild(row);
|
| 405 |
+
});
|
| 406 |
+
}
|
| 407 |
+
|
| 408 |
+
async deleteFirewallRule(ruleId) {
|
| 409 |
+
try {
|
| 410 |
+
await this.apiCall(`/firewall/rules/${ruleId}`, { method: 'DELETE' });
|
| 411 |
+
this.showToast('Firewall rule deleted successfully', 'success');
|
| 412 |
+
await this.loadFirewallData();
|
| 413 |
+
} catch (error) {
|
| 414 |
+
console.error('Error deleting firewall rule:', error);
|
| 415 |
+
this.showToast('Error deleting firewall rule', 'error');
|
| 416 |
+
}
|
| 417 |
+
}
|
| 418 |
+
|
| 419 |
+
// Router Data
|
| 420 |
+
async loadRouterData() {
|
| 421 |
+
await Promise.all([
|
| 422 |
+
this.loadRoutingTable(),
|
| 423 |
+
this.loadInterfaces(),
|
| 424 |
+
this.loadARPTable()
|
| 425 |
+
]);
|
| 426 |
+
}
|
| 427 |
+
|
| 428 |
+
async loadRoutingTable() {
|
| 429 |
+
try {
|
| 430 |
+
const response = await this.apiCall('/router/routes');
|
| 431 |
+
this.updateRoutingTable(response.routes);
|
| 432 |
+
} catch (error) {
|
| 433 |
+
console.error('Error loading routing table:', error);
|
| 434 |
+
this.updateRoutingTable([]);
|
| 435 |
+
}
|
| 436 |
+
}
|
| 437 |
+
|
| 438 |
+
updateRoutingTable(routes) {
|
| 439 |
+
const tbody = document.getElementById('routesTableBody');
|
| 440 |
+
tbody.innerHTML = '';
|
| 441 |
+
|
| 442 |
+
routes.forEach(route => {
|
| 443 |
+
const row = document.createElement('tr');
|
| 444 |
+
row.innerHTML = `
|
| 445 |
+
<td>${route.destination}</td>
|
| 446 |
+
<td>${route.gateway || 'Direct'}</td>
|
| 447 |
+
<td>${route.interface}</td>
|
| 448 |
+
<td>${route.metric}</td>
|
| 449 |
+
<td>${route.type}</td>
|
| 450 |
+
<td>${route.use_count || 0}</td>
|
| 451 |
+
<td>${route.last_used ? new Date(route.last_used * 1000).toLocaleString() : 'Never'}</td>
|
| 452 |
+
`;
|
| 453 |
+
tbody.appendChild(row);
|
| 454 |
+
});
|
| 455 |
+
}
|
| 456 |
+
|
| 457 |
+
async loadInterfaces() {
|
| 458 |
+
try {
|
| 459 |
+
const response = await this.apiCall('/router/interfaces');
|
| 460 |
+
this.updateInterfacesTable(response.interfaces);
|
| 461 |
+
} catch (error) {
|
| 462 |
+
console.error('Error loading interfaces:', error);
|
| 463 |
+
this.updateInterfacesTable([]);
|
| 464 |
+
}
|
| 465 |
+
}
|
| 466 |
+
|
| 467 |
+
updateInterfacesTable(interfaces) {
|
| 468 |
+
const tbody = document.getElementById('interfacesTableBody');
|
| 469 |
+
tbody.innerHTML = '';
|
| 470 |
+
|
| 471 |
+
interfaces.forEach(iface => {
|
| 472 |
+
const row = document.createElement('tr');
|
| 473 |
+
row.innerHTML = `
|
| 474 |
+
<td>${iface.name}</td>
|
| 475 |
+
<td>${iface.ip_address}</td>
|
| 476 |
+
<td>${iface.network}</td>
|
| 477 |
+
<td>${iface.mtu}</td>
|
| 478 |
+
<td><span class="status-badge status-${iface.enabled ? 'active' : 'inactive'}">${iface.enabled ? 'Up' : 'Down'}</span></td>
|
| 479 |
+
<td>
|
| 480 |
+
<button class="btn btn-secondary btn-sm">
|
| 481 |
+
<i class="fas fa-cog"></i> Configure
|
| 482 |
+
</button>
|
| 483 |
+
</td>
|
| 484 |
+
`;
|
| 485 |
+
tbody.appendChild(row);
|
| 486 |
+
});
|
| 487 |
+
}
|
| 488 |
+
|
| 489 |
+
async loadARPTable() {
|
| 490 |
+
try {
|
| 491 |
+
const response = await this.apiCall('/router/arp');
|
| 492 |
+
this.updateARPTable(response.arp_table);
|
| 493 |
+
} catch (error) {
|
| 494 |
+
console.error('Error loading ARP table:', error);
|
| 495 |
+
this.updateARPTable([]);
|
| 496 |
+
}
|
| 497 |
+
}
|
| 498 |
+
|
| 499 |
+
updateARPTable(arpEntries) {
|
| 500 |
+
const tbody = document.getElementById('arpTableBody');
|
| 501 |
+
tbody.innerHTML = '';
|
| 502 |
+
|
| 503 |
+
arpEntries.forEach(entry => {
|
| 504 |
+
const row = document.createElement('tr');
|
| 505 |
+
row.innerHTML = `
|
| 506 |
+
<td>${entry.ip_address}</td>
|
| 507 |
+
<td>${entry.mac_address}</td>
|
| 508 |
+
<td>
|
| 509 |
+
<button class="btn btn-danger btn-sm">
|
| 510 |
+
<i class="fas fa-trash"></i> Clear
|
| 511 |
+
</button>
|
| 512 |
+
</td>
|
| 513 |
+
`;
|
| 514 |
+
tbody.appendChild(row);
|
| 515 |
+
});
|
| 516 |
+
}
|
| 517 |
+
|
| 518 |
+
// Bridge Data
|
| 519 |
+
async loadBridgeData() {
|
| 520 |
+
try {
|
| 521 |
+
const [clientsResponse, statsResponse] = await Promise.all([
|
| 522 |
+
this.apiCall('/bridge/clients'),
|
| 523 |
+
this.apiCall('/bridge/stats')
|
| 524 |
+
]);
|
| 525 |
+
|
| 526 |
+
this.updateBridgeTable(clientsResponse.clients);
|
| 527 |
+
} catch (error) {
|
| 528 |
+
console.error('Error loading bridge data:', error);
|
| 529 |
+
this.updateBridgeTable({});
|
| 530 |
+
}
|
| 531 |
+
}
|
| 532 |
+
|
| 533 |
+
updateBridgeTable(clients) {
|
| 534 |
+
const tbody = document.getElementById('bridgeTableBody');
|
| 535 |
+
tbody.innerHTML = '';
|
| 536 |
+
|
| 537 |
+
Object.values(clients).forEach(client => {
|
| 538 |
+
const row = document.createElement('tr');
|
| 539 |
+
row.innerHTML = `
|
| 540 |
+
<td>${client.client_id}</td>
|
| 541 |
+
<td>${client.bridge_type}</td>
|
| 542 |
+
<td>${client.remote_address}:${client.remote_port}</td>
|
| 543 |
+
<td>${new Date(client.connected_time * 1000).toLocaleString()}</td>
|
| 544 |
+
<td>${client.packets_received} / ${client.packets_sent}</td>
|
| 545 |
+
<td>${this.formatBytes(client.bytes_received)} / ${this.formatBytes(client.bytes_sent)}</td>
|
| 546 |
+
<td>
|
| 547 |
+
<button class="btn btn-danger btn-sm" onclick="app.disconnectBridgeClient('${client.client_id}')">
|
| 548 |
+
<i class="fas fa-times"></i> Disconnect
|
| 549 |
+
</button>
|
| 550 |
+
</td>
|
| 551 |
+
`;
|
| 552 |
+
tbody.appendChild(row);
|
| 553 |
+
});
|
| 554 |
+
}
|
| 555 |
+
|
| 556 |
+
// Sessions Data
|
| 557 |
+
async loadSessionsData() {
|
| 558 |
+
try {
|
| 559 |
+
const [sessionsResponse, summaryResponse] = await Promise.all([
|
| 560 |
+
this.apiCall('/sessions?limit=100'),
|
| 561 |
+
this.apiCall('/sessions/summary')
|
| 562 |
+
]);
|
| 563 |
+
|
| 564 |
+
this.updateSessionSummary(summaryResponse.summary);
|
| 565 |
+
this.updateSessionsTable(sessionsResponse.sessions);
|
| 566 |
+
} catch (error) {
|
| 567 |
+
console.error('Error loading sessions data:', error);
|
| 568 |
+
this.updateSessionSummary({});
|
| 569 |
+
this.updateSessionsTable([]);
|
| 570 |
+
}
|
| 571 |
+
}
|
| 572 |
+
|
| 573 |
+
updateSessionSummary(summary) {
|
| 574 |
+
const container = document.getElementById('sessionSummary');
|
| 575 |
+
container.innerHTML = `
|
| 576 |
+
<h3>Session Summary</h3>
|
| 577 |
+
<div class="stat-row">
|
| 578 |
+
<div class="stat-item">
|
| 579 |
+
<span class="stat-label">Total Sessions</span>
|
| 580 |
+
<span class="stat-value">${summary.total_sessions || 0}</span>
|
| 581 |
+
</div>
|
| 582 |
+
<div class="stat-item">
|
| 583 |
+
<span class="stat-label">Active (Last Hour)</span>
|
| 584 |
+
<span class="stat-value">${summary.active_sessions_by_age?.last_hour || 0}</span>
|
| 585 |
+
</div>
|
| 586 |
+
<div class="stat-item">
|
| 587 |
+
<span class="stat-label">Active (Last Day)</span>
|
| 588 |
+
<span class="stat-value">${summary.active_sessions_by_age?.last_day || 0}</span>
|
| 589 |
+
</div>
|
| 590 |
+
</div>
|
| 591 |
+
`;
|
| 592 |
+
}
|
| 593 |
+
|
| 594 |
+
updateSessionsTable(sessions) {
|
| 595 |
+
const tbody = document.getElementById('sessionsTableBody');
|
| 596 |
+
tbody.innerHTML = '';
|
| 597 |
+
|
| 598 |
+
sessions.forEach(session => {
|
| 599 |
+
const row = document.createElement('tr');
|
| 600 |
+
row.innerHTML = `
|
| 601 |
+
<td>${session.session_id.substring(0, 8)}...</td>
|
| 602 |
+
<td>${session.session_type}</td>
|
| 603 |
+
<td><span class="status-badge status-${session.state.toLowerCase()}">${session.state}</span></td>
|
| 604 |
+
<td>${session.virtual_ip || '-'}${session.virtual_port ? ':' + session.virtual_port : ''}</td>
|
| 605 |
+
<td>${session.real_ip || '-'}${session.real_port ? ':' + session.real_port : ''}</td>
|
| 606 |
+
<td>${session.protocol || '-'}</td>
|
| 607 |
+
<td>${this.formatDuration(session.duration)}</td>
|
| 608 |
+
<td>${this.formatDuration(session.idle_time)}</td>
|
| 609 |
+
<td>${this.formatBytes(session.metrics.total_bytes)}</td>
|
| 610 |
+
`;
|
| 611 |
+
tbody.appendChild(row);
|
| 612 |
+
});
|
| 613 |
+
}
|
| 614 |
+
|
| 615 |
+
// Logs Data
|
| 616 |
+
async loadLogsData() {
|
| 617 |
+
try {
|
| 618 |
+
const response = await this.apiCall('/logs?limit=100');
|
| 619 |
+
this.updateLogsContainer(response.logs);
|
| 620 |
+
} catch (error) {
|
| 621 |
+
console.error('Error loading logs data:', error);
|
| 622 |
+
this.updateLogsContainer([]);
|
| 623 |
+
}
|
| 624 |
+
}
|
| 625 |
+
|
| 626 |
+
updateLogsContainer(logs) {
|
| 627 |
+
const container = document.getElementById('logContainer');
|
| 628 |
+
container.innerHTML = '';
|
| 629 |
+
|
| 630 |
+
logs.forEach(log => {
|
| 631 |
+
const entry = document.createElement('div');
|
| 632 |
+
entry.className = 'log-entry';
|
| 633 |
+
entry.innerHTML = `
|
| 634 |
+
<div class="log-level ${log.level}">${log.level}</div>
|
| 635 |
+
<div class="log-content">
|
| 636 |
+
<div class="log-timestamp">${new Date(log.timestamp * 1000).toLocaleString()}</div>
|
| 637 |
+
<div class="log-message">${log.message}</div>
|
| 638 |
+
${log.metadata && Object.keys(log.metadata).length > 0 ?
|
| 639 |
+
`<div class="log-metadata">${JSON.stringify(log.metadata)}</div>` : ''}
|
| 640 |
+
</div>
|
| 641 |
+
`;
|
| 642 |
+
container.appendChild(entry);
|
| 643 |
+
});
|
| 644 |
+
}
|
| 645 |
+
|
| 646 |
+
// Configuration
|
| 647 |
+
async loadConfiguration() {
|
| 648 |
+
try {
|
| 649 |
+
const response = await this.apiCall('/config');
|
| 650 |
+
this.updateConfigurationForms(response.config);
|
| 651 |
+
} catch (error) {
|
| 652 |
+
console.error('Error loading configuration:', error);
|
| 653 |
+
}
|
| 654 |
+
}
|
| 655 |
+
|
| 656 |
+
updateConfigurationForms(config) {
|
| 657 |
+
// DHCP Configuration
|
| 658 |
+
const dhcpConfig = document.getElementById('dhcpConfig');
|
| 659 |
+
dhcpConfig.innerHTML = `
|
| 660 |
+
<div class="form-group">
|
| 661 |
+
<label>Network:</label>
|
| 662 |
+
<input type="text" value="${config.dhcp?.network || ''}" name="dhcp_network">
|
| 663 |
+
</div>
|
| 664 |
+
<div class="form-group">
|
| 665 |
+
<label>Range Start:</label>
|
| 666 |
+
<input type="text" value="${config.dhcp?.range_start || ''}" name="dhcp_range_start">
|
| 667 |
+
</div>
|
| 668 |
+
<div class="form-group">
|
| 669 |
+
<label>Range End:</label>
|
| 670 |
+
<input type="text" value="${config.dhcp?.range_end || ''}" name="dhcp_range_end">
|
| 671 |
+
</div>
|
| 672 |
+
<div class="form-group">
|
| 673 |
+
<label>Lease Time (seconds):</label>
|
| 674 |
+
<input type="number" value="${config.dhcp?.lease_time || 3600}" name="dhcp_lease_time">
|
| 675 |
+
</div>
|
| 676 |
+
`;
|
| 677 |
+
|
| 678 |
+
// NAT Configuration
|
| 679 |
+
const natConfig = document.getElementById('natConfig');
|
| 680 |
+
natConfig.innerHTML = `
|
| 681 |
+
<div class="form-group">
|
| 682 |
+
<label>Port Range Start:</label>
|
| 683 |
+
<input type="number" value="${config.nat?.port_range_start || 10000}" name="nat_port_start">
|
| 684 |
+
</div>
|
| 685 |
+
<div class="form-group">
|
| 686 |
+
<label>Port Range End:</label>
|
| 687 |
+
<input type="number" value="${config.nat?.port_range_end || 65535}" name="nat_port_end">
|
| 688 |
+
</div>
|
| 689 |
+
<div class="form-group">
|
| 690 |
+
<label>Session Timeout (seconds):</label>
|
| 691 |
+
<input type="number" value="${config.nat?.session_timeout || 300}" name="nat_timeout">
|
| 692 |
+
</div>
|
| 693 |
+
`;
|
| 694 |
+
|
| 695 |
+
// Firewall Configuration
|
| 696 |
+
const firewallConfig = document.getElementById('firewallConfig');
|
| 697 |
+
firewallConfig.innerHTML = `
|
| 698 |
+
<div class="form-group">
|
| 699 |
+
<label>Default Policy:</label>
|
| 700 |
+
<select name="firewall_default_policy">
|
| 701 |
+
<option value="ACCEPT" ${config.firewall?.default_policy === 'ACCEPT' ? 'selected' : ''}>Accept</option>
|
| 702 |
+
<option value="DROP" ${config.firewall?.default_policy === 'DROP' ? 'selected' : ''}>Drop</option>
|
| 703 |
+
</select>
|
| 704 |
+
</div>
|
| 705 |
+
<div class="form-group">
|
| 706 |
+
<label>Log Blocked:</label>
|
| 707 |
+
<select name="firewall_log_blocked">
|
| 708 |
+
<option value="true" ${config.firewall?.log_blocked ? 'selected' : ''}>Yes</option>
|
| 709 |
+
<option value="false" ${!config.firewall?.log_blocked ? 'selected' : ''}>No</option>
|
| 710 |
+
</select>
|
| 711 |
+
</div>
|
| 712 |
+
`;
|
| 713 |
+
}
|
| 714 |
+
|
| 715 |
+
// Charts
|
| 716 |
+
setupCharts() {
|
| 717 |
+
// Traffic Chart
|
| 718 |
+
const trafficCtx = document.getElementById('trafficChart');
|
| 719 |
+
if (trafficCtx) {
|
| 720 |
+
this.charts.traffic = new Chart(trafficCtx, {
|
| 721 |
+
type: 'line',
|
| 722 |
+
data: {
|
| 723 |
+
labels: [],
|
| 724 |
+
datasets: [{
|
| 725 |
+
label: 'Bytes In',
|
| 726 |
+
data: [],
|
| 727 |
+
borderColor: '#4facfe',
|
| 728 |
+
backgroundColor: 'rgba(79, 172, 254, 0.1)',
|
| 729 |
+
tension: 0.4
|
| 730 |
+
}, {
|
| 731 |
+
label: 'Bytes Out',
|
| 732 |
+
data: [],
|
| 733 |
+
borderColor: '#00f2fe',
|
| 734 |
+
backgroundColor: 'rgba(0, 242, 254, 0.1)',
|
| 735 |
+
tension: 0.4
|
| 736 |
+
}]
|
| 737 |
+
},
|
| 738 |
+
options: {
|
| 739 |
+
responsive: true,
|
| 740 |
+
maintainAspectRatio: false,
|
| 741 |
+
scales: {
|
| 742 |
+
y: {
|
| 743 |
+
beginAtZero: true
|
| 744 |
+
}
|
| 745 |
+
}
|
| 746 |
+
}
|
| 747 |
+
});
|
| 748 |
+
}
|
| 749 |
+
|
| 750 |
+
// Connection Chart
|
| 751 |
+
const connectionCtx = document.getElementById('connectionChart');
|
| 752 |
+
if (connectionCtx) {
|
| 753 |
+
this.charts.connection = new Chart(connectionCtx, {
|
| 754 |
+
type: 'doughnut',
|
| 755 |
+
data: {
|
| 756 |
+
labels: ['DHCP', 'NAT', 'TCP', 'Bridge'],
|
| 757 |
+
datasets: [{
|
| 758 |
+
data: [0, 0, 0, 0],
|
| 759 |
+
backgroundColor: [
|
| 760 |
+
'#4facfe',
|
| 761 |
+
'#00f2fe',
|
| 762 |
+
'#51cf66',
|
| 763 |
+
'#ff6b6b'
|
| 764 |
+
]
|
| 765 |
+
}]
|
| 766 |
+
},
|
| 767 |
+
options: {
|
| 768 |
+
responsive: true,
|
| 769 |
+
maintainAspectRatio: false
|
| 770 |
+
}
|
| 771 |
+
});
|
| 772 |
+
}
|
| 773 |
+
}
|
| 774 |
+
|
| 775 |
+
updateCharts(stats) {
|
| 776 |
+
// Update traffic chart with sample data
|
| 777 |
+
if (this.charts.traffic) {
|
| 778 |
+
const now = new Date();
|
| 779 |
+
const labels = this.charts.traffic.data.labels;
|
| 780 |
+
const bytesIn = this.charts.traffic.data.datasets[0].data;
|
| 781 |
+
const bytesOut = this.charts.traffic.data.datasets[1].data;
|
| 782 |
+
|
| 783 |
+
labels.push(now.toLocaleTimeString());
|
| 784 |
+
bytesIn.push(Math.random() * 1000000);
|
| 785 |
+
bytesOut.push(Math.random() * 800000);
|
| 786 |
+
|
| 787 |
+
// Keep only last 10 data points
|
| 788 |
+
if (labels.length > 10) {
|
| 789 |
+
labels.shift();
|
| 790 |
+
bytesIn.shift();
|
| 791 |
+
bytesOut.shift();
|
| 792 |
+
}
|
| 793 |
+
|
| 794 |
+
this.charts.traffic.update();
|
| 795 |
+
}
|
| 796 |
+
|
| 797 |
+
// Update connection chart
|
| 798 |
+
if (this.charts.connection && stats) {
|
| 799 |
+
this.charts.connection.data.datasets[0].data = [
|
| 800 |
+
Object.keys(stats.dhcp || {}).length,
|
| 801 |
+
stats.nat?.active_sessions || 0,
|
| 802 |
+
Object.keys(stats.tcp || {}).length,
|
| 803 |
+
stats.bridge?.active_clients || 0
|
| 804 |
+
];
|
| 805 |
+
this.charts.connection.update();
|
| 806 |
+
}
|
| 807 |
+
}
|
| 808 |
+
|
| 809 |
+
// Modal Management
|
| 810 |
+
showModal(modalId) {
|
| 811 |
+
document.getElementById(modalId).style.display = 'block';
|
| 812 |
+
}
|
| 813 |
+
|
| 814 |
+
closeModal(modalId) {
|
| 815 |
+
document.getElementById(modalId).style.display = 'none';
|
| 816 |
+
}
|
| 817 |
+
|
| 818 |
+
showAddRuleModal() {
|
| 819 |
+
this.showModal('addRuleModal');
|
| 820 |
+
}
|
| 821 |
+
|
| 822 |
+
async addFirewallRule() {
|
| 823 |
+
try {
|
| 824 |
+
const form = document.getElementById('addRuleForm');
|
| 825 |
+
const formData = new FormData(form);
|
| 826 |
+
const ruleData = Object.fromEntries(formData.entries());
|
| 827 |
+
|
| 828 |
+
await this.apiCall('/firewall/rules', {
|
| 829 |
+
method: 'POST',
|
| 830 |
+
body: JSON.stringify(ruleData)
|
| 831 |
+
});
|
| 832 |
+
|
| 833 |
+
this.showToast('Firewall rule added successfully', 'success');
|
| 834 |
+
this.closeModal('addRuleModal');
|
| 835 |
+
form.reset();
|
| 836 |
+
await this.loadFirewallData();
|
| 837 |
+
} catch (error) {
|
| 838 |
+
console.error('Error adding firewall rule:', error);
|
| 839 |
+
this.showToast('Error adding firewall rule', 'error');
|
| 840 |
+
}
|
| 841 |
+
}
|
| 842 |
+
|
| 843 |
+
// Configuration Management
|
| 844 |
+
async saveConfiguration() {
|
| 845 |
+
try {
|
| 846 |
+
const configData = this.collectConfigurationData();
|
| 847 |
+
await this.apiCall('/config', {
|
| 848 |
+
method: 'POST',
|
| 849 |
+
body: JSON.stringify(configData)
|
| 850 |
+
});
|
| 851 |
+
|
| 852 |
+
this.showToast('Configuration saved successfully', 'success');
|
| 853 |
+
} catch (error) {
|
| 854 |
+
console.error('Error saving configuration:', error);
|
| 855 |
+
this.showToast('Error saving configuration', 'error');
|
| 856 |
+
}
|
| 857 |
+
}
|
| 858 |
+
|
| 859 |
+
collectConfigurationData() {
|
| 860 |
+
const config = {};
|
| 861 |
+
|
| 862 |
+
// Collect DHCP config
|
| 863 |
+
const dhcpInputs = document.querySelectorAll('#dhcpConfig input, #dhcpConfig select');
|
| 864 |
+
config.dhcp = {};
|
| 865 |
+
dhcpInputs.forEach(input => {
|
| 866 |
+
const key = input.name.replace('dhcp_', '');
|
| 867 |
+
config.dhcp[key] = input.type === 'number' ? parseInt(input.value) : input.value;
|
| 868 |
+
});
|
| 869 |
+
|
| 870 |
+
// Collect NAT config
|
| 871 |
+
const natInputs = document.querySelectorAll('#natConfig input, #natConfig select');
|
| 872 |
+
config.nat = {};
|
| 873 |
+
natInputs.forEach(input => {
|
| 874 |
+
const key = input.name.replace('nat_', '');
|
| 875 |
+
config.nat[key] = input.type === 'number' ? parseInt(input.value) : input.value;
|
| 876 |
+
});
|
| 877 |
+
|
| 878 |
+
// Collect Firewall config
|
| 879 |
+
const firewallInputs = document.querySelectorAll('#firewallConfig input, #firewallConfig select');
|
| 880 |
+
config.firewall = {};
|
| 881 |
+
firewallInputs.forEach(input => {
|
| 882 |
+
const key = input.name.replace('firewall_', '');
|
| 883 |
+
config.firewall[key] = input.type === 'checkbox' ? input.checked : input.value;
|
| 884 |
+
});
|
| 885 |
+
|
| 886 |
+
return config;
|
| 887 |
+
}
|
| 888 |
+
|
| 889 |
+
// Utility Functions
|
| 890 |
+
formatBytes(bytes) {
|
| 891 |
+
if (bytes === 0) return '0 B';
|
| 892 |
+
const k = 1024;
|
| 893 |
+
const sizes = ['B', 'KB', 'MB', 'GB', 'TB'];
|
| 894 |
+
const i = Math.floor(Math.log(bytes) / Math.log(k));
|
| 895 |
+
return parseFloat((bytes / Math.pow(k, i)).toFixed(2)) + ' ' + sizes[i];
|
| 896 |
+
}
|
| 897 |
+
|
| 898 |
+
formatDuration(seconds) {
|
| 899 |
+
if (seconds < 60) return `${Math.round(seconds)}s`;
|
| 900 |
+
if (seconds < 3600) return `${Math.round(seconds / 60)}m`;
|
| 901 |
+
if (seconds < 86400) return `${Math.round(seconds / 3600)}h`;
|
| 902 |
+
return `${Math.round(seconds / 86400)}d`;
|
| 903 |
+
}
|
| 904 |
+
|
| 905 |
+
// Loading Management
|
| 906 |
+
showLoading() {
|
| 907 |
+
document.getElementById('loadingOverlay').style.display = 'block';
|
| 908 |
+
}
|
| 909 |
+
|
| 910 |
+
hideLoading() {
|
| 911 |
+
document.getElementById('loadingOverlay').style.display = 'none';
|
| 912 |
+
}
|
| 913 |
+
|
| 914 |
+
// Toast Notifications
|
| 915 |
+
showToast(message, type = 'info') {
|
| 916 |
+
const container = document.getElementById('toastContainer');
|
| 917 |
+
const toast = document.createElement('div');
|
| 918 |
+
toast.className = `toast ${type}`;
|
| 919 |
+
toast.textContent = message;
|
| 920 |
+
|
| 921 |
+
container.appendChild(toast);
|
| 922 |
+
|
| 923 |
+
// Auto remove after 5 seconds
|
| 924 |
+
setTimeout(() => {
|
| 925 |
+
toast.remove();
|
| 926 |
+
}, 5000);
|
| 927 |
+
}
|
| 928 |
+
|
| 929 |
+
// Auto Refresh
|
| 930 |
+
startAutoRefresh() {
|
| 931 |
+
this.refreshInterval = setInterval(async () => {
|
| 932 |
+
if (this.currentSection === 'dashboard') {
|
| 933 |
+
await this.loadSystemStatus();
|
| 934 |
+
await this.loadDashboardData();
|
| 935 |
+
}
|
| 936 |
+
}, 30000); // Refresh every 30 seconds
|
| 937 |
+
}
|
| 938 |
+
|
| 939 |
+
stopAutoRefresh() {
|
| 940 |
+
if (this.refreshInterval) {
|
| 941 |
+
clearInterval(this.refreshInterval);
|
| 942 |
+
this.refreshInterval = null;
|
| 943 |
+
}
|
| 944 |
+
}
|
| 945 |
+
|
| 946 |
+
// Manual Refresh Functions
|
| 947 |
+
async refreshData() {
|
| 948 |
+
this.showLoading();
|
| 949 |
+
try {
|
| 950 |
+
await this.loadSectionData(this.currentSection);
|
| 951 |
+
this.showToast('Data refreshed successfully', 'success');
|
| 952 |
+
} catch (error) {
|
| 953 |
+
this.showToast('Error refreshing data', 'error');
|
| 954 |
+
} finally {
|
| 955 |
+
this.hideLoading();
|
| 956 |
+
}
|
| 957 |
+
}
|
| 958 |
+
|
| 959 |
+
async refreshDHCPLeases() {
|
| 960 |
+
await this.loadDHCPData();
|
| 961 |
+
this.showToast('DHCP leases refreshed', 'info');
|
| 962 |
+
}
|
| 963 |
+
|
| 964 |
+
async refreshNATSessions() {
|
| 965 |
+
await this.loadNATData();
|
| 966 |
+
this.showToast('NAT sessions refreshed', 'info');
|
| 967 |
+
}
|
| 968 |
+
|
| 969 |
+
async refreshFirewallRules() {
|
| 970 |
+
await this.loadFirewallData();
|
| 971 |
+
this.showToast('Firewall rules refreshed', 'info');
|
| 972 |
+
}
|
| 973 |
+
|
| 974 |
+
async refreshBridgeClients() {
|
| 975 |
+
await this.loadBridgeData();
|
| 976 |
+
this.showToast('Bridge clients refreshed', 'info');
|
| 977 |
+
}
|
| 978 |
+
|
| 979 |
+
async refreshSessions() {
|
| 980 |
+
await this.loadSessionsData();
|
| 981 |
+
this.showToast('Sessions refreshed', 'info');
|
| 982 |
+
}
|
| 983 |
+
|
| 984 |
+
async refreshLogs() {
|
| 985 |
+
await this.loadLogsData();
|
| 986 |
+
this.showToast('Logs refreshed', 'info');
|
| 987 |
+
}
|
| 988 |
+
|
| 989 |
+
// Filter Functions
|
| 990 |
+
filterSessions() {
|
| 991 |
+
// Implementation for session filtering
|
| 992 |
+
const filter = document.getElementById('sessionTypeFilter').value;
|
| 993 |
+
// Apply filter logic here
|
| 994 |
+
}
|
| 995 |
+
|
| 996 |
+
filterLogs() {
|
| 997 |
+
// Implementation for log filtering
|
| 998 |
+
const levelFilter = document.getElementById('logLevelFilter').value;
|
| 999 |
+
const categoryFilter = document.getElementById('logCategoryFilter').value;
|
| 1000 |
+
// Apply filter logic here
|
| 1001 |
+
}
|
| 1002 |
+
|
| 1003 |
+
searchLogs() {
|
| 1004 |
+
// Implementation for log searching
|
| 1005 |
+
const searchTerm = document.getElementById('logSearchInput').value;
|
| 1006 |
+
// Apply search logic here
|
| 1007 |
+
}
|
| 1008 |
+
|
| 1009 |
+
clearLogs() {
|
| 1010 |
+
if (confirm('Are you sure you want to clear all logs?')) {
|
| 1011 |
+
document.getElementById('logContainer').innerHTML = '';
|
| 1012 |
+
this.showToast('Logs cleared', 'info');
|
| 1013 |
+
}
|
| 1014 |
+
}
|
| 1015 |
+
|
| 1016 |
+
resetConfiguration() {
|
| 1017 |
+
if (confirm('Are you sure you want to reset configuration to defaults?')) {
|
| 1018 |
+
this.loadConfiguration();
|
| 1019 |
+
this.showToast('Configuration reset to defaults', 'info');
|
| 1020 |
+
}
|
| 1021 |
+
}
|
| 1022 |
+
}
|
| 1023 |
+
|
| 1024 |
+
// Global Functions (for onclick handlers)
|
| 1025 |
+
let app;
|
| 1026 |
+
|
| 1027 |
+
function refreshData() {
|
| 1028 |
+
app.refreshData();
|
| 1029 |
+
}
|
| 1030 |
+
|
| 1031 |
+
function showAddRuleModal() {
|
| 1032 |
+
app.showAddRuleModal();
|
| 1033 |
+
}
|
| 1034 |
+
|
| 1035 |
+
function closeModal(modalId) {
|
| 1036 |
+
app.closeModal(modalId);
|
| 1037 |
+
}
|
| 1038 |
+
|
| 1039 |
+
function addFirewallRule() {
|
| 1040 |
+
app.addFirewallRule();
|
| 1041 |
+
}
|
| 1042 |
+
|
| 1043 |
+
function saveConfiguration() {
|
| 1044 |
+
app.saveConfiguration();
|
| 1045 |
+
}
|
| 1046 |
+
|
| 1047 |
+
function resetConfiguration() {
|
| 1048 |
+
app.resetConfiguration();
|
| 1049 |
+
}
|
| 1050 |
+
|
| 1051 |
+
function refreshDHCPLeases() {
|
| 1052 |
+
app.refreshDHCPLeases();
|
| 1053 |
+
}
|
| 1054 |
+
|
| 1055 |
+
function refreshNATSessions() {
|
| 1056 |
+
app.refreshNATSessions();
|
| 1057 |
+
}
|
| 1058 |
+
|
| 1059 |
+
function refreshFirewallRules() {
|
| 1060 |
+
app.refreshFirewallRules();
|
| 1061 |
+
}
|
| 1062 |
+
|
| 1063 |
+
function refreshBridgeClients() {
|
| 1064 |
+
app.refreshBridgeClients();
|
| 1065 |
+
}
|
| 1066 |
+
|
| 1067 |
+
function refreshSessions() {
|
| 1068 |
+
app.refreshSessions();
|
| 1069 |
+
}
|
| 1070 |
+
|
| 1071 |
+
function refreshLogs() {
|
| 1072 |
+
app.refreshLogs();
|
| 1073 |
+
}
|
| 1074 |
+
|
| 1075 |
+
function filterSessions() {
|
| 1076 |
+
app.filterSessions();
|
| 1077 |
+
}
|
| 1078 |
+
|
| 1079 |
+
function filterLogs() {
|
| 1080 |
+
app.filterLogs();
|
| 1081 |
+
}
|
| 1082 |
+
|
| 1083 |
+
function searchLogs() {
|
| 1084 |
+
app.searchLogs();
|
| 1085 |
+
}
|
| 1086 |
+
|
| 1087 |
+
function clearLogs() {
|
| 1088 |
+
app.clearLogs();
|
| 1089 |
+
}
|
| 1090 |
+
|
| 1091 |
+
// Initialize App
|
| 1092 |
+
document.addEventListener('DOMContentLoaded', () => {
|
| 1093 |
+
app = new VirtualISPApp();
|
| 1094 |
+
});
|
| 1095 |
+
|