diff --git a/Dockerfile b/Dockerfile new file mode 100644 index 0000000000000000000000000000000000000000..3287c2dc4d0fdb86b046d8f4160e8e75f44695dd --- /dev/null +++ b/Dockerfile @@ -0,0 +1,52 @@ +# Virtual ISP Stack with OpenVPN Integration +# Dockerfile for containerized deployment + +FROM python:3.11-slim + +# Set working directory +WORKDIR /app + +# Install system dependencies +RUN apt-get update && apt-get install -y \ + openvpn \ + iptables \ + iproute2 \ + net-tools \ + procps \ + build-essential \ + python3-dev \ + && rm -rf /var/lib/apt/lists/* + +COPY openvpn/server.conf /etc/openvpn/server/server.conf +COPY openvpn/ca.crt /etc/openvpn/server/ca.crt +COPY openvpn/server.crt /etc/openvpn/server/server.crt +COPY openvpn/server.key /etc/openvpn/server/server.key +COPY openvpn/dh.pem /etc/openvpn/server/dh.pem + +# Copy requirements and install Python dependencies +COPY requirements.txt . +RUN pip install --no-cache-dir -r requirements.txt + +# Copy application files +COPY . . + +# Create necessary directories +RUN mkdir -p /tmp/vpn_client_configs \ + && mkdir -p /var/log/openvpn \ + && mkdir -p database + +# Set environment variables +ENV FLASK_APP=app.py +ENV FLASK_ENV=production +ENV PORT=7860 + +# Expose port +EXPOSE 7860 + +# Health check +HEALTHCHECK --interval=30s --timeout=10s --start-period=5s --retries=3 \ + CMD curl -f http://localhost:7860/health || exit 1 + +# Run the application +CMD ["python", "app.py"] + diff --git a/__init__.py b/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/__pycache__/__init__.cpython-311.pyc b/__pycache__/__init__.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..bcd4b517a75814628b84137e8937b634c03eecde Binary files /dev/null and b/__pycache__/__init__.cpython-311.pyc differ diff --git a/__pycache__/app.cpython-311.pyc b/__pycache__/app.cpython-311.pyc new file mode 100644 index 
0000000000000000000000000000000000000000..b431b092bb4b1a7706d5e34f4d6d2b7e280793e8 Binary files /dev/null and b/__pycache__/app.cpython-311.pyc differ diff --git a/__pycache__/main.cpython-311.pyc b/__pycache__/main.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..8398ac78dc3f3379207a24a714b38dac618963b4 Binary files /dev/null and b/__pycache__/main.cpython-311.pyc differ diff --git a/__pycache__/main_isp.cpython-311.pyc b/__pycache__/main_isp.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..a7eaeba1abe565f0085b0798187fe8db2c0a25e1 Binary files /dev/null and b/__pycache__/main_isp.cpython-311.pyc differ diff --git a/app.py b/app.py new file mode 100644 index 0000000000000000000000000000000000000000..d08610597942e95836fff9b16a75f7bc53a42826 --- /dev/null +++ b/app.py @@ -0,0 +1,213 @@ +#!/usr/bin/env python3 +""" +Virtual ISP Stack with OpenVPN Integration +HuggingFace Spaces Entry Point + +This application provides a complete Virtual ISP stack with OpenVPN server integration, +allowing users to manage VPN connections, generate client configurations, and monitor +network traffic through a RESTful API. 
+""" + +import os +import sys +import logging + +# Add current directory to Python path +sys.path.insert(0, os.path.dirname(__file__)) + +from flask import Flask, send_from_directory, jsonify +from flask_cors import CORS +from models.enhanced_user import db +from routes.auth import auth_bp +from routes.vpn_client import vpn_client_bp +from routes.vpn_server import vpn_server_bp +from routes.isp_api import init_engines, isp_api + +# Configure logging +logging.basicConfig( + level=logging.INFO, + format='%(asctime)s - %(name)s - %(levelname)s - %(message)s' +) +logger = logging.getLogger(__name__) + +# Create Flask application +app = Flask(__name__, static_folder=os.path.join(os.path.dirname(__file__), 'static')) + +# Enable CORS for all routes +CORS(app, origins="*") + +# Configuration +app.config['SECRET_KEY'] = os.environ.get('SECRET_KEY', 'vpn-isp-stack-secret-key-change-in-production') + +# Database configuration +database_path = os.path.join(os.path.dirname(__file__), 'database', 'app.db') +app.config['SQLALCHEMY_DATABASE_URI'] = f"sqlite:///{database_path}" +app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False + +# Initialize database +db.init_app(app) + +# Register blueprints +app.register_blueprint(auth_bp, url_prefix='/api') +app.register_blueprint(vpn_client_bp, url_prefix='/api') +app.register_blueprint(vpn_server_bp, url_prefix='/api') +app.register_blueprint(isp_api, url_prefix='/api') + +# Engine configuration +app.config.update({ + "dhcp": { + "network": "10.0.0.0/24", + "range_start": "10.0.0.10", + "range_end": "10.0.0.100", + "lease_time": 3600, + "gateway": "10.0.0.1", + "dns_servers": ["8.8.8.8", "8.8.4.4"] + }, + "nat": { + "port_range_start": 10000, + "port_range_end": 65535, + "session_timeout": 300 + }, + "firewall": { + "default_policy": "ACCEPT", + "log_blocked": True + }, + "tcp": { + "initial_window": 65535, + "max_retries": 3, + "timeout": 30 + }, + "openvpn": { + "server_ip": "10.8.0.1", + "server_port": 1194, + "network": "10.8.0.0/24" 
+ }, + "logger": { + "log_level": "INFO", + "log_file": "/tmp/virtual_isp.log" + } +}) + +# Add VPN server configuration +app.config.update({ + 'VPN_SERVER_IP': os.environ.get('VPN_SERVER_IP', '127.0.0.1'), + 'OPENVPN_PORT': int(os.environ.get('OPENVPN_PORT', 1194)), + 'IKEV2_PORT': int(os.environ.get('IKEV2_PORT', 500)), + 'WIREGUARD_PORT': int(os.environ.get('WIREGUARD_PORT', 51820)), + 'WIREGUARD_SERVER_PUBLIC_KEY': os.environ.get('WIREGUARD_SERVER_PUBLIC_KEY', 'SERVER_PUBLIC_KEY_HERE') +}) + +# Initialize database tables +with app.app_context(): + try: + db.create_all() + logger.info("Database tables created successfully") + except Exception as e: + logger.error(f"Error creating database tables: {e}") + +# Initialize engines +try: + init_engines(app.config) + logger.info("All engines initialized successfully") +except Exception as e: + logger.error(f"Error initializing engines: {e}") + +@app.route('/') +def index(): + """Main index page - redirect to auth if not logged in""" + return serve_static('auth.html') + +@app.route('/auth') +def auth_page(): + """Authentication page""" + return serve_static('auth.html') + +@app.route('/dashboard') +def dashboard_page(): + """Dashboard page""" + return serve_static('index.html') + +@app.route('/health') +def health_check(): + """Health check endpoint for monitoring""" + return jsonify({ + 'status': 'healthy', + 'service': 'Virtual ISP Stack with OpenVPN', + 'version': '1.0.0' + }) + +@app.route('/api') +def api_info(): + """API information endpoint""" + return jsonify({ + 'service': 'Virtual ISP Stack API', + 'version': '1.0.0', + 'endpoints': { + 'openvpn': { + 'status': '/api/openvpn/status', + 'start': '/api/openvpn/start', + 'stop': '/api/openvpn/stop', + 'clients': '/api/openvpn/clients', + 'config': '/api/openvpn/config/', + 'stats': '/api/openvpn/stats', + 'configs': '/api/openvpn/configs' + }, + 'dhcp': { + 'leases': '/api/dhcp/leases' + }, + 'nat': { + 'sessions': '/api/nat/sessions', + 'stats': '/api/nat/stats' 
+ }, + 'firewall': { + 'rules': '/api/firewall/rules', + 'logs': '/api/firewall/logs', + 'stats': '/api/firewall/stats' + } + } + }) + +@app.route('/') +def serve_static(path): + """Serve static files""" + static_folder_path = app.static_folder + if static_folder_path is None: + return jsonify({'error': 'Static folder not configured'}), 404 + + if path != "" and os.path.exists(os.path.join(static_folder_path, path)): + return send_from_directory(static_folder_path, path) + else: + index_path = os.path.join(static_folder_path, 'index.html') + if os.path.exists(index_path): + return send_from_directory(static_folder_path, 'index.html') + else: + return jsonify({ + 'message': 'Virtual ISP Stack with OpenVPN Integration', + 'status': 'running', + 'api_docs': '/api' + }) + +@app.errorhandler(404) +def not_found(error): + """Handle 404 errors""" + return jsonify({'error': 'Endpoint not found', 'api_docs': '/api'}), 404 + +@app.errorhandler(500) +def internal_error(error): + """Handle 500 errors""" + return jsonify({'error': 'Internal server error'}), 500 + +if __name__ == '__main__': + # Get port from environment variable (HuggingFace Spaces uses PORT) + port = int(os.environ.get('PORT', 7860)) + + logger.info(f"Starting Virtual ISP Stack with OpenVPN on port {port}") + + # Run the application + app.run( + host='0.0.0.0', + port=port, + debug=False, + threaded=True + ) + diff --git a/app_status.json b/app_status.json new file mode 100644 index 0000000000000000000000000000000000000000..65af808a0d737c312da870f3a5bca132603642ec --- /dev/null +++ b/app_status.json @@ -0,0 +1,755 @@ + + + + + + Virtual ISP Stack - Network Management Dashboard + + + + +
+ +
+
+ +
+
+ + System Status +
+
+ +
+
+
+
+ + + + + +
+ +
+
+

System Dashboard

+

Overview of Virtual ISP Stack components and performance

+
+ + +
+
+
+ +
+
+

0

+

DHCP Leases

+
+
+ +
+
+ +
+
+

0

+

NAT Sessions

+
+
+ +
+
+ +
+
+

0

+

Firewall Rules

+
+
+ +
+
+ +
+
+

0

+

Bridge Clients

+
+
+
+ + +
+

Component Status

+
+ +
+
+ + +
+
+

Network Traffic

+ +
+
+

Connection Distribution

+ +
+
+
+ + +
+
+

DHCP Management

+

Manage DHCP leases and configuration

+
+ +
+
+

Active Leases

+ +
+
+ + + + + + + + + + + + + + +
MAC AddressIP AddressLease TimeRemainingStateActions
+
+
+
+ + +
+
+

NAT Management

+

Network Address Translation sessions and statistics

+
+ +
+
+
+ Active Sessions: + 0 +
+
+ Port Utilization: + 0% +
+
+ Bytes Translated: + 0 +
+
+
+ +
+
+

NAT Sessions

+ +
+
+ + + + + + + + + + + + + + + +
Virtual IP:PortReal IP:PortHost IP:PortProtocolDurationBytes In/OutActions
+
+
+
+ + +
+
+

Firewall Management

+

Configure firewall rules and monitor traffic

+
+ +
+ + +
+ +
+
+

Firewall Rules

+
+
+ + + + + + + + + + + + + + + + + + +
PriorityRule IDActionDirectionSourceDestinationProtocolHitsStatusActions
+
+
+
+ + +
+
+

Router Management

+

Routing table and network interfaces

+
+ +
+
+ + + +
+ +
+
+
+ + + + + + + + + + + + + + + +
DestinationGatewayInterfaceMetricTypeUse CountLast Used
+
+
+ +
+
+ + + + + + + + + + + + + + +
NameIP AddressNetworkMTUStatusActions
+
+
+ +
+
+ + + + + + + + + + + +
IP AddressMAC AddressActions
+
+
+
+
+
+ + +
+
+

Packet Bridge

+

Connected clients and bridge statistics

+
+ +
+
+

WebSocket Server

+

Port: 8765

+

Status: Active

+
+
+

TCP Server

+

Port: 8766

+

Status: Active

+
+
+ +
+
+

Connected Clients

+ +
+
+ + + + + + + + + + + + + + + +
Client IDTypeRemote AddressConnected TimePackets In/OutBytes In/OutActions
+
+
+
+ + +
+
+

Session Tracking

+

Unified view of all network sessions

+
+ +
+ +
+ +
+
+

Active Sessions

+
+ + +
+
+
+ + + + + + + + + + + + + + + + + +
Session IDTypeStateVirtual IP:PortReal IP:PortProtocolDurationIdle TimeMetrics
+
+
+
+ + +
+
+

System Logs

+

Monitor system events and troubleshoot issues

+
+ +
+
+ + + +
+
+ + +
+
+ +
+ +
+
+ + +
+
+

VPN Management

+

OpenVPN server management and client connections

+
+ + +
+
+
+

OpenVPN Server

+
+ + + +
+
+
+
+ Status: + Unknown +
+
+ Server IP: + - +
+
+ Port: + - +
+
+ Connected Clients: + 0 +
+
+ Uptime: + - +
+
+
+
+ + +
+
+ Total Bytes Received: + 0 +
+
+ Total Bytes Sent: + 0 +
+
+ + +
+
+

Connected VPN Clients

+
+ + +
+
+
+ + + + + + + + + + + + + + + + +
Client IDCommon NameVPN IP AddressConnected SinceBytes ReceivedBytes SentStatusActions
+
+
+
+ + +
+
+

System Configuration

+

Configure system parameters and settings

+
+ +
+
+

DHCP Configuration

+
+ +
+
+ +
+

NAT Configuration

+
+ +
+
+ +
+

Firewall Configuration

+
+ +
+
+
+ +
+ + +
+
+
+
+ + + + + + + + +
+
+ +

Loading...

+
+
+ + +
+ + + + + + + diff --git a/config.json b/config.json new file mode 100644 index 0000000000000000000000000000000000000000..dedd0c72ce829136370fac246ec8b7122548ecbf --- /dev/null +++ b/config.json @@ -0,0 +1,98 @@ +{ + "dhcp": { + "network": "10.0.0.0/24", + "range_start": "10.0.0.10", + "range_end": "10.0.0.100", + "lease_time": 3600, + "gateway": "10.0.0.1", + "dns_servers": [ + "8.8.8.8", + "8.8.4.4" + ] + }, + "nat": { + "port_range_start": 10000, + "port_range_end": 65535, + "session_timeout": 300, + "host_ip": "0.0.0.0" + }, + "firewall": { + "default_policy": "ACCEPT", + "log_blocked": true, + "log_accepted": false, + "max_log_entries": 10000, + "rules": [ + { + "rule_id": "allow_dhcp", + "priority": 1, + "action": "ACCEPT", + "direction": "BOTH", + "dest_port": "67,68", + "protocol": "UDP", + "description": "Allow DHCP traffic", + "enabled": true + }, + { + "rule_id": "allow_dns", + "priority": 2, + "action": "ACCEPT", + "direction": "BOTH", + "dest_port": "53", + "protocol": "UDP", + "description": "Allow DNS traffic", + "enabled": true + } + ] + }, + "tcp": { + "initial_window": 65535, + "max_retries": 3, + "timeout": 300, + "time_wait_timeout": 120, + "mss": 1460 + }, + "router": { + "router_id": "virtual-isp-router", + "default_gateway": "10.0.0.1", + "interfaces": [ + { + "name": "virtual0", + "ip_address": "10.0.0.1", + "netmask": "255.255.255.0", + "enabled": true, + "mtu": 1500 + } + ], + "static_routes": [] + }, + "socket_translator": { + "connect_timeout": 10, + "read_timeout": 30, + "max_connections": 1000, + "buffer_size": 8192 + }, + "packet_bridge": { + "websocket_host": "0.0.0.0", + "websocket_port": 8765, + "tcp_host": "0.0.0.0", + "tcp_port": 8766, + "max_clients": 100, + "client_timeout": 300 + }, + "session_tracker": { + "max_sessions": 10000, + "session_timeout": 3600, + "cleanup_interval": 300, + "metrics_retention": 86400 + }, + "logger": { + "log_level": "INFO", + "log_to_file": true, + "log_file_path": "/tmp/virtual_isp.log", + 
"log_file_max_size": 10485760, + "log_file_backup_count": 5, + "log_to_console": true, + "structured_logging": true, + "max_memory_logs": 10000 + } +} \ No newline at end of file diff --git a/core/__init__.py b/core/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..1fd4a6e555e40eeadd71c407e00abe44eadb3ccf --- /dev/null +++ b/core/__init__.py @@ -0,0 +1,2 @@ +# Core networking modules for the virtual ISP stack + diff --git a/core/__pycache__/__init__.cpython-311.pyc b/core/__pycache__/__init__.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..df9de76dc73b753c258f7c5ee2da8bf5706b342a Binary files /dev/null and b/core/__pycache__/__init__.cpython-311.pyc differ diff --git a/core/__pycache__/dhcp_server.cpython-311.pyc b/core/__pycache__/dhcp_server.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..5eb6e1fb2353f127d3075070ce9092ee3ccab61d Binary files /dev/null and b/core/__pycache__/dhcp_server.cpython-311.pyc differ diff --git a/core/__pycache__/firewall.cpython-311.pyc b/core/__pycache__/firewall.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..6aef2a21de8df7f9690e507d73b746d9f0b95a96 Binary files /dev/null and b/core/__pycache__/firewall.cpython-311.pyc differ diff --git a/core/__pycache__/ip_parser.cpython-311.pyc b/core/__pycache__/ip_parser.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..a9857f1e080d95f05d8de0558e9d9fd08f382e53 Binary files /dev/null and b/core/__pycache__/ip_parser.cpython-311.pyc differ diff --git a/core/__pycache__/logger.cpython-311.pyc b/core/__pycache__/logger.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..49169d48af5bcad781397369238711b3e7bd1c83 Binary files /dev/null and b/core/__pycache__/logger.cpython-311.pyc differ diff --git a/core/__pycache__/nat_engine.cpython-311.pyc b/core/__pycache__/nat_engine.cpython-311.pyc new file 
mode 100644 index 0000000000000000000000000000000000000000..ceb2c266fa8ce4a118c82dbeee5af6791f8f05a6 Binary files /dev/null and b/core/__pycache__/nat_engine.cpython-311.pyc differ diff --git a/core/__pycache__/openvpn_manager.cpython-311.pyc b/core/__pycache__/openvpn_manager.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..d18c786495a566b930129820b14eb93b83496d24 Binary files /dev/null and b/core/__pycache__/openvpn_manager.cpython-311.pyc differ diff --git a/core/__pycache__/packet_bridge.cpython-311.pyc b/core/__pycache__/packet_bridge.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..878caf9a826b152fbd365a5be7d23162064fef12 Binary files /dev/null and b/core/__pycache__/packet_bridge.cpython-311.pyc differ diff --git a/core/__pycache__/session_tracker.cpython-311.pyc b/core/__pycache__/session_tracker.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..63e075fdfa592932d9a40c87a8f84b2285d9c077 Binary files /dev/null and b/core/__pycache__/session_tracker.cpython-311.pyc differ diff --git a/core/__pycache__/socket_translator.cpython-311.pyc b/core/__pycache__/socket_translator.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..e22baa45f4b680bf4109102073efc3865bbdc764 Binary files /dev/null and b/core/__pycache__/socket_translator.cpython-311.pyc differ diff --git a/core/__pycache__/tcp_engine.cpython-311.pyc b/core/__pycache__/tcp_engine.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..600e13713dbd58f43f7a96f3664238a813267844 Binary files /dev/null and b/core/__pycache__/tcp_engine.cpython-311.pyc differ diff --git a/core/__pycache__/traffic_router.cpython-311.pyc b/core/__pycache__/traffic_router.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..763c29868e69f7b37a5190da51e3b198339d57a5 Binary files /dev/null and 
b/core/__pycache__/traffic_router.cpython-311.pyc differ diff --git a/core/__pycache__/virtual_router.cpython-311.pyc b/core/__pycache__/virtual_router.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..68608b5c1482789d938ba1cc0e6386426b871d83 Binary files /dev/null and b/core/__pycache__/virtual_router.cpython-311.pyc differ diff --git a/core/dhcp_server.py b/core/dhcp_server.py new file mode 100644 index 0000000000000000000000000000000000000000..fb666ce976d053aaa866914ee5a9cdfae46a7bdc --- /dev/null +++ b/core/dhcp_server.py @@ -0,0 +1,391 @@ +""" +DHCP Server Module + +Implements a user-space DHCP server that handles: +- DHCP DISCOVER → OFFER → REQUEST → ACK sequence +- IP lease management +- Lease renewals and expiration +""" + +import struct +import time +import socket +import threading +from typing import Dict, Optional, Tuple +from dataclasses import dataclass +from enum import Enum + + +class DHCPMessageType(Enum): + DISCOVER = 1 + OFFER = 2 + REQUEST = 3 + DECLINE = 4 + ACK = 5 + NAK = 6 + RELEASE = 7 + INFORM = 8 + + +@dataclass +class DHCPLease: + """Represents a DHCP lease""" + mac_address: str + ip_address: str + lease_time: int + lease_start: float + state: str = 'BOUND' + + @property + def is_expired(self) -> bool: + return time.time() > (self.lease_start + self.lease_time) + + @property + def remaining_time(self) -> int: + remaining = int((self.lease_start + self.lease_time) - time.time()) + return max(0, remaining) + + +class DHCPPacket: + """DHCP packet parser and builder""" + + def __init__(self): + self.op = 0 # Message op code / message type + self.htype = 1 # Hardware address type (Ethernet = 1) + self.hlen = 6 # Hardware address length + self.hops = 0 # Hops + self.xid = 0 # Transaction ID + self.secs = 0 # Seconds elapsed + self.flags = 0 # Flags + self.ciaddr = '0.0.0.0' # Client IP address + self.yiaddr = '0.0.0.0' # Your IP address + self.siaddr = '0.0.0.0' # Server IP address + self.giaddr = '0.0.0.0' # 
Gateway IP address + self.chaddr = b'\x00' * 16 # Client hardware address + self.sname = b'\x00' * 64 # Server name + self.file = b'\x00' * 128 # Boot file name + self.options = {} # DHCP options + + @classmethod + def parse(cls, data: bytes) -> 'DHCPPacket': + """Parse DHCP packet from raw bytes""" + packet = cls() + + # Parse fixed fields (first 236 bytes) + if len(data) < 236: + raise ValueError("DHCP packet too short") + + fields = struct.unpack('!BBBBIHH4s4s4s4s16s64s128s', data[:236]) + packet.op = fields[0] + packet.htype = fields[1] + packet.hlen = fields[2] + packet.hops = fields[3] + packet.xid = fields[4] + packet.secs = fields[5] + packet.flags = fields[6] + packet.ciaddr = socket.inet_ntoa(fields[7]) + packet.yiaddr = socket.inet_ntoa(fields[8]) + packet.siaddr = socket.inet_ntoa(fields[9]) + packet.giaddr = socket.inet_ntoa(fields[10]) + packet.chaddr = fields[11] + packet.sname = fields[12] + packet.file = fields[13] + + # Parse options (after magic cookie) + options_data = data[236:] + if len(options_data) >= 4: + magic = struct.unpack('!I', options_data[:4])[0] + if magic == 0x63825363: # DHCP magic cookie + packet.options = packet._parse_options(options_data[4:]) + + return packet + + def _parse_options(self, data: bytes) -> Dict[int, bytes]: + """Parse DHCP options""" + options = {} + i = 0 + + while i < len(data): + if data[i] == 255: # End option + break + elif data[i] == 0: # Pad option + i += 1 + continue + + option_type = data[i] + if i + 1 >= len(data): + break + + option_length = data[i + 1] + if i + 2 + option_length > len(data): + break + + option_data = data[i + 2:i + 2 + option_length] + options[option_type] = option_data + i += 2 + option_length + + return options + + def build(self) -> bytes: + """Build DHCP packet as bytes""" + # Build fixed fields + packet_data = struct.pack( + '!BBBBIHH4s4s4s4s16s64s128s', + self.op, self.htype, self.hlen, self.hops, + self.xid, self.secs, self.flags, + socket.inet_aton(self.ciaddr), + 
socket.inet_aton(self.yiaddr), + socket.inet_aton(self.siaddr), + socket.inet_aton(self.giaddr), + self.chaddr, self.sname, self.file + ) + + # Add magic cookie + packet_data += struct.pack('!I', 0x63825363) + + # Add options + for option_type, option_data in self.options.items(): + packet_data += struct.pack('!BB', option_type, len(option_data)) + packet_data += option_data + + # Add end option + packet_data += b'\xff' + + # Pad to minimum size + while len(packet_data) < 300: + packet_data += b'\x00' + + return packet_data + + def get_mac_address(self) -> str: + """Get client MAC address as string""" + return ':'.join(f'{b:02x}' for b in self.chaddr[:6]) + + def get_message_type(self) -> Optional[DHCPMessageType]: + """Get DHCP message type from options""" + if 53 in self.options and len(self.options[53]) == 1: + msg_type = self.options[53][0] + try: + return DHCPMessageType(msg_type) + except ValueError: + return None + return None + + +class DHCPServer: + """User-space DHCP server implementation""" + + def __init__(self, config: Dict): + self.config = config + self.leases: Dict[str, DHCPLease] = {} # MAC -> Lease + self.ip_pool = self._build_ip_pool() + self.running = False + self.server_thread = None + self.lock = threading.Lock() + + def _build_ip_pool(self) -> set: + """Build available IP address pool""" + network = self.config['network'] + start_ip = self.config['range_start'] + end_ip = self.config['range_end'] + + # Convert IP addresses to integers for range calculation + start_int = struct.unpack('!I', socket.inet_aton(start_ip))[0] + end_int = struct.unpack('!I', socket.inet_aton(end_ip))[0] + + pool = set() + for ip_int in range(start_int, end_int + 1): + ip_str = socket.inet_ntoa(struct.pack('!I', ip_int)) + pool.add(ip_str) + + return pool + + def _get_available_ip(self) -> Optional[str]: + """Get next available IP address""" + with self.lock: + # Remove expired leases + self._cleanup_expired_leases() + + # Find available IP + used_ips = 
{lease.ip_address for lease in self.leases.values()} + available_ips = self.ip_pool - used_ips + + if available_ips: + return min(available_ips) # Return lowest available IP + return None + + def _cleanup_expired_leases(self): + """Remove expired leases""" + expired_macs = [ + mac for mac, lease in self.leases.items() + if lease.is_expired + ] + for mac in expired_macs: + del self.leases[mac] + + def _create_dhcp_offer(self, discover_packet: DHCPPacket) -> DHCPPacket: + """Create DHCP OFFER response""" + mac_address = discover_packet.get_mac_address() + + # Check for existing lease + if mac_address in self.leases and not self.leases[mac_address].is_expired: + offered_ip = self.leases[mac_address].ip_address + else: + offered_ip = self._get_available_ip() + if not offered_ip: + return None # No available IPs + + # Create OFFER packet + offer = DHCPPacket() + offer.op = 2 # BOOTREPLY + offer.htype = discover_packet.htype + offer.hlen = discover_packet.hlen + offer.xid = discover_packet.xid + offer.yiaddr = offered_ip + offer.siaddr = self.config['gateway'] + offer.chaddr = discover_packet.chaddr + + # Add DHCP options + offer.options[53] = bytes([DHCPMessageType.OFFER.value]) # Message type + offer.options[1] = socket.inet_aton('255.255.255.0') # Subnet mask + offer.options[3] = socket.inet_aton(self.config['gateway']) # Router + offer.options[6] = b''.join(socket.inet_aton(dns) for dns in self.config['dns_servers']) # DNS + offer.options[51] = struct.pack('!I', self.config['lease_time']) # Lease time + offer.options[54] = socket.inet_aton(self.config['gateway']) # DHCP server identifier + + return offer + + def _create_dhcp_ack(self, request_packet: DHCPPacket) -> DHCPPacket: + """Create DHCP ACK response""" + mac_address = request_packet.get_mac_address() + requested_ip = request_packet.ciaddr + + # If no requested IP in ciaddr, check option 50 + if requested_ip == '0.0.0.0' and 50 in request_packet.options: + requested_ip = 
socket.inet_ntoa(request_packet.options[50]) + + # Validate request + if not self._validate_request(mac_address, requested_ip): + return self._create_dhcp_nak(request_packet) + + # Create or update lease + lease = DHCPLease( + mac_address=mac_address, + ip_address=requested_ip, + lease_time=self.config['lease_time'], + lease_start=time.time() + ) + + with self.lock: + self.leases[mac_address] = lease + + # Create ACK packet + ack = DHCPPacket() + ack.op = 2 # BOOTREPLY + ack.htype = request_packet.htype + ack.hlen = request_packet.hlen + ack.xid = request_packet.xid + ack.yiaddr = requested_ip + ack.siaddr = self.config['gateway'] + ack.chaddr = request_packet.chaddr + + # Add DHCP options + ack.options[53] = bytes([DHCPMessageType.ACK.value]) # Message type + ack.options[1] = socket.inet_aton('255.255.255.0') # Subnet mask + ack.options[3] = socket.inet_aton(self.config['gateway']) # Router + ack.options[6] = b''.join(socket.inet_aton(dns) for dns in self.config['dns_servers']) # DNS + ack.options[51] = struct.pack('!I', self.config['lease_time']) # Lease time + ack.options[54] = socket.inet_aton(self.config['gateway']) # DHCP server identifier + + return ack + + def _create_dhcp_nak(self, request_packet: DHCPPacket) -> DHCPPacket: + """Create DHCP NAK response""" + nak = DHCPPacket() + nak.op = 2 # BOOTREPLY + nak.htype = request_packet.htype + nak.hlen = request_packet.hlen + nak.xid = request_packet.xid + nak.chaddr = request_packet.chaddr + + # Add DHCP options + nak.options[53] = bytes([DHCPMessageType.NAK.value]) # Message type + nak.options[54] = socket.inet_aton(self.config['gateway']) # DHCP server identifier + + return nak + + def _validate_request(self, mac_address: str, requested_ip: str) -> bool: + """Validate DHCP request""" + # Check if IP is in our pool + if requested_ip not in self.ip_pool: + return False + + # Check if IP is available or already assigned to this MAC + with self.lock: + for mac, lease in self.leases.items(): + if lease.ip_address 
== requested_ip: + if mac != mac_address and not lease.is_expired: + return False # IP already assigned to different MAC + + return True + + def process_packet(self, packet_data: bytes, client_addr: Tuple[str, int]) -> Optional[bytes]: + """Process incoming DHCP packet and return response""" + try: + packet = DHCPPacket.parse(packet_data) + message_type = packet.get_message_type() + + if message_type == DHCPMessageType.DISCOVER: + response = self._create_dhcp_offer(packet) + elif message_type == DHCPMessageType.REQUEST: + response = self._create_dhcp_ack(packet) + elif message_type == DHCPMessageType.RELEASE: + # Handle lease release + mac_address = packet.get_mac_address() + with self.lock: + if mac_address in self.leases: + del self.leases[mac_address] + return None + else: + return None + + if response: + return response.build() + + except Exception as e: + print(f"Error processing DHCP packet: {e}") + return None + + def get_leases(self) -> Dict[str, Dict]: + """Get current lease table""" + with self.lock: + self._cleanup_expired_leases() + return { + mac: { + 'ip_address': lease.ip_address, + 'lease_time': lease.lease_time, + 'lease_start': lease.lease_start, + 'remaining_time': lease.remaining_time, + 'state': lease.state + } + for mac, lease in self.leases.items() + } + + def release_lease(self, mac_address: str) -> bool: + """Manually release a lease""" + with self.lock: + if mac_address in self.leases: + del self.leases[mac_address] + return True + return False + + def start(self): + """Start DHCP server (placeholder for integration with packet bridge)""" + self.running = True + print(f"DHCP server started - Pool: {self.config['range_start']} - {self.config['range_end']}") + + def stop(self): + """Stop DHCP server""" + self.running = False + print("DHCP server stopped") + diff --git a/core/firewall.py b/core/firewall.py new file mode 100644 index 0000000000000000000000000000000000000000..23471d95f1e82904ed0231a9ada902dd5f2cfa06 --- /dev/null +++ 
"""
Firewall Module

Implements packet filtering and access control:
- Rule-based packet filtering (allow/block by IP, port, protocol)
- Ordered rule processing
- Logging and statistics
- Dynamic rule management via API
"""

import time
import threading
import ipaddress
import re
from typing import Dict, List, Optional, Tuple, Any
from dataclasses import dataclass
from enum import Enum

# IPProtocol is used to classify ICMP packets that have no parsed
# transport header (see FirewallEngine._packet_tuple).
from .ip_parser import ParsedPacket, TCPHeader, UDPHeader, IPProtocol


class FirewallAction(Enum):
    """Verdict applied to a packet."""
    ACCEPT = "ACCEPT"
    DROP = "DROP"
    REJECT = "REJECT"


class FirewallDirection(Enum):
    """Traffic direction a rule applies to."""
    INBOUND = "INBOUND"
    OUTBOUND = "OUTBOUND"
    BOTH = "BOTH"


@dataclass
class FirewallRule:
    """A single firewall rule: match criteria plus an action.

    All match fields are optional; a ``None`` criterion matches anything.
    Rules are evaluated in ascending ``priority`` order (lower = earlier).
    """
    rule_id: str
    priority: int  # Lower number = higher priority
    action: FirewallAction
    direction: FirewallDirection

    # Match criteria
    source_ip: Optional[str] = None   # exact IP or CIDR ("10.0.0.0/8")
    dest_ip: Optional[str] = None     # exact IP or CIDR
    source_port: Optional[str] = None # "80", range "80-90", or list "80,443"
    dest_port: Optional[str] = None   # same syntax as source_port
    protocol: Optional[str] = None    # "TCP", "UDP", "ICMP", or None for any

    # Metadata
    description: str = ""
    enabled: bool = True
    created_time: float = 0   # 0 acts as a sentinel: filled in __post_init__
    hit_count: int = 0
    last_hit: Optional[float] = None

    def __post_init__(self):
        # Stamp creation time unless the caller supplied one explicitly.
        if self.created_time == 0:
            self.created_time = time.time()

    def record_hit(self):
        """Record that this rule matched a packet.

        NOTE: callers must hold the engine lock; the counters themselves
        are not synchronized.
        """
        self.hit_count += 1
        self.last_hit = time.time()

    def to_dict(self) -> Dict:
        """Convert rule to a JSON-serializable dictionary."""
        return {
            'rule_id': self.rule_id,
            'priority': self.priority,
            'action': self.action.value,
            'direction': self.direction.value,
            'source_ip': self.source_ip,
            'dest_ip': self.dest_ip,
            'source_port': self.source_port,
            'dest_port': self.dest_port,
            'protocol': self.protocol,
            'description': self.description,
            'enabled': self.enabled,
            'created_time': self.created_time,
            'hit_count': self.hit_count,
            'last_hit': self.last_hit
        }


@dataclass
class FirewallLogEntry:
    """One record of a packet verdict (accept/drop/reject)."""
    timestamp: float
    action: str
    rule_id: Optional[str]   # None when the default policy decided
    source_ip: str
    dest_ip: str
    source_port: int
    dest_port: int
    protocol: str
    packet_size: int
    reason: str = ""

    def to_dict(self) -> Dict:
        """Convert log entry to a JSON-serializable dictionary."""
        return {
            'timestamp': self.timestamp,
            'action': self.action,
            'rule_id': self.rule_id,
            'source_ip': self.source_ip,
            'dest_ip': self.dest_ip,
            'source_port': self.source_port,
            'dest_port': self.dest_port,
            'protocol': self.protocol,
            'packet_size': self.packet_size,
            'reason': self.reason
        }


class FirewallEngine:
    """Ordered-rule packet filter with logging and statistics.

    Thread-safety: ``self.lock`` guards ``rules``, ``logs`` and ``stats``.
    Rule evaluation itself runs on a snapshot taken under the lock.
    """

    # Maps an action to the statistics counter it increments.
    _ACTION_STAT = {
        FirewallAction.ACCEPT: 'packets_accepted',
        FirewallAction.DROP: 'packets_dropped',
        FirewallAction.REJECT: 'packets_rejected',
    }

    def __init__(self, config: Dict):
        self.config = config
        self.rules: Dict[str, FirewallRule] = {}
        self.logs: List[FirewallLogEntry] = []
        self.lock = threading.Lock()

        # Configuration
        self.default_policy = FirewallAction(config.get('default_policy', 'ACCEPT'))
        self.log_blocked = config.get('log_blocked', True)
        self.log_accepted = config.get('log_accepted', False)
        self.max_log_entries = config.get('max_log_entries', 10000)

        # Statistics counters; mutated only while holding self.lock.
        self.stats = {
            'packets_processed': 0,
            'packets_accepted': 0,
            'packets_dropped': 0,
            'packets_rejected': 0,
            'rules_hit': 0,
            'default_policy_hits': 0
        }

        # Load initial rules from config
        for rule_config in config.get('rules', []):
            self._add_rule_from_config(rule_config)

    def _add_rule_from_config(self, rule_config: Dict):
        """Create a FirewallRule from a config dict and register it."""
        rule = FirewallRule(
            rule_id=rule_config['rule_id'],
            priority=rule_config.get('priority', 100),
            action=FirewallAction(rule_config['action']),
            direction=FirewallDirection(rule_config.get('direction', 'BOTH')),
            source_ip=rule_config.get('source_ip'),
            dest_ip=rule_config.get('dest_ip'),
            source_port=rule_config.get('source_port'),
            dest_port=rule_config.get('dest_port'),
            protocol=rule_config.get('protocol'),
            description=rule_config.get('description', ''),
            enabled=rule_config.get('enabled', True)
        )

        with self.lock:
            self.rules[rule.rule_id] = rule

    def _match_ip(self, ip: str, pattern: str) -> bool:
        """Match an IP address against an exact IP or CIDR pattern."""
        try:
            if '/' in pattern:
                # CIDR notation
                network = ipaddress.ip_network(pattern, strict=False)
                return ipaddress.ip_address(ip) in network
            # Exact IP match
            return ip == pattern
        except ValueError:
            # Covers AddressValueError too (it subclasses ValueError).
            return False

    def _match_port(self, port: int, pattern: str) -> bool:
        """Match a port against "80", a range "80-90", or a list "80,443"."""
        try:
            if ',' in pattern:
                ports = [int(p.strip()) for p in pattern.split(',')]
                return port in ports
            if '-' in pattern:
                start, end = map(int, pattern.split('-', 1))
                return start <= port <= end
            return port == int(pattern)
        except (ValueError, TypeError):
            return False

    def _match_protocol(self, protocol: str, pattern: str) -> bool:
        """Match protocol name against pattern; None pattern matches any."""
        if pattern is None:
            return True
        return protocol.upper() == pattern.upper()

    @staticmethod
    def _packet_tuple(packet: ParsedPacket) -> Tuple[str, int, int]:
        """Return (protocol_name, source_port, dest_port) for a packet.

        Packets without a parsed transport header are classified by the IP
        protocol number so that protocol='ICMP' rules can match (previously
        these packets were always reported as 'OTHER').
        """
        transport = packet.transport_header
        if isinstance(transport, TCPHeader):
            return 'TCP', transport.source_port, transport.dest_port
        if isinstance(transport, UDPHeader):
            return 'UDP', transport.source_port, transport.dest_port
        if packet.ip_header.protocol == IPProtocol.ICMP.value:
            return 'ICMP', 0, 0
        return 'OTHER', 0, 0

    def _evaluate_rule(self, rule: FirewallRule, packet: ParsedPacket,
                       direction: FirewallDirection) -> bool:
        """Return True when an enabled rule matches the packet."""
        if not rule.enabled:
            return False

        # Direction must match unless the rule applies to both.
        if rule.direction != FirewallDirection.BOTH and rule.direction != direction:
            return False

        if rule.source_ip and not self._match_ip(packet.ip_header.source_ip, rule.source_ip):
            return False
        if rule.dest_ip and not self._match_ip(packet.ip_header.dest_ip, rule.dest_ip):
            return False

        protocol, source_port, dest_port = self._packet_tuple(packet)

        if not self._match_protocol(protocol, rule.protocol):
            return False
        if rule.source_port and not self._match_port(source_port, rule.source_port):
            return False
        if rule.dest_port and not self._match_port(dest_port, rule.dest_port):
            return False

        return True

    def _log_packet(self, action: str, packet: ParsedPacket,
                    rule_id: Optional[str] = None, reason: str = ""):
        """Append a verdict to the in-memory log, honoring the log flags."""
        if not (self.log_blocked or self.log_accepted):
            return
        if action == 'ACCEPT' and not self.log_accepted:
            return
        if action in ['DROP', 'REJECT'] and not self.log_blocked:
            return

        protocol, source_port, dest_port = self._packet_tuple(packet)

        log_entry = FirewallLogEntry(
            timestamp=time.time(),
            action=action,
            rule_id=rule_id,
            source_ip=packet.ip_header.source_ip,
            dest_ip=packet.ip_header.dest_ip,
            source_port=source_port,
            dest_port=dest_port,
            protocol=protocol,
            packet_size=len(packet.raw_packet),
            reason=reason
        )

        with self.lock:
            self.logs.append(log_entry)
            # Trim oldest entries when the cap is exceeded.
            if len(self.logs) > self.max_log_entries:
                self.logs = self.logs[-self.max_log_entries:]

    def process_packet(self, packet: ParsedPacket,
                       direction: FirewallDirection) -> FirewallAction:
        """Run a packet through the rule chain and return the verdict.

        Rules are evaluated in priority order; the first match wins.
        Statistics and hit counters are updated under the engine lock
        (previously mutated unlocked, racing with rule-management calls).
        """
        with self.lock:
            self.stats['packets_processed'] += 1
            # Snapshot so evaluation runs without holding the lock.
            sorted_rules = sorted(self.rules.values(), key=lambda r: r.priority)

        for rule in sorted_rules:
            if self._evaluate_rule(rule, packet, direction):
                with self.lock:
                    rule.record_hit()
                    self.stats['rules_hit'] += 1
                    self.stats[self._ACTION_STAT[rule.action]] += 1
                # Log outside the lock: _log_packet acquires it itself.
                self._log_packet(rule.action.value, packet, rule.rule_id,
                                 f"Matched rule: {rule.description}")
                return rule.action

        # No rule matched: apply the default policy.
        with self.lock:
            self.stats['default_policy_hits'] += 1
            self.stats[self._ACTION_STAT[self.default_policy]] += 1
        self._log_packet(self.default_policy.value, packet, None, "Default policy")
        return self.default_policy

    def add_rule(self, rule: FirewallRule) -> bool:
        """Add a rule; returns False when the rule_id already exists."""
        with self.lock:
            if rule.rule_id in self.rules:
                return False
            self.rules[rule.rule_id] = rule
            return True

    def remove_rule(self, rule_id: str) -> bool:
        """Remove a rule; returns False when it does not exist."""
        with self.lock:
            if rule_id in self.rules:
                del self.rules[rule_id]
                return True
            return False

    def update_rule(self, rule_id: str, **kwargs) -> bool:
        """Update attributes of an existing rule.

        'action'/'direction' string values are converted to their enums.
        Unknown attribute names are silently ignored.
        """
        with self.lock:
            if rule_id not in self.rules:
                return False

            rule = self.rules[rule_id]
            for key, value in kwargs.items():
                if not hasattr(rule, key):
                    continue
                if key == 'action':
                    value = FirewallAction(value)
                elif key == 'direction':
                    value = FirewallDirection(value)
                setattr(rule, key, value)

            return True

    def enable_rule(self, rule_id: str) -> bool:
        """Enable a rule by id."""
        return self.update_rule(rule_id, enabled=True)

    def disable_rule(self, rule_id: str) -> bool:
        """Disable a rule by id."""
        return self.update_rule(rule_id, enabled=False)

    def get_rules(self) -> List[Dict]:
        """Return all rules as dicts, sorted by priority."""
        with self.lock:
            return [rule.to_dict()
                    for rule in sorted(self.rules.values(), key=lambda r: r.priority)]

    def get_rule(self, rule_id: str) -> Optional[Dict]:
        """Return one rule as a dict, or None when absent."""
        with self.lock:
            rule = self.rules.get(rule_id)
            return rule.to_dict() if rule else None

    def get_logs(self, limit: int = 100,
                 filter_action: Optional[str] = None) -> List[Dict]:
        """Return the most recent log entries, optionally filtered by action."""
        with self.lock:
            logs = self.logs.copy()

        if filter_action:
            logs = [log for log in logs if log.action == filter_action.upper()]

        return [log.to_dict() for log in logs[-limit:]]

    def clear_logs(self):
        """Discard all log entries."""
        with self.lock:
            self.logs.clear()

    def get_stats(self) -> Dict:
        """Return a snapshot of counters plus derived rule/log totals."""
        with self.lock:
            stats = self.stats.copy()
            stats['total_rules'] = len(self.rules)
            stats['enabled_rules'] = sum(1 for rule in self.rules.values() if rule.enabled)
            stats['log_entries'] = len(self.logs)
            stats['default_policy'] = self.default_policy.value
            return stats

    def reset_stats(self):
        """Zero all counters and per-rule hit counts (under the lock)."""
        with self.lock:
            self.stats = {
                'packets_processed': 0,
                'packets_accepted': 0,
                'packets_dropped': 0,
                'packets_rejected': 0,
                'rules_hit': 0,
                'default_policy_hits': 0
            }
            for rule in self.rules.values():
                rule.hit_count = 0
                rule.last_hit = None

    def set_default_policy(self, policy: str):
        """Set the default firewall policy (ACCEPT/DROP/REJECT)."""
        with self.lock:
            self.default_policy = FirewallAction(policy.upper())

    def export_rules(self) -> List[Dict]:
        """Export rules for backup/configuration."""
        return self.get_rules()

    def import_rules(self, rules_config: List[Dict], replace: bool = False):
        """Import rules from configuration; optionally replace existing ones."""
        if replace:
            with self.lock:
                self.rules.clear()

        for rule_config in rules_config:
            self._add_rule_from_config(rule_config)


class FirewallRuleBuilder:
    """Fluent helper to construct FirewallRule objects."""

    def __init__(self, rule_id: str):
        self.rule_id = rule_id
        self.priority = 100
        self.action = FirewallAction.ACCEPT
        self.direction = FirewallDirection.BOTH
        self.source_ip = None
        self.dest_ip = None
        self.source_port = None
        self.dest_port = None
        self.protocol = None
        self.description = ""
        self.enabled = True

    def set_priority(self, priority: int) -> 'FirewallRuleBuilder':
        self.priority = priority
        return self

    def set_action(self, action: str) -> 'FirewallRuleBuilder':
        self.action = FirewallAction(action.upper())
        return self

    def set_direction(self, direction: str) -> 'FirewallRuleBuilder':
        self.direction = FirewallDirection(direction.upper())
        return self

    def set_source_ip(self, ip: str) -> 'FirewallRuleBuilder':
        self.source_ip = ip
        return self

    def set_dest_ip(self, ip: str) -> 'FirewallRuleBuilder':
        self.dest_ip = ip
        return self

    def set_source_port(self, port: str) -> 'FirewallRuleBuilder':
        self.source_port = port
        return self

    def set_dest_port(self, port: str) -> 'FirewallRuleBuilder':
        self.dest_port = port
        return self

    def set_protocol(self, protocol: str) -> 'FirewallRuleBuilder':
        self.protocol = protocol.upper()
        return self

    def set_description(self, description: str) -> 'FirewallRuleBuilder':
        self.description = description
        return self

    def set_enabled(self, enabled: bool) -> 'FirewallRuleBuilder':
        self.enabled = enabled
        return self

    def build(self) -> FirewallRule:
        """Build the firewall rule from the accumulated settings."""
        return FirewallRule(
            rule_id=self.rule_id,
            priority=self.priority,
            action=self.action,
            direction=self.direction,
            source_ip=self.source_ip,
            dest_ip=self.dest_ip,
            source_port=self.source_port,
            dest_port=self.dest_port,
            protocol=self.protocol,
            description=self.description,
            enabled=self.enabled
        )

# ===== file: core/ip_parser.py (new file in this diff) =====
"""
IP Parser/Assembler Module

Handles IPv4 packet parsing and construction:
- Parse IPv4, UDP, and TCP headers
- Calculate and verify checksums
- Handle packet fragmentation and reassembly
- Support various IP options
"""

import struct
import socket
from typing import Dict, List, Optional, Tuple
from dataclasses import dataclass
from enum import Enum


class IPProtocol(Enum):
    """IP protocol numbers used by this stack."""
    ICMP = 1
    TCP = 6
    UDP = 17


@dataclass
class IPv4Header:
    """IPv4 header fields (RFC 791 layout)."""
    version: int = 4
    ihl: int = 5              # Internet Header Length, in 32-bit words
    tos: int = 0              # Type of Service
    total_length: int = 0
    identification: int = 0
    flags: int = 0            # 3 bits: Reserved, Don't Fragment, More Fragments
    fragment_offset: int = 0  # 13 bits, in 8-byte units
    ttl: int = 64             # Time to Live
    protocol: int = 0
    header_checksum: int = 0
    source_ip: str = '0.0.0.0'
    dest_ip: str = '0.0.0.0'
    options: bytes = b''

    @property
    def header_length(self) -> int:
        """Header length in bytes (ihl * 4)."""
        return self.ihl * 4

    @property
    def dont_fragment(self) -> bool:
        """True when the Don't Fragment flag (bit 1) is set."""
        return bool(self.flags & 0x2)

    @property
    def more_fragments(self) -> bool:
        """True when the More Fragments flag (bit 0) is set."""
        return bool(self.flags & 0x1)

    @property
    def is_fragment(self) -> bool:
        """True when this packet is part of a fragmented datagram."""
        return self.more_fragments or self.fragment_offset > 0
+ + +@dataclass +class TCPHeader: + """TCP header structure""" + source_port: int = 0 + dest_port: int = 0 + seq_num: int = 0 + ack_num: int = 0 + data_offset: int = 5 # Header length in 32-bit words + reserved: int = 0 + flags: int = 0 # 9 bits: NS, CWR, ECE, URG, ACK, PSH, RST, SYN, FIN + window_size: int = 65535 + checksum: int = 0 + urgent_pointer: int = 0 + options: bytes = b'' + + @property + def header_length(self) -> int: + """Get header length in bytes""" + return self.data_offset * 4 + + # TCP Flag properties + @property + def fin(self) -> bool: + return bool(self.flags & 0x01) + + @property + def syn(self) -> bool: + return bool(self.flags & 0x02) + + @property + def rst(self) -> bool: + return bool(self.flags & 0x04) + + @property + def psh(self) -> bool: + return bool(self.flags & 0x08) + + @property + def ack(self) -> bool: + return bool(self.flags & 0x10) + + @property + def urg(self) -> bool: + return bool(self.flags & 0x20) + + def set_flag(self, flag_name: str, value: bool = True): + """Set TCP flag""" + flag_bits = { + 'fin': 0x01, 'syn': 0x02, 'rst': 0x04, 'psh': 0x08, + 'ack': 0x10, 'urg': 0x20, 'ece': 0x40, 'cwr': 0x80, 'ns': 0x100 + } + + if flag_name.lower() in flag_bits: + bit = flag_bits[flag_name.lower()] + if value: + self.flags |= bit + else: + self.flags &= ~bit + + +@dataclass +class UDPHeader: + """UDP header structure""" + source_port: int = 0 + dest_port: int = 0 + length: int = 8 # Header + data length + checksum: int = 0 + + @property + def header_length(self) -> int: + """Get header length in bytes (always 8 for UDP)""" + return 8 + + +@dataclass +class ParsedPacket: + """Parsed packet structure""" + ip_header: IPv4Header + transport_header: Optional[object] = None # TCPHeader or UDPHeader + payload: bytes = b'' + raw_packet: bytes = b'' + + +class IPParser: + """IPv4 packet parser and assembler""" + + @staticmethod + def calculate_checksum(data: bytes) -> int: + """Calculate Internet checksum""" + # Pad data to even length + if 
len(data) % 2: + data += b'\x00' + + checksum = 0 + for i in range(0, len(data), 2): + word = (data[i] << 8) + data[i + 1] + checksum += word + + # Add carry bits + while checksum >> 16: + checksum = (checksum & 0xFFFF) + (checksum >> 16) + + # One's complement + return (~checksum) & 0xFFFF + + @staticmethod + def verify_checksum(data: bytes, checksum: int) -> bool: + """Verify Internet checksum""" + calculated = IPParser.calculate_checksum(data) + return calculated == checksum or (calculated + checksum) == 0xFFFF + + @classmethod + def parse_ipv4_header(cls, data: bytes) -> Tuple[IPv4Header, int]: + """Parse IPv4 header from raw bytes""" + if len(data) < 20: + raise ValueError("IPv4 header too short") + + # Parse fixed part of header + header_data = struct.unpack('!BBHHHBBH4s4s', data[:20]) + + header = IPv4Header() + version_ihl = header_data[0] + header.version = (version_ihl >> 4) & 0xF + header.ihl = version_ihl & 0xF + header.tos = header_data[1] + header.total_length = header_data[2] + header.identification = header_data[3] + flags_fragment = header_data[4] + header.flags = (flags_fragment >> 13) & 0x7 + header.fragment_offset = flags_fragment & 0x1FFF + header.ttl = header_data[5] + header.protocol = header_data[6] + header.header_checksum = header_data[7] + header.source_ip = socket.inet_ntoa(header_data[8]) + header.dest_ip = socket.inet_ntoa(header_data[9]) + + # Validate version + if header.version != 4: + raise ValueError(f"Unsupported IP version: {header.version}") + + # Parse options if present + options_length = header.header_length - 20 + if options_length > 0: + if len(data) < 20 + options_length: + raise ValueError("IPv4 options truncated") + header.options = data[20:20 + options_length] + + return header, header.header_length + + @classmethod + def parse_tcp_header(cls, data: bytes) -> Tuple[TCPHeader, int]: + """Parse TCP header from raw bytes""" + if len(data) < 20: + raise ValueError("TCP header too short") + + # Parse fixed part of header + 
header_data = struct.unpack('!HHIIBBHHH', data[:20]) + + header = TCPHeader() + header.source_port = header_data[0] + header.dest_port = header_data[1] + header.seq_num = header_data[2] + header.ack_num = header_data[3] + offset_reserved = header_data[4] + header.data_offset = (offset_reserved >> 4) & 0xF + header.reserved = (offset_reserved >> 1) & 0x7 + header.flags = ((offset_reserved & 0x1) << 8) | header_data[5] + header.window_size = header_data[6] + header.checksum = header_data[7] + header.urgent_pointer = header_data[8] + + # Parse options if present + options_length = header.header_length - 20 + if options_length > 0: + if len(data) < 20 + options_length: + raise ValueError("TCP options truncated") + header.options = data[20:20 + options_length] + + return header, header.header_length + + @classmethod + def parse_udp_header(cls, data: bytes) -> Tuple[UDPHeader, int]: + """Parse UDP header from raw bytes""" + if len(data) < 8: + raise ValueError("UDP header too short") + + header_data = struct.unpack('!HHHH', data[:8]) + + header = UDPHeader() + header.source_port = header_data[0] + header.dest_port = header_data[1] + header.length = header_data[2] + header.checksum = header_data[3] + + return header, 8 + + @classmethod + def parse_packet(cls, data: bytes) -> ParsedPacket: + """Parse complete packet""" + packet = ParsedPacket(raw_packet=data) + + # Parse IP header + packet.ip_header, ip_header_len = cls.parse_ipv4_header(data) + + # Extract payload after IP header + ip_payload = data[ip_header_len:packet.ip_header.total_length] + + # Parse transport layer header + if packet.ip_header.protocol == IPProtocol.TCP.value: + packet.transport_header, transport_header_len = cls.parse_tcp_header(ip_payload) + packet.payload = ip_payload[transport_header_len:] + elif packet.ip_header.protocol == IPProtocol.UDP.value: + packet.transport_header, transport_header_len = cls.parse_udp_header(ip_payload) + packet.payload = ip_payload[transport_header_len:] + else: + # 
Unsupported protocol, treat as raw payload + packet.payload = ip_payload + + return packet + + @classmethod + def build_ipv4_header(cls, header: IPv4Header) -> bytes: + """Build IPv4 header as bytes""" + # Calculate header length including options + header.ihl = (20 + len(header.options) + 3) // 4 # Round up to 32-bit boundary + + # Build header without checksum + version_ihl = (header.version << 4) | header.ihl + flags_fragment = (header.flags << 13) | header.fragment_offset + + header_data = struct.pack( + '!BBHHHBBH4s4s', + version_ihl, header.tos, header.total_length, + header.identification, flags_fragment, + header.ttl, header.protocol, 0, # Checksum = 0 for calculation + socket.inet_aton(header.source_ip), + socket.inet_aton(header.dest_ip) + ) + + # Add options and padding + if header.options: + header_data += header.options + # Pad to 32-bit boundary + padding_needed = (header.ihl * 4) - len(header_data) + if padding_needed > 0: + header_data += b'\x00' * padding_needed + + # Calculate and insert checksum + checksum = cls.calculate_checksum(header_data) + header_data = header_data[:10] + struct.pack('!H', checksum) + header_data[12:] + + return header_data + + @classmethod + def build_tcp_header(cls, header: TCPHeader, source_ip: str, dest_ip: str, payload: bytes) -> bytes: + """Build TCP header as bytes with checksum""" + # Calculate header length including options + header.data_offset = (20 + len(header.options) + 3) // 4 # Round up to 32-bit boundary + + # Build header without checksum + offset_reserved_flags = (header.data_offset << 12) | (header.reserved << 9) | header.flags + + header_data = struct.pack( + '!HHIIHHH', + header.source_port, header.dest_port, + header.seq_num, header.ack_num, + offset_reserved_flags, header.window_size, + 0, header.urgent_pointer # Checksum = 0 for calculation + ) + + # Add options and padding + if header.options: + header_data += header.options + # Pad to 32-bit boundary + padding_needed = (header.data_offset * 4) - 
len(header_data) + if padding_needed > 0: + header_data += b'\x00' * padding_needed + + # Calculate TCP checksum with pseudo-header + pseudo_header = struct.pack( + '!4s4sBBH', + socket.inet_aton(source_ip), + socket.inet_aton(dest_ip), + 0, IPProtocol.TCP.value, + len(header_data) + len(payload) + ) + + checksum_data = pseudo_header + header_data + payload + checksum = cls.calculate_checksum(checksum_data) + + # Insert checksum + header_data = header_data[:16] + struct.pack('!H', checksum) + header_data[18:] + + return header_data + + @classmethod + def build_udp_header(cls, header: UDPHeader, source_ip: str, dest_ip: str, payload: bytes) -> bytes: + """Build UDP header as bytes with checksum""" + header.length = 8 + len(payload) + + # Build header without checksum + header_data = struct.pack( + '!HHHH', + header.source_port, header.dest_port, + header.length, 0 # Checksum = 0 for calculation + ) + + # Calculate UDP checksum with pseudo-header (optional for IPv4) + if header.checksum != 0: # If checksum is required + pseudo_header = struct.pack( + '!4s4sBBH', + socket.inet_aton(source_ip), + socket.inet_aton(dest_ip), + 0, IPProtocol.UDP.value, + header.length + ) + + checksum_data = pseudo_header + header_data + payload + checksum = cls.calculate_checksum(checksum_data) + + # Insert checksum + header_data = header_data[:6] + struct.pack('!H', checksum) + header_data[8:] + + return header_data + + @classmethod + def build_packet(cls, ip_header: IPv4Header, transport_header: Optional[object] = None, payload: bytes = b'') -> bytes: + """Build complete packet""" + transport_data = b'' + + # Build transport header + if transport_header: + if isinstance(transport_header, TCPHeader): + transport_data = cls.build_tcp_header( + transport_header, ip_header.source_ip, ip_header.dest_ip, payload + ) + elif isinstance(transport_header, UDPHeader): + transport_data = cls.build_udp_header( + transport_header, ip_header.source_ip, ip_header.dest_ip, payload + ) + + # Update IP 
header total length + ip_header.total_length = ip_header.header_length + len(transport_data) + len(payload) + + # Build IP header + ip_data = cls.build_ipv4_header(ip_header) + + # Combine all parts + return ip_data + transport_data + payload + + +class PacketFragmenter: + """Handle packet fragmentation and reassembly""" + + def __init__(self, mtu: int = 1500): + self.mtu = mtu + self.fragments: Dict[Tuple[str, str, int], List[Tuple[int, bytes]]] = {} # (src, dst, id) -> [(offset, data)] + + def fragment_packet(self, packet: bytes, mtu: int = None) -> List[bytes]: + """Fragment a packet if it exceeds MTU""" + if mtu is None: + mtu = self.mtu + + if len(packet) <= mtu: + return [packet] + + # Parse original packet + parsed = IPParser.parse_packet(packet) + ip_header = parsed.ip_header + + # Don't fragment if DF flag is set + if ip_header.dont_fragment: + raise ValueError("Packet too large and Don't Fragment flag is set") + + fragments = [] + payload_mtu = mtu - ip_header.header_length + payload_mtu = (payload_mtu // 8) * 8 # Must be multiple of 8 bytes + + # Get the payload to fragment (everything after IP header) + payload_start = ip_header.header_length + payload = packet[payload_start:] + + offset = 0 + while offset < len(payload): + # Create fragment + fragment_payload = payload[offset:offset + payload_mtu] + + # Create new IP header for fragment + frag_header = IPv4Header( + version=ip_header.version, + ihl=ip_header.ihl, + tos=ip_header.tos, + identification=ip_header.identification, + ttl=ip_header.ttl, + protocol=ip_header.protocol, + source_ip=ip_header.source_ip, + dest_ip=ip_header.dest_ip, + options=ip_header.options + ) + + # Set fragment offset and flags + frag_header.fragment_offset = (ip_header.fragment_offset * 8 + offset) // 8 + frag_header.flags = ip_header.flags + + # Set More Fragments flag if not last fragment + if offset + len(fragment_payload) < len(payload): + frag_header.flags |= 0x1 # More Fragments + else: + frag_header.flags &= ~0x1 # 
Clear More Fragments + + # Build fragment + fragment = IPParser.build_packet(frag_header, payload=fragment_payload) + fragments.append(fragment) + + offset += len(fragment_payload) + + return fragments + + def reassemble_packet(self, packet: bytes) -> Optional[bytes]: + """Reassemble fragmented packet""" + parsed = IPParser.parse_packet(packet) + ip_header = parsed.ip_header + + # If not a fragment, return as-is + if not ip_header.is_fragment: + return packet + + # Create fragment key + key = (ip_header.source_ip, ip_header.dest_ip, ip_header.identification) + + # Store fragment + if key not in self.fragments: + self.fragments[key] = [] + + payload_start = ip_header.header_length + fragment_data = packet[payload_start:] + self.fragments[key].append((ip_header.fragment_offset * 8, fragment_data)) + + # Check if we have all fragments + fragments = sorted(self.fragments[key]) + + # Verify we have contiguous fragments starting from 0 + expected_offset = 0 + complete_payload = b'' + + for offset, data in fragments: + if offset != expected_offset: + return None # Missing fragment + + complete_payload += data + expected_offset += len(data) + + # Check if last fragment (no More Fragments flag) + last_fragment = None + for frag_packet in [packet]: # We only have current packet, need to track all + frag_parsed = IPParser.parse_packet(frag_packet) + if not frag_parsed.ip_header.more_fragments: + last_fragment = frag_parsed + break + + if last_fragment is None: + return None # Don't have last fragment yet + + # Reassemble complete packet + complete_header = IPv4Header( + version=ip_header.version, + ihl=ip_header.ihl, + tos=ip_header.tos, + identification=ip_header.identification, + flags=ip_header.flags & ~0x1, # Clear More Fragments + fragment_offset=0, + ttl=ip_header.ttl, + protocol=ip_header.protocol, + source_ip=ip_header.source_ip, + dest_ip=ip_header.dest_ip, + options=ip_header.options + ) + + complete_packet = IPParser.build_packet(complete_header, 
payload=complete_payload) + + # Clean up fragments + del self.fragments[key] + + return complete_packet + diff --git a/core/logger.py b/core/logger.py new file mode 100644 index 0000000000000000000000000000000000000000..e66f5de7bb5f2def6d22251e44f4c54da357db12 --- /dev/null +++ b/core/logger.py @@ -0,0 +1,555 @@ +""" +Logger Module + +Centralized logging system for the virtual ISP stack: +- Structured logging with multiple levels +- Log aggregation and filtering +- Real-time log streaming +- Log persistence and rotation +""" + +import logging +import logging.handlers +import time +import threading +import json +import os +from typing import Dict, List, Optional, Any, Callable +from dataclasses import dataclass, asdict +from enum import Enum +from collections import deque +import queue + + +class LogLevel(Enum): + DEBUG = "DEBUG" + INFO = "INFO" + WARNING = "WARNING" + ERROR = "ERROR" + CRITICAL = "CRITICAL" + + +class LogCategory(Enum): + SYSTEM = "SYSTEM" + DHCP = "DHCP" + NAT = "NAT" + FIREWALL = "FIREWALL" + TCP = "TCP" + ROUTER = "ROUTER" + BRIDGE = "BRIDGE" + SOCKET = "SOCKET" + SESSION = "SESSION" + SECURITY = "SECURITY" + PERFORMANCE = "PERFORMANCE" + + +@dataclass +class LogEntry: + """Structured log entry""" + timestamp: float + level: str + category: str + module: str + message: str + session_id: Optional[str] = None + client_id: Optional[str] = None + source_ip: Optional[str] = None + dest_ip: Optional[str] = None + protocol: Optional[str] = None + metadata: Dict[str, Any] = None + + def __post_init__(self): + if self.timestamp == 0: + self.timestamp = time.time() + if self.metadata is None: + self.metadata = {} + + def to_dict(self) -> Dict: + """Convert to dictionary""" + return asdict(self) + + def to_json(self) -> str: + """Convert to JSON string""" + return json.dumps(self.to_dict(), default=str) + + +class LogFilter: + """Log filtering class""" + + def __init__(self): + self.level_filter: Optional[LogLevel] = None + self.category_filter: 
Optional[LogCategory] = None + self.module_filter: Optional[str] = None + self.session_filter: Optional[str] = None + self.client_filter: Optional[str] = None + self.ip_filter: Optional[str] = None + self.text_filter: Optional[str] = None + self.time_range: Optional[tuple] = None + + def matches(self, entry: LogEntry) -> bool: + """Check if log entry matches filter criteria""" + # Level filter + if self.level_filter: + entry_level_value = getattr(logging, entry.level) + filter_level_value = getattr(logging, self.level_filter.value) + if entry_level_value < filter_level_value: + return False + + # Category filter + if self.category_filter and entry.category != self.category_filter.value: + return False + + # Module filter + if self.module_filter and self.module_filter.lower() not in entry.module.lower(): + return False + + # Session filter + if self.session_filter and entry.session_id != self.session_filter: + return False + + # Client filter + if self.client_filter and entry.client_id != self.client_filter: + return False + + # IP filter + if self.ip_filter: + if (entry.source_ip != self.ip_filter and + entry.dest_ip != self.ip_filter): + return False + + # Text filter + if self.text_filter and self.text_filter.lower() not in entry.message.lower(): + return False + + # Time range filter + if self.time_range: + start_time, end_time = self.time_range + if not (start_time <= entry.timestamp <= end_time): + return False + + return True + + +class LogSubscriber: + """Log subscriber for real-time streaming""" + + def __init__(self, subscriber_id: str, callback: Callable[[LogEntry], None], + log_filter: Optional[LogFilter] = None): + self.subscriber_id = subscriber_id + self.callback = callback + self.filter = log_filter or LogFilter() + self.created_time = time.time() + self.message_count = 0 + self.last_message_time = None + self.is_active = True + + def send_log(self, entry: LogEntry) -> bool: + """Send log entry to subscriber if it matches filter""" + if not 
self.is_active: + return False + + if self.filter.matches(entry): + try: + self.callback(entry) + self.message_count += 1 + self.last_message_time = time.time() + return True + except Exception as e: + print(f"Error sending log to subscriber {self.subscriber_id}: {e}") + self.is_active = False + return False + + return False + + +class VirtualISPLogger: + """Centralized logger for Virtual ISP stack""" + + def __init__(self, config: Dict): + self.config = config + self.log_entries: deque = deque(maxlen=config.get('max_memory_logs', 10000)) + self.subscribers: Dict[str, LogSubscriber] = {} + self.lock = threading.Lock() + + # Configuration + self.log_level = LogLevel(config.get('log_level', 'INFO')) + self.log_to_file = config.get('log_to_file', True) + self.log_file_path = config.get('log_file_path', '/tmp/virtual_isp.log') + self.log_file_max_size = config.get('log_file_max_size', 10 * 1024 * 1024) # 10MB + self.log_file_backup_count = config.get('log_file_backup_count', 5) + self.log_to_console = config.get('log_to_console', True) + self.structured_logging = config.get('structured_logging', True) + + # Statistics + self.stats = { + 'total_logs': 0, + 'logs_by_level': {level.value: 0 for level in LogLevel}, + 'logs_by_category': {cat.value: 0 for cat in LogCategory}, + 'active_subscribers': 0, + 'file_logs_written': 0, + 'console_logs_written': 0, + 'dropped_logs': 0 + } + + # Setup logging + self._setup_logging() + + # Background processing + self.running = False + self.log_queue = queue.Queue() + self.processing_thread = None + + def _setup_logging(self): + """Setup Python logging infrastructure""" + # Create logger + self.logger = logging.getLogger('virtual_isp') + self.logger.setLevel(getattr(logging, self.log_level.value)) + + # Remove existing handlers + for handler in self.logger.handlers[:]: + self.logger.removeHandler(handler) + + # Console handler + if self.log_to_console: + console_handler = logging.StreamHandler() + if self.structured_logging: + 
console_formatter = logging.Formatter( + '%(asctime)s - %(name)s - %(levelname)s - %(message)s' + ) + else: + console_formatter = logging.Formatter('%(message)s') + console_handler.setFormatter(console_formatter) + self.logger.addHandler(console_handler) + + # File handler with rotation + if self.log_to_file: + # Ensure log directory exists + log_dir = os.path.dirname(self.log_file_path) + if log_dir and not os.path.exists(log_dir): + os.makedirs(log_dir, exist_ok=True) + + file_handler = logging.handlers.RotatingFileHandler( + self.log_file_path, + maxBytes=self.log_file_max_size, + backupCount=self.log_file_backup_count + ) + + if self.structured_logging: + file_formatter = logging.Formatter( + '%(asctime)s - %(name)s - %(levelname)s - %(message)s' + ) + else: + file_formatter = logging.Formatter('%(message)s') + + file_handler.setFormatter(file_formatter) + self.logger.addHandler(file_handler) + + def _process_log_queue(self): + """Background thread to process log queue""" + while self.running: + try: + # Get log entry from queue (with timeout) + try: + entry = self.log_queue.get(timeout=1.0) + except queue.Empty: + continue + + # Store in memory + with self.lock: + self.log_entries.append(entry) + + # Send to subscribers + inactive_subscribers = [] + with self.lock: + for subscriber_id, subscriber in self.subscribers.items(): + if not subscriber.send_log(entry): + inactive_subscribers.append(subscriber_id) + + # Remove inactive subscribers + for subscriber_id in inactive_subscribers: + self.remove_subscriber(subscriber_id) + + # Update statistics + self.stats['total_logs'] += 1 + self.stats['logs_by_level'][entry.level] += 1 + self.stats['logs_by_category'][entry.category] += 1 + + # Mark task as done + self.log_queue.task_done() + + except Exception as e: + print(f"Error processing log queue: {e}") + time.sleep(1) + + def log(self, level: LogLevel, category: LogCategory, module: str, message: str, + session_id: Optional[str] = None, client_id: Optional[str] = 
None, + source_ip: Optional[str] = None, dest_ip: Optional[str] = None, + protocol: Optional[str] = None, **metadata): + """Log a message""" + # Check if we should log this level + level_value = getattr(logging, level.value) + min_level_value = getattr(logging, self.log_level.value) + if level_value < min_level_value: + return + + # Create log entry + entry = LogEntry( + timestamp=time.time(), + level=level.value, + category=category.value, + module=module, + message=message, + session_id=session_id, + client_id=client_id, + source_ip=source_ip, + dest_ip=dest_ip, + protocol=protocol, + metadata=metadata + ) + + # Add to queue for background processing + try: + self.log_queue.put_nowait(entry) + except queue.Full: + self.stats['dropped_logs'] += 1 + + # Also log through Python logging system + if self.structured_logging: + log_data = entry.to_dict() + log_message = f"{message} | {json.dumps(log_data, default=str)}" + else: + log_message = message + + # Log to Python logger + python_logger_level = getattr(logging, level.value) + self.logger.log(python_logger_level, log_message) + + # Update console/file stats + if self.log_to_console: + self.stats['console_logs_written'] += 1 + if self.log_to_file: + self.stats['file_logs_written'] += 1 + + def debug(self, category: LogCategory, module: str, message: str, **kwargs): + """Log debug message""" + self.log(LogLevel.DEBUG, category, module, message, **kwargs) + + def info(self, category: LogCategory, module: str, message: str, **kwargs): + """Log info message""" + self.log(LogLevel.INFO, category, module, message, **kwargs) + + def warning(self, category: LogCategory, module: str, message: str, **kwargs): + """Log warning message""" + self.log(LogLevel.WARNING, category, module, message, **kwargs) + + def error(self, category: LogCategory, module: str, message: str, **kwargs): + """Log error message""" + self.log(LogLevel.ERROR, category, module, message, **kwargs) + + def critical(self, category: LogCategory, module: 
str, message: str, **kwargs): + """Log critical message""" + self.log(LogLevel.CRITICAL, category, module, message, **kwargs) + + def add_subscriber(self, subscriber_id: str, callback: Callable[[LogEntry], None], + log_filter: Optional[LogFilter] = None) -> bool: + """Add log subscriber for real-time streaming""" + with self.lock: + if subscriber_id in self.subscribers: + return False + + subscriber = LogSubscriber(subscriber_id, callback, log_filter) + self.subscribers[subscriber_id] = subscriber + self.stats['active_subscribers'] = len(self.subscribers) + + return True + + def remove_subscriber(self, subscriber_id: str) -> bool: + """Remove log subscriber""" + with self.lock: + if subscriber_id in self.subscribers: + del self.subscribers[subscriber_id] + self.stats['active_subscribers'] = len(self.subscribers) + return True + return False + + def get_logs(self, limit: int = 100, offset: int = 0, + log_filter: Optional[LogFilter] = None) -> List[Dict]: + """Get logs with filtering and pagination""" + with self.lock: + # Convert deque to list for easier manipulation + all_logs = list(self.log_entries) + + # Apply filter + if log_filter: + filtered_logs = [entry for entry in all_logs if log_filter.matches(entry)] + else: + filtered_logs = all_logs + + # Sort by timestamp (newest first) + filtered_logs.sort(key=lambda x: x.timestamp, reverse=True) + + # Apply pagination + paginated_logs = filtered_logs[offset:offset + limit] + + return [entry.to_dict() for entry in paginated_logs] + + def search_logs(self, query: str, limit: int = 100) -> List[Dict]: + """Search logs by text query""" + log_filter = LogFilter() + log_filter.text_filter = query + + return self.get_logs(limit=limit, log_filter=log_filter) + + def get_logs_by_session(self, session_id: str, limit: int = 100) -> List[Dict]: + """Get logs for specific session""" + log_filter = LogFilter() + log_filter.session_filter = session_id + + return self.get_logs(limit=limit, log_filter=log_filter) + + def 
get_logs_by_client(self, client_id: str, limit: int = 100) -> List[Dict]: + """Get logs for specific client""" + log_filter = LogFilter() + log_filter.client_filter = client_id + + return self.get_logs(limit=limit, log_filter=log_filter) + + def get_logs_by_ip(self, ip_address: str, limit: int = 100) -> List[Dict]: + """Get logs for specific IP address""" + log_filter = LogFilter() + log_filter.ip_filter = ip_address + + return self.get_logs(limit=limit, log_filter=log_filter) + + def get_recent_errors(self, limit: int = 50) -> List[Dict]: + """Get recent error and critical logs""" + log_filter = LogFilter() + log_filter.level_filter = LogLevel.ERROR + + return self.get_logs(limit=limit, log_filter=log_filter) + + def clear_logs(self): + """Clear all logs from memory""" + with self.lock: + self.log_entries.clear() + + def get_stats(self) -> Dict: + """Get logging statistics""" + with self.lock: + stats = self.stats.copy() + stats['memory_logs_count'] = len(self.log_entries) + stats['active_subscribers'] = len(self.subscribers) + stats['queue_size'] = self.log_queue.qsize() + + return stats + + def reset_stats(self): + """Reset logging statistics""" + self.stats = { + 'total_logs': 0, + 'logs_by_level': {level.value: 0 for level in LogLevel}, + 'logs_by_category': {cat.value: 0 for cat in LogCategory}, + 'active_subscribers': len(self.subscribers), + 'file_logs_written': 0, + 'console_logs_written': 0, + 'dropped_logs': 0 + } + + def export_logs(self, format: str = 'json', log_filter: Optional[LogFilter] = None) -> str: + """Export logs in specified format""" + logs = self.get_logs(limit=10000, log_filter=log_filter) + + if format == 'json': + return json.dumps(logs, indent=2, default=str) + elif format == 'csv': + import csv + import io + + output = io.StringIO() + if logs: + writer = csv.DictWriter(output, fieldnames=logs[0].keys()) + writer.writeheader() + writer.writerows(logs) + + return output.getvalue() + else: + raise ValueError(f"Unsupported export format: 
{format}") + + def set_log_level(self, level: LogLevel): + """Set logging level""" + self.log_level = level + self.logger.setLevel(getattr(logging, level.value)) + + def start(self): + """Start logger""" + self.running = True + self.processing_thread = threading.Thread(target=self._process_log_queue, daemon=True) + self.processing_thread.start() + + self.info(LogCategory.SYSTEM, 'logger', 'Virtual ISP Logger started') + + def stop(self): + """Stop logger""" + self.info(LogCategory.SYSTEM, 'logger', 'Virtual ISP Logger stopping') + + self.running = False + + # Wait for queue to be processed + self.log_queue.join() + + # Wait for processing thread + if self.processing_thread: + self.processing_thread.join() + + # Remove all subscribers + with self.lock: + self.subscribers.clear() + + print("Virtual ISP Logger stopped") + + +# Global logger instance +_global_logger: Optional[VirtualISPLogger] = None + + +def get_logger() -> Optional[VirtualISPLogger]: + """Get global logger instance""" + return _global_logger + + +def init_logger(config: Dict) -> VirtualISPLogger: + """Initialize global logger""" + global _global_logger + _global_logger = VirtualISPLogger(config) + return _global_logger + + +def log_debug(category: LogCategory, module: str, message: str, **kwargs): + """Global debug logging function""" + if _global_logger: + _global_logger.debug(category, module, message, **kwargs) + + +def log_info(category: LogCategory, module: str, message: str, **kwargs): + """Global info logging function""" + if _global_logger: + _global_logger.info(category, module, message, **kwargs) + + +def log_warning(category: LogCategory, module: str, message: str, **kwargs): + """Global warning logging function""" + if _global_logger: + _global_logger.warning(category, module, message, **kwargs) + + +def log_error(category: LogCategory, module: str, message: str, **kwargs): + """Global error logging function""" + if _global_logger: + _global_logger.error(category, module, message, 
**kwargs) + + +def log_critical(category: LogCategory, module: str, message: str, **kwargs): + """Global critical logging function""" + if _global_logger: + _global_logger.critical(category, module, message, **kwargs) + diff --git a/core/nat_engine.py b/core/nat_engine.py new file mode 100644 index 0000000000000000000000000000000000000000..a888e4a14eddfab59ebdcc963c5a30e20c64c239 --- /dev/null +++ b/core/nat_engine.py @@ -0,0 +1,638 @@ +""" +NAT Engine Module + +Implements Network Address Translation: +- Map (virtualIP, virtualPort) to (hostIP, hostPort) +- Maintain connection tracking table +- Handle port allocation and deallocation +- Support connection state tracking +""" + +import time +import threading +import socket +import random +import struct +from typing import Dict, Optional, Tuple, Set +from dataclasses import dataclass +from enum import Enum + +# Assuming IPProtocol is defined elsewhere or will be defined +# from .ip_parser import IPProtocol + +class NATType(Enum): + SNAT = "SNAT" # Source NAT + DNAT = "DNAT" # Destination NAT + + +@dataclass +class NATSession: + """Represents a NAT session""" + # Virtual (internal) endpoint + virtual_ip: str + virtual_port: int + + # Real (external) endpoint + real_ip: str + real_port: int + + # Host (translated) endpoint + host_ip: str + host_port: int + + # Session metadata + protocol: int # IP protocol number (e.g., 6 for TCP, 17 for UDP) + nat_type: NATType + created_time: float + last_activity: float + bytes_in: int = 0 + bytes_out: int = 0 + packets_in: int = 0 + packets_out: int = 0 + + @property + def session_id(self) -> str: + """Get unique session identifier""" + return f"{self.virtual_ip}:{self.virtual_port}-{self.real_ip}:{self.real_port}-{self.protocol}" + + @property + def is_expired(self) -> bool: + """Check if session has expired""" + timeout = 300 if self.protocol == socket.IPPROTO_TCP else 60 # 5 min for TCP, 1 min for UDP + return time.time() - self.last_activity > timeout + + @property + def 
class PortPool:
    """Tracks which NAT ports are free and which are bound to a session."""

    def __init__(self, start_port: int = 10000, end_port: int = 65535):
        self.start_port = start_port
        self.end_port = end_port
        # Every port in [start_port, end_port] begins life as available.
        self.available_ports: Set[int] = set(range(start_port, end_port + 1))
        self.allocated_ports: Dict[int, str] = {}  # port -> owning session_id
        self.lock = threading.Lock()

    def allocate_port(self, session_id: str) -> Optional[int]:
        """Reserve a free port for *session_id*; returns None when exhausted."""
        with self.lock:
            if not self.available_ports:
                return None
            # Uniform random pick so allocations spread across the range.
            chosen = random.choice(list(self.available_ports))
            self.available_ports.discard(chosen)
            self.allocated_ports[chosen] = session_id
            return chosen

    def release_port(self, port: int) -> bool:
        """Return *port* to the pool; False when it was not allocated."""
        with self.lock:
            if port not in self.allocated_ports:
                return False
            del self.allocated_ports[port]
            # Only re-add ports that actually belong to this pool's range.
            if self.start_port <= port <= self.end_port:
                self.available_ports.add(port)
            return True

    def get_session_for_port(self, port: int) -> Optional[str]:
        """Look up which session currently owns *port*, if any."""
        with self.lock:
            return self.allocated_ports.get(port)

    def get_stats(self) -> Dict:
        """Snapshot of pool capacity and current usage."""
        with self.lock:
            capacity = self.end_port - self.start_port + 1
            in_use = len(self.allocated_ports)
            return {
                'total_ports': capacity,
                'available_ports': len(self.available_ports),
                'allocated_ports': in_use,
                'utilization': in_use / capacity,
            }
Dict): + self.config = config + self.sessions: Dict[str, NATSession] = {} # session_id -> session + self.virtual_to_session: Dict[Tuple[str, int, int], str] = {} # (vip, vport, proto) -> session_id + self.host_to_session: Dict[Tuple[str, int, int], str] = {} # (hip, hport, proto) -> session_id + self.lock = threading.Lock() + + # Port pool for outbound connections + self.port_pool = PortPool( + config.get('port_range_start', 10000), + config.get('port_range_end', 65535) + ) + + # Host IP for outbound connections + self.host_ip = config.get('host_ip', self._get_default_host_ip()) + + # Session timeout + self.session_timeout = config.get('session_timeout', 300) + + # Statistics + self.stats = { + 'total_sessions': 0, + 'active_sessions': 0, + 'expired_sessions': 0, + 'port_exhaustion_events': 0, + 'bytes_translated': 0, + 'packets_translated': 0 + } + + # Cleanup thread + self.running = False + self.cleanup_thread = None + + def _get_default_host_ip(self) -> str: + """Get default host IP address""" + try: + # Connect to a remote address to determine local IP + with socket.socket(socket.AF_INET, socket.SOCK_DGRAM) as s: + s.connect(('8.8.8.8', 80)) + return s.getsockname()[0] + except Exception: + return '127.0.0.1' + + def _cleanup_expired_sessions(self): + """Clean up expired sessions""" + current_time = time.time() + expired_sessions = [] + + with self.lock: + for session_id, session in self.sessions.items(): + if session.is_expired: + expired_sessions.append(session_id) + + for session_id in expired_sessions: + self._remove_session(session_id) + self.stats['expired_sessions'] += 1 + + def _remove_session(self, session_id: str): + """Remove a session and clean up resources""" + with self.lock: + if session_id not in self.sessions: + return + + session = self.sessions[session_id] + + # Remove from lookup tables + virtual_key = (session.virtual_ip, session.virtual_port, session.protocol) + if virtual_key in self.virtual_to_session: + del 
self.virtual_to_session[virtual_key] + + host_key = (session.host_ip, session.host_port, session.protocol) + if host_key in self.host_to_session: + del self.host_to_session[host_key] + + # Release port + self.port_pool.release_port(session.host_port) + + # Remove session + del self.sessions[session_id] + + self.stats['active_sessions'] = len(self.sessions) + + def create_outbound_session(self, virtual_ip: str, virtual_port: int, + real_ip: str, real_port: int, protocol: int) -> Optional[NATSession]: + """Create NAT session for outbound connection""" + # Allocate host port + session_id = f"{virtual_ip}:{virtual_port}-{real_ip}:{real_port}-{protocol}" + host_port = self.port_pool.allocate_port(session_id) + + if host_port is None: + self.stats['port_exhaustion_events'] += 1 + return None + + # Create session + session = NATSession( + virtual_ip=virtual_ip, + virtual_port=virtual_port, + real_ip=real_ip, + real_port=real_port, + host_ip=self.host_ip, + host_port=host_port, + protocol=protocol, + nat_type=NATType.SNAT, + created_time=time.time(), + last_activity=time.time() + ) + + with self.lock: + self.sessions[session_id] = session + + # Add to lookup tables + virtual_key = (virtual_ip, virtual_port, protocol) + self.virtual_to_session[virtual_key] = session_id + + host_key = (self.host_ip, host_port, protocol) + self.host_to_session[host_key] = session_id + + self.stats['total_sessions'] += 1 + self.stats['active_sessions'] = len(self.sessions) + + return session + + def translate_outbound(self, virtual_ip: str, virtual_port: int, + real_ip: str, real_port: int, protocol: int) -> Optional[Tuple[str, int]]: + """Translate outbound packet (virtual -> host)""" + virtual_key = (virtual_ip, virtual_port, protocol) + + with self.lock: + session_id = self.virtual_to_session.get(virtual_key) + + if session_id: + session = self.sessions[session_id] + session.update_activity(direction='out') + return (session.host_ip, session.host_port) + else: + # Create new session + 
session = self.create_outbound_session(virtual_ip, virtual_port, real_ip, real_port, protocol) + if session: + return (session.host_ip, session.host_port) + + return None + + def translate_inbound(self, host_ip: str, host_port: int, protocol: int) -> Optional[Tuple[str, int]]: + """Translate inbound packet (host -> virtual)""" + host_key = (host_ip, host_port, protocol) + + with self.lock: + session_id = self.host_to_session.get(host_key) + + if session_id and session_id in self.sessions: + session = self.sessions[session_id] + session.update_activity(direction='in') + return (session.virtual_ip, session.virtual_port) + + return None + + def get_session_by_virtual(self, virtual_ip: str, virtual_port: int, protocol: int) -> Optional[NATSession]: + """Get session by virtual endpoint""" + virtual_key = (virtual_ip, virtual_port, protocol) + + with self.lock: + session_id = self.virtual_to_session.get(virtual_key) + if session_id and session_id in self.sessions: + return self.sessions[session_id] + + return None + + def get_session_by_host(self, host_ip: str, host_port: int, protocol: int) -> Optional[NATSession]: + """Get session by host endpoint""" + host_key = (host_ip, host_port, protocol) + + with self.lock: + session_id = self.host_to_session.get(host_key) + if session_id and session_id in self.sessions: + return self.sessions[session_id] + + return None + + def close_session(self, session_id: str) -> bool: + """Manually close a session""" + with self.lock: + if session_id in self.sessions: + self._remove_session(session_id) + return True + return False + + def close_session_by_virtual(self, virtual_ip: str, virtual_port: int, protocol: int) -> bool: + """Close session by virtual endpoint""" + virtual_key = (virtual_ip, virtual_port, protocol) + + with self.lock: + session_id = self.virtual_to_session.get(virtual_key) + if session_id: + self._remove_session(session_id) + return True + return False + + def get_sessions(self) -> Dict[str, Dict]: + """Get all active 
sessions""" + with self.lock: + return { + session_id: { + 'virtual_ip': session.virtual_ip, + 'virtual_port': session.virtual_port, + 'real_ip': session.real_ip, + 'real_port': session.real_port, + 'host_ip': session.host_ip, + 'host_port': session.host_port, + 'protocol': session.protocol, + 'nat_type': session.nat_type.value, + 'created_time': session.created_time, + 'last_activity': session.last_activity, + 'duration': session.duration, + 'bytes_in': session.bytes_in, + 'bytes_out': session.bytes_out, + 'packets_in': session.packets_in, + 'packets_out': session.packets_out, + 'is_expired': session.is_expired + } + for session_id, session in self.sessions.items() + } + + def get_stats(self) -> Dict: + """Get NAT statistics""" + port_stats = self.port_pool.get_stats() + + with self.lock: + current_stats = self.stats.copy() + current_stats['active_sessions'] = len(self.sessions) + current_stats.update(port_stats) + + return current_stats + + def update_packet_stats(self, bytes_count: int): + """Update packet statistics""" + self.stats['bytes_translated'] += bytes_count + self.stats['packets_translated'] += 1 + + def _cleanup_loop(self): + """Background cleanup loop""" + while self.running: + try: + # print("NAT cleanup loop: Cleaning expired sessions...") # Debug print + self._cleanup_expired_sessions() + time.sleep(0.1) # Shorter sleep for faster testing + except Exception as e: + print(f"NAT cleanup error: {e}") + time.sleep(0.1) + + def start(self): + """Start NAT engine""" + self.running = True + self.cleanup_thread = threading.Thread(target=self._cleanup_loop, daemon=True) + self.cleanup_thread.start() + # print(f"NAT engine started - Host IP: {self.host_ip}, Port range: {self.port_pool.start_port}-{self.port_pool.end_port}") + + def stop(self): + """Stop NAT engine""" + # print("Stopping NAT engine...") # Debug print + self.running = False + if self.cleanup_thread and self.cleanup_thread.is_alive(): + self.cleanup_thread.join(timeout=1) # Add timeout to join 
+ if self.cleanup_thread.is_alive(): + print("NAT cleanup thread did not terminate in time.") # Debug print + + # Close all sessions + with self.lock: + session_ids = list(self.sessions.keys()) + for session_id in session_ids: + self._remove_session(session_id) + + # print("NAT engine stopped") + + def _calculate_ip_checksum(self, ip_header_no_checksum: bytes) -> int: + """Calculate the IP header checksum.""" + # IP header checksum calculation (simplified for demonstration) + # This is a basic implementation and might need refinement for production use + s = 0 + # loop through header words + for i in range(0, len(ip_header_no_checksum), 2): + w = (ip_header_no_checksum[i] << 8) + (ip_header_no_checksum[i+1]) + s = s + w + + s = (s & 0xffff) + (s >> 16) + s = s + (s >> 16) + return ~s & 0xffff + + def process_inbound_packet(self, packet: bytes) -> Optional[bytes]: + """Process an inbound packet (from internet to VPN client) for DNAT.""" + # Parse IP header + # Assuming Ethernet frame, IP header starts at offset 14 + # For simplicity, let's assume we are only dealing with IPv4 for now + ip_header_offset = 14 + ip_header_length = (packet[ip_header_offset] & 0xF) * 4 + ip_header = packet[ip_header_offset : ip_header_offset + ip_header_length] + + # Unpack IP header (version_ihl, tos, total_length, identification, fragment_offset, ttl, protocol, header_checksum, source_address, destination_address) + iph = struct.unpack('!BBHHHBBH4s4s', ip_header) + + protocol = iph[6] + source_ip = socket.inet_ntoa(iph[8]) + dest_ip = socket.inet_ntoa(iph[9]) + + # Only process TCP/UDP for now + if protocol not in [socket.IPPROTO_TCP, socket.IPPROTO_UDP]: + return None + + # Parse TCP/UDP header + transport_header_offset = ip_header_offset + ip_header_length + if protocol == socket.IPPROTO_TCP: + tcp_header = packet[transport_header_offset : transport_header_offset + 20] + tcph = struct.unpack('!HHLLBBHHH', tcp_header) + source_port = tcph[0] + dest_port = tcph[1] + elif protocol == 
socket.IPPROTO_UDP: + udp_header = packet[transport_header_offset : transport_header_offset + 8] + udph = struct.unpack('!HHHH', udp_header) + source_port = udph[0] + dest_port = udph[1] + else: + return None + + # Check for DNAT rule match (simplified for now, actual DNAT rules would be in DNATEngine) + # For now, assume we are looking for a session based on host_ip (d_addr) and host_port (dest_port) + translated_endpoint = self.translate_inbound(dest_ip, dest_port, protocol) + + if translated_endpoint: + virtual_ip, virtual_port = translated_endpoint + + # Reconstruct packet with translated destination IP and port + # Recalculate IP header checksum + new_dest_ip_bytes = socket.inet_aton(virtual_ip) + + # Rebuild IP header with new destination IP + # Need to recalculate checksum for IP header + # For simplicity, we'll set checksum to 0 and assume it's recalculated later or by OS + new_ip_header_raw = struct.pack('!BBHHHBBH4s4s', iph[0], iph[1], iph[2], iph[3], iph[4], iph[5], iph[6], 0, iph[8], new_dest_ip_bytes) + new_ip_header_checksum = self._calculate_ip_checksum(new_ip_header_raw) + new_ip_header = struct.pack('!BBHHHBBH4s4s', iph[0], iph[1], iph[2], iph[3], iph[4], iph[5], iph[6], new_ip_header_checksum, iph[8], new_dest_ip_bytes) + + # Rebuild TCP/UDP header with new destination port + if protocol == socket.IPPROTO_TCP: + # Recalculate TCP checksum (requires pseudo-header, IP header, and TCP data) + new_tcp_header_raw = struct.pack('!HHLLBBHHH', source_port, virtual_port, tcph[2], tcph[3], tcph[4], tcph[5], tcph[6], 0, tcph[8]) + # For now, setting checksum to 0. Proper recalculation is complex. 
+ new_tcp_header = struct.pack('!HHLLBBHHH', source_port, virtual_port, tcph[2], tcph[3], tcph[4], tcph[5], tcph[6], 0, tcph[8]) + return packet[:ip_header_offset] + new_ip_header + new_tcp_header + packet[transport_header_offset + 20:] + elif protocol == socket.IPPROTO_UDP: + # Recalculate UDP checksum (requires pseudo-header, IP header, and UDP data) + new_udp_header_raw = struct.pack('!HHHH', source_port, virtual_port, udph[2], 0) + # For now, setting checksum to 0. Proper recalculation is complex. + new_udp_header = struct.pack('!HHHH', source_port, virtual_port, udph[2], 0) + return packet[:ip_header_offset] + new_ip_header + new_udp_header + packet[transport_header_offset + 8:] + + return None + + def process_outbound_packet(self, packet: bytes) -> Optional[bytes]: + """Process an outbound packet (from VPN client to internet) for SNAT.""" + # Parse IP header + ip_header_offset = 14 + ip_header_length = (packet[ip_header_offset] & 0xF) * 4 + ip_header = packet[ip_header_offset : ip_header_offset + ip_header_length] + + # Unpack IP header + iph = struct.unpack('!BBHHHBBH4s4s', ip_header) + + protocol = iph[6] + source_ip = socket.inet_ntoa(iph[8]) + dest_ip = socket.inet_ntoa(iph[9]) + + # Only process TCP/UDP for now + if protocol not in [socket.IPPROTO_TCP, socket.IPPROTO_UDP]: + return None + + # Parse TCP/UDP header + transport_header_offset = ip_header_offset + ip_header_length + if protocol == socket.IPPROTO_TCP: + tcp_header = packet[transport_header_offset : transport_header_offset + 20] + tcph = struct.unpack('!HHLLBBHHH', tcp_header) + source_port = tcph[0] + dest_port = tcph[1] + elif protocol == socket.IPPROTO_UDP: + udp_header = packet[transport_header_offset : transport_header_offset + 8] + udph = struct.unpack('!HHHH', udp_header) + source_port = udph[0] + dest_port = udph[1] + else: + return None + + # Perform SNAT + translated_endpoint = self.translate_outbound(source_ip, source_port, dest_ip, dest_port, protocol) + + if translated_endpoint: + 
host_ip, host_port = translated_endpoint + + # Reconstruct packet with translated source IP and port + # Recalculate IP header checksum + new_source_ip_bytes = socket.inet_aton(host_ip) + + # Rebuild IP header with new source IP + new_ip_header_raw = struct.pack('!BBHHHBBH4s4s', iph[0], iph[1], iph[2], iph[3], iph[4], iph[5], iph[6], 0, new_source_ip_bytes, iph[9]) + new_ip_header_checksum = self._calculate_ip_checksum(new_ip_header_raw) + new_ip_header = struct.pack('!BBHHHBBH4s4s', iph[0], iph[1], iph[2], iph[3], iph[4], iph[5], iph[6], new_ip_header_checksum, new_source_ip_bytes, iph[9]) + + # Rebuild TCP/UDP header with new source port + if protocol == socket.IPPROTO_TCP: + # Recalculate TCP checksum + new_tcp_header_raw = struct.pack('!HHLLBBHHH', host_port, dest_port, tcph[2], tcph[3], tcph[4], tcph[5], tcph[6], 0, tcph[8]) + # For now, setting checksum to 0. Proper recalculation is complex. + new_tcp_header = struct.pack('!HHLLBBHHH', host_port, dest_port, tcph[2], tcph[3], tcph[4], tcph[5], tcph[6], 0, tcph[8]) + return packet[:ip_header_offset] + new_ip_header + new_tcp_header + packet[transport_header_offset + 20:] + elif protocol == socket.IPPROTO_UDP: + # Recalculate UDP checksum + new_udp_header_raw = struct.pack('!HHHH', host_port, dest_port, udph[2], 0) + # For now, setting checksum to 0. Proper recalculation is complex. 
class NATRule:
    """A single DNAT (port-forwarding) rule: external port -> internal endpoint."""

    def __init__(self, external_port: int, internal_ip: str, internal_port: int,
                 protocol: int, enabled: bool = True):
        self.external_port = external_port
        self.internal_ip = internal_ip
        self.internal_port = internal_port
        self.protocol = protocol
        self.enabled = enabled
        self.created_time = time.time()
        # Usage counters, updated by record_hit().
        self.hit_count = 0
        self.last_hit = None

    def matches(self, port: int, protocol: int) -> bool:
        """True when the rule is enabled and targets this port/protocol pair."""
        if not self.enabled:
            return False
        return port == self.external_port and protocol == self.protocol

    def record_hit(self):
        """Note that traffic just matched this rule."""
        self.hit_count += 1
        self.last_hit = time.time()

    def to_dict(self) -> Dict:
        """Serialize the rule for API/status output."""
        return {
            'external_port': self.external_port,
            'internal_ip': self.internal_ip,
            'internal_port': self.internal_port,
            'protocol': self.protocol,
            'enabled': self.enabled,
            'created_time': self.created_time,
            'hit_count': self.hit_count,
            'last_hit': self.last_hit,
        }
-> Optional[NATRule]: + """Get DNAT rule by ID""" + with self.lock: + return self.rules.get(rule_id) + + def get_matching_rule(self, port: int, protocol: int) -> Optional[NATRule]: + """Get matching DNAT rule for given port and protocol""" + with self.lock: + for rule in self.rules.values(): + if rule.matches(port, protocol): + rule.record_hit() + return rule + return None + + def get_all_rules(self) -> Dict[str, Dict]: + """Get all DNAT rules""" + with self.lock: + return {rule_id: rule.to_dict() for rule_id, rule in self.rules.items()} + + + diff --git a/core/openvpn_manager.py b/core/openvpn_manager.py new file mode 100644 index 0000000000000000000000000000000000000000..e3ffdfddb6fd8a9bbb95f018dd457b2753415c68 --- /dev/null +++ b/core/openvpn_manager.py @@ -0,0 +1,508 @@ +""" +OpenVPN Manager Module + +Manages OpenVPN server integration with the Virtual ISP Stack +""" + +import os +import json +import subprocess +import threading +import time +import logging +from typing import Dict, List, Optional, Any +from dataclasses import dataclass, asdict +import ipaddress + +logger = logging.getLogger(__name__) + +@dataclass +class VPNClient: + """Represents a connected VPN client""" + client_id: str + common_name: str + ip_address: str + connected_at: float + bytes_received: int = 0 + bytes_sent: int = 0 + status: str = "connected" + routed_through_vpn: bool = False + +@dataclass +class VPNServerStatus: + """Represents VPN server status""" + is_running: bool + connected_clients: int + total_bytes_received: int + total_bytes_sent: int + uptime: float + server_ip: str + server_port: int + +class OpenVPNManager: + """Manages OpenVPN server and client connections with traffic routing""" + + def __init__(self, config: Dict[str, Any]): + self.config = config + self.server_config_path = "/etc/openvpn/server/server.conf" + self.status_log_path = "/tmp/openvpn/openvpn-status.log" + self.clients: Dict[str, VPNClient] = {} + self.server_process = None + self.is_running = False + 
self.start_time = None + + # VPN network configuration + self.vpn_network = ipaddress.IPv4Network("10.8.0.0/24") + self.vpn_server_ip = "10.8.0.1" + self.vpn_port = 1194 + + # Integration with ISP stack + self.dhcp_server = None + self.nat_engine = None + self.firewall = None + self.router = None + self.traffic_router = None # New traffic router component + + # Status monitoring thread + self.monitor_thread = None + self.monitor_running = False + + # Client configuration storage + self.config_storage_path = "/tmp/vpn_client_configs" + os.makedirs(self.config_storage_path, exist_ok=True) + + def set_isp_components(self, dhcp_server=None, nat_engine=None, firewall=None, router=None, traffic_router=None): + """Set references to ISP stack components for integration""" + self.dhcp_server = dhcp_server + self.nat_engine = nat_engine + self.firewall = firewall + self.router = router + self.traffic_router = traffic_router + + # Configure traffic router with other components + if self.traffic_router: + self.traffic_router.set_components( + nat_engine=nat_engine, + firewall=firewall, + dhcp_server=dhcp_server + ) + + def start_server(self) -> bool: + """Start the OpenVPN server with traffic routing""" + try: + if self.is_running: + logger.warning("OpenVPN server is already running") + return True + + # Ensure configuration exists + if not os.path.exists(self.server_config_path): + logger.error(f"OpenVPN server configuration not found: {self.server_config_path}") + return False + + # Start traffic router first + if self.traffic_router and not self.traffic_router.is_running: + if not self.traffic_router.start(): + logger.error("Failed to start traffic router") + return False + + + # Start OpenVPN server + self.server_process = subprocess.Popen(['sudo', 'openvpn', '--config', self.server_config_path], stdout=subprocess.PIPE, stderr=subprocess.PIPE, text=True) + self.is_running = True + self.start_time = time.time() + logger.info("OpenVPN server started successfully") + + # 
Start monitoring thread + self.start_monitoring() + + # Configure firewall rules for VPN + self._configure_vpn_firewall() + + # Configure NAT for VPN traffic + self._configure_vpn_nat() + + return True + + except Exception as e: + logger.error(f"Error starting OpenVPN server: {e}") + return False + + def stop_server(self) -> bool: + """Stop the OpenVPN server and traffic routing""" + try: + if not self.is_running: + logger.warning("OpenVPN server is not running") + return True + + # Stop monitoring + self.stop_monitoring() + + # Remove all client routes before stopping + if self.traffic_router: + for client_id in list(self.clients.keys()): + self.traffic_router.remove_client_route(client_id) + + # Stop OpenVPN server + if self.server_process: + self.server_process.terminate() + self.server_process.wait(timeout=5) + if self.server_process.poll() is None: + self.server_process.kill() + self.server_process = None + self.is_running = False + self.start_time = None + self.clients.clear() + logger.info("OpenVPN server stopped successfully") + return True + + except Exception as e: + logger.error(f"Error stopping OpenVPN server: {e}") + return False + + def start_monitoring(self): + """Start the client monitoring thread""" + if self.monitor_thread and self.monitor_thread.is_alive(): + return + + self.monitor_running = True + self.monitor_thread = threading.Thread(target=self._monitor_clients, daemon=True) + self.monitor_thread.start() + logger.info("Started OpenVPN client monitoring") + + def stop_monitoring(self): + """Stop the client monitoring thread""" + self.monitor_running = False + if self.monitor_thread: + self.monitor_thread.join(timeout=5) + logger.info("Stopped OpenVPN client monitoring") + + def _monitor_clients(self): + """Monitor connected VPN clients""" + while self.monitor_running: + try: + self._update_client_status() + time.sleep(10) # Update every 10 seconds + except Exception as e: + logger.error(f"Error monitoring VPN clients: {e}") + time.sleep(30) # 
Wait longer on error + + def _update_client_status(self): + """Update client status from OpenVPN status log and manage traffic routing""" + try: + with open(self.status_log_path, 'r') as f: + lines = f.readlines() + + new_clients = {} + client_section = False + for line in lines: + if line.startswith('ROUTING TABLE'): + client_section = False + if client_section and not line.startswith('GLOBAL STATS'): + parts = line.strip().split(',') + if len(parts) >= 5: + common_name = parts[0] + real_ip_port = parts[1] + virtual_ip = parts[2] + bytes_received = int(parts[3]) + bytes_sent = int(parts[4]) + connected_since = float(parts[5]) # Assuming this is a timestamp + + # Extract IP address from real_ip_port (e.g., 1.2.3.4:12345) + ip_address = real_ip_port.split(':')[0] + + client = VPNClient( + client_id=common_name, + common_name=common_name, + ip_address=virtual_ip, + connected_at=connected_since, + bytes_received=bytes_received, + bytes_sent=bytes_sent, + status="connected", + routed_through_vpn=True + ) + new_clients[common_name] = client + if line.startswith('COMMON NAME'): + client_section = True + self.clients = new_clients + + except Exception as e: + logger.error(f"Error updating client status: {e}") + + def _sync_with_dhcp(self): + """Sync VPN clients with DHCP server""" + try: + for client in self.clients.values(): + if client.ip_address != "unknown": + # Register VPN client IP with DHCP server + # This allows the ISP stack to track VPN clients + if hasattr(self.dhcp_server, 'register_static_lease'): + self.dhcp_server.register_static_lease( + client.common_name, + client.ip_address, + "VPN Client" + ) + except Exception as e: + logger.error(f"Error syncing with DHCP: {e}") + + def _configure_vpn_firewall(self): + """Configure firewall rules for VPN traffic""" + try: + if not self.firewall: + return + + # Add firewall rules for VPN + vpn_rules = [ + { + "rule_id": "allow_openvpn", + "priority": 10, + "action": "ACCEPT", + "direction": "BOTH", + "dest_port": 
str(self.vpn_port), + "protocol": "UDP", + "description": "Allow OpenVPN traffic", + "enabled": True + }, + { + "rule_id": "allow_vpn_network", + "priority": 11, + "action": "ACCEPT", + "direction": "BOTH", + "source_network": str(self.vpn_network), + "description": "Allow VPN client network traffic", + "enabled": True + } + ] + + for rule in vpn_rules: + if hasattr(self.firewall, 'add_rule'): + self.firewall.add_rule(rule) + + logger.info("Configured firewall rules for VPN") + + except Exception as e: + logger.error(f"Error configuring VPN firewall: {e}") + + def _configure_vpn_nat(self): + """Configure NAT for VPN traffic""" + try: + # NAT configuration will be handled by the external environment (e.g., HuggingFace Spaces setup) + # or by the underlying network infrastructure. We are removing direct iptables calls. + logger.info("Skipping direct iptables NAT configuration as per instructions.") + + except Exception as e: + logger.error(f"Error configuring VPN NAT: {e}") + + def get_server_status(self) -> VPNServerStatus: + """Get current server status""" + total_bytes_received = sum(client.bytes_received for client in self.clients.values()) + total_bytes_sent = sum(client.bytes_sent for client in self.clients.values()) + uptime = time.time() - self.start_time if self.start_time else 0 + + return VPNServerStatus( + is_running=self.is_running, + connected_clients=len(self.clients), + total_bytes_received=total_bytes_received, + total_bytes_sent=total_bytes_sent, + uptime=uptime, + server_ip=self.vpn_server_ip, + server_port=self.vpn_port + ) + def get_connected_clients(self) -> List[Dict[str, Any]]: + """Get list of connected clients""" + return [asdict(client) for client in self.clients.values()] + + def disconnect_client(self, client_id: str) -> bool: + """Disconnect a specific client""" + try: + if client_id not in self.clients: + return False + + # Send kill signal to specific client + # This requires OpenVPN management interface, simplified for now + 
logger.info(f"Disconnecting client: {client_id}") + + # Remove from clients dict + del self.clients[client_id] + return True + + except Exception as e: + logger.error(f"Error disconnecting client {client_id}: {e}") + return False + + def generate_client_config(self, client_name: str, server_ip: str) -> str: + """Generate client configuration file with embedded certificates""" + try: + # Read real CA certificate + ca_cert_path = "/etc/openvpn/server/ca.crt" + with open(ca_cert_path, 'r') as f: + ca_cert = f.read() + + client_cert_path = f"/home/ubuntu/easy-rsa/pki/issued/{client_name}.crt" + with open(client_cert_path, 'r') as f: + client_cert = f.read() + + client_key_path = f"/home/ubuntu/easy-rsa/pki/private/{client_name}.key" + with open(client_key_path, 'r') as f: + client_key = f.read() + + # Generate complete client configuration + client_config = f"""# OpenVPN Client Configuration for {client_name} +# Generated by Virtual ISP Stack +# Server: {server_ip}:{self.vpn_port} + +client +dev tun +proto udp +remote {server_ip} {self.vpn_port} +resolv-retry infinite +nobind +persist-key +persist-tun +cipher AES-256-CBC +auth SHA256 +verb 3 +key-direction 1 +redirect-gateway def1 bypass-dhcp +dhcp-option DNS 8.8.8.8 +dhcp-option DNS 8.8.4.4 +remote-cert-tls server + +# Embedded CA Certificate + +{ca_cert} + + +# Embedded Client Certificate + +{client_cert} + + +# Embedded Client Private Key + +{client_key} + + +# TLS Authentication Key (optional, for extra security) +# +# -----BEGIN OpenVPN Static key V1----- +# [TLS-AUTH-KEY-CONTENT-WOULD-GO-HERE] +# -----END OpenVPN Static key V1----- +# +""" + + logger.info(f"Generated client configuration for {client_name}") + return client_config + + except Exception as e: + logger.error(f"Error generating client config: {e}") + return "" + + def save_client_config(self, client_name: str, config_content: str) -> bool: + """Save client configuration to storage""" + try: + config_file_path = os.path.join(self.config_storage_path, 
f"{client_name}.ovpn") + with open(config_file_path, 'w') as f: + f.write(config_content) + + logger.info(f"Saved client configuration for {client_name}") + return True + + except Exception as e: + logger.error(f"Error saving client config for {client_name}: {e}") + return False + + def load_client_config(self, client_name: str) -> str: + """Load client configuration from storage""" + try: + config_file_path = os.path.join(self.config_storage_path, f"{client_name}.ovpn") + if not os.path.exists(config_file_path): + return "" + + with open(config_file_path, 'r') as f: + config_content = f.read() + + logger.info(f"Loaded client configuration for {client_name}") + return config_content + + except Exception as e: + logger.error(f"Error loading client config for {client_name}: {e}") + return "" + + def list_client_configs(self) -> List[str]: + """List all stored client configurations""" + try: + config_files = [] + if os.path.exists(self.config_storage_path): + for filename in os.listdir(self.config_storage_path): + if filename.endswith('.ovpn'): + client_name = filename[:-5] # Remove .ovpn extension + config_files.append(client_name) + + return config_files + + except Exception as e: + logger.error(f"Error listing client configs: {e}") + return [] + + def delete_client_config(self, client_name: str) -> bool: + """Delete client configuration from storage""" + try: + config_file_path = os.path.join(self.config_storage_path, f"{client_name}.ovpn") + if os.path.exists(config_file_path): + os.remove(config_file_path) + logger.info(f"Deleted client configuration for {client_name}") + return True + else: + logger.warning(f"Client configuration for {client_name} not found") + return False + + except Exception as e: + logger.error(f"Error deleting client config for {client_name}: {e}") + return False + + def generate_and_save_client_config(self, client_name: str, server_ip: str) -> str: + """Generate client configuration and save it to storage""" + try: + config_content = 
self.generate_client_config(client_name, server_ip) + if config_content: + if self.save_client_config(client_name, config_content): + return config_content + return "" + + except Exception as e: + logger.error(f"Error generating and saving client config for {client_name}: {e}") + return "" + + def get_statistics(self) -> Dict[str, Any]: + """Get comprehensive VPN statistics""" + status = self.get_server_status() + + return { + "server_status": asdict(status), + "connected_clients": self.get_connected_clients(), + "network_config": { + "vpn_network": str(self.vpn_network), + "server_ip": self.vpn_server_ip, + "server_port": self.vpn_port + }, + "integration_status": { + "dhcp_integrated": self.dhcp_server is not None, + "nat_integrated": self.nat_engine is not None, + "firewall_integrated": self.firewall is not None, + "router_integrated": self.router is not None + } + } + +# Global OpenVPN manager instance +openvpn_manager = None + +def initialize_openvpn_manager(config: Dict[str, Any]) -> OpenVPNManager: + """Initialize the OpenVPN manager""" + global openvpn_manager + openvpn_manager = OpenVPNManager(config) + return openvpn_manager + +def get_openvpn_manager() -> Optional[OpenVPNManager]: + """Get the global OpenVPN manager instance""" + return openvpn_manager + diff --git a/core/packet_bridge.py b/core/packet_bridge.py new file mode 100644 index 0000000000000000000000000000000000000000..189e9e99a3b03fe359f3baabf3ac5ba09209813f --- /dev/null +++ b/core/packet_bridge.py @@ -0,0 +1,664 @@ +""" +Packet Bridge Module + +Handles communication with virtual clients: +- Accept packet streams over WebSocket/TCP +- Deliver response packets back to clients +- Frame processing (Ethernet → IPv4) +- Connection management +""" + +import asyncio +import websockets +import socket +import threading +import time +import struct +from typing import Dict, List, Optional, Callable, Set, Any, Tuple +from dataclasses import dataclass +from enum import Enum +import json +import logging + 
+from .ip_parser import IPParser, ParsedPacket + + +class BridgeType(Enum): + WEBSOCKET = "WEBSOCKET" + TCP_SOCKET = "TCP_SOCKET" + UDP_SOCKET = "UDP_SOCKET" + + +@dataclass +class ClientConnection: + """Represents a client connection to the bridge""" + client_id: str + bridge_type: BridgeType + remote_address: str + remote_port: int + websocket: Optional[Any] = None # WebSocket connection + socket: Optional['socket.socket'] = None # TCP/UDP socket + connected_time: float = 0 + last_activity: float = 0 + packets_received: int = 0 + packets_sent: int = 0 + bytes_received: int = 0 + bytes_sent: int = 0 + is_active: bool = True + + def __post_init__(self): + if self.connected_time == 0: + self.connected_time = time.time() + if self.last_activity == 0: + self.last_activity = time.time() + + def update_activity(self, packet_count: int = 1, byte_count: int = 0, direction: str = 'received'): + """Update connection activity""" + self.last_activity = time.time() + + if direction == 'received': + self.packets_received += packet_count + self.bytes_received += byte_count + else: + self.packets_sent += packet_count + self.bytes_sent += byte_count + + def to_dict(self) -> Dict: + """Convert to dictionary""" + return { + 'client_id': self.client_id, + 'bridge_type': self.bridge_type.value, + 'remote_address': self.remote_address, + 'remote_port': self.remote_port, + 'connected_time': self.connected_time, + 'last_activity': self.last_activity, + 'packets_received': self.packets_received, + 'packets_sent': self.packets_sent, + 'bytes_received': self.bytes_received, + 'bytes_sent': self.bytes_sent, + 'is_active': self.is_active, + 'duration': time.time() - self.connected_time + } + + +class EthernetFrame: + """Ethernet frame parser""" + + def __init__(self): + self.dest_mac = b'\x00' * 6 + self.src_mac = b'\x00' * 6 + self.ethertype = 0x0800 # IPv4 + self.payload = b'' + + @classmethod + def parse(cls, data: bytes) -> Optional['EthernetFrame']: + """Parse Ethernet frame from raw 
bytes""" + if len(data) < 14: # Minimum Ethernet header size + return None + + frame = cls() + frame.dest_mac = data[0:6] + frame.src_mac = data[6:12] + frame.ethertype = struct.unpack('!H', data[12:14])[0] + frame.payload = data[14:] + + return frame + + def build(self) -> bytes: + """Build Ethernet frame as bytes""" + header = self.dest_mac + self.src_mac + struct.pack('!H', self.ethertype) + return header + self.payload + + def is_ipv4(self) -> bool: + """Check if frame contains IPv4 packet""" + return self.ethertype == 0x0800 + + def is_arp(self) -> bool: + """Check if frame contains ARP packet""" + return self.ethertype == 0x0806 + + +class PacketBridge: + """Packet bridge implementation""" + + def __init__(self, config: Dict): + self.config = config + self.clients: Dict[str, ClientConnection] = {} + self.packet_handlers: List[Callable[[ParsedPacket, str], Optional[bytes]]] = [] + self.lock = threading.Lock() + + # Configuration + self.websocket_host = config.get('websocket_host', '0.0.0.0') + self.websocket_port = config.get('websocket_port', 8765) + self.tcp_host = config.get('tcp_host', '0.0.0.0') + self.tcp_port = config.get('tcp_port', 8766) + self.max_clients = config.get('max_clients', 100) + self.client_timeout = config.get('client_timeout', 300) + + # WebSocket server + self.websocket_server = None + self.tcp_server_socket = None + + # Background tasks + self.running = False + self.websocket_task = None + self.tcp_task = None + self.cleanup_task = None + + # Statistics + self.stats = { + 'total_clients': 0, + 'active_clients': 0, + 'packets_processed': 0, + 'packets_forwarded': 0, + 'packets_dropped': 0, + 'bytes_processed': 0, + 'websocket_connections': 0, + 'tcp_connections': 0, + 'connection_errors': 0 + } + + # Event loop + self.loop = None + + def add_packet_handler(self, handler: Callable[[ParsedPacket, str], Optional[bytes]]): + """Add packet handler function""" + self.packet_handlers.append(handler) + + def remove_packet_handler(self, handler: 
Callable[[ParsedPacket, str], Optional[bytes]]): + """Remove packet handler function""" + if handler in self.packet_handlers: + self.packet_handlers.remove(handler) + + def _generate_client_id(self, remote_address: str, remote_port: int) -> str: + """Generate unique client ID""" + timestamp = int(time.time() * 1000) + return f"client_{remote_address}_{remote_port}_{timestamp}" + + def _process_ethernet_frame(self, frame_data: bytes, client_id: str) -> Optional[bytes]: + """Process Ethernet frame and extract IP packet""" + try: + # Parse Ethernet frame + frame = EthernetFrame.parse(frame_data) + if not frame or not frame.is_ipv4(): + return None + + # Parse IP packet + packet = IPParser.parse_packet(frame.payload) + self.stats['packets_processed'] += 1 + self.stats['bytes_processed'] += len(frame_data) + + # Process through packet handlers + response_packet = None + for handler in self.packet_handlers: + try: + response = handler(packet, client_id) + if response: + response_packet = response + break + except Exception as e: + logging.error(f"Packet handler error: {e}") + + if response_packet: + # Wrap response in Ethernet frame + response_frame = EthernetFrame() + response_frame.dest_mac = frame.src_mac + response_frame.src_mac = frame.dest_mac + response_frame.ethertype = 0x0800 + response_frame.payload = response_packet + + self.stats['packets_forwarded'] += 1 + return response_frame.build() + else: + self.stats['packets_dropped'] += 1 + return None + + except Exception as e: + logging.error(f"Error processing Ethernet frame: {e}") + self.stats['packets_dropped'] += 1 + return None + + async def _handle_websocket_client(self, websocket, path): + """Handle WebSocket client connection""" + client_address = websocket.remote_address + client_id = self._generate_client_id(client_address[0], client_address[1]) + + # Create client connection + client = ClientConnection( + client_id=client_id, + bridge_type=BridgeType.WEBSOCKET, + remote_address=client_address[0], + 
remote_port=client_address[1], + websocket=websocket + ) + + with self.lock: + if len(self.clients) >= self.max_clients: + await websocket.close(code=1013, reason="Too many clients") + return + + self.clients[client_id] = client + + self.stats['total_clients'] += 1 + self.stats['active_clients'] = len(self.clients) + self.stats['websocket_connections'] += 1 + + logging.info(f"WebSocket client connected: {client_id} from {client_address}") + + try: + async for message in websocket: + if isinstance(message, bytes): + # Binary message - treat as Ethernet frame + client.update_activity(1, len(message), 'received') + + response = self._process_ethernet_frame(message, client_id) + if response: + await websocket.send(response) + client.update_activity(1, len(response), 'sent') + + elif isinstance(message, str): + # Text message - treat as control message + try: + control_msg = json.loads(message) + await self._handle_control_message(client, control_msg) + except json.JSONDecodeError: + logging.warning(f"Invalid control message from {client_id}: {message}") + + except websockets.exceptions.ConnectionClosed: + logging.info(f"WebSocket client disconnected: {client_id}") + except Exception as e: + logging.error(f"WebSocket client error: {e}") + self.stats['connection_errors'] += 1 + + finally: + # Clean up client + with self.lock: + if client_id in self.clients: + self.clients[client_id].is_active = False + del self.clients[client_id] + + self.stats['active_clients'] = len(self.clients) + + async def _handle_control_message(self, client: ClientConnection, message: Dict): + """Handle control message from client""" + msg_type = message.get('type') + + if msg_type == 'ping': + # Respond to ping + response = {'type': 'pong', 'timestamp': time.time()} + await client.websocket.send(json.dumps(response)) + + elif msg_type == 'stats': + # Send client statistics + response = { + 'type': 'stats', + 'client_stats': client.to_dict(), + 'bridge_stats': self.get_stats() + } + await 
client.websocket.send(json.dumps(response)) + + elif msg_type == 'config': + # Handle configuration updates + config_data = message.get('data', {}) + # Process configuration updates here + response = {'type': 'config_ack', 'status': 'ok'} + await client.websocket.send(json.dumps(response)) + + def _handle_tcp_client(self, client_socket: socket.socket, client_address: Tuple[str, int]): + """Handle TCP client connection""" + client_id = self._generate_client_id(client_address[0], client_address[1]) + + # Create client connection + client = ClientConnection( + client_id=client_id, + bridge_type=BridgeType.TCP_SOCKET, + remote_address=client_address[0], + remote_port=client_address[1], + socket=client_socket + ) + + with self.lock: + if len(self.clients) >= self.max_clients: + client_socket.close() + return + + self.clients[client_id] = client + + self.stats['total_clients'] += 1 + self.stats['active_clients'] = len(self.clients) + self.stats['tcp_connections'] += 1 + + logging.info(f"TCP client connected: {client_id} from {client_address}") + + try: + client_socket.settimeout(self.client_timeout) + + while client.is_active: + try: + # Read frame length (4 bytes) + length_data = client_socket.recv(4) + if not length_data: + break + + frame_length = struct.unpack('!I', length_data)[0] + if frame_length > 65536: # Sanity check + break + + # Read frame data + frame_data = b'' + while len(frame_data) < frame_length: + chunk = client_socket.recv(frame_length - len(frame_data)) + if not chunk: + break + frame_data += chunk + + if len(frame_data) != frame_length: + break + + client.update_activity(1, len(frame_data), 'received') + + # Process frame + response = self._process_ethernet_frame(frame_data, client_id) + if response: + # Send response with length prefix + response_length = struct.pack('!I', len(response)) + client_socket.send(response_length + response) + client.update_activity(1, len(response), 'sent') + + except socket.timeout: + continue + except Exception as e: 
+ logging.error(f"TCP client error: {e}") + break + + except Exception as e: + logging.error(f"TCP client handler error: {e}") + self.stats['connection_errors'] += 1 + + finally: + # Clean up client + try: + client_socket.close() + except: + pass + + with self.lock: + if client_id in self.clients: + self.clients[client_id].is_active = False + del self.clients[client_id] + + self.stats['active_clients'] = len(self.clients) + logging.info(f"TCP client disconnected: {client_id}") + + def _tcp_server_loop(self): + """TCP server loop""" + try: + self.tcp_server_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM) + self.tcp_server_socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1) + self.tcp_server_socket.bind((self.tcp_host, self.tcp_port)) + self.tcp_server_socket.listen(10) + + logging.info(f"TCP bridge server listening on {self.tcp_host}:{self.tcp_port}") + + while self.running: + try: + client_socket, client_address = self.tcp_server_socket.accept() + + # Handle client in separate thread + client_thread = threading.Thread( + target=self._handle_tcp_client, + args=(client_socket, client_address), + daemon=True + ) + client_thread.start() + + except socket.error as e: + if self.running: + logging.error(f"TCP server error: {e}") + time.sleep(1) + + except Exception as e: + logging.error(f"TCP server loop error: {e}") + + finally: + if self.tcp_server_socket: + self.tcp_server_socket.close() + + def _cleanup_loop(self): + """Background cleanup loop""" + while self.running: + try: + current_time = time.time() + expired_clients = [] + + with self.lock: + for client_id, client in self.clients.items(): + # Mark inactive clients for removal + if current_time - client.last_activity > self.client_timeout: + expired_clients.append(client_id) + + # Clean up expired clients + for client_id in expired_clients: + with self.lock: + if client_id in self.clients: + client = self.clients[client_id] + client.is_active = False + + # Close connections + if 
client.websocket: + try: + asyncio.run_coroutine_threadsafe( + client.websocket.close(), + self.loop + ) + except: + pass + + if client.socket: + try: + client.socket.close() + except: + pass + + del self.clients[client_id] + logging.info(f"Cleaned up expired client: {client_id}") + + self.stats['active_clients'] = len(self.clients) + + time.sleep(30) # Cleanup every 30 seconds + + except Exception as e: + logging.error(f"Cleanup loop error: {e}") + time.sleep(5) + + def send_packet_to_client(self, client_id: str, packet_data: bytes) -> bool: + """Send packet to specific client""" + with self.lock: + client = self.clients.get(client_id) + + if not client or not client.is_active: + return False + + try: + if client.bridge_type == BridgeType.WEBSOCKET: + # Send via WebSocket + if client.websocket: + asyncio.run_coroutine_threadsafe( + client.websocket.send(packet_data), + self.loop + ) + client.update_activity(1, len(packet_data), 'sent') + return True + + elif client.bridge_type == BridgeType.TCP_SOCKET: + # Send via TCP socket with length prefix + if client.socket: + length_prefix = struct.pack('!I', len(packet_data)) + client.socket.send(length_prefix + packet_data) + client.update_activity(1, len(packet_data), 'sent') + return True + + except Exception as e: + logging.error(f"Failed to send packet to client {client_id}: {e}") + # Mark client as inactive + client.is_active = False + + return False + + def broadcast_packet(self, packet_data: bytes, exclude_client: Optional[str] = None) -> int: + """Broadcast packet to all clients""" + sent_count = 0 + + with self.lock: + client_ids = list(self.clients.keys()) + + for client_id in client_ids: + if client_id != exclude_client: + if self.send_packet_to_client(client_id, packet_data): + sent_count += 1 + + return sent_count + + def get_clients(self) -> Dict[str, Dict]: + """Get all connected clients""" + with self.lock: + return { + client_id: client.to_dict() + for client_id, client in self.clients.items() + } + + def 
get_client(self, client_id: str) -> Optional[Dict]: + """Get specific client""" + with self.lock: + client = self.clients.get(client_id) + return client.to_dict() if client else None + + def disconnect_client(self, client_id: str) -> bool: + """Disconnect specific client""" + with self.lock: + client = self.clients.get(client_id) + if not client: + return False + + client.is_active = False + + # Close connection + if client.websocket: + try: + asyncio.run_coroutine_threadsafe( + client.websocket.close(), + self.loop + ) + except: + pass + + if client.socket: + try: + client.socket.close() + except: + pass + + del self.clients[client_id] + self.stats['active_clients'] = len(self.clients) + + return True + + def get_stats(self) -> Dict: + """Get bridge statistics""" + with self.lock: + stats = self.stats.copy() + stats['active_clients'] = len(self.clients) + + return stats + + def reset_stats(self): + """Reset bridge statistics""" + self.stats = { + 'total_clients': 0, + 'active_clients': len(self.clients), + 'packets_processed': 0, + 'packets_forwarded': 0, + 'packets_dropped': 0, + 'bytes_processed': 0, + 'websocket_connections': 0, + 'tcp_connections': 0, + 'connection_errors': 0 + } + + async def start_websocket_server(self): + """Start WebSocket server""" + try: + self.websocket_server = await websockets.serve( + self._handle_websocket_client, + self.websocket_host, + self.websocket_port, + max_size=1024*1024, # 1MB max message size + ping_interval=30, + ping_timeout=10 + ) + + logging.info(f"WebSocket bridge server started on {self.websocket_host}:{self.websocket_port}") + + # Keep server running + await self.websocket_server.wait_closed() + + except Exception as e: + logging.error(f"WebSocket server error: {e}") + + def start(self): + """Start packet bridge""" + self.running = True + + # Start event loop + self.loop = asyncio.new_event_loop() + asyncio.set_event_loop(self.loop) + + # Start WebSocket server in a separate thread + websocket_thread = 
threading.Thread(target=self._run_websocket_server_in_thread, daemon=True) + websocket_thread.start() + + # Start TCP server in separate thread + tcp_thread = threading.Thread(target=self._tcp_server_loop, daemon=True) + tcp_thread.start() + + # Start cleanup thread + cleanup_thread = threading.Thread(target=self._cleanup_loop, daemon=True) + cleanup_thread.start() + + logging.info("Packet bridge started") + + + + def stop(self): + """Stop packet bridge""" + self.running = False + + # Close WebSocket server + if self.websocket_server: + self.websocket_server.close() + + # Close TCP server + if self.tcp_server_socket: + self.tcp_server_socket.close() + + # Disconnect all clients + with self.lock: + client_ids = list(self.clients.keys()) + + for client_id in client_ids: + self.disconnect_client(client_id) + + # Stop event loop + if self.loop and not self.loop.is_closed(): + self.loop.call_soon_threadsafe(self.loop.stop) + + logging.info("Packet bridge stopped") + + + + def _run_websocket_server_in_thread(self): + """Run the WebSocket server in a separate thread with its own event loop.""" + asyncio.set_event_loop(self.loop) + self.loop.run_until_complete(self.start_websocket_server()) + + diff --git a/core/session_tracker.py b/core/session_tracker.py new file mode 100644 index 0000000000000000000000000000000000000000..96e7ca8d46757491605aad218399f958cbe31885 --- /dev/null +++ b/core/session_tracker.py @@ -0,0 +1,602 @@ +""" +Session Tracker Module + +Manages and tracks all network sessions across the virtual ISP stack: +- Unified session management across all modules +- Session lifecycle tracking +- Performance metrics and analytics +- Session correlation and debugging +""" + +import time +import threading +import uuid +from typing import Dict, List, Optional, Set, Any, Tuple +from dataclasses import dataclass, field +from enum import Enum +import json + +from .dhcp_server import DHCPLease +from .nat_engine import NATSession +from .tcp_engine import TCPConnection 
+from .socket_translator import SocketConnection + + +class SessionType(Enum): + DHCP_LEASE = "DHCP_LEASE" + NAT_SESSION = "NAT_SESSION" + TCP_CONNECTION = "TCP_CONNECTION" + SOCKET_CONNECTION = "SOCKET_CONNECTION" + BRIDGE_CLIENT = "BRIDGE_CLIENT" + + +class SessionState(Enum): + INITIALIZING = "INITIALIZING" + ACTIVE = "ACTIVE" + IDLE = "IDLE" + CLOSING = "CLOSING" + CLOSED = "CLOSED" + ERROR = "ERROR" + + +@dataclass +class SessionMetrics: + """Session performance metrics""" + bytes_in: int = 0 + bytes_out: int = 0 + packets_in: int = 0 + packets_out: int = 0 + errors: int = 0 + retransmits: int = 0 + rtt_samples: List[float] = field(default_factory=list) + + @property + def total_bytes(self) -> int: + return self.bytes_in + self.bytes_out + + @property + def total_packets(self) -> int: + return self.packets_in + self.packets_out + + @property + def average_rtt(self) -> float: + return sum(self.rtt_samples) / len(self.rtt_samples) if self.rtt_samples else 0.0 + + def update_bytes(self, bytes_in: int = 0, bytes_out: int = 0): + """Update byte counters""" + self.bytes_in += bytes_in + self.bytes_out += bytes_out + + def update_packets(self, packets_in: int = 0, packets_out: int = 0): + """Update packet counters""" + self.packets_in += packets_in + self.packets_out += packets_out + + def add_rtt_sample(self, rtt: float): + """Add RTT sample""" + self.rtt_samples.append(rtt) + # Keep only last 100 samples + if len(self.rtt_samples) > 100: + self.rtt_samples = self.rtt_samples[-100:] + + def to_dict(self) -> Dict: + """Convert to dictionary""" + return { + 'bytes_in': self.bytes_in, + 'bytes_out': self.bytes_out, + 'packets_in': self.packets_in, + 'packets_out': self.packets_out, + 'total_bytes': self.total_bytes, + 'total_packets': self.total_packets, + 'errors': self.errors, + 'retransmits': self.retransmits, + 'average_rtt': self.average_rtt, + 'rtt_samples_count': len(self.rtt_samples) + } + + +@dataclass +class UnifiedSession: + """Unified session 
representation""" + session_id: str + session_type: SessionType + state: SessionState + created_time: float + last_activity: float + + # Session identifiers + virtual_ip: Optional[str] = None + virtual_port: Optional[int] = None + real_ip: Optional[str] = None + real_port: Optional[int] = None + protocol: Optional[str] = None + + # Related sessions (for correlation) + related_sessions: Set[str] = field(default_factory=set) + parent_session: Optional[str] = None + child_sessions: Set[str] = field(default_factory=set) + + # Metrics + metrics: SessionMetrics = field(default_factory=SessionMetrics) + + # Additional data + metadata: Dict[str, Any] = field(default_factory=dict) + + def __post_init__(self): + if not self.session_id: + self.session_id = str(uuid.uuid4()) + if self.created_time == 0: + self.created_time = time.time() + if self.last_activity == 0: + self.last_activity = time.time() + + def update_activity(self): + """Update last activity timestamp""" + self.last_activity = time.time() + + def add_related_session(self, session_id: str): + """Add related session""" + self.related_sessions.add(session_id) + + def add_child_session(self, session_id: str): + """Add child session""" + self.child_sessions.add(session_id) + + def set_parent_session(self, session_id: str): + """Set parent session""" + self.parent_session = session_id + + @property + def duration(self) -> float: + """Get session duration in seconds""" + return time.time() - self.created_time + + @property + def idle_time(self) -> float: + """Get idle time in seconds""" + return time.time() - self.last_activity + + def to_dict(self) -> Dict: + """Convert to dictionary""" + return { + 'session_id': self.session_id, + 'session_type': self.session_type.value, + 'state': self.state.value, + 'created_time': self.created_time, + 'last_activity': self.last_activity, + 'duration': self.duration, + 'idle_time': self.idle_time, + 'virtual_ip': self.virtual_ip, + 'virtual_port': self.virtual_port, + 'real_ip': 
class SessionTracker:
    """Unified tracker for all session kinds (DHCP, NAT, TCP, socket, bridge).

    Thread-safety: every public method takes ``self.lock``.  The lock MUST be
    re-entrant because several locked methods call other locked methods:
    ``create_session`` -> ``_cleanup_expired_sessions`` -> ``remove_session``,
    ``get_sessions`` -> ``find_sessions``, and ``_cleanup_loop`` ->
    ``_cleanup_expired_sessions`` -> ``remove_session``.  With the previous
    plain ``threading.Lock`` every one of those paths deadlocked.
    """

    def __init__(self, config: Dict):
        self.config = config
        self.sessions: Dict[str, UnifiedSession] = {}
        # (kind, key) -> set of session ids; speeds up find_sessions()
        self.session_index: Dict[Tuple[str, str], Set[str]] = {}
        # BUG FIX: re-entrant lock -- see class docstring.
        self.lock = threading.RLock()

        # Configuration (with defaults)
        self.max_sessions = config.get('max_sessions', 10000)
        self.session_timeout = config.get('session_timeout', 3600)
        self.cleanup_interval = config.get('cleanup_interval', 300)
        self.metrics_retention = config.get('metrics_retention', 86400)  # 24 hours

        # Aggregate statistics
        self.stats = {
            'total_sessions': 0,
            'active_sessions': 0,
            'expired_sessions': 0,
            'session_types': {t.value: 0 for t in SessionType},
            'session_states': {s.value: 0 for s in SessionState},
            'cleanup_runs': 0,
            'correlations_created': 0
        }

        # Background cleanup task state
        self.running = False
        self.cleanup_thread = None

    def _generate_session_key(self, session_type: SessionType, **kwargs) -> str:
        """Build a type-specific lookup key from the identifying kwargs."""
        if session_type == SessionType.DHCP_LEASE:
            return f"dhcp_{kwargs.get('mac_address', 'unknown')}"
        elif session_type == SessionType.NAT_SESSION:
            return f"nat_{kwargs.get('virtual_ip', '')}_{kwargs.get('virtual_port', 0)}_{kwargs.get('protocol', '')}"
        elif session_type == SessionType.TCP_CONNECTION:
            return f"tcp_{kwargs.get('local_ip', '')}_{kwargs.get('local_port', 0)}_{kwargs.get('remote_ip', '')}_{kwargs.get('remote_port', 0)}"
        elif session_type == SessionType.SOCKET_CONNECTION:
            return f"socket_{kwargs.get('connection_id', 'unknown')}"
        elif session_type == SessionType.BRIDGE_CLIENT:
            return f"bridge_{kwargs.get('client_id', 'unknown')}"
        else:
            return f"unknown_{time.time()}"

    def _add_to_index(self, session: UnifiedSession):
        """Add a session to the search index (by type, IPs and protocol)."""
        type_key = (session.session_type.value, 'all')
        self.session_index.setdefault(type_key, set()).add(session.session_id)

        if session.virtual_ip:
            self.session_index.setdefault(
                ('virtual_ip', session.virtual_ip), set()).add(session.session_id)

        if session.real_ip:
            self.session_index.setdefault(
                ('real_ip', session.real_ip), set()).add(session.session_id)

        if session.protocol:
            self.session_index.setdefault(
                ('protocol', session.protocol), set()).add(session.session_id)

    def _remove_from_index(self, session: UnifiedSession):
        """Remove a session from the search index, pruning empty buckets.

        Iterates over a snapshot of the keys so empty buckets can be deleted
        in place (previously empty sets were left behind forever).
        """
        for key in list(self.session_index):
            bucket = self.session_index[key]
            bucket.discard(session.session_id)
            if not bucket:
                del self.session_index[key]

    def create_session(self, session_type: SessionType, **kwargs) -> str:
        """Create a new session; returns its id, or None when at capacity."""
        with self.lock:
            # Enforce the session limit; try to reclaim expired ones first.
            if len(self.sessions) >= self.max_sessions:
                self._cleanup_expired_sessions()
                if len(self.sessions) >= self.max_sessions:
                    return None

            session = UnifiedSession(
                session_id=kwargs.get('session_id', str(uuid.uuid4())),
                session_type=session_type,
                state=SessionState.INITIALIZING,
                virtual_ip=kwargs.get('virtual_ip'),
                virtual_port=kwargs.get('virtual_port'),
                real_ip=kwargs.get('real_ip'),
                real_port=kwargs.get('real_port'),
                protocol=kwargs.get('protocol'),
                metadata=kwargs.get('metadata', {})
            )

            self.sessions[session.session_id] = session
            self._add_to_index(session)

            self.stats['total_sessions'] += 1
            self.stats['active_sessions'] = len(self.sessions)
            self.stats['session_types'][session_type.value] += 1
            self.stats['session_states'][SessionState.INITIALIZING.value] += 1

            return session.session_id

    def update_session(self, session_id: str, **kwargs) -> bool:
        """Update arbitrary session attributes; returns False if unknown id."""
        with self.lock:
            session = self.sessions.get(session_id)
            if not session:
                return False

            old_state = session.state

            for key, value in kwargs.items():
                if hasattr(session, key):
                    setattr(session, key, value)

            session.update_activity()

            # Keep the per-state counters consistent on state transitions.
            if 'state' in kwargs and kwargs['state'] != old_state:
                self.stats['session_states'][old_state.value] -= 1
                self.stats['session_states'][kwargs['state'].value] += 1

            return True

    def close_session(self, session_id: str, reason: str = "") -> bool:
        """Mark a session CLOSED (it is removed later by cleanup)."""
        with self.lock:
            session = self.sessions.get(session_id)
            if not session:
                return False

            old_state = session.state
            session.state = SessionState.CLOSED
            session.update_activity()

            if reason:
                session.metadata['close_reason'] = reason

            self.stats['session_states'][old_state.value] -= 1
            self.stats['session_states'][SessionState.CLOSED.value] += 1

            return True

    def remove_session(self, session_id: str) -> bool:
        """Remove a session and all bookkeeping for it."""
        with self.lock:
            session = self.sessions.get(session_id)
            if not session:
                return False

            self._remove_from_index(session)
            del self.sessions[session_id]

            self.stats['active_sessions'] = len(self.sessions)
            self.stats['session_types'][session.session_type.value] -= 1
            self.stats['session_states'][session.state.value] -= 1

            return True

    def get_session(self, session_id: str) -> Optional[UnifiedSession]:
        """Look up a session by id."""
        with self.lock:
            return self.sessions.get(session_id)

    def find_sessions(self, **criteria) -> List[UnifiedSession]:
        """Find sessions matching all given attribute criteria.

        Uses the index for session_type / virtual_ip / real_ip / protocol,
        then filters the candidates attribute-by-attribute.
        """
        with self.lock:
            if 'session_type' in criteria:
                st = criteria['session_type']
                type_key = (st.value if isinstance(st, SessionType) else st, 'all')
                candidate_ids = self.session_index.get(type_key, set())
            elif 'virtual_ip' in criteria:
                candidate_ids = self.session_index.get(('virtual_ip', criteria['virtual_ip']), set())
            elif 'real_ip' in criteria:
                candidate_ids = self.session_index.get(('real_ip', criteria['real_ip']), set())
            elif 'protocol' in criteria:
                candidate_ids = self.session_index.get(('protocol', criteria['protocol']), set())
            else:
                candidate_ids = set(self.sessions.keys())

            matching_sessions = []
            for session_id in candidate_ids:
                session = self.sessions.get(session_id)
                if not session:
                    continue

                match = True
                for key, value in criteria.items():
                    # Enum values compare by equality, so a single != covers
                    # both plain and enum-typed criteria.
                    if not hasattr(session, key) or getattr(session, key) != value:
                        match = False
                        break

                if match:
                    matching_sessions.append(session)

            return matching_sessions

    def correlate_sessions(self, session_id1: str, session_id2: str, relationship: str = 'related') -> bool:
        """Link two sessions, either symmetrically or as parent/child."""
        with self.lock:
            session1 = self.sessions.get(session_id1)
            session2 = self.sessions.get(session_id2)

            if not session1 or not session2:
                return False

            if relationship == 'parent_child':
                session1.add_child_session(session_id2)
                session2.set_parent_session(session_id1)
            else:
                session1.add_related_session(session_id2)
                session2.add_related_session(session_id1)

            self.stats['correlations_created'] += 1
            return True

    def update_metrics(self, session_id: str, **metrics) -> bool:
        """Fold byte/packet/RTT/error counters into a session's metrics."""
        with self.lock:
            session = self.sessions.get(session_id)
            if not session:
                return False

            session.update_activity()

            if 'bytes_in' in metrics or 'bytes_out' in metrics:
                session.metrics.update_bytes(
                    metrics.get('bytes_in', 0),
                    metrics.get('bytes_out', 0)
                )

            if 'packets_in' in metrics or 'packets_out' in metrics:
                session.metrics.update_packets(
                    metrics.get('packets_in', 0),
                    metrics.get('packets_out', 0)
                )

            if 'rtt' in metrics:
                session.metrics.add_rtt_sample(metrics['rtt'])

            if 'errors' in metrics:
                session.metrics.errors += metrics['errors']

            if 'retransmits' in metrics:
                session.metrics.retransmits += metrics['retransmits']

            return True

    def _cleanup_expired_sessions(self):
        """Remove CLOSED sessions past the cleanup interval and stale live ones.

        Caller must hold ``self.lock`` (re-entrant, so the nested
        ``remove_session`` calls are safe).
        """
        current_time = time.time()
        expired_sessions = []

        for session_id, session in self.sessions.items():
            if (session.state == SessionState.CLOSED and
                    current_time - session.last_activity > self.cleanup_interval):
                expired_sessions.append(session_id)
            elif (session.state != SessionState.CLOSED and
                    current_time - session.last_activity > self.session_timeout):
                expired_sessions.append(session_id)

        # Removal happens after collection so the dict is not mutated
        # while being iterated.
        for session_id in expired_sessions:
            self.remove_session(session_id)
            self.stats['expired_sessions'] += 1

    def _cleanup_loop(self):
        """Background thread body: periodic expired-session cleanup."""
        while self.running:
            try:
                with self.lock:
                    self._cleanup_expired_sessions()
                    self.stats['cleanup_runs'] += 1

                time.sleep(self.cleanup_interval)

            except Exception as e:
                print(f"Session tracker cleanup error: {e}")
                time.sleep(60)

    def get_sessions(self, limit: int = 100, offset: int = 0, **filters) -> List[Dict]:
        """Return serialized sessions, newest activity first, paginated."""
        with self.lock:
            if filters:
                # Safe: find_sessions re-acquires the (re-entrant) lock.
                sessions = self.find_sessions(**filters)
            else:
                sessions = list(self.sessions.values())

            sessions.sort(key=lambda s: s.last_activity, reverse=True)
            paginated_sessions = sessions[offset:offset + limit]

            return [session.to_dict() for session in paginated_sessions]

    def get_session_summary(self) -> Dict:
        """Summarize current sessions by type, state, protocol and age."""
        with self.lock:
            summary = {
                'total_sessions': len(self.sessions),
                'by_type': {},
                'by_state': {},
                'by_protocol': {},
                'active_sessions_by_age': {
                    'last_hour': 0,
                    'last_day': 0,
                    'older': 0
                }
            }

            current_time = time.time()
            hour_ago = current_time - 3600
            day_ago = current_time - 86400

            for session in self.sessions.values():
                session_type = session.session_type.value
                summary['by_type'][session_type] = summary['by_type'].get(session_type, 0) + 1

                session_state = session.state.value
                summary['by_state'][session_state] = summary['by_state'].get(session_state, 0) + 1

                if session.protocol:
                    summary['by_protocol'][session.protocol] = summary['by_protocol'].get(session.protocol, 0) + 1

                if session.last_activity > hour_ago:
                    summary['active_sessions_by_age']['last_hour'] += 1
                elif session.last_activity > day_ago:
                    summary['active_sessions_by_age']['last_day'] += 1
                else:
                    summary['active_sessions_by_age']['older'] += 1

            return summary

    def get_stats(self) -> Dict:
        """Return a snapshot of the tracker statistics."""
        with self.lock:
            stats = self.stats.copy()
            stats['active_sessions'] = len(self.sessions)

            return stats

    def reset_stats(self):
        """Reset cumulative statistics, recounting the live sessions.

        The whole reset now runs under the lock; previously ``self.stats``
        was replaced outside the lock, racing with concurrent updates.
        """
        with self.lock:
            self.stats = {
                'total_sessions': len(self.sessions),
                'active_sessions': len(self.sessions),
                'expired_sessions': 0,
                'session_types': {t.value: 0 for t in SessionType},
                'session_states': {s.value: 0 for s in SessionState},
                'cleanup_runs': 0,
                'correlations_created': 0
            }

            for session in self.sessions.values():
                self.stats['session_types'][session.session_type.value] += 1
                self.stats['session_states'][session.state.value] += 1

    def export_sessions(self, format: str = 'json') -> str:
        """Export all sessions; only 'json' is supported."""
        with self.lock:
            sessions_data = [session.to_dict() for session in self.sessions.values()]

        if format == 'json':
            return json.dumps(sessions_data, indent=2, default=str)
        else:
            raise ValueError(f"Unsupported export format: {format}")

    def start(self):
        """Start the background cleanup thread."""
        self.running = True
        self.cleanup_thread = threading.Thread(target=self._cleanup_loop, daemon=True)
        self.cleanup_thread.start()
        print("Session tracker started")

    def stop(self):
        """Stop the tracker; bounded join (the daemon cleanup thread may be
        sleeping for up to cleanup_interval seconds)."""
        self.running = False
        if self.cleanup_thread:
            self.cleanup_thread.join(timeout=5)
        print("Session tracker stopped")
class ConnectionType(Enum):
    """How a virtual connection is bridged to the host side."""
    TCP_SOCKET = "TCP_SOCKET"
    UDP_SOCKET = "UDP_SOCKET"
    HTTP_CLIENT = "HTTP_CLIENT"
    HTTPS_CLIENT = "HTTPS_CLIENT"


@dataclass
class SocketConnection:
    """State of one bridged connection (virtual side <-> host side)."""
    connection_id: str
    connection_type: ConnectionType
    # String forward-ref: avoids evaluating the tcp_engine type at class
    # creation time; behavior is unchanged (dataclasses keep it as-is).
    virtual_connection: Optional["TCPConnection"]
    host_socket: Optional[socket.socket]
    remote_host: str
    remote_port: int
    created_time: float
    last_activity: float
    bytes_sent: int = 0
    bytes_received: int = 0
    is_connected: bool = False
    error_count: int = 0

    def update_activity(self, bytes_transferred: int = 0, direction: str = 'sent'):
        """Refresh the activity timestamp and count bytes by direction."""
        self.last_activity = time.time()
        if direction == 'sent':
            self.bytes_sent += bytes_transferred
        else:
            self.bytes_received += bytes_transferred

    def to_dict(self) -> Dict:
        """Serialize the connection state (plus derived duration)."""
        return {
            'connection_id': self.connection_id,
            'connection_type': self.connection_type.value,
            'remote_host': self.remote_host,
            'remote_port': self.remote_port,
            'created_time': self.created_time,
            'last_activity': self.last_activity,
            'bytes_sent': self.bytes_sent,
            'bytes_received': self.bytes_received,
            'is_connected': self.is_connected,
            'error_count': self.error_count,
            'duration': time.time() - self.created_time
        }


class HTTPRequest:
    """Minimal HTTP/1.1 request model (method, path, headers, raw body)."""

    def __init__(self, method: str = 'GET', path: str = '/', headers: Dict[str, str] = None, body: bytes = b''):
        self.method = method.upper()
        self.path = path
        self.headers = headers or {}
        self.body = body
        self.version = 'HTTP/1.1'

    @classmethod
    def parse(cls, data: bytes) -> Optional['HTTPRequest']:
        """Parse an HTTP request from raw bytes; None on malformed input.

        BUG FIX: only the header section is decoded as text; the body is
        kept as the original bytes.  The previous implementation decoded
        the whole buffer with errors='ignore' and re-encoded it, silently
        corrupting binary bodies, and leaked header lines into the body
        when no blank line was present.
        """
        try:
            head, sep, body = data.partition(b'\r\n\r\n')
            lines = head.decode('utf-8', errors='ignore').split('\r\n')
            if not lines:
                return None

            request_line = lines[0].split(' ')
            if len(request_line) < 3:
                return None
            method, path = request_line[0], request_line[1]

            headers = {}
            for line in lines[1:]:
                if ':' in line:
                    key, value = line.split(':', 1)
                    headers[key.strip().lower()] = value.strip()

            # No terminator found -> no body (sep is empty in that case).
            return cls(method, path, headers, body if sep else b'')

        except Exception:
            return None

    def to_bytes(self) -> bytes:
        """Serialize to a raw HTTP request.

        Works on a copy of the headers so serialization does not mutate
        ``self.headers`` (the old code added default headers in place).
        """
        headers = dict(self.headers)
        headers.setdefault('host', 'localhost')
        headers.setdefault('user-agent', 'VirtualISP/1.0')
        if self.body:
            headers.setdefault('content-length', str(len(self.body)))

        request_line = f"{self.method} {self.path} {self.version}\r\n"
        header_block = ''.join(f"{key}: {value}\r\n" for key, value in headers.items())
        return (request_line + header_block + '\r\n').encode('utf-8') + self.body


class HTTPResponse:
    """Minimal HTTP/1.1 response model (status, reason, headers, raw body)."""

    def __init__(self, status_code: int = 200, reason: str = 'OK', headers: Dict[str, str] = None, body: bytes = b''):
        self.status_code = status_code
        self.reason = reason
        self.headers = headers or {}
        self.body = body
        self.version = 'HTTP/1.1'

    @classmethod
    def parse(cls, data: bytes) -> Optional['HTTPResponse']:
        """Parse an HTTP response from raw bytes; None on malformed input.

        Same binary-safe strategy as HTTPRequest.parse: decode only the
        header section, keep the body as raw bytes.
        """
        try:
            head, sep, body = data.partition(b'\r\n\r\n')
            lines = head.decode('utf-8', errors='ignore').split('\r\n')
            if not lines:
                return None

            status_line = lines[0].split(' ', 2)
            if len(status_line) < 3:
                return None
            status_code, reason = int(status_line[1]), status_line[2]

            headers = {}
            for line in lines[1:]:
                if ':' in line:
                    key, value = line.split(':', 1)
                    headers[key.strip().lower()] = value.strip()

            return cls(status_code, reason, headers, body if sep else b'')

        except Exception:
            return None

    def to_bytes(self) -> bytes:
        """Serialize to a raw HTTP response (headers copied, not mutated)."""
        headers = dict(self.headers)
        if self.body:
            headers.setdefault('content-length', str(len(self.body)))
        headers.setdefault('server', 'VirtualISP/1.0')

        status_line = f"{self.version} {self.status_code} {self.reason}\r\n"
        header_block = ''.join(f"{key}: {value}\r\n" for key, value in headers.items())
        return (status_line + header_block + '\r\n').encode('utf-8') + self.body
"""Convert to raw HTTP response""" + status_line = f"{self.version} {self.status_code} {self.reason}\r\n" + + # Add default headers + if 'content-length' not in self.headers and self.body: + self.headers['content-length'] = str(len(self.body)) + if 'server' not in self.headers: + self.headers['server'] = 'VirtualISP/1.0' + + # Build headers + header_lines = [] + for key, value in self.headers.items(): + header_lines.append(f"{key}: {value}\r\n") + + # Combine all parts + response_data = status_line + ''.join(header_lines) + '\r\n' + return response_data.encode('utf-8') + self.body + + +class SocketTranslator: + """Socket translator implementation""" + + def __init__(self, config: Dict): + self.config = config + self.connections: Dict[str, SocketConnection] = {} + self.lock = threading.Lock() + + # Configuration + self.connect_timeout = config.get('connect_timeout', 10) + self.read_timeout = config.get('read_timeout', 30) + self.max_connections = config.get('max_connections', 1000) + self.buffer_size = config.get('buffer_size', 8192) + + # HTTP client session + self.http_session = None + self.loop = None + + # Statistics + self.stats = { + 'total_connections': 0, + 'active_connections': 0, + 'failed_connections': 0, + 'bytes_transferred': 0, + 'http_requests': 0, + 'tcp_connections': 0, + 'udp_connections': 0 + } + + # Background tasks + self.running = False + self.cleanup_thread = None + + async def _init_http_session(self): + """Initialize HTTP client session""" + connector = aiohttp.TCPConnector( + limit=100, + limit_per_host=10, + ttl_dns_cache=300, + use_dns_cache=True, + ) + + timeout = aiohttp.ClientTimeout( + total=self.read_timeout, + connect=self.connect_timeout + ) + + self.http_session = aiohttp.ClientSession( + connector=connector, + timeout=timeout, + headers={'User-Agent': 'VirtualISP/1.0'} + ) + + def _is_http_request(self, data: bytes) -> bool: + """Check if data looks like an HTTP request""" + try: + first_line = 
data.split(b'\r\n')[0].decode('utf-8', errors='ignore') + methods = ['GET', 'POST', 'PUT', 'DELETE', 'HEAD', 'OPTIONS', 'PATCH', 'TRACE'] + return any(first_line.startswith(method + ' ') for method in methods) + except: + return False + + def _determine_connection_type(self, remote_host: str, remote_port: int, data: bytes = b'') -> ConnectionType: + """Determine the appropriate connection type""" + # Check for HTTP/HTTPS based on port and data + if remote_port == 80 or (data and self._is_http_request(data)): + return ConnectionType.HTTP_CLIENT + elif remote_port == 443: + return ConnectionType.HTTPS_CLIENT + else: + return ConnectionType.TCP_SOCKET + + def create_connection(self, virtual_conn: TCPConnection, remote_host: str, remote_port: int, + initial_data: bytes = b'') -> Optional[SocketConnection]: + """Create a new socket connection""" + connection_id = f"{virtual_conn.connection_id}->{remote_host}:{remote_port}" + + # Check connection limit + with self.lock: + if len(self.connections) >= self.max_connections: + return None + + # Determine connection type + conn_type = self._determine_connection_type(remote_host, remote_port, initial_data) + + # Create socket connection + socket_conn = SocketConnection( + connection_id=connection_id, + connection_type=conn_type, + virtual_connection=virtual_conn, + host_socket=None, + remote_host=remote_host, + remote_port=remote_port, + created_time=time.time(), + last_activity=time.time() + ) + + with self.lock: + self.connections[connection_id] = socket_conn + + # Establish connection based on type + if conn_type in [ConnectionType.HTTP_CLIENT, ConnectionType.HTTPS_CLIENT]: + success = self._create_http_connection(socket_conn, initial_data) + else: + success = self._create_tcp_connection(socket_conn, initial_data) + + if success: + self.stats['total_connections'] += 1 + self.stats['active_connections'] = len(self.connections) + + if conn_type in [ConnectionType.HTTP_CLIENT, ConnectionType.HTTPS_CLIENT]: + 
self.stats['http_requests'] += 1 + else: + self.stats['tcp_connections'] += 1 + else: + self.stats['failed_connections'] += 1 + with self.lock: + if connection_id in self.connections: + del self.connections[connection_id] + return None + + return socket_conn + + def _create_tcp_connection(self, socket_conn: SocketConnection, initial_data: bytes) -> bool: + """Create TCP socket connection""" + try: + # Create socket + sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM) + sock.settimeout(self.connect_timeout) + + # Connect + sock.connect((socket_conn.remote_host, socket_conn.remote_port)) + sock.settimeout(self.read_timeout) + + socket_conn.host_socket = sock + socket_conn.is_connected = True + + # Send initial data if any + if initial_data: + sock.send(initial_data) + socket_conn.update_activity(len(initial_data), 'sent') + + # Start background thread for receiving data + thread = threading.Thread( + target=self._tcp_receive_loop, + args=(socket_conn,), + daemon=True + ) + thread.start() + + return True + + except Exception as e: + print(f"Failed to create TCP connection to {socket_conn.remote_host}:{socket_conn.remote_port}: {e}") + socket_conn.error_count += 1 + return False + + def _create_http_connection(self, socket_conn: SocketConnection, initial_data: bytes) -> bool: + """Create HTTP connection""" + try: + # Parse HTTP request + http_request = HTTPRequest.parse(initial_data) + if not http_request: + return False + + # Set host header + http_request.headers['host'] = socket_conn.remote_host + + # Start async HTTP request + if self.loop and not self.loop.is_closed(): + asyncio.run_coroutine_threadsafe( + self._handle_http_request(socket_conn, http_request), + self.loop + ) + else: + # Fallback to sync HTTP handling + return self._handle_http_request_sync(socket_conn, http_request) + + return True + + except Exception as e: + print(f"Failed to create HTTP connection to {socket_conn.remote_host}:{socket_conn.remote_port}: {e}") + socket_conn.error_count += 1 
+ return False + + async def _handle_http_request(self, socket_conn: SocketConnection, http_request: HTTPRequest): + """Handle HTTP request asynchronously""" + try: + if not self.http_session: + await self._init_http_session() + + # Build URL + scheme = 'https' if socket_conn.connection_type == ConnectionType.HTTPS_CLIENT else 'http' + url = f"{scheme}://{socket_conn.remote_host}:{socket_conn.remote_port}{http_request.path}" + + # Make request + async with self.http_session.request( + method=http_request.method, + url=url, + headers=http_request.headers, + data=http_request.body + ) as response: + # Read response + response_body = await response.read() + + # Create HTTP response + http_response = HTTPResponse( + status_code=response.status, + reason=response.reason or 'OK', + headers=dict(response.headers), + body=response_body + ) + + # Send response back to virtual connection + response_data = http_response.to_bytes() + if socket_conn.virtual_connection and socket_conn.virtual_connection.on_data_received: + socket_conn.virtual_connection.on_data_received(response_data) + + socket_conn.update_activity(len(response_data), 'received') + self.stats['bytes_transferred'] += len(response_data) + + except Exception as e: + print(f"HTTP request failed: {e}") + socket_conn.error_count += 1 + + # Send error response + error_response = HTTPResponse( + status_code=500, + reason='Internal Server Error', + body=f"Error: {str(e)}".encode('utf-8') + ) + + response_data = error_response.to_bytes() + if socket_conn.virtual_connection and socket_conn.virtual_connection.on_data_received: + socket_conn.virtual_connection.on_data_received(response_data) + + def _handle_http_request_sync(self, socket_conn: SocketConnection, http_request: HTTPRequest) -> bool: + """Handle HTTP request synchronously (fallback)""" + try: + # Use urllib for sync HTTP requests + scheme = 'https' if socket_conn.connection_type == ConnectionType.HTTPS_CLIENT else 'http' + url = 
f"{scheme}://{socket_conn.remote_host}:{socket_conn.remote_port}{http_request.path}" + + import urllib.request + import urllib.error + + # Create request + req = urllib.request.Request( + url, + data=http_request.body if http_request.body else None, + headers=http_request.headers, + method=http_request.method + ) + + # Make request + with urllib.request.urlopen(req, timeout=self.read_timeout) as response: + response_body = response.read() + + # Create HTTP response + http_response = HTTPResponse( + status_code=response.getcode(), + reason='OK', + headers=dict(response.headers), + body=response_body + ) + + # Send response back to virtual connection + response_data = http_response.to_bytes() + if socket_conn.virtual_connection and socket_conn.virtual_connection.on_data_received: + socket_conn.virtual_connection.on_data_received(response_data) + + socket_conn.update_activity(len(response_data), 'received') + self.stats['bytes_transferred'] += len(response_data) + + return True + + except Exception as e: + print(f"Sync HTTP request failed: {e}") + socket_conn.error_count += 1 + return False + + def _tcp_receive_loop(self, socket_conn: SocketConnection): + """Background loop for receiving TCP data""" + sock = socket_conn.host_socket + if not sock: + return + + try: + while socket_conn.is_connected: + try: + data = sock.recv(self.buffer_size) + if not data: + break + + # Forward data to virtual connection + if socket_conn.virtual_connection and socket_conn.virtual_connection.on_data_received: + socket_conn.virtual_connection.on_data_received(data) + + socket_conn.update_activity(len(data), 'received') + self.stats['bytes_transferred'] += len(data) + + except socket.timeout: + continue + except Exception as e: + print(f"TCP receive error: {e}") + break + + finally: + self._close_connection(socket_conn.connection_id) + + def send_data(self, connection_id: str, data: bytes) -> bool: + """Send data through socket connection""" + with self.lock: + socket_conn = 
self.connections.get(connection_id) + + if not socket_conn or not socket_conn.is_connected: + return False + + try: + if socket_conn.connection_type in [ConnectionType.HTTP_CLIENT, ConnectionType.HTTPS_CLIENT]: + # For HTTP connections, treat as new request + return self._create_http_connection(socket_conn, data) + else: + # TCP connection + if socket_conn.host_socket: + socket_conn.host_socket.send(data) + socket_conn.update_activity(len(data), 'sent') + self.stats['bytes_transferred'] += len(data) + return True + + except Exception as e: + print(f"Failed to send data: {e}") + socket_conn.error_count += 1 + self._close_connection(connection_id) + + return False + + def _close_connection(self, connection_id: str): + """Close socket connection""" + with self.lock: + socket_conn = self.connections.get(connection_id) + if not socket_conn: + return + + # Close socket + if socket_conn.host_socket: + try: + socket_conn.host_socket.close() + except: + pass + + socket_conn.is_connected = False + + # Remove from connections + del self.connections[connection_id] + + self.stats['active_connections'] = len(self.connections) + + def close_connection(self, connection_id: str) -> bool: + """Manually close connection""" + self._close_connection(connection_id) + return True + + def get_connection(self, connection_id: str) -> Optional[SocketConnection]: + """Get socket connection""" + with self.lock: + return self.connections.get(connection_id) + + def get_connections(self) -> Dict[str, Dict]: + """Get all socket connections""" + with self.lock: + return { + conn_id: conn.to_dict() + for conn_id, conn in self.connections.items() + } + + def get_stats(self) -> Dict: + """Get socket translator statistics""" + with self.lock: + stats = self.stats.copy() + stats['active_connections'] = len(self.connections) + + return stats + + def _cleanup_loop(self): + """Background cleanup loop""" + while self.running: + try: + current_time = time.time() + expired_connections = [] + + with self.lock: 
+ for conn_id, conn in self.connections.items(): + # Close connections that have been inactive too long + if current_time - conn.last_activity > self.read_timeout * 2: + expired_connections.append(conn_id) + + for conn_id in expired_connections: + self._close_connection(conn_id) + + time.sleep(30) # Cleanup every 30 seconds + + except Exception as e: + print(f"Socket translator cleanup error: {e}") + time.sleep(5) + + def start(self): + """Start socket translator""" + self.running = True + + # Start event loop for async HTTP + try: + self.loop = asyncio.new_event_loop() + asyncio.set_event_loop(self.loop) + + # Start cleanup thread + self.cleanup_thread = threading.Thread(target=self._cleanup_loop, daemon=True) + self.cleanup_thread.start() + + print("Socket translator started") + except Exception as e: + print(f"Failed to start socket translator: {e}") + + def stop(self): + """Stop socket translator""" + self.running = False + + # Close all connections + with self.lock: + connection_ids = list(self.connections.keys()) + + for conn_id in connection_ids: + self._close_connection(conn_id) + + # Close HTTP session + if self.http_session: + asyncio.run_coroutine_threadsafe(self.http_session.close(), self.loop) + + # Close event loop + if self.loop and not self.loop.is_closed(): + self.loop.call_soon_threadsafe(self.loop.stop) + + # Wait for cleanup thread + if self.cleanup_thread: + self.cleanup_thread.join() + + print("Socket translator stopped") + diff --git a/core/tcp_engine.py b/core/tcp_engine.py new file mode 100644 index 0000000000000000000000000000000000000000..624b6013af6cfd04d55c0f64c9c9a0849c34a9a2 --- /dev/null +++ b/core/tcp_engine.py @@ -0,0 +1,716 @@ +""" +TCP Engine Module + +Implements a complete TCP state machine in user-space: +- Full TCP state machine (SYN, SYN-ACK, ESTABLISHED, FIN, RST) +- Sequence and acknowledgment number tracking +- Sliding window implementation +- Retransmission and timeout handling +- Congestion control +""" + +import time 
+import threading +import random +from typing import Dict, List, Optional, Tuple, Callable +from dataclasses import dataclass, field +from enum import Enum +from collections import deque + +from .ip_parser import TCPHeader, IPv4Header, IPParser + + +class TCPState(Enum): + CLOSED = "CLOSED" + LISTEN = "LISTEN" + SYN_SENT = "SYN_SENT" + SYN_RECEIVED = "SYN_RECEIVED" + ESTABLISHED = "ESTABLISHED" + FIN_WAIT_1 = "FIN_WAIT_1" + FIN_WAIT_2 = "FIN_WAIT_2" + CLOSE_WAIT = "CLOSE_WAIT" + CLOSING = "CLOSING" + LAST_ACK = "LAST_ACK" + TIME_WAIT = "TIME_WAIT" + + +@dataclass +class TCPSegment: + """Represents a TCP segment""" + seq_num: int + ack_num: int + flags: int + window: int + data: bytes + timestamp: float = field(default_factory=time.time) + retransmit_count: int = 0 + + @property + def data_length(self) -> int: + """Get data length""" + return len(self.data) + + @property + def seq_end(self) -> int: + """Get sequence number after this segment""" + length = self.data_length + # SYN and FIN consume one sequence number + if self.flags & 0x02: # SYN + length += 1 + if self.flags & 0x01: # FIN + length += 1 + return self.seq_num + length + + +@dataclass +class TCPConnection: + """Represents a TCP connection state""" + # Connection identification + local_ip: str + local_port: int + remote_ip: str + remote_port: int + + # State + state: TCPState = TCPState.CLOSED + + # Sequence numbers + local_seq: int = 0 + local_ack: int = 0 + remote_seq: int = 0 + remote_ack: int = 0 + initial_seq: int = 0 + + # Window management + local_window: int = 65535 + remote_window: int = 65535 + window_scale: int = 0 + + # Buffers + send_buffer: deque = field(default_factory=deque) + recv_buffer: deque = field(default_factory=deque) + out_of_order_buffer: Dict[int, bytes] = field(default_factory=dict) + + # Retransmission + unacked_segments: Dict[int, TCPSegment] = field(default_factory=dict) + retransmit_timer: Optional[float] = None + rto: float = 1.0 # Retransmission timeout + srtt: float = 
class TCPEngine:
    """TCP state machine implementation.

    User-space TCP: handshake, data transfer with a simplified sliding
    window, retransmission with RFC 6298 RTO estimation, and teardown.
    NOTE(review): `_send_packet` is a stub; the integration layer (packet
    bridge) is expected to override/wire it.
    """

    def __init__(self, config: Dict):
        self.config = config
        self.connections: Dict[str, TCPConnection] = {}
        self.listening_ports: Dict[int, Callable] = {}  # port -> accept callback
        self.lock = threading.Lock()
        self.running = False
        self.timer_thread = None

        # Default configuration
        self.default_mss = config.get('mss', 1460)
        self.default_window = config.get('initial_window', 65535)
        self.max_retries = config.get('max_retries', 3)
        self.connection_timeout = config.get('timeout', 300)
        self.time_wait_timeout = config.get('time_wait_timeout', 120)

    def _generate_isn(self) -> int:
        """Generate Initial Sequence Number.

        NOTE(review): uses `random`, not `secrets` — predictable ISNs are a
        known TCP-spoofing weakness; acceptable only for a virtual lab stack.
        """
        return random.randint(0, 0xFFFFFFFF)

    def _get_connection_key(self, local_ip: str, local_port: int, remote_ip: str, remote_port: int) -> str:
        """Get connection key (same format as TCPConnection.connection_id)."""
        return f"{local_ip}:{local_port}-{remote_ip}:{remote_port}"

    def _create_tcp_segment(self, conn: TCPConnection, flags: int, data: bytes = b'') -> TCPSegment:
        """Create TCP segment stamped with the connection's current seq/ack."""
        segment = TCPSegment(
            seq_num=conn.local_seq,
            ack_num=conn.local_ack,
            flags=flags,
            window=conn.local_window,
            data=data
        )
        return segment

    def _build_tcp_packet(self, conn: TCPConnection, segment: TCPSegment) -> bytes:
        """Build complete TCP packet (IP header + TCP header + payload)."""
        # Create IP header
        ip_header = IPv4Header(
            protocol=6,  # TCP
            source_ip=conn.local_ip,
            dest_ip=conn.remote_ip,
            ttl=64
        )

        # Create TCP header
        tcp_header = TCPHeader(
            source_port=conn.local_port,
            dest_port=conn.remote_port,
            seq_num=segment.seq_num,
            ack_num=segment.ack_num,
            flags=segment.flags,
            window_size=segment.window
        )

        # Build packet
        return IPParser.build_packet(ip_header, tcp_header, segment.data)

    def _update_rto(self, conn: TCPConnection, rtt: float):
        """Update retransmission timeout using RFC 6298."""
        if conn.srtt == 0:
            # First RTT measurement
            conn.srtt = rtt
            conn.rttvar = rtt / 2
        else:
            # Subsequent measurements (RFC 6298 recommended gains)
            alpha = 0.125
            beta = 0.25
            conn.rttvar = (1 - beta) * conn.rttvar + beta * abs(conn.srtt - rtt)
            conn.srtt = (1 - alpha) * conn.srtt + alpha * rtt

        # Calculate RTO, clamped to [1s, 60s]
        conn.rto = max(1.0, conn.srtt + 4 * conn.rttvar)
        conn.rto = min(conn.rto, 60.0)  # Cap at 60 seconds

    def _update_congestion_window(self, conn: TCPConnection, acked_bytes: int):
        """Update congestion window (simplified congestion control).

        NOTE(review): `cwnd` is documented as MSS units; the
        congestion-avoidance increment `mss*mss // cwnd` is the byte-unit
        formula — confirm the intended unit, the two branches disagree.
        """
        if conn.cwnd < conn.ssthresh:
            # Slow start
            conn.cwnd += 1
        else:
            # Congestion avoidance
            conn.cwnd += max(1, conn.mss * conn.mss // conn.cwnd)

    def _handle_retransmission(self, conn: TCPConnection):
        """Handle segment retransmission for timed-out unacked segments."""
        current_time = time.time()

        # Find segments that need retransmission
        to_retransmit = []
        for seq_num, segment in conn.unacked_segments.items():
            if current_time - segment.timestamp > conn.rto:
                if segment.retransmit_count < self.max_retries:
                    to_retransmit.append(segment)
                else:
                    # Max retries exceeded, close connection
                    self._close_connection(conn, reset=True)
                    return

        # Retransmit segments
        for segment in to_retransmit:
            segment.retransmit_count += 1
            segment.timestamp = current_time

            # Exponential backoff (applied per segment)
            conn.rto = min(conn.rto * 2, 60.0)

            # Congestion control: collapse to slow start on loss
            conn.ssthresh = max(conn.cwnd // 2, 2)
            conn.cwnd = 1

            # Send retransmitted segment
            packet = self._build_tcp_packet(conn, segment)
            self._send_packet(packet)

    def _send_packet(self, packet: bytes):
        """Send packet (to be implemented by integration layer)."""
        # This will be connected to the packet bridge
        pass

    def _close_connection(self, conn: TCPConnection, reset: bool = False):
        """Close connection, either abortively (RST) or gracefully (FIN)."""
        if reset:
            # Send RST
            segment = self._create_tcp_segment(conn, 0x04)  # RST flag
            packet = self._build_tcp_packet(conn, segment)
            self._send_packet(packet)
            conn.state = TCPState.CLOSED
        else:
            # Normal close
            if conn.state == TCPState.ESTABLISHED:
                # Send FIN
                segment = self._create_tcp_segment(conn, 0x01)  # FIN flag
                packet = self._build_tcp_packet(conn, segment)
                self._send_packet(packet)
                conn.local_seq += 1  # FIN consumes one sequence number
                conn.state = TCPState.FIN_WAIT_1

        # Cleanup if closed
        if conn.state == TCPState.CLOSED:
            if conn.on_connection_closed:
                conn.on_connection_closed()

            with self.lock:
                if conn.connection_id in self.connections:
                    del self.connections[conn.connection_id]

    def listen(self, port: int, accept_callback: Callable):
        """Listen on port; accept_callback(conn) fires on each new connection."""
        with self.lock:
            self.listening_ports[port] = accept_callback

    def connect(self, local_ip: str, local_port: int, remote_ip: str, remote_port: int) -> Optional[TCPConnection]:
        """Initiate outbound connection (active open: sends SYN)."""
        conn_key = self._get_connection_key(local_ip, local_port, remote_ip, remote_port)

        # Create connection
        conn = TCPConnection(
            local_ip=local_ip,
            local_port=local_port,
            remote_ip=remote_ip,
            remote_port=remote_port,
            state=TCPState.SYN_SENT,
            local_seq=self._generate_isn(),
            mss=self.default_mss,
            local_window=self.default_window
        )
        conn.initial_seq = conn.local_seq

        with self.lock:
            self.connections[conn_key] = conn

        # Send SYN
        segment = self._create_tcp_segment(conn, 0x02)  # SYN flag
        packet = self._build_tcp_packet(conn, segment)
        self._send_packet(packet)

        # Track unacked segment; SYN consumes one sequence number
        conn.unacked_segments[conn.local_seq] = segment
        conn.local_seq += 1
        conn.retransmit_timer = time.time()

        return conn

    def send_data(self, conn: TCPConnection, data: bytes) -> bool:
        """Send data on established connection; False if state disallows it."""
        if not conn.can_send_data:
            return False

        # Add to send buffer
        conn.send_buffer.append(data)

        # Try to send immediately
        self._try_send_data(conn)

        return True

    def _try_send_data(self, conn: TCPConnection):
        """Try to send buffered data, bounded by the effective send window."""
        while conn.send_buffer and len(conn.unacked_segments) * conn.mss < conn.effective_window:
            data = conn.send_buffer.popleft()

            # Split data if larger than MSS
            while data:
                chunk = data[:conn.mss]
                data = data[conn.mss:]

                # Create and send segment
                segment = self._create_tcp_segment(conn, 0x18, chunk)  # PSH+ACK flags
                packet = self._build_tcp_packet(conn, segment)
                self._send_packet(packet)

                # Track unacked segment
                conn.unacked_segments[conn.local_seq] = segment
                conn.local_seq += len(chunk)

                # (redundant: the inner while condition already exits on empty data)
                if not data:
                    break

    def process_packet(self, packet_data: bytes) -> bool:
        """Process incoming TCP packet; returns True if it was consumed."""
        try:
            # Parse packet
            parsed = IPParser.parse_packet(packet_data)
            if not isinstance(parsed.transport_header, TCPHeader):
                return False

            ip_header = parsed.ip_header
            tcp_header = parsed.transport_header
            payload = parsed.payload

            # Find or create connection. Note the key is built from OUR
            # perspective: packet dest = local side, packet source = remote.
            conn_key = self._get_connection_key(
                ip_header.dest_ip, tcp_header.dest_port,
                ip_header.source_ip, tcp_header.source_port
            )

            with self.lock:
                conn = self.connections.get(conn_key)

                # Handle new connection (SYN to listening port)
                if not conn and tcp_header.syn and not tcp_header.ack:
                    if tcp_header.dest_port in self.listening_ports:
                        conn = self._handle_new_connection(ip_header, tcp_header)
                        if conn:
                            self.connections[conn_key] = conn

            if not conn:
                # Send RST for unknown connection
                self._send_rst(ip_header, tcp_header)
                return False

            # Process segment (outside the lock: downstream handlers may
            # call _close_connection, which takes self.lock itself)
            return self._process_segment(conn, tcp_header, payload)

        except Exception as e:
            print(f"Error processing TCP packet: {e}")
            return False

    def _handle_new_connection(self, ip_header: IPv4Header, tcp_header: TCPHeader) -> Optional[TCPConnection]:
        """Handle new incoming connection (passive open: replies SYN-ACK)."""
        accept_callback = self.listening_ports.get(tcp_header.dest_port)
        if not accept_callback:
            return None

        # Create connection; ACK the peer's SYN (+1 in sequence space)
        conn = TCPConnection(
            local_ip=ip_header.dest_ip,
            local_port=tcp_header.dest_port,
            remote_ip=ip_header.source_ip,
            remote_port=tcp_header.source_port,
            state=TCPState.SYN_RECEIVED,
            local_seq=self._generate_isn(),
            remote_seq=tcp_header.seq_num,
            local_ack=tcp_header.seq_num + 1,
            mss=self.default_mss,
            local_window=self.default_window
        )
        conn.initial_seq = conn.local_seq

        # Send SYN-ACK
        segment = self._create_tcp_segment(conn, 0x12)  # SYN+ACK flags
        packet = self._build_tcp_packet(conn, segment)
        self._send_packet(packet)

        # Track unacked segment
        conn.unacked_segments[conn.local_seq] = segment
        conn.local_seq += 1
        conn.retransmit_timer = time.time()

        # Call accept callback
        accept_callback(conn)

        return conn

    def _process_segment(self, conn: TCPConnection, tcp_header: TCPHeader, payload: bytes) -> bool:
        """Process TCP segment based on connection state."""
        conn.last_activity = time.time()

        # Handle RST: abort immediately regardless of state
        if tcp_header.rst:
            conn.state = TCPState.CLOSED
            self._close_connection(conn)
            return True

        # State machine dispatch
        if conn.state == TCPState.SYN_SENT:
            return self._handle_syn_sent(conn, tcp_header, payload)
        elif conn.state == TCPState.SYN_RECEIVED:
            return self._handle_syn_received(conn, tcp_header, payload)
        elif conn.state == TCPState.ESTABLISHED:
            return self._handle_established(conn, tcp_header, payload)
        elif conn.state == TCPState.FIN_WAIT_1:
            return self._handle_fin_wait_1(conn, tcp_header, payload)
        elif conn.state == TCPState.FIN_WAIT_2:
            return self._handle_fin_wait_2(conn, tcp_header, payload)
        elif conn.state == TCPState.CLOSE_WAIT:
            return self._handle_close_wait(conn, tcp_header, payload)
        elif conn.state == TCPState.CLOSING:
            return self._handle_closing(conn, tcp_header, payload)
        elif conn.state == TCPState.LAST_ACK:
            return self._handle_last_ack(conn, tcp_header, payload)
        elif conn.state == TCPState.TIME_WAIT:
            return self._handle_time_wait(conn, tcp_header, payload)

        return False

    def _handle_syn_sent(self, conn: TCPConnection, tcp_header: TCPHeader, payload: bytes) -> bool:
        """Handle segment in SYN_SENT state."""
        if tcp_header.syn and tcp_header.ack:
            # SYN-ACK received; local_seq was already bumped past our SYN
            if tcp_header.ack_num == conn.local_seq:
                conn.remote_seq = tcp_header.seq_num
                conn.local_ack = tcp_header.seq_num + 1
                conn.remote_window = tcp_header.window_size

                # Remove SYN from unacked segments
                if conn.local_seq - 1 in conn.unacked_segments:
                    del conn.unacked_segments[conn.local_seq - 1]

                # Send ACK (third step of the handshake)
                segment = self._create_tcp_segment(conn, 0x10)  # ACK flag
                packet = self._build_tcp_packet(conn, segment)
                self._send_packet(packet)

                conn.state = TCPState.ESTABLISHED
                return True

        return False

    def _handle_syn_received(self, conn: TCPConnection, tcp_header: TCPHeader, payload: bytes) -> bool:
        """Handle segment in SYN_RECEIVED state."""
        if tcp_header.ack and tcp_header.ack_num == conn.local_seq:
            # ACK for our SYN-ACK
            conn.remote_window = tcp_header.window_size

            # Remove SYN-ACK from unacked segments
            if conn.local_seq - 1 in conn.unacked_segments:
                del conn.unacked_segments[conn.local_seq - 1]

            conn.state = TCPState.ESTABLISHED
            return True

        return False

    def _handle_established(self, conn: TCPConnection, tcp_header: TCPHeader, payload: bytes) -> bool:
        """Handle segment in ESTABLISHED state.

        NOTE(review): only in-order data (seq == local_ack) is accepted;
        out-of-order segments are silently ignored here even though
        TCPConnection has an out_of_order_buffer — confirm intended.
        """
        # Handle ACK
        if tcp_header.ack:
            self._process_ack(conn, tcp_header.ack_num)

        # Handle data (in-order only)
        if payload and tcp_header.seq_num == conn.local_ack:
            conn.local_ack += len(payload)

            # Deliver data
            if conn.on_data_received:
                conn.on_data_received(payload)

            # Send ACK
            segment = self._create_tcp_segment(conn, 0x10)  # ACK flag
            packet = self._build_tcp_packet(conn, segment)
            self._send_packet(packet)

        # Handle FIN (peer initiated close)
        if tcp_header.fin:
            conn.local_ack += 1

            # Send ACK
            segment = self._create_tcp_segment(conn, 0x10)  # ACK flag
            packet = self._build_tcp_packet(conn, segment)
            self._send_packet(packet)

            conn.state = TCPState.CLOSE_WAIT

        return True

    def _handle_fin_wait_1(self, conn: TCPConnection, tcp_header: TCPHeader, payload: bytes) -> bool:
        """Handle segment in FIN_WAIT_1 state."""
        if tcp_header.ack:
            self._process_ack(conn, tcp_header.ack_num)
            if not conn.unacked_segments:  # Our FIN was ACKed
                conn.state = TCPState.FIN_WAIT_2

        if tcp_header.fin:
            conn.local_ack += 1

            # Send ACK
            segment = self._create_tcp_segment(conn, 0x10)  # ACK flag
            packet = self._build_tcp_packet(conn, segment)
            self._send_packet(packet)

            if conn.state == TCPState.FIN_WAIT_2:
                # FIN+ACK arrived together: straight to TIME_WAIT
                conn.state = TCPState.TIME_WAIT
                conn.time_wait_start = time.time()
            else:
                # Simultaneous close
                conn.state = TCPState.CLOSING

        return True

    def _handle_fin_wait_2(self, conn: TCPConnection, tcp_header: TCPHeader, payload: bytes) -> bool:
        """Handle segment in FIN_WAIT_2 state."""
        if tcp_header.fin:
            conn.local_ack += 1

            # Send ACK
            segment = self._create_tcp_segment(conn, 0x10)  # ACK flag
            packet = self._build_tcp_packet(conn, segment)
            self._send_packet(packet)

            conn.state = TCPState.TIME_WAIT
            conn.time_wait_start = time.time()

        return True

    def _handle_close_wait(self, conn: TCPConnection, tcp_header: TCPHeader, payload: bytes) -> bool:
        """Handle segment in CLOSE_WAIT state."""
        # Application should close the connection
        return True

    def _handle_closing(self, conn: TCPConnection, tcp_header: TCPHeader, payload: bytes) -> bool:
        """Handle segment in CLOSING state (simultaneous close)."""
        if tcp_header.ack:
            self._process_ack(conn, tcp_header.ack_num)
            if not conn.unacked_segments:  # Our FIN was ACKed
                conn.state = TCPState.TIME_WAIT
                conn.time_wait_start = time.time()

        return True

    def _handle_last_ack(self, conn: TCPConnection, tcp_header: TCPHeader, payload: bytes) -> bool:
        """Handle segment in LAST_ACK state."""
        if tcp_header.ack:
            self._process_ack(conn, tcp_header.ack_num)
            if not conn.unacked_segments:  # Our FIN was ACKed
                conn.state = TCPState.CLOSED
                self._close_connection(conn)

        return True

    def _handle_time_wait(self, conn: TCPConnection, tcp_header: TCPHeader, payload: bytes) -> bool:
        """Handle segment in TIME_WAIT state."""
        # Just acknowledge any segments (e.g. retransmitted peer FIN)
        if tcp_header.seq_num == conn.local_ack:
            segment = self._create_tcp_segment(conn, 0x10)  # ACK flag
            packet = self._build_tcp_packet(conn, segment)
            self._send_packet(packet)

        return True

    def _process_ack(self, conn: TCPConnection, ack_num: int):
        """Process ACK and remove acknowledged segments.

        NOTE(review): `seq_num < ack_num` assumes monotonically increasing
        sequence numbers — there is no 32-bit wraparound handling anywhere
        in this engine.
        """
        acked_segments = []
        acked_bytes = 0

        for seq_num, segment in list(conn.unacked_segments.items()):
            if seq_num < ack_num:
                acked_segments.append((seq_num, segment))
                acked_bytes += segment.data_length
                del conn.unacked_segments[seq_num]

        # Update RTT and congestion window
        if acked_segments:
            # Use first acked segment for RTT calculation
            rtt = time.time() - acked_segments[0][1].timestamp
            self._update_rto(conn, rtt)
            self._update_congestion_window(conn, acked_bytes)

        # Window may have opened: try to send more data
        self._try_send_data(conn)

    def _send_rst(self, ip_header: IPv4Header, tcp_header: TCPHeader):
        """Send RST for unknown connection (mirrors the offending packet)."""
        # Create RST response with source/dest swapped
        rst_ip = IPv4Header(
            protocol=6,
            source_ip=ip_header.dest_ip,
            dest_ip=ip_header.source_ip,
            ttl=64
        )

        rst_tcp = TCPHeader(
            source_port=tcp_header.dest_port,
            dest_port=tcp_header.source_port,
            seq_num=tcp_header.ack_num if tcp_header.ack else 0,
            ack_num=tcp_header.seq_num + 1 if tcp_header.syn else tcp_header.seq_num,
            flags=0x14 if tcp_header.ack else 0x04  # RST+ACK or RST
        )

        packet = IPParser.build_packet(rst_ip, rst_tcp)
        self._send_packet(packet)

    def _timer_loop(self):
        """Timer loop for handling timeouts (runs in a daemon thread)."""
        while self.running:
            current_time = time.time()

            # Snapshot under the lock; handlers take the lock themselves
            with self.lock:
                connections_to_check = list(self.connections.values())

            for conn in connections_to_check:
                # Handle retransmissions
                if conn.unacked_segments:
                    self._handle_retransmission(conn)

                # Handle connection timeout
                if current_time - conn.last_activity > self.connection_timeout:
                    self._close_connection(conn, reset=True)

                # Handle TIME_WAIT timeout
                if (conn.state == TCPState.TIME_WAIT and
                        conn.time_wait_start and
                        current_time - conn.time_wait_start > self.time_wait_timeout):
                    conn.state = TCPState.CLOSED
                    self._close_connection(conn)

            time.sleep(1)  # Check every second

    def start(self):
        """Start TCP engine (spawns the timer thread)."""
        self.running = True
        self.timer_thread = threading.Thread(target=self._timer_loop, daemon=True)
        self.timer_thread.start()
        print("TCP engine started")

    def stop(self):
        """Stop TCP engine and reset all open connections."""
        self.running = False
        if self.timer_thread:
            self.timer_thread.join()

        # Close all connections
        with self.lock:
            for conn in list(self.connections.values()):
                self._close_connection(conn, reset=True)

        print("TCP engine stopped")

    def get_connections(self) -> Dict[str, Dict]:
        """Get current connections as plain dictionaries (for reporting)."""
        with self.lock:
            return {
                conn_id: {
                    'local_ip': conn.local_ip,
                    'local_port': conn.local_port,
                    'remote_ip': conn.remote_ip,
                    'remote_port': conn.remote_port,
                    'state': conn.state.value,
                    'local_seq': conn.local_seq,
                    'local_ack': conn.local_ack,
                    'remote_seq': conn.remote_seq,
                    'remote_ack': conn.remote_ack,
                    'window_size': conn.local_window,
                    'cwnd': conn.cwnd,
                    'unacked_segments': len(conn.unacked_segments),
                    'last_activity': conn.last_activity
                }
                for conn_id, conn in self.connections.items()
            }
class TrafficRouter:
    """Manages traffic routing for VPN clients using async TCP sockets.

    Listens on a VPN-facing and an internet-facing TCP endpoint, optionally
    runs payloads through a NAT engine, and forwards them.

    Fixes over the previous version:
    - ``stats["total_connections"]`` was never incremented.
    - ``bytes_forwarded`` was incremented even when the NAT engine dropped
      the packet; it now counts only bytes actually handed to
      ``_forward_data``.
    - ``asyncio.get_event_loop()`` inside a coroutine is deprecated;
      replaced with ``asyncio.get_running_loop()``.
    """

    def __init__(self, config: Dict[str, Any], nat_engine: Any = None):
        """
        Args:
            config: Settings dict; recognised keys: vpn_host, vpn_port,
                internet_host, internet_port.
            nat_engine: Optional object providing process_outbound_packet()
                and process_inbound_packet().
        """
        self.config = config
        self.is_running = False
        self.vpn_host = self.config.get("vpn_host", "127.0.0.1")
        self.vpn_port = self.config.get("vpn_port", 9000)
        self.internet_host = self.config.get("internet_host", "0.0.0.0")
        self.internet_port = self.config.get("internet_port", 9001)
        self.nat_engine = nat_engine
        self.loop = None
        self.vpn_server = None
        self.internet_server = None
        self.connections = set()  # set of active StreamWriter objects
        self.stats = {
            "total_connections": 0,
            "active_connections": 0,
            "bytes_forwarded": 0,
            "errors": 0
        }

    async def start(self):
        """Start the traffic router using asyncio TCP servers."""
        if self.is_running:
            logger.warning("Traffic Router is already running")
            return True

        self.is_running = True
        # get_running_loop() is the supported accessor inside a coroutine.
        self.loop = asyncio.get_running_loop()
        self.vpn_server = await asyncio.start_server(
            lambda r, w: self._handle_connection(r, w, "VPN"),
            self.vpn_host, self.vpn_port)
        self.internet_server = await asyncio.start_server(
            lambda r, w: self._handle_connection(r, w, "Internet"),
            self.internet_host, self.internet_port)

        logger.info(f"Traffic Router started on TCP endpoints: {self.vpn_host}:{self.vpn_port} and {self.internet_host}:{self.internet_port}")
        return True

    async def stop(self):
        """Stop the traffic router and close all open connections."""
        logger.info("Stopping Traffic Router...")
        self.is_running = False
        if self.vpn_server:
            self.vpn_server.close()
            await self.vpn_server.wait_closed()
        if self.internet_server:
            self.internet_server.close()
            await self.internet_server.wait_closed()
        for conn in list(self.connections):
            conn.close()
        logger.info("Traffic Router stopped")
        return True

    async def _handle_connection(self, reader: asyncio.StreamReader, writer: asyncio.StreamWriter, source_name: str):
        """Handle a new connection and forward data asynchronously.

        Args:
            reader/writer: asyncio stream pair for the accepted socket.
            source_name: "VPN" or "Internet" — selects the NAT direction.
        """
        peer = writer.get_extra_info("peername")
        logger.info(f"Accepted connection from {peer} on {source_name}")
        self.connections.add(writer)
        # Bug fix: this counter previously stayed at 0 forever.
        self.stats["total_connections"] += 1
        try:
            while self.is_running:
                data = await reader.read(4096)
                if not data:
                    break  # peer closed its side
                if self.nat_engine:
                    processed_data = None
                    if source_name == "VPN":
                        processed_data = self.nat_engine.process_outbound_packet(data)
                    elif source_name == "Internet":
                        processed_data = self.nat_engine.process_inbound_packet(data)
                    if processed_data:
                        await self._forward_data(processed_data, source_name)
                        # Bug fix: count only bytes actually forwarded;
                        # NAT-dropped packets used to inflate this counter.
                        self.stats["bytes_forwarded"] += len(processed_data)
                else:
                    await self._forward_data(data, source_name)
                    self.stats["bytes_forwarded"] += len(data)
        except Exception as e:
            self.stats["errors"] += 1
            logger.error(f"Error in {source_name} connection: {e}")
        finally:
            writer.close()
            await writer.wait_closed()
            self.connections.discard(writer)

    async def _forward_data(self, data: bytes, source_name: str):
        """Forward data to the opposite endpoint.

        Placeholder: real forwarding (connection pooling / pairing) is to
        be implemented; currently only logs the event.
        """
        logger.debug(f"Forwarded {len(data)} bytes from {source_name}")

    def get_stats(self) -> Dict[str, Any]:
        """Get traffic router statistics as a plain dictionary."""
        return {
            "is_running": self.is_running,
            "vpn_host": self.vpn_host,
            "vpn_port": self.vpn_port,
            "internet_host": self.internet_host,
            "internet_port": self.internet_port,
            "total_connections": self.stats["total_connections"],
            "active_connections": len(self.connections),
            "bytes_forwarded": self.stats["bytes_forwarded"],
            "errors": self.stats["errors"]
        }

    def set_components(self, nat_engine: Any = None):
        """Set references to other components for inter-operation."""
        if nat_engine:
            self.nat_engine = nat_engine
class RouteType(Enum):
    """How a routing-table entry came to exist."""
    DIRECT = "DIRECT"    # Directly connected network
    STATIC = "STATIC"    # Static route
    DEFAULT = "DEFAULT"  # Default route


@dataclass
class RouteEntry:
    """One entry in the virtual routing table."""
    destination: str        # Network in CIDR notation (e.g. "10.0.0.0/24")
    gateway: Optional[str]  # Next-hop IP (None for direct routes)
    interface: str          # Interface name or identifier
    metric: int             # Route metric (lower is preferred)
    route_type: RouteType
    created_time: float
    last_used: Optional[float] = None
    use_count: int = 0

    def __post_init__(self):
        # Zero timestamp means "unset" — stamp with the current time.
        if self.created_time == 0:
            self.created_time = time.time()

    def record_use(self):
        """Bump the usage counter and remember when the route was last taken."""
        self.use_count += 1
        self.last_used = time.time()

    def matches_destination(self, ip: str) -> bool:
        """Return True when *ip* falls inside this route's destination network.

        Malformed destinations or addresses simply do not match.
        """
        try:
            return ipaddress.ip_address(ip) in ipaddress.ip_network(self.destination, strict=False)
        except (ipaddress.AddressValueError, ValueError):
            return False

    def to_dict(self) -> Dict:
        """Serialize the entry to a plain dictionary (enum flattened to str)."""
        return {
            'destination': self.destination,
            'gateway': self.gateway,
            'interface': self.interface,
            'metric': self.metric,
            'route_type': self.route_type.value,
            'created_time': self.created_time,
            'last_used': self.last_used,
            'use_count': self.use_count
        }
class VirtualRouter:
    """Virtual router implementation.

    Maintains network interfaces, a metric-sorted routing table with
    longest-prefix-match lookup, and a simple ARP cache.

    Fixes over the previous version:
    - add_interface() called self.add_route() while still holding
      self.lock; add_route() re-acquires the same non-reentrant
      threading.Lock, which deadlocks. Route registration now happens
      after the lock is released.
    - lookup_route()'s priority key used a bare ``except:``; narrowed to
      the address-parsing errors it can actually raise.
    """

    def __init__(self, config: Dict):
        self.config = config
        self.routing_table: List[RouteEntry] = []
        self.interfaces: Dict[str, Interface] = {}
        self.arp_table: Dict[str, str] = {}  # IP -> MAC mapping
        self.lock = threading.Lock()

        # Router configuration
        self.router_id = config.get('router_id', 'virtual-router-1')
        self.default_gateway = config.get('default_gateway')

        # Statistics
        self.stats = {
            'packets_routed': 0,
            'packets_dropped': 0,
            'route_lookups': 0,
            'arp_requests': 0,
            'arp_replies': 0,
            'routing_errors': 0
        }

        # Initialize interfaces and routes
        self._initialize_interfaces()
        self._initialize_routes()

    def _initialize_interfaces(self):
        """Initialize network interfaces from configuration."""
        interfaces_config = self.config.get('interfaces', [])

        for iface_config in interfaces_config:
            interface = Interface(
                name=iface_config['name'],
                ip_address=iface_config['ip_address'],
                netmask=iface_config.get('netmask', '255.255.255.0'),
                network=iface_config.get('network'),
                enabled=iface_config.get('enabled', True),
                mtu=iface_config.get('mtu', 1500)
            )

            with self.lock:
                self.interfaces[interface.name] = interface

            # Outside the lock: add_route() acquires self.lock itself.
            self.add_route(
                destination=interface.network,
                gateway=None,
                interface=interface.name,
                metric=0,
                route_type=RouteType.DIRECT
            )

    def _initialize_routes(self):
        """Initialize static routes (and the default route) from configuration."""
        routes_config = self.config.get('static_routes', [])

        for route_config in routes_config:
            self.add_route(
                destination=route_config['destination'],
                gateway=route_config.get('gateway'),
                interface=route_config['interface'],
                metric=route_config.get('metric', 10),
                route_type=RouteType.STATIC
            )

        # Add default route if configured
        if self.default_gateway:
            # Find the interface whose network contains the default gateway
            default_interface = None
            for interface in self.interfaces.values():
                if interface.is_local_address(self.default_gateway):
                    default_interface = interface.name
                    break

            if default_interface:
                self.add_route(
                    destination="0.0.0.0/0",
                    gateway=self.default_gateway,
                    interface=default_interface,
                    metric=100,
                    route_type=RouteType.DEFAULT
                )

    def add_interface(self, name: str, ip_address: str, netmask: str = "255.255.255.0",
                      network: Optional[str] = None, mtu: int = 1500) -> bool:
        """Add a network interface plus a direct route for its network.

        Returns False if an interface with the same name already exists.
        """
        with self.lock:
            if name in self.interfaces:
                return False

            interface = Interface(
                name=name,
                ip_address=ip_address,
                netmask=netmask,
                network=network,
                mtu=mtu
            )

            self.interfaces[name] = interface

        # Deadlock fix: add_route() takes self.lock (non-reentrant), so it
        # must run after the critical section above has been released.
        self.add_route(
            destination=interface.network,
            gateway=None,
            interface=name,
            metric=0,
            route_type=RouteType.DIRECT
        )

        return True

    def remove_interface(self, name: str) -> bool:
        """Remove an interface together with every route that uses it."""
        with self.lock:
            if name not in self.interfaces:
                return False

            # Remove interface
            del self.interfaces[name]

            # Remove routes associated with this interface
            self.routing_table = [
                route for route in self.routing_table
                if route.interface != name
            ]

            return True

    def enable_interface(self, name: str) -> bool:
        """Enable network interface; False when the name is unknown."""
        with self.lock:
            if name in self.interfaces:
                self.interfaces[name].enabled = True
                return True
            return False

    def disable_interface(self, name: str) -> bool:
        """Disable network interface; False when the name is unknown."""
        with self.lock:
            if name in self.interfaces:
                self.interfaces[name].enabled = False
                return True
            return False

    def add_route(self, destination: str, gateway: Optional[str], interface: str,
                  metric: int = 10, route_type: RouteType = RouteType.STATIC) -> bool:
        """Add route to routing table.

        Replaces any existing route with the same destination+interface.
        Returns False for invalid addresses or an unknown interface.
        """
        try:
            # Validate destination network
            ipaddress.ip_network(destination, strict=False)

            # Validate gateway if provided
            if gateway:
                ipaddress.ip_address(gateway)

            route = RouteEntry(
                destination=destination,
                gateway=gateway,
                interface=interface,
                metric=metric,
                route_type=route_type,
                created_time=time.time()
            )

            with self.lock:
                # Check if interface exists
                if interface not in self.interfaces:
                    return False

                # Remove existing route with same destination and interface
                self.routing_table = [
                    r for r in self.routing_table
                    if not (r.destination == destination and r.interface == interface)
                ]

                # Add new route
                self.routing_table.append(route)

                # Sort by metric (lower metric = higher priority)
                self.routing_table.sort(key=lambda r: (r.metric, r.created_time))

            return True

        except (ipaddress.AddressValueError, ValueError):
            return False

    def remove_route(self, destination: str, interface: str) -> bool:
        """Remove route from routing table; True if something was removed."""
        with self.lock:
            original_count = len(self.routing_table)
            self.routing_table = [
                route for route in self.routing_table
                if not (route.destination == destination and route.interface == interface)
            ]
            return len(self.routing_table) < original_count

    def lookup_route(self, destination_ip: str) -> Optional[RouteEntry]:
        """Look up the best route for a destination IP.

        Longest-prefix match first, then lowest metric, then oldest route.
        Routes through disabled interfaces are ignored.
        """
        self.stats['route_lookups'] += 1

        with self.lock:
            # Find all matching routes
            matching_routes = []
            for route in self.routing_table:
                # Skip disabled interfaces
                interface = self.interfaces.get(route.interface)
                if not interface or not interface.enabled:
                    continue

                if route.matches_destination(destination_ip):
                    matching_routes.append(route)

            if not matching_routes:
                self.stats['routing_errors'] += 1
                return None

            # Sort by specificity (longest prefix match) and then by metric
            def route_priority(route):
                try:
                    network = ipaddress.ip_network(route.destination, strict=False)
                    return (-network.prefixlen, route.metric, route.created_time)
                except (ipaddress.AddressValueError, ValueError):
                    # Malformed destination: no prefix-length preference
                    return (0, route.metric, route.created_time)

            matching_routes.sort(key=route_priority)
            best_route = matching_routes[0]
            best_route.record_use()

            return best_route

    def route_packet(self, packet: ParsedPacket) -> Optional[Tuple[str, str]]:
        """Route packet and return (next_hop_ip, interface), or None to drop."""
        self.stats['packets_routed'] += 1

        destination_ip = packet.ip_header.dest_ip

        # Look up route
        route = self.lookup_route(destination_ip)
        if not route:
            self.stats['packets_dropped'] += 1
            return None

        # Determine next hop
        if route.gateway:
            next_hop = route.gateway
        else:
            # Direct route - destination is next hop
            next_hop = destination_ip

        return (next_hop, route.interface)

    def is_local_destination(self, ip: str) -> bool:
        """Check if IP is one of the router's own interface addresses."""
        with self.lock:
            for interface in self.interfaces.values():
                if interface.ip_address == ip:
                    return True
            return False

    def is_local_network(self, ip: str) -> bool:
        """Check if IP belongs to any directly connected network."""
        with self.lock:
            for interface in self.interfaces.values():
                if interface.is_local_address(ip):
                    return True
            return False

    def get_interface_for_ip(self, ip: str) -> Optional[Interface]:
        """Get the enabled interface whose network contains the given IP."""
        with self.lock:
            for interface in self.interfaces.values():
                if interface.enabled and interface.is_local_address(ip):
                    return interface
            return None

    def add_arp_entry(self, ip: str, mac: str):
        """Add ARP table entry (IP -> MAC)."""
        with self.lock:
            self.arp_table[ip] = mac

    def get_arp_entry(self, ip: str) -> Optional[str]:
        """Get MAC address from ARP table, or None if unknown."""
        with self.lock:
            return self.arp_table.get(ip)

    def remove_arp_entry(self, ip: str) -> bool:
        """Remove ARP table entry; True if it existed."""
        with self.lock:
            if ip in self.arp_table:
                del self.arp_table[ip]
                return True
            return False

    def clear_arp_table(self):
        """Clear the entire ARP table."""
        with self.lock:
            self.arp_table.clear()

    def get_routing_table(self) -> List[Dict]:
        """Get routing table as a list of plain dictionaries."""
        with self.lock:
            return [route.to_dict() for route in self.routing_table]

    def get_interfaces(self) -> Dict[str, Dict]:
        """Get network interfaces as plain dictionaries keyed by name."""
        with self.lock:
            return {
                name: interface.to_dict()
                for name, interface in self.interfaces.items()
            }

    def get_arp_table(self) -> Dict[str, str]:
        """Get a copy of the ARP table."""
        with self.lock:
            return self.arp_table.copy()

    def get_stats(self) -> Dict:
        """Get router statistics plus derived counts."""
        with self.lock:
            stats = self.stats.copy()
            stats['total_routes'] = len(self.routing_table)
            stats['total_interfaces'] = len(self.interfaces)
            stats['enabled_interfaces'] = sum(1 for iface in self.interfaces.values() if iface.enabled)
            stats['arp_entries'] = len(self.arp_table)

        return stats

    def reset_stats(self):
        """Reset router statistics and per-route usage counters."""
        self.stats = {
            'packets_routed': 0,
            'packets_dropped': 0,
            'route_lookups': 0,
            'arp_requests': 0,
            'arp_replies': 0,
            'routing_errors': 0
        }

        # Reset route usage statistics
        with self.lock:
            for route in self.routing_table:
                route.use_count = 0
                route.last_used = None

    def flush_routes(self, route_type: Optional[RouteType] = None):
        """Flush routes of the specified type (or all routes if None)."""
        with self.lock:
            if route_type:
                self.routing_table = [
                    route for route in self.routing_table
                    if route.route_type != route_type
                ]
            else:
                self.routing_table.clear()

    def export_config(self) -> Dict:
        """Export router configuration (interfaces + static routes only)."""
        return {
            'router_id': self.router_id,
            'default_gateway': self.default_gateway,
            'interfaces': [
                {
                    'name': iface.name,
                    'ip_address': iface.ip_address,
                    'netmask': iface.netmask,
                    'network': iface.network,
                    'enabled': iface.enabled,
                    'mtu': iface.mtu
                }
                for iface in self.interfaces.values()
            ],
            'static_routes': [
                {
                    'destination': route.destination,
                    'gateway': route.gateway,
                    'interface': route.interface,
                    'metric': route.metric
                }
                for route in self.routing_table
                if route.route_type == RouteType.STATIC
            ]
        }

    def import_config(self, config: Dict):
        """Import router configuration, replacing the current state."""
        # Clear existing configuration
        with self.lock:
            self.interfaces.clear()
            self.routing_table.clear()
            self.arp_table.clear()

        # Update router settings
        self.router_id = config.get('router_id', self.router_id)
        self.default_gateway = config.get('default_gateway', self.default_gateway)

        # Reinitialize from new config
        self.config.update(config)
        self._initialize_interfaces()
        self._initialize_routes()
return False + diff --git a/database/app.db b/database/app.db new file mode 100644 index 0000000000000000000000000000000000000000..1e9e410dcacd0863d14e86be974a6befedd406e0 Binary files /dev/null and b/database/app.db differ diff --git a/flask_app.log b/flask_app.log new file mode 100644 index 0000000000000000000000000000000000000000..4309a387f869f642b40e78b5c32b644fb0986785 Binary files /dev/null and b/flask_app.log differ diff --git a/main.py b/main.py new file mode 100644 index 0000000000000000000000000000000000000000..744526469d303b1828a7e5eb978a2f3d46074ee5 --- /dev/null +++ b/main.py @@ -0,0 +1,76 @@ +import os +import sys +# DON\'T CHANGE THIS !!! +sys.path.insert(0, os.path.dirname(os.path.dirname(__file__))) + +from flask import Flask, send_from_directory +from models.user import db +from routes.user import user_bp +from routes.auth import auth_bp +from routes.isp_api import init_engines, isp_api +from core.openvpn_manager import initialize_openvpn_manager + + +app = Flask(__name__, static_folder=os.path.join(os.path.dirname(__file__), 'static')) +app.config['SECRET_KEY'] = 'asdf#FGSgvasgf$5$WGT' + +app.register_blueprint(user_bp, url_prefix='/api') +app.register_blueprint(isp_api, url_prefix='/api') +app.register_blueprint(auth_bp, url_prefix='/api') + +# uncomment if you need to use database +app.config['SQLALCHEMY_DATABASE_URI'] = f"sqlite:///{os.path.join(os.path.dirname(__file__), 'database', 'app.db')}" +app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False +db.init_app(app) + +with app.app_context(): + db.create_all() + +# Default configuration for engines +app.config["dhcp"] = { + "network": "10.0.0.0/24", + "range_start": "10.0.0.10", + "range_end": "10.0.0.100", + "lease_time": 3600, + "gateway": "10.0.0.1", + "dns_servers": ["8.8.8.8", "8.8.4.4"] +} + +# Initialize engines only once, when the Flask app is not in debug mode's reloader process +if not app.debug or os.environ.get('WERKZEUG_RUN_MAIN') == 'true': + init_engines(app.config) + 
initialize_openvpn_manager(app.config) + +@app.route("/auth") +def serve_auth(): + return send_from_directory(app.static_folder, "auth.html") + +@app.route("/dashboard") +def serve_dashboard(): + return send_from_directory(app.static_folder, "dashboard.html") + +@app.route("/") +def serve_root(): + return send_from_directory(app.static_folder, "index.html") + +@app.route('/') +def serve(path): + static_folder_path = app.static_folder + if static_folder_path is None: + return "Static folder not configured", 404 + + if path != "" and os.path.exists(os.path.join(static_folder_path, path)): + return send_from_directory(static_folder_path, path) + else: + index_path = os.path.join(static_folder_path, 'index.html') + if os.path.exists(index_path): + return send_from_directory(static_folder_path, 'index.html') + else: + return "index.html not found", 404 + + +if __name__ == '__main__': + app.run(host='0.0.0.0', port=5000, debug=False) + + + diff --git a/main_isp.py b/main_isp.py new file mode 100644 index 0000000000000000000000000000000000000000..aa03c3f6ef7f9377fc1711dcefaa05cbe3255931 --- /dev/null +++ b/main_isp.py @@ -0,0 +1,273 @@ +""" +Main ISP Application + +Integrates all core modules and provides the main application entry point +""" + +import os +import sys +import json +import threading +import time +from flask import Flask +from flask_cors import CORS + +# Add project root to path +sys.path.insert(0, os.path.dirname(os.path.dirname(__file__))) + +# Import routes and core modules +from routes.isp_api import isp_api, init_engines + + +def load_config(): + """Load configuration from file or use defaults""" + config_file = os.path.join(os.path.dirname(__file__), 'config.json') + + default_config = { + "dhcp": { + "network": "10.0.0.0/24", + "range_start": "10.0.0.10", + "range_end": "10.0.0.100", + "lease_time": 3600, + "gateway": "10.0.0.1", + "dns_servers": ["8.8.8.8", "8.8.4.4"] + }, + "nat": { + "port_range_start": 10000, + "port_range_end": 65535, + 
"session_timeout": 300, + "host_ip": "0.0.0.0" + }, + "firewall": { + "default_policy": "ACCEPT", + "log_blocked": True, + "log_accepted": False, + "max_log_entries": 10000, + "rules": [ + { + "rule_id": "allow_dhcp", + "priority": 1, + "action": "ACCEPT", + "direction": "BOTH", + "dest_port": "67,68", + "protocol": "UDP", + "description": "Allow DHCP traffic", + "enabled": True + }, + { + "rule_id": "allow_dns", + "priority": 2, + "action": "ACCEPT", + "direction": "BOTH", + "dest_port": "53", + "protocol": "UDP", + "description": "Allow DNS traffic", + "enabled": True + } + ] + }, + "tcp": { + "initial_window": 65535, + "max_retries": 3, + "timeout": 300, + "time_wait_timeout": 120, + "mss": 1460 + }, + "router": { + "router_id": "virtual-isp-router", + "default_gateway": "10.0.0.1", + "interfaces": [ + { + "name": "virtual0", + "ip_address": "10.0.0.1", + "netmask": "255.255.255.0", + "enabled": True, + "mtu": 1500 + } + ], + "static_routes": [] + }, + "socket_translator": { + "connect_timeout": 10, + "read_timeout": 30, + "max_connections": 1000, + "buffer_size": 8192 + }, + "packet_bridge": { + "websocket_host": "0.0.0.0", + "websocket_port": 8765, + "tcp_host": "0.0.0.0", + "tcp_port": 8766, + "max_clients": 100, + "client_timeout": 300 + }, + "session_tracker": { + "max_sessions": 10000, + "session_timeout": 3600, + "cleanup_interval": 300, + "metrics_retention": 86400 + }, + "logger": { + "log_level": "INFO", + "log_to_file": True, + "log_file_path": "/tmp/virtual_isp.log", + "log_file_max_size": 10485760, + "log_file_backup_count": 5, + "log_to_console": True, + "structured_logging": True, + "max_memory_logs": 10000 + }, + "openvpn": { + "server_config_path": "/etc/openvpn/server/server.conf", + "ca_cert_path": "/etc/openvpn/server/ca.crt", + "server_cert_path": "/etc/openvpn/server/server.crt", + "server_key_path": "/etc/openvpn/server/server.key", + "dh_path": "/etc/openvpn/server/dh.pem", + "vpn_network": "10.8.0.0/24", + "vpn_server_ip": "10.8.0.1", + 
"vpn_port": 1194, + "protocol": "udp", + "auto_start": False, + "client_to_client": False, + "push_routes": [ + "redirect-gateway def1 bypass-dhcp", + "dhcp-option DNS 8.8.8.8", + "dhcp-option DNS 8.8.4.4" + ] + } + } + + if os.path.exists(config_file): + try: + with open(config_file, 'r') as f: + file_config = json.load(f) + + # Merge with defaults + def merge_config(default, override): + result = default.copy() + for key, value in override.items(): + if key in result and isinstance(result[key], dict) and isinstance(value, dict): + result[key] = merge_config(result[key], value) + else: + result[key] = value + return result + + return merge_config(default_config, file_config) + + except Exception as e: + print(f"Error loading config file: {e}") + print("Using default configuration") + return default_config + else: + # Save default config + try: + with open(config_file, 'w') as f: + json.dump(default_config, f, indent=2) + print(f"Created default configuration file: {config_file}") + except Exception as e: + print(f"Could not save default config: {e}") + + return default_config + + +def create_app(): + """Create and configure Flask application""" + app = Flask(__name__, static_folder=os.path.join(os.path.dirname(__file__), 'static')) + + # Enable CORS for all routes + CORS(app, origins="*", allow_headers=["Content-Type", "Authorization"]) + + # Load configuration + config = load_config() + app.config['ISP_CONFIG'] = config + + # Register blueprints + app.register_blueprint(isp_api, url_prefix='/api') + + # Initialize engines + init_engines(config) + + # Serve static files + @app.route('/', defaults={'path': ''}) + @app.route('/') + def serve_static(path): + static_folder_path = app.static_folder + if static_folder_path is None: + return "Static folder not configured", 404 + + if path != "" and os.path.exists(os.path.join(static_folder_path, path)): + return app.send_static_file(path) + else: + index_path = os.path.join(static_folder_path, 'index.html') + if 
os.path.exists(index_path): + return app.send_static_file('index.html') + else: + return """ + + + + Virtual ISP Stack + + + +
+

Virtual ISP Stack

+
+

System Status

+

The Virtual ISP Stack is running successfully!

+

API Endpoint: /api/status

+

System Stats: /api/stats

+
+ +

Available API Endpoints

+ + +

WebSocket Bridge

+

WebSocket server running on port 8765 for packet bridge connections.

+

TCP server running on port 8766 for packet bridge connections.

+
+ + + """, 200 + + return app + + +def main(): + """Main application entry point""" + print("Starting Virtual ISP Stack...") + + # Create Flask app + app = create_app() + + # Start the application + print("Virtual ISP Stack started successfully!") + print("API available at: http://0.0.0.0:5000/api/") + print("WebSocket bridge at: ws://0.0.0.0:8765") + print("TCP bridge at: tcp://0.0.0.0:8766") + + # Run Flask app + app.run(host='0.0.0.0', port=5000, debug=False, threaded=True) + + +if __name__ == '__main__': + main() + diff --git a/models/__pycache__/enhanced_user.cpython-311.pyc b/models/__pycache__/enhanced_user.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..faa4a28ab2e32229b7bbf8f771d0be3f2678709c Binary files /dev/null and b/models/__pycache__/enhanced_user.cpython-311.pyc differ diff --git a/models/__pycache__/user.cpython-311.pyc b/models/__pycache__/user.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..db7ef14703ce538461a52bb4712d4f98d69556df Binary files /dev/null and b/models/__pycache__/user.cpython-311.pyc differ diff --git a/models/enhanced_user.py b/models/enhanced_user.py new file mode 100644 index 0000000000000000000000000000000000000000..fdb52b69b2e8e730da5b146e8e2b452e9c077eeb --- /dev/null +++ b/models/enhanced_user.py @@ -0,0 +1,427 @@ +""" +Enhanced User Model with Authentication and VPN Client Management + +This module provides comprehensive user management with security features, +VPN client management, and session tracking capabilities. 
+""" + + +from werkzeug.security import generate_password_hash, check_password_hash +from datetime import datetime, timedelta +import secrets +import jwt +import re +from flask import current_app + +from .user import db + +class User(db.Model): + """Enhanced User model with authentication and VPN management""" + __tablename__ = 'users' + + id = db.Column(db.Integer, primary_key=True) + username = db.Column(db.String(80), unique=True, nullable=False, index=True) + email = db.Column(db.String(120), unique=True, nullable=False, index=True) + password_hash = db.Column(db.String(255), nullable=False) + salt = db.Column(db.String(32), nullable=False) + created_at = db.Column(db.DateTime, default=datetime.utcnow) + last_login = db.Column(db.DateTime) + is_active = db.Column(db.Boolean, default=True) + is_admin = db.Column(db.Boolean, default=False) + subscription_type = db.Column(db.String(20), default='free') + subscription_expires = db.Column(db.DateTime) + max_concurrent_connections = db.Column(db.Integer, default=1) + bandwidth_limit_mbps = db.Column(db.Integer, default=10) + email_verified = db.Column(db.Boolean, default=False) + email_verification_token = db.Column(db.String(64)) + two_factor_enabled = db.Column(db.Boolean, default=False) + two_factor_secret = db.Column(db.String(32)) + password_reset_token = db.Column(db.String(64)) + password_reset_expires = db.Column(db.DateTime) + failed_login_attempts = db.Column(db.Integer, default=0) + account_locked_until = db.Column(db.DateTime) + + # Relationships + vpn_clients = db.relationship('VPNClient', backref='user', lazy=True, cascade='all, delete-orphan') + vpn_sessions = db.relationship('VPNSession', backref='user', lazy=True) + + def __init__(self, username, email, password=None): + self.username = username + self.email = email + if password: + self.set_password(password) + self.email_verification_token = secrets.token_urlsafe(32) + + def set_password(self, password): + """Set password with secure hashing and 
salt""" + if not self.validate_password_strength(password): + raise ValueError("Password does not meet security requirements") + + self.salt = secrets.token_hex(16) + self.password_hash = generate_password_hash(password + self.salt) + self.failed_login_attempts = 0 + self.account_locked_until = None + + def check_password(self, password): + """Verify password against hash""" + if self.is_account_locked(): + return False + + is_valid = check_password_hash(self.password_hash, password + self.salt) + + if is_valid: + self.failed_login_attempts = 0 + self.last_login = datetime.utcnow() + else: + self.failed_login_attempts += 1 + if self.failed_login_attempts >= 5: + self.account_locked_until = datetime.utcnow() + timedelta(minutes=30) + + return is_valid + + def is_account_locked(self): + """Check if account is locked due to failed login attempts""" + if self.account_locked_until and datetime.utcnow() < self.account_locked_until: + return True + elif self.account_locked_until and datetime.utcnow() >= self.account_locked_until: + # Unlock account + self.account_locked_until = None + self.failed_login_attempts = 0 + return False + + @staticmethod + def validate_password_strength(password): + """Validate password meets security requirements""" + if len(password) < 8: + return False + if not re.search(r'[A-Z]', password): + return False + if not re.search(r'[a-z]', password): + return False + if not re.search(r'\d', password): + return False + if not re.search(r'[!@#$%^&*(),.?":{}|<>]', password): + return False + return True + + @staticmethod + def validate_email(email): + """Validate email format""" + pattern = r'^[a-zA-Z0-9._%+-]+@[a-zA-Z0-9.-]+\.[a-zA-Z]{2,}$' + return re.match(pattern, email) is not None + + @staticmethod + def validate_username(username): + """Validate username format""" + if len(username) < 3 or len(username) > 80: + return False + if not re.match(r'^[a-zA-Z0-9_-]+$', username): + return False + return True + + def generate_auth_token(self, 
expires_in=3600): + """Generate JWT authentication token""" + payload = { + 'user_id': self.id, + 'username': self.username, + 'email': self.email, + 'subscription_type': self.subscription_type, + 'is_admin': self.is_admin, + 'exp': datetime.utcnow() + timedelta(seconds=expires_in), + 'iat': datetime.utcnow() + } + return jwt.encode(payload, current_app.config['SECRET_KEY'], algorithm='HS256') + + def generate_refresh_token(self, expires_in=2592000): # 30 days + """Generate refresh token for extended sessions""" + payload = { + 'user_id': self.id, + 'type': 'refresh', + 'exp': datetime.utcnow() + timedelta(seconds=expires_in), + 'iat': datetime.utcnow() + } + return jwt.encode(payload, current_app.config['SECRET_KEY'], algorithm='HS256') + + @staticmethod + def verify_auth_token(token): + """Verify JWT authentication token""" + try: + payload = jwt.decode(token, current_app.config['SECRET_KEY'], algorithms=['HS256']) + if payload.get('type') == 'refresh': + return None # Refresh tokens cannot be used for authentication + return User.query.get(payload['user_id']) + except jwt.ExpiredSignatureError: + return None + except jwt.InvalidTokenError: + return None + + @staticmethod + def verify_refresh_token(token): + """Verify refresh token and return user""" + try: + payload = jwt.decode(token, current_app.config['SECRET_KEY'], algorithms=['HS256']) + if payload.get('type') != 'refresh': + return None + return User.query.get(payload['user_id']) + except jwt.ExpiredSignatureError: + return None + except jwt.InvalidTokenError: + return None + + def generate_password_reset_token(self): + """Generate password reset token""" + self.password_reset_token = secrets.token_urlsafe(32) + self.password_reset_expires = datetime.utcnow() + timedelta(hours=1) + return self.password_reset_token + + def verify_password_reset_token(self, token): + """Verify password reset token""" + if (self.password_reset_token == token and + self.password_reset_expires and + datetime.utcnow() < 
self.password_reset_expires): + return True + return False + + def reset_password(self, new_password, token): + """Reset password using reset token""" + if not self.verify_password_reset_token(token): + return False + + self.set_password(new_password) + self.password_reset_token = None + self.password_reset_expires = None + return True + + def verify_email(self, token): + """Verify email using verification token""" + if self.email_verification_token == token: + self.email_verified = True + self.email_verification_token = None + return True + return False + + def can_create_vpn_client(self): + """Check if user can create additional VPN clients""" + active_clients = len([c for c in self.vpn_clients if c.is_active]) + + if self.subscription_type == 'free': + return active_clients < 1 + elif self.subscription_type == 'premium': + return active_clients < 5 + elif self.subscription_type == 'enterprise': + return active_clients < 50 + + return False + + def get_active_sessions_count(self): + """Get count of active VPN sessions""" + return len([s for s in self.vpn_sessions if s.disconnected_at is None]) + + def can_connect_vpn(self): + """Check if user can establish new VPN connections""" + active_sessions = self.get_active_sessions_count() + return active_sessions < self.max_concurrent_connections + + def get_bandwidth_usage_today(self): + """Get bandwidth usage for today""" + today = datetime.utcnow().date() + today_sessions = [s for s in self.vpn_sessions + if s.connected_at and s.connected_at.date() == today] + + total_bytes = sum(s.bytes_received + s.bytes_sent for s in today_sessions) + return total_bytes + + def is_subscription_active(self): + """Check if subscription is active""" + if self.subscription_type == 'free': + return True + + return (self.subscription_expires and + datetime.utcnow() < self.subscription_expires) + + def to_dict(self, include_sensitive=False): + """Convert user to dictionary""" + data = { + 'id': self.id, + 'username': self.username, + 
'email': self.email, + 'created_at': self.created_at.isoformat() if self.created_at else None, + 'last_login': self.last_login.isoformat() if self.last_login else None, + 'is_active': self.is_active, + 'subscription_type': self.subscription_type, + 'subscription_expires': self.subscription_expires.isoformat() if self.subscription_expires else None, + 'max_concurrent_connections': self.max_concurrent_connections, + 'bandwidth_limit_mbps': self.bandwidth_limit_mbps, + 'email_verified': self.email_verified, + 'two_factor_enabled': self.two_factor_enabled, + 'is_subscription_active': self.is_subscription_active(), + 'active_vpn_clients': len([c for c in self.vpn_clients if c.is_active]), + 'active_sessions': self.get_active_sessions_count(), + 'can_create_vpn_client': self.can_create_vpn_client(), + 'can_connect_vpn': self.can_connect_vpn() + } + + if include_sensitive and (self.is_admin or include_sensitive == 'self'): + data.update({ + 'is_admin': self.is_admin, + 'failed_login_attempts': self.failed_login_attempts, + 'account_locked': self.is_account_locked(), + 'bandwidth_usage_today': self.get_bandwidth_usage_today() + }) + + return data + + +class VPNClient(db.Model): + """VPN Client configuration and management""" + __tablename__ = 'vpn_clients' + + id = db.Column(db.Integer, primary_key=True) + user_id = db.Column(db.Integer, db.ForeignKey('users.id'), nullable=False) + client_name = db.Column(db.String(100), nullable=False) + protocol = db.Column(db.String(20), nullable=False) # openvpn, ikev2, wireguard + certificate_serial = db.Column(db.String(50), unique=True) + private_key_path = db.Column(db.String(255)) + certificate_path = db.Column(db.String(255)) + config_file_path = db.Column(db.String(255)) + created_at = db.Column(db.DateTime, default=datetime.utcnow) + last_connected = db.Column(db.DateTime) + is_active = db.Column(db.Boolean, default=True) + device_type = db.Column(db.String(50)) # windows, macos, linux, ios, android + public_key = 
db.Column(db.Text) # For WireGuard + + # Relationships + sessions = db.relationship('VPNSession', backref='vpn_client', lazy=True) + + def __init__(self, user_id, client_name, protocol, device_type=None): + self.user_id = user_id + self.client_name = client_name + self.protocol = protocol + self.device_type = device_type + + def get_active_sessions(self): + """Get active sessions for this client""" + return [s for s in self.sessions if s.disconnected_at is None] + + def get_total_bandwidth_usage(self): + """Get total bandwidth usage for this client""" + return sum(s.bytes_received + s.bytes_sent for s in self.sessions) + + def to_dict(self): + """Convert VPN client to dictionary""" + return { + 'id': self.id, + 'client_name': self.client_name, + 'protocol': self.protocol, + 'device_type': self.device_type, + 'created_at': self.created_at.isoformat() if self.created_at else None, + 'last_connected': self.last_connected.isoformat() if self.last_connected else None, + 'is_active': self.is_active, + 'certificate_serial': self.certificate_serial, + 'active_sessions': len(self.get_active_sessions()), + 'total_bandwidth_usage': self.get_total_bandwidth_usage() + } + + +class VPNSession(db.Model): + """VPN Session tracking""" + __tablename__ = 'vpn_sessions' + + id = db.Column(db.Integer, primary_key=True) + user_id = db.Column(db.Integer, db.ForeignKey('users.id'), nullable=False) + client_id = db.Column(db.Integer, db.ForeignKey('vpn_clients.id'), nullable=False) + server_protocol = db.Column(db.String(20), nullable=False) + client_ip = db.Column(db.String(15)) + server_ip = db.Column(db.String(15)) + client_real_ip = db.Column(db.String(45)) # Support IPv6 + connected_at = db.Column(db.DateTime, default=datetime.utcnow) + disconnected_at = db.Column(db.DateTime) + bytes_received = db.Column(db.BigInteger, default=0) + bytes_sent = db.Column(db.BigInteger, default=0) + session_duration = db.Column(db.Integer) # in seconds + disconnect_reason = db.Column(db.String(100)) + 
+ def __init__(self, user_id, client_id, server_protocol, client_ip=None, server_ip=None, client_real_ip=None): + self.user_id = user_id + self.client_id = client_id + self.server_protocol = server_protocol + self.client_ip = client_ip + self.server_ip = server_ip + self.client_real_ip = client_real_ip + + def disconnect(self, reason=None): + """Mark session as disconnected""" + self.disconnected_at = datetime.utcnow() + self.disconnect_reason = reason + if self.connected_at: + self.session_duration = int((self.disconnected_at - self.connected_at).total_seconds()) + + def is_active(self): + """Check if session is active""" + return self.disconnected_at is None + + def get_duration(self): + """Get session duration in seconds""" + if self.disconnected_at: + return self.session_duration + elif self.connected_at: + return int((datetime.utcnow() - self.connected_at).total_seconds()) + return 0 + + def to_dict(self): + """Convert VPN session to dictionary""" + return { + 'id': self.id, + 'client_id': self.client_id, + 'server_protocol': self.server_protocol, + 'client_ip': self.client_ip, + 'server_ip': self.server_ip, + 'client_real_ip': self.client_real_ip, + 'connected_at': self.connected_at.isoformat() if self.connected_at else None, + 'disconnected_at': self.disconnected_at.isoformat() if self.disconnected_at else None, + 'bytes_received': self.bytes_received, + 'bytes_sent': self.bytes_sent, + 'session_duration': self.get_duration(), + 'disconnect_reason': self.disconnect_reason, + 'is_active': self.is_active() + } + + +class ServerConfiguration(db.Model): + """VPN Server configuration management""" + __tablename__ = 'server_configurations' + + id = db.Column(db.Integer, primary_key=True) + protocol = db.Column(db.String(20), nullable=False) + server_name = db.Column(db.String(100), nullable=False) + listen_port = db.Column(db.Integer, nullable=False) + network_cidr = db.Column(db.String(18), nullable=False) + dns_servers = db.Column(db.Text) # JSON string + routes 
= db.Column(db.Text) # JSON string + is_active = db.Column(db.Boolean, default=True) + created_at = db.Column(db.DateTime, default=datetime.utcnow) + updated_at = db.Column(db.DateTime, default=datetime.utcnow, onupdate=datetime.utcnow) + max_clients = db.Column(db.Integer, default=100) + + def __init__(self, protocol, server_name, listen_port, network_cidr): + self.protocol = protocol + self.server_name = server_name + self.listen_port = listen_port + self.network_cidr = network_cidr + + def to_dict(self): + """Convert server configuration to dictionary""" + return { + 'id': self.id, + 'protocol': self.protocol, + 'server_name': self.server_name, + 'listen_port': self.listen_port, + 'network_cidr': self.network_cidr, + 'dns_servers': self.dns_servers, + 'routes': self.routes, + 'is_active': self.is_active, + 'created_at': self.created_at.isoformat() if self.created_at else None, + 'updated_at': self.updated_at.isoformat() if self.updated_at else None, + 'max_clients': self.max_clients + } + diff --git a/models/user.py b/models/user.py new file mode 100644 index 0000000000000000000000000000000000000000..5b22876ec21365a88dce106eb3421aa478c408b9 --- /dev/null +++ b/models/user.py @@ -0,0 +1,20 @@ +from flask_sqlalchemy import SQLAlchemy + +db = SQLAlchemy() + +class User(db.Model): + id = db.Column(db.Integer, primary_key=True) + username = db.Column(db.String(80), unique=True, nullable=False) + email = db.Column(db.String(120), unique=True, nullable=False) + + def __repr__(self): + return f'' + + def to_dict(self): + return { + 'id': self.id, + 'username': self.username, + 'email': self.email + } + + diff --git a/openvpn/ca.crt b/openvpn/ca.crt new file mode 100644 index 0000000000000000000000000000000000000000..66be16dd47c7e4f711ec299165d07708621f0f61 --- /dev/null +++ b/openvpn/ca.crt @@ -0,0 +1,20 @@ +-----BEGIN CERTIFICATE----- +MIIDMzCCAhugAwIBAgIUNO765P4t/yD/PnIFTMVs0Q32TJYwDQYJKoZIhvcNAQEL +BQAwDjEMMAoGA1UEAwwDeWVzMB4XDTI1MDgwMjAxMjkzNVoXDTM1MDczMTAxMjkz 
+NVowDjEMMAoGA1UEAwwDeWVzMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKC +AQEAtwhMGXouHnHBRd2RhdrW8sOMgqt4wDXZC0J+4UMjOX6Y7t2O1Sgw/sWhwFPk +QF/cMoQIvsucklPogcnzzGtv9zDkAXyVyCC27UYbg8JfWZK3ZMrt6dfEmYf4KKXm +D6PLn9guxzBB63dhEWx/7fd6H9C/rK/u0rOh15DQRnfEI468cmXS5uNg8ke/73+y +Gzb6q7ZOFByBAwM0hW0lStBaIIcxouFrIK8B72O8H+6t10K1GvgiBhKvM3cc8dpN +y4qvRoN/o+eXarZG7G9dfm9OFgdd9LoXPTTbO+ftFPKOq4F41PnMd2Zcyk7P3GCr +3oK7NbISxZ5efLpy45lgSpqKBwIDAQABo4GIMIGFMB0GA1UdDgQWBBQIi0Er30cV +Qzi+U/LPV4Lf3yvGIzBJBgNVHSMEQjBAgBQIi0Er30cVQzi+U/LPV4Lf3yvGI6ES +pBAwDjEMMAoGA1UEAwwDeWVzghQ07vrk/i3/IP8+cgVMxWzRDfZMljAMBgNVHRME +BTADAQH/MAsGA1UdDwQEAwIBBjANBgkqhkiG9w0BAQsFAAOCAQEAHzfSFbi1G7WC +vMSOqSv4/jlBExnz/AlLUBHhgDomIdLK8Pb3tyCD5IYkmi0NT5x6DORcOV2ow1JZ +o4BL7OVV+fhz3VKXEpG+s3gq5j2m+raqLtu6QKBGg7SIUZ4MLjggvAcPjsK+n8sK +86sAUFVTccBxJlKBShAUPSNihyWwxB4PQFvwhefNQSoID1kAB2Fzf1beMX6Gp6Lj +ldI6e63lpYtIbp4+2F5SxJ/hGTUx+nWbOAHPvhBfhN6sEu9G1C5KPR0cm+xxOpZ9 +lA7y4Dea7pyVybR/b7lFquE3TReXCoLx79UNNSv8erIlsy1jh9yXDnTCk8SN1dpO +YwJ9U0AHXA== +-----END CERTIFICATE----- diff --git a/openvpn/dh.pem b/openvpn/dh.pem new file mode 100644 index 0000000000000000000000000000000000000000..9a5cb9c0ea1af5cae3a96f0c00447a042480d3fc --- /dev/null +++ b/openvpn/dh.pem @@ -0,0 +1,8 @@ +-----BEGIN DH PARAMETERS----- +MIIBCAKCAQEAlPRBW0tYm271xYHi15JrD3JRlpvdjAm+CZoEq0ElLXvSlIKaNQls +ITH+KIBBX3pgbFFk03fO9ApF0kSOzycRRCuW970iCkDoFUN9y58EG+BI863FkU1h +3dx+c59HqdWXkzFK+SmTfKIe12alZFik5G0Xs0hkphCgPaXvWlojorjQoRfKySw3 +VxpybKS83+l3t2ER3Z03IRvWinlnuxVAcymzeSR9hwIMJi3RmYmNmdXNel/WFAo2 +zT5j2f2OZHtnBhvo1V92Rml+5rJksPX4lJMRNwVEnXwqVUyCQOTTiGTUjLOO2gdk +HLhH5teetBdKL4tFcldeIJSk3e0oWXbURwIBAg== +-----END DH PARAMETERS----- diff --git a/openvpn/server.conf b/openvpn/server.conf new file mode 100644 index 0000000000000000000000000000000000000000..19fab790e448915f98d995a3671cefacfe2f7bda --- /dev/null +++ b/openvpn/server.conf @@ -0,0 +1,21 @@ +port 1194 +proto udp +dev tun +ca /etc/openvpn/server/ca.crt +cert /etc/openvpn/server/server.crt +key 
/etc/openvpn/server/server.key +dh /etc/openvpn/server/dh.pem +server 10.8.0.0 255.255.255.0 +ifconfig-pool-persist ipp.txt +push "redirect-gateway def1 bypass-dhcp" +push "dhcp-option DNS 8.8.8.8" +push "dhcp-option DNS 8.8.4.4" +keepalive 10 120 +cipher AES-256-CBC +persist-key +persist-tun +status openvpn-status.log +verb 3 +explicit-exit-notify 1 + + diff --git a/openvpn/server.crt b/openvpn/server.crt new file mode 100644 index 0000000000000000000000000000000000000000..632c8dcb115e79d89784c4dc1c1f5da493c7b800 --- /dev/null +++ b/openvpn/server.crt @@ -0,0 +1,86 @@ +Certificate: + Data: + Version: 3 (0x2) + Serial Number: + dd:b5:29:c9:70:b2:b3:65:70:ac:0f:57:30:15:b4:2a + Signature Algorithm: sha256WithRSAEncryption + Issuer: CN=yes + Validity + Not Before: Aug 2 01:29:38 2025 GMT + Not After : Nov 5 01:29:38 2027 GMT + Subject: CN=server + Subject Public Key Info: + Public Key Algorithm: rsaEncryption + Public-Key: (2048 bit) + Modulus: + 00:dd:9e:02:fb:e3:57:cd:51:43:36:6a:2f:30:f5: + a1:42:5c:16:f1:7b:4b:0a:aa:b1:34:b5:86:51:3e: + 6b:82:2e:59:df:42:21:cf:65:14:ea:8c:93:3c:0a: + 72:a5:2e:0f:64:1a:ec:76:52:18:b2:d3:a0:df:df: + 19:83:7e:39:9e:f5:16:18:36:34:ae:57:cf:2c:89: + 7c:c5:97:e3:8f:d0:83:08:7f:14:0c:74:2c:d2:95: + 09:6e:42:99:a0:28:69:83:68:f4:9c:0e:b5:3e:08: + 8f:d8:06:ec:d5:aa:c8:bc:19:4b:ff:e4:99:50:12: + 67:25:d4:79:94:1f:3d:64:b2:c8:00:ea:97:c2:df: + b8:1c:dc:69:47:9f:59:df:03:06:5a:32:7a:fa:51: + 96:45:9a:b7:e7:03:ef:9d:3b:94:51:9d:08:69:bb: + b0:3e:c8:9c:a3:a0:9c:18:aa:e9:88:ec:96:c3:71: + b1:f6:a7:09:ff:c0:56:b1:24:22:ab:fc:9a:c5:fc: + fd:67:8e:1a:86:ff:0a:5b:28:46:b4:20:93:05:b6: + ff:87:93:66:7d:ae:92:c4:0d:20:99:e9:c5:b8:3d: + 41:3a:06:83:49:e5:13:2e:d6:33:94:45:6a:36:84: + f9:c9:61:fe:98:3a:6e:41:ed:d8:8c:f1:55:3d:6d: + 53:fb + Exponent: 65537 (0x10001) + X509v3 extensions: + X509v3 Basic Constraints: + CA:FALSE + X509v3 Subject Key Identifier: + F4:62:12:72:49:40:C2:8A:46:5A:CB:71:BE:33:58:25:B3:E0:01:AC + X509v3 Authority Key Identifier: 
+ keyid:08:8B:41:2B:DF:47:15:43:38:BE:53:F2:CF:57:82:DF:DF:2B:C6:23 + DirName:/CN=yes + serial:34:EE:FA:E4:FE:2D:FF:20:FF:3E:72:05:4C:C5:6C:D1:0D:F6:4C:96 + X509v3 Extended Key Usage: + TLS Web Server Authentication + X509v3 Key Usage: + Digital Signature, Key Encipherment + X509v3 Subject Alternative Name: + DNS:server + Signature Algorithm: sha256WithRSAEncryption + Signature Value: + 85:f7:59:01:c2:99:23:c3:9a:99:2a:0a:bc:5d:7d:1c:e8:7c: + e9:23:a5:87:08:bd:45:1b:a7:a9:b7:3a:06:b6:91:86:ac:61: + 03:ae:cd:65:80:0e:e4:81:dc:38:b3:fe:6d:6f:02:e4:9e:43: + 95:d0:a6:38:30:53:52:14:f1:96:2a:30:69:2f:56:24:65:ba: + 53:c0:b0:22:23:2b:18:37:a1:0c:45:07:cb:ec:a9:71:f7:96: + 2a:d2:18:94:f0:07:18:1f:4c:d2:c5:d5:66:8f:1d:5c:08:8d: + 02:00:d6:0d:df:fd:6e:1e:2a:47:8c:30:fd:5b:46:56:0a:5a: + d4:6d:d4:99:c8:94:26:36:0b:86:30:dd:cb:3a:2e:a2:f3:80: + 0f:62:80:f8:9d:ec:98:f2:96:20:4f:46:01:ae:9d:35:7f:34: + 21:d7:71:89:b6:7a:ce:94:7e:14:e6:bf:b6:08:44:39:24:db: + aa:cf:54:46:34:8f:67:6c:72:22:f1:eb:e9:94:7d:73:26:f3: + 2f:72:fe:28:b3:cb:28:c3:4c:14:3d:c3:81:1e:8d:96:96:e5: + df:af:c4:0a:06:71:16:df:8f:a3:30:50:79:45:95:4c:e8:57: + ee:ed:38:dd:82:8e:0e:b1:2b:4d:27:2b:6f:bc:c8:1c:91:de: + 2c:55:69:38 +-----BEGIN CERTIFICATE----- +MIIDWDCCAkCgAwIBAgIRAN21KclwsrNlcKwPVzAVtCowDQYJKoZIhvcNAQELBQAw +DjEMMAoGA1UEAwwDeWVzMB4XDTI1MDgwMjAxMjkzOFoXDTI3MTEwNTAxMjkzOFow +ETEPMA0GA1UEAwwGc2VydmVyMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKC +AQEA3Z4C++NXzVFDNmovMPWhQlwW8XtLCqqxNLWGUT5rgi5Z30Ihz2UU6oyTPApy +pS4PZBrsdlIYstOg398Zg345nvUWGDY0rlfPLIl8xZfjj9CDCH8UDHQs0pUJbkKZ +oChpg2j0nA61PgiP2Abs1arIvBlL/+SZUBJnJdR5lB89ZLLIAOqXwt+4HNxpR59Z +3wMGWjJ6+lGWRZq35wPvnTuUUZ0IabuwPsico6CcGKrpiOyWw3Gx9qcJ/8BWsSQi +q/yaxfz9Z44ahv8KWyhGtCCTBbb/h5Nmfa6SxA0gmenFuD1BOgaDSeUTLtYzlEVq +NoT5yWH+mDpuQe3YjPFVPW1T+wIDAQABo4GtMIGqMAkGA1UdEwQCMAAwHQYDVR0O +BBYEFPRiEnJJQMKKRlrLcb4zWCWz4AGsMEkGA1UdIwRCMECAFAiLQSvfRxVDOL5T +8s9Xgt/fK8YjoRKkEDAOMQwwCgYDVQQDDAN5ZXOCFDTu+uT+Lf8g/z5yBUzFbNEN 
+9kyWMBMGA1UdJQQMMAoGCCsGAQUFBwMBMAsGA1UdDwQEAwIFoDARBgNVHREECjAI +ggZzZXJ2ZXIwDQYJKoZIhvcNAQELBQADggEBAIX3WQHCmSPDmpkqCrxdfRzofOkj +pYcIvUUbp6m3Oga2kYasYQOuzWWADuSB3Diz/m1vAuSeQ5XQpjgwU1IU8ZYqMGkv +ViRlulPAsCIjKxg3oQxFB8vsqXH3lirSGJTwBxgfTNLF1WaPHVwIjQIA1g3f/W4e +KkeMMP1bRlYKWtRt1JnIlCY2C4Yw3cs6LqLzgA9igPid7JjyliBPRgGunTV/NCHX +cYm2es6UfhTmv7YIRDkk26rPVEY0j2dsciLx6+mUfXMm8y9y/iizyyjDTBQ9w4Ee +jZaW5d+vxAoGcRbfj6MwUHlFlUzoV+7tON2Cjg6xK00nK2+8yByR3ixVaTg= +-----END CERTIFICATE----- diff --git a/openvpn/server.key b/openvpn/server.key new file mode 100644 index 0000000000000000000000000000000000000000..5dc09a113ebb97573c013874f10d939c9721907f --- /dev/null +++ b/openvpn/server.key @@ -0,0 +1,28 @@ +-----BEGIN PRIVATE KEY----- +MIIEvgIBADANBgkqhkiG9w0BAQEFAASCBKgwggSkAgEAAoIBAQDdngL741fNUUM2 +ai8w9aFCXBbxe0sKqrE0tYZRPmuCLlnfQiHPZRTqjJM8CnKlLg9kGux2Uhiy06Df +3xmDfjme9RYYNjSuV88siXzFl+OP0IMIfxQMdCzSlQluQpmgKGmDaPScDrU+CI/Y +BuzVqsi8GUv/5JlQEmcl1HmUHz1kssgA6pfC37gc3GlHn1nfAwZaMnr6UZZFmrfn +A++dO5RRnQhpu7A+yJyjoJwYqumI7JbDcbH2pwn/wFaxJCKr/JrF/P1njhqG/wpb +KEa0IJMFtv+Hk2Z9rpLEDSCZ6cW4PUE6BoNJ5RMu1jOURWo2hPnJYf6YOm5B7diM +8VU9bVP7AgMBAAECggEATtwR0sEYtspSYPQS+9iD/AGZ9m75in+n1Ao+E/3isq28 +tDmrn0moUjgYklZjakzEFEqSVx4qhMPSrKcORKCvb1Vl+dKcF2fOpFn+KK++Pagk +YGsb3ryeUIbRFsejM/79YNIBrOB89OiGCwiX0QZXLLvRs+qL9Za+1pLPenpNVd2w +zL+AZ8QkJZdHn1vOZt9vKRlpe8psAt64RHb+LqhYWfeLlpIUjpM5Vu9FFewMGPrw +n+GVCzK4ylq0pJ9bYwKI5Hw4qnJ3j5bGIumEjYBqqmef1+OTD3r/wyhTGpK9RRAu +WD9YGJeQx3ybzRL7Wj6k5g0dn+UA82Lh7Y8n9IoSaQKBgQDqP/BU2KapOHgFt2DE +WHU/+zA7/kfMJMGB5dYy8oXTxUY7WuqX9lja3rC0XuH10JTD6Q21jkTujc0T5/1B +4KxuX+nQP/T9b4XzVM3pKWVmHUt6wf24sbuTNxOy/Q/wC7eCnkr04CEl0vf3E56N +JaLG11dbpcn+9RC9FlUhlYY8QwKBgQDyMcz43915YGOQMkGVZFPvKyOy7ol4fFZv +VRfRoGx9CfHCIOfh9vmlUy6TR4qAQkCnkL730OsxpW3aDTe3qcAcmhiK7u5TfWrE +cd1WgrkymJ8hyEk6FSV0GMKrccQeEo2T95cKnk6lNXnEdNp5kx7LBQhL36fEtMXS +FGCcRkNp6QKBgAbm6WLmm0qDIm4wsAY5AQNomEw8OstWDemQ5xXLNYw+1Mns7Nqb +ZJTWWOiHnyrKAYggNsoxrfBFd1Rt0nV9dDcwVkhPih1pis3XotWK5bTzigTM8Hff 
+rMIyrj7o2+5bugV8OoMqk2903t+F0XchM8GeGLHXmbMMb3jSzqFVsYXXAoGBAII1 +Z/99S7LPsXd6rWvFzqJMzRqLx/iw0D92viGDYBAxYnp9+myvvTO27tlbowilleEA +nsrY1TmRuOd8J7JkXtaBuiQnpJXaXaZTmS3DhhG/n/4nkcbaS5KJJU/LECcizl74 +w4l/5sRHZbnLIRIvmGSJxhYUnjvQ/HGfZvldhSzRAoGBAMVTrxWedC2XeSMwjdhF +zeDBAp/dTMEnRaS0j3rp+4a4l7Sus1L/p8gBrJtnf/B43bNvQ5cr2jwH7Ql5cF1A +A7hpZ3C0trNaf6WqslJQhN8j8Cs85S/8rPGM5yAfyzKTMe0ytLUjn+XiQCqCUFcT +Inqx4ll7r2tlcI3aMlvN2qsd +-----END PRIVATE KEY----- diff --git a/requirements.txt b/requirements.txt new file mode 100644 index 0000000000000000000000000000000000000000..988f37c745ddc7f913632bb7b21281627dc2954e --- /dev/null +++ b/requirements.txt @@ -0,0 +1,33 @@ +# Core Flask dependencies +Flask==3.1.1 +flask-cors==6.0.0 +Flask-SQLAlchemy==3.1.1 +Werkzeug==3.1.3 + +# Database +SQLAlchemy==2.0.41 + +# Async and networking +aiohttp==3.12.15 +aiohappyeyeballs==2.6.1 +aiosignal==1.4.0 +websockets==15.0.1 + +# Utilities +attrs==25.3.0 +blinker==1.9.0 +click==8.2.1 +frozenlist==1.7.0 +greenlet==3.2.3 +idna==3.10 +itsdangerous==2.2.0 +Jinja2==3.1.6 +MarkupSafe==3.0.2 +multidict==6.6.3 +propcache==0.3.2 +typing_extensions==4.14.0 +yarl==1.20.1 +jwt +# Additional dependencies for VPN management +psutil==5.9.8 + diff --git a/routes/__pycache__/auth.cpython-311.pyc b/routes/__pycache__/auth.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..c8702db1d7739b1321b46948c6c4d2fd659685ac Binary files /dev/null and b/routes/__pycache__/auth.cpython-311.pyc differ diff --git a/routes/__pycache__/isp_api.cpython-311.pyc b/routes/__pycache__/isp_api.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..f13d0d83004893069e23d4435effcb97c3732626 Binary files /dev/null and b/routes/__pycache__/isp_api.cpython-311.pyc differ diff --git a/routes/__pycache__/user.cpython-311.pyc b/routes/__pycache__/user.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..d85f705ee9a66d9d58c8ca4590d4bc5596f777b1 Binary 
files /dev/null and b/routes/__pycache__/user.cpython-311.pyc differ diff --git a/routes/__pycache__/vpn_client.cpython-311.pyc b/routes/__pycache__/vpn_client.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..5184c677591679e774c837571a7a9c5d1d269dba Binary files /dev/null and b/routes/__pycache__/vpn_client.cpython-311.pyc differ diff --git a/routes/__pycache__/vpn_server.cpython-311.pyc b/routes/__pycache__/vpn_server.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..0494c2d4c9ee5e932466faf6ff4c2c7426fdd1b6 Binary files /dev/null and b/routes/__pycache__/vpn_server.cpython-311.pyc differ diff --git a/routes/auth.py b/routes/auth.py new file mode 100644 index 0000000000000000000000000000000000000000..b4cdbd41018ad0ac922f4cba2a03fc74aec82658 --- /dev/null +++ b/routes/auth.py @@ -0,0 +1,566 @@ +""" +Authentication Routes + +Flask routes for user registration, login, password reset, and account management +""" + +from flask import Blueprint, request, jsonify, current_app +from flask_cors import cross_origin +from models.user import db +from models.enhanced_user import User, VPNClient +from functools import wraps +import logging +import re +import secrets +from datetime import datetime, timedelta + +logger = logging.getLogger(__name__) + +auth_bp = Blueprint('auth', __name__) + +def token_required(f): + """Decorator to require valid JWT token""" + @wraps(f) + def decorated(*args, **kwargs): + token = None + + # Get token from Authorization header + if 'Authorization' in request.headers: + auth_header = request.headers['Authorization'] + try: + token = auth_header.split(" ")[1] # Bearer + except IndexError: + return jsonify({'error': 'Invalid token format'}), 401 + + if not token: + return jsonify({'error': 'Token is missing'}), 401 + + try: + current_user = User.verify_auth_token(token) + if current_user is None: + return jsonify({'error': 'Token is invalid or expired'}), 401 + + if not 
current_user.is_active: + return jsonify({'error': 'Account is deactivated'}), 401 + + except Exception as e: + logger.error(f"Token verification error: {e}") + return jsonify({'error': 'Token verification failed'}), 401 + + return f(current_user, *args, **kwargs) + + return decorated + +def admin_required(f): + """Decorator to require admin privileges""" + @wraps(f) + def decorated(current_user, *args, **kwargs): + if not current_user.is_admin: + return jsonify({'error': 'Admin privileges required'}), 403 + return f(current_user, *args, **kwargs) + + return decorated + +@auth_bp.route('/register', methods=['POST']) +@cross_origin() +def register(): + """User registration endpoint""" + try: + data = request.get_json() + + if not data: + return jsonify({'error': 'No data provided'}), 400 + + # Validate required fields + required_fields = ['username', 'email', 'password'] + for field in required_fields: + if field not in data or not data[field]: + return jsonify({'error': f'{field} is required'}), 400 + + username = data['username'].strip() + email = data['email'].strip().lower() + password = data['password'] + + # Validate input format + if not User.validate_username(username): + return jsonify({ + 'error': 'Username must be 3-80 characters and contain only letters, numbers, hyphens, and underscores' + }), 400 + + if not User.validate_email(email): + return jsonify({'error': 'Invalid email format'}), 400 + + if not User.validate_password_strength(password): + return jsonify({ + 'error': 'Password must be at least 8 characters with uppercase, lowercase, number, and special character' + }), 400 + + # Check if user already exists + if User.query.filter_by(username=username).first(): + return jsonify({'error': 'Username already exists'}), 409 + + if User.query.filter_by(email=email).first(): + return jsonify({'error': 'Email already registered'}), 409 + + # Create new user + user = User(username=username, email=email, password=password) + + # Set subscription based on 
registration data + subscription_type = data.get('subscription_type', 'free') + if subscription_type in ['free', 'premium', 'enterprise']: + user.subscription_type = subscription_type + + # Set limits based on subscription + if subscription_type == 'premium': + user.max_concurrent_connections = 3 + user.bandwidth_limit_mbps = 50 + user.subscription_expires = datetime.utcnow() + timedelta(days=30) + elif subscription_type == 'enterprise': + user.max_concurrent_connections = 10 + user.bandwidth_limit_mbps = 100 + user.subscription_expires = datetime.utcnow() + timedelta(days=30) + + db.session.add(user) + db.session.commit() + + logger.info(f"New user registered: {username} ({email})") + + # Generate tokens + auth_token = user.generate_auth_token() + refresh_token = user.generate_refresh_token() + + return jsonify({ + 'message': 'User registered successfully', + 'user': user.to_dict(), + 'auth_token': auth_token, + 'refresh_token': refresh_token, + 'email_verification_required': not user.email_verified + }), 201 + + except ValueError as e: + return jsonify({'error': str(e)}), 400 + except Exception as e: + logger.error(f"Registration error: {e}") + db.session.rollback() + return jsonify({'error': 'Registration failed'}), 500 + +@auth_bp.route('/login', methods=['POST']) +@cross_origin() +def login(): + """User login endpoint""" + try: + data = request.get_json() + + if not data: + return jsonify({'error': 'No data provided'}), 400 + + # Validate required fields + if 'login' not in data or 'password' not in data: + return jsonify({'error': 'Login and password are required'}), 400 + + login_field = data['login'].strip() + password = data['password'] + + # Find user by username or email + user = None + if '@' in login_field: + user = User.query.filter_by(email=login_field.lower()).first() + else: + user = User.query.filter_by(username=login_field).first() + + if not user: + return jsonify({'error': 'Invalid credentials'}), 401 + + if not user.is_active: + return 
jsonify({'error': 'Account is deactivated'}), 401 + + if user.is_account_locked(): + return jsonify({ + 'error': 'Account is temporarily locked due to failed login attempts' + }), 423 + + if not user.check_password(password): + db.session.commit() # Save failed attempt count + return jsonify({'error': 'Invalid credentials'}), 401 + + # Update last login + user.last_login = datetime.utcnow() + db.session.commit() + + logger.info(f"User logged in: {user.username}") + + # Generate tokens + auth_token = user.generate_auth_token() + refresh_token = user.generate_refresh_token() + + return jsonify({ + 'message': 'Login successful', + 'user': user.to_dict(include_sensitive='self'), + 'auth_token': auth_token, + 'refresh_token': refresh_token + }), 200 + + except Exception as e: + logger.error(f"Login error: {e}") + return jsonify({'error': 'Login failed'}), 500 + +@auth_bp.route('/refresh', methods=['POST']) +@cross_origin() +def refresh_token(): + """Refresh authentication token""" + try: + data = request.get_json() + + if not data or 'refresh_token' not in data: + return jsonify({'error': 'Refresh token is required'}), 400 + + refresh_token = data['refresh_token'] + user = User.verify_refresh_token(refresh_token) + + if not user: + return jsonify({'error': 'Invalid or expired refresh token'}), 401 + + if not user.is_active: + return jsonify({'error': 'Account is deactivated'}), 401 + + # Generate new tokens + new_auth_token = user.generate_auth_token() + new_refresh_token = user.generate_refresh_token() + + return jsonify({ + 'auth_token': new_auth_token, + 'refresh_token': new_refresh_token, + 'user': user.to_dict() + }), 200 + + except Exception as e: + logger.error(f"Token refresh error: {e}") + return jsonify({'error': 'Token refresh failed'}), 500 + +@auth_bp.route('/logout', methods=['POST']) +@cross_origin() +@token_required +def logout(current_user): + """User logout endpoint""" + try: + # In a production system, you would invalidate the token + # For now, we 
just return success + logger.info(f"User logged out: {current_user.username}") + + return jsonify({'message': 'Logout successful'}), 200 + + except Exception as e: + logger.error(f"Logout error: {e}") + return jsonify({'error': 'Logout failed'}), 500 + +@auth_bp.route('/profile', methods=['GET']) +@cross_origin() +@token_required +def get_profile(current_user): + """Get user profile""" + try: + return jsonify({ + 'user': current_user.to_dict(include_sensitive='self') + }), 200 + + except Exception as e: + logger.error(f"Profile retrieval error: {e}") + return jsonify({'error': 'Failed to retrieve profile'}), 500 + +@auth_bp.route('/profile', methods=['PUT']) +@cross_origin() +@token_required +def update_profile(current_user): + """Update user profile""" + try: + data = request.get_json() + + if not data: + return jsonify({'error': 'No data provided'}), 400 + + # Update allowed fields + if 'email' in data: + new_email = data['email'].strip().lower() + if new_email != current_user.email: + if not User.validate_email(new_email): + return jsonify({'error': 'Invalid email format'}), 400 + + # Check if email is already taken + existing_user = User.query.filter_by(email=new_email).first() + if existing_user and existing_user.id != current_user.id: + return jsonify({'error': 'Email already registered'}), 409 + + current_user.email = new_email + current_user.email_verified = False + current_user.email_verification_token = secrets.token_urlsafe(32) + + db.session.commit() + + logger.info(f"Profile updated for user: {current_user.username}") + + return jsonify({ + 'message': 'Profile updated successfully', + 'user': current_user.to_dict(include_sensitive='self') + }), 200 + + except Exception as e: + logger.error(f"Profile update error: {e}") + db.session.rollback() + return jsonify({'error': 'Profile update failed'}), 500 + +@auth_bp.route('/change-password', methods=['POST']) +@cross_origin() +@token_required +def change_password(current_user): + """Change user password""" 
+ try: + data = request.get_json() + + if not data: + return jsonify({'error': 'No data provided'}), 400 + + # Validate required fields + required_fields = ['current_password', 'new_password'] + for field in required_fields: + if field not in data: + return jsonify({'error': f'{field} is required'}), 400 + + current_password = data['current_password'] + new_password = data['new_password'] + + # Verify current password + if not current_user.check_password(current_password): + return jsonify({'error': 'Current password is incorrect'}), 401 + + # Validate new password + if not User.validate_password_strength(new_password): + return jsonify({ + 'error': 'New password must be at least 8 characters with uppercase, lowercase, number, and special character' + }), 400 + + # Set new password + current_user.set_password(new_password) + db.session.commit() + + logger.info(f"Password changed for user: {current_user.username}") + + return jsonify({'message': 'Password changed successfully'}), 200 + + except ValueError as e: + return jsonify({'error': str(e)}), 400 + except Exception as e: + logger.error(f"Password change error: {e}") + db.session.rollback() + return jsonify({'error': 'Password change failed'}), 500 + +@auth_bp.route('/forgot-password', methods=['POST']) +@cross_origin() +def forgot_password(): + """Request password reset""" + try: + data = request.get_json() + + if not data or 'email' not in data: + return jsonify({'error': 'Email is required'}), 400 + + email = data['email'].strip().lower() + user = User.query.filter_by(email=email).first() + + if user: + reset_token = user.generate_password_reset_token() + db.session.commit() + + # In a production system, you would send an email here + logger.info(f"Password reset requested for user: {user.username}") + + # For development, return the token (remove in production) + return jsonify({ + 'message': 'Password reset instructions sent to email', + 'reset_token': reset_token # Remove this in production + }), 200 + 
else: + # Don't reveal if email exists + return jsonify({ + 'message': 'If the email exists, password reset instructions have been sent' + }), 200 + + except Exception as e: + logger.error(f"Password reset request error: {e}") + return jsonify({'error': 'Password reset request failed'}), 500 + +@auth_bp.route('/reset-password', methods=['POST']) +@cross_origin() +def reset_password(): + """Reset password with token""" + try: + data = request.get_json() + + if not data: + return jsonify({'error': 'No data provided'}), 400 + + # Validate required fields + required_fields = ['email', 'token', 'new_password'] + for field in required_fields: + if field not in data: + return jsonify({'error': f'{field} is required'}), 400 + + email = data['email'].strip().lower() + token = data['token'] + new_password = data['new_password'] + + user = User.query.filter_by(email=email).first() + + if not user: + return jsonify({'error': 'Invalid reset request'}), 400 + + if not User.validate_password_strength(new_password): + return jsonify({ + 'error': 'Password must be at least 8 characters with uppercase, lowercase, number, and special character' + }), 400 + + if user.reset_password(new_password, token): + db.session.commit() + logger.info(f"Password reset completed for user: {user.username}") + return jsonify({'message': 'Password reset successfully'}), 200 + else: + return jsonify({'error': 'Invalid or expired reset token'}), 400 + + except ValueError as e: + return jsonify({'error': str(e)}), 400 + except Exception as e: + logger.error(f"Password reset error: {e}") + db.session.rollback() + return jsonify({'error': 'Password reset failed'}), 500 + +@auth_bp.route('/verify-email', methods=['POST']) +@cross_origin() +def verify_email(): + """Verify email address""" + try: + data = request.get_json() + + if not data or 'token' not in data: + return jsonify({'error': 'Verification token is required'}), 400 + + token = data['token'] + + # Find user by verification token + user = 
User.query.filter_by(email_verification_token=token).first() + + if not user: + return jsonify({'error': 'Invalid verification token'}), 400 + + if user.verify_email(token): + db.session.commit() + logger.info(f"Email verified for user: {user.username}") + return jsonify({'message': 'Email verified successfully'}), 200 + else: + return jsonify({'error': 'Email verification failed'}), 400 + + except Exception as e: + logger.error(f"Email verification error: {e}") + return jsonify({'error': 'Email verification failed'}), 500 + +@auth_bp.route('/users', methods=['GET']) +@cross_origin() +@token_required +@admin_required +def list_users(current_user): + """List all users (admin only)""" + try: + page = request.args.get('page', 1, type=int) + per_page = request.args.get('per_page', 20, type=int) + search = request.args.get('search', '') + + query = User.query + + if search: + query = query.filter( + db.or_( + User.username.contains(search), + User.email.contains(search) + ) + ) + + users = query.paginate( + page=page, + per_page=per_page, + error_out=False + ) + + return jsonify({ + 'users': [user.to_dict(include_sensitive=True) for user in users.items], + 'total': users.total, + 'pages': users.pages, + 'current_page': page, + 'per_page': per_page + }), 200 + + except Exception as e: + logger.error(f"User listing error: {e}") + return jsonify({'error': 'Failed to retrieve users'}), 500 + +@auth_bp.route('/users/', methods=['GET']) +@cross_origin() +@token_required +@admin_required +def get_user(current_user, user_id): + """Get specific user details (admin only)""" + try: + user = User.query.get_or_404(user_id) + + return jsonify({ + 'user': user.to_dict(include_sensitive=True) + }), 200 + + except Exception as e: + logger.error(f"User retrieval error: {e}") + return jsonify({'error': 'Failed to retrieve user'}), 500 + +@auth_bp.route('/users//deactivate', methods=['POST']) +@cross_origin() +@token_required +@admin_required +def deactivate_user(current_user, user_id): + 
"""Deactivate user account (admin only)""" + try: + user = User.query.get_or_404(user_id) + + if user.id == current_user.id: + return jsonify({'error': 'Cannot deactivate your own account'}), 400 + + user.is_active = False + db.session.commit() + + logger.info(f"User deactivated by admin {current_user.username}: {user.username}") + + return jsonify({'message': 'User deactivated successfully'}), 200 + + except Exception as e: + logger.error(f"User deactivation error: {e}") + db.session.rollback() + return jsonify({'error': 'Failed to deactivate user'}), 500 + +@auth_bp.route('/users//activate', methods=['POST']) +@cross_origin() +@token_required +@admin_required +def activate_user(current_user, user_id): + """Activate user account (admin only)""" + try: + user = User.query.get_or_404(user_id) + + user.is_active = True + user.failed_login_attempts = 0 + user.account_locked_until = None + db.session.commit() + + logger.info(f"User activated by admin {current_user.username}: {user.username}") + + return jsonify({'message': 'User activated successfully'}), 200 + + except Exception as e: + logger.error(f"User activation error: {e}") + db.session.rollback() + return jsonify({'error': 'Failed to activate user'}), 500 + diff --git a/routes/isp_api.py b/routes/isp_api.py new file mode 100644 index 0000000000000000000000000000000000000000..299dce26bd12081eaad967ff31a7fa1a1989a31e --- /dev/null +++ b/routes/isp_api.py @@ -0,0 +1,433 @@ +""" +ISP API Routes + +Flask routes for the Virtual ISP Stack API endpoints +""" + +from flask import Blueprint, jsonify, request, Response, send_file +from flask_cors import cross_origin +import json +import time +from typing import Dict, Any +import asyncio + +# Import core modules +from core.dhcp_server import DHCPServer +from core.nat_engine import NATEngine +from core.firewall import FirewallEngine, FirewallRule, FirewallRuleBuilder, FirewallAction, FirewallDirection +from core.tcp_engine import TCPEngine +from core.virtual_router import 
VirtualRouter +from core.socket_translator import SocketTranslator +from core.packet_bridge import PacketBridge +from core.session_tracker import SessionTracker, SessionType, SessionState +from core.logger import VirtualISPLogger, LogLevel, LogCategory, LogFilter +from core.openvpn_manager import OpenVPNManager, initialize_openvpn_manager, get_openvpn_manager +from core.traffic_router import TrafficRouter + +# Create blueprint +isp_api = Blueprint("isp_api", __name__) + +# Global instances (will be initialized by main app) +nat_engine: NATEngine = None +traffic_router: TrafficRouter = None +logger: VirtualISPLogger = None + +def init_engines(config: Dict[str, Any]): + """Initialize all ISP stack engines including traffic router""" + global nat_engine, traffic_router, logger + + try: + # Initialize logger first + logger = VirtualISPLogger({}) + logger.start() + + # Initialize NAT engine + nat_engine = NATEngine(config.get("nat", {})) + + # Initialize OpenVPN Manager + initialize_openvpn_manager(config.get("openvpn", {})) + + # Initialize traffic router with NAT engine + traffic_router = TrafficRouter(config.get("traffic_router", {})) + traffic_router.set_components(nat_engine=nat_engine) + + # Start all engines + nat_engine.start() + + # Run traffic router in a new event loop + loop = asyncio.new_event_loop() + asyncio.set_event_loop(loop) + loop.run_until_complete(traffic_router.start()) + + logger.log(LogLevel.INFO, LogCategory.SYSTEM, "api", "All engines initialized and started") + + return True + + except Exception as e: + print(f"Error initializing engines: {e}") + return False + + +# Default configuration +DEFAULT_CONFIG = { + "nat": { + "port_range_start": 10000, + "port_range_end": 65535, + "session_timeout": 300 + }, + "traffic_router": { + "vpn_interface": "eth0", + "internet_interface": "eth1", + }, +} + + +@isp_api.route("/api/status", methods=["GET"]) +@cross_origin() +def get_status(): + """Get system status including traffic router""" + try: + # 
Collect status from all components + status = { + "components": {}, + "stats": {}, + "timestamp": time.time(), + } + + # Component status + status["components"]["nat_engine"] = nat_engine.is_running if nat_engine else False + status["components"]["traffic_router"] = traffic_router.is_running if traffic_router else False + status["components"]["logger"] = logger.is_running if logger else False + + # Statistics + status["stats"]["nat_sessions"] = len(nat_engine.get_sessions()) if nat_engine else 0 + + # Traffic router stats + if traffic_router: + traffic_stats = traffic_router.get_stats() + status["stats"]["traffic_router"] = traffic_stats + + return jsonify({ + "status": "success", + "system_status": status + }) + + except Exception as e: + return jsonify({ + "status": "error", + "message": str(e) + }), 500 + + +@isp_api.route("/api/config", methods=["POST"]) +@cross_origin() +def update_config(): + """Update system configuration""" + try: + config_data = request.get_json() + + # Here you would update the actual configuration + # For now, just return success + + if logger: + logger.log(LogLevel.INFO, LogCategory.SYSTEM, "api", "Configuration updated", metadata=config_data) + + return jsonify({ + "status": "success", + "message": "Configuration updated successfully" + }) + + except Exception as e: + if logger: + logger.log(LogLevel.ERROR, LogCategory.SYSTEM, "api", f"Configuration update failed: {str(e)}") + + return jsonify({ + "status": "error", + "message": str(e) + }), 500 + + +# NAT endpoints +@isp_api.route("/nat/sessions", methods=["GET"]) +@cross_origin() +def get_nat_sessions(): + """Get NAT session table""" + try: + if not nat_engine: + return jsonify({"status": "error", "message": "NAT engine not initialized"}), 500 + + sessions = nat_engine.get_sessions() + + return jsonify({ + "status": "success", + "sessions": sessions, + "count": len(sessions) + }) + + except Exception as e: + return jsonify({ + "status": "error", + "message": str(e) + }), 500 + + 
+@isp_api.route("/nat/stats", methods=["GET"]) +@cross_origin() +def get_nat_stats(): + """Get NAT statistics""" + try: + if not nat_engine: + return jsonify({"status": "error", "message": "NAT engine not initialized"}), 500 + + stats = nat_engine.get_stats() + + return jsonify({ + "status": "success", + "stats": stats + }) + + except Exception as e: + return jsonify({ + "status": "error", + "message": str(e) + }), 500 + + + + + + + + + +@isp_api.route("/openvpn/start", methods=["POST"]) +@cross_origin() +def start_openvpn(): + """Start the OpenVPN server.""" + try: + openvpn_manager = get_openvpn_manager() + if not openvpn_manager: + return jsonify({"status": "error", "message": "OpenVPN manager not initialized"}), 500 + + openvpn_manager.start_server() + if logger: + logger.log(LogLevel.INFO, LogCategory.SYSTEM, "api", "OpenVPN server started via API") + return jsonify({"status": "success", "message": "OpenVPN server started successfully"}) + except Exception as e: + if logger: + logger.log(LogLevel.ERROR, LogCategory.SYSTEM, "api", f"Failed to start OpenVPN server via API: {str(e)}") + return jsonify({"status": "error", "message": str(e)}), 500 + + + + +@isp_api.route("/openvpn/status", methods=["GET"]) +@cross_origin() +def get_openvpn_status(): + """Get OpenVPN server status.""" + try: + openvpn_manager = get_openvpn_manager() + if not openvpn_manager: + return jsonify({"status": "error", "message": "OpenVPN manager not initialized"}), 500 + + status = openvpn_manager.get_server_status() + clients = openvpn_manager.get_connected_clients() + + return jsonify({ + "status": "success", + "server_status": status.__dict__, + "connected_clients": clients + }) + except Exception as e: + if logger: + logger.log(LogLevel.ERROR, LogCategory.SYSTEM, "api", f"Failed to get OpenVPN server status: {str(e)}") + return jsonify({"status": "error", "message": str(e)}), 500 + + + + +@isp_api.route("/openvpn/simulate_client", methods=["POST"]) +@cross_origin() +def 
simulate_openvpn_client(): + """Simulate a VPN client connection.""" + try: + openvpn_manager = get_openvpn_manager() + if not openvpn_manager: + return jsonify({"status": "error", "message": "OpenVPN manager not initialized"}), 500 + + data = request.get_json() + client_id = data.get("client_id") + ip_address = data.get("ip_address") + + if not client_id or not ip_address: + return jsonify({"status": "error", "message": "client_id and ip_address are required"}), 400 + + # Directly add client to the manager's clients dictionary + from core.openvpn_manager import VPNClient + client = VPNClient( + client_id=client_id, + common_name=client_id, + ip_address=ip_address, + connected_at=time.time(), + bytes_received=0, + bytes_sent=0, + status="connected", + routed_through_vpn=True + ) + openvpn_manager.clients[client_id] = client + + if logger: + logger.log(LogLevel.INFO, LogCategory.SYSTEM, "api", f"Simulated VPN client connection: {client_id} ({ip_address})") + return jsonify({"status": "success", "message": f"Simulated client {client_id} connected with IP {ip_address}"}) + except Exception as e: + if logger: + logger.log(LogLevel.ERROR, LogCategory.SYSTEM, "api", f"Failed to simulate VPN client connection: {str(e)}") + return jsonify({"status": "error", "message": str(e)}), 500 + +@isp_api.route("/get_engines", methods=["GET"]) +@cross_origin() +def get_engines(): + """Get references to NAT engine and Traffic Router for testing purposes.""" + try: + global nat_engine, traffic_router + return jsonify({ + "status": "success", + "nat_engine_initialized": nat_engine is not None, + "traffic_router_initialized": traffic_router is not None + }) + except Exception as e: + return jsonify({"status": "error", "message": str(e)}), 500 + + + + +@isp_api.route("/openvpn/generate_client_config", methods=["POST"]) +@cross_origin() +def generate_client_config_route(): + """Generate OpenVPN client configuration file.""" + try: + openvpn_manager = get_openvpn_manager() + if not 
import smtplib
from email.mime.multipart import MIMEMultipart
from email.mime.base import MIMEBase
from email.mime.text import MIMEText
from email import encoders

# Email configuration.  Read from the environment so real credentials never
# live in source control; the fallbacks match the previous placeholder values,
# so behavior is unchanged until the variables are actually set.
SMTP_SERVER = os.environ.get("SMTP_SERVER", "smtp.example.com")
SMTP_PORT = int(os.environ.get("SMTP_PORT", "587"))
EMAIL_ADDRESS = os.environ.get("EMAIL_ADDRESS", "your_email@example.com")
EMAIL_PASSWORD = os.environ.get("EMAIL_PASSWORD", "your_email_password")

@isp_api.route("/api/openvpn/email_config", methods=["POST"])
@cross_origin()
def email_client_config():
    """Email an OpenVPN client configuration file to the user.

    Expects JSON with: email, client_name, server_ip.  Generates the .ovpn
    file via the OpenVPN manager, mails it as an attachment over STARTTLS,
    and always removes the temporary file afterwards (previously the file
    leaked on any send failure).
    """
    config_path = None
    try:
        data = request.get_json()
        recipient_email = data.get("email")
        client_name = data.get("client_name")
        server_ip = data.get("server_ip")

        if not recipient_email or not client_name or not server_ip:
            return jsonify({"status": "error", "message": "email, client_name, and server_ip are required"}), 400

        openvpn_manager = get_openvpn_manager()
        if not openvpn_manager:
            return jsonify({"status": "error", "message": "OpenVPN manager not initialized"}), 500

        config_content = openvpn_manager.generate_client_config(client_name, server_ip)
        config_filename = f"{client_name}.ovpn"
        config_path = os.path.join(openvpn_manager.config_storage_path, config_filename)

        with open(config_path, "w") as f:
            f.write(config_content)

        msg = MIMEMultipart()
        msg["From"] = EMAIL_ADDRESS
        msg["To"] = recipient_email
        msg["Subject"] = "Your OpenVPN Configuration File"

        body = "Please find your OpenVPN configuration file attached."
        msg.attach(MIMEText(body, "plain"))

        with open(config_path, "rb") as attachment:
            part = MIMEBase("application", "octet-stream")
            part.set_payload(attachment.read())
        encoders.encode_base64(part)
        part.add_header("Content-Disposition", f"attachment; filename= {config_filename}")
        msg.attach(part)

        with smtplib.SMTP(SMTP_SERVER, SMTP_PORT) as server:
            server.starttls()
            server.login(EMAIL_ADDRESS, EMAIL_PASSWORD)
            server.send_message(msg)

        if logger:
            logger.log(LogLevel.INFO, LogCategory.SYSTEM, "api", f"Emailed client config to {recipient_email} for {client_name}")
        return jsonify({"status": "success", "message": f"Configuration emailed to {recipient_email}"})

    except Exception as e:
        if logger:
            logger.log(LogLevel.ERROR, LogCategory.SYSTEM, "api", f"Failed to email client config: {str(e)}")
        return jsonify({"status": "error", "message": str(e)}), 500
    finally:
        # Clean up the generated config even when sending failed.
        if config_path and os.path.exists(config_path):
            os.remove(config_path)
@isp_api.route("/api/openvpn/download_config/<client_name>", methods=["GET"])
@cross_origin()
def download_client_config(client_name):
    """Download an OpenVPN client configuration file.

    The config is regenerated on every request for simplicity; a real
    deployment would store and retrieve previously issued configs.
    """
    try:
        import re  # local: module-level imports are outside this view

        openvpn_manager = get_openvpn_manager()
        if not openvpn_manager:
            return jsonify({"status": "error", "message": "OpenVPN manager not initialized"}), 500

        # client_name comes straight from the URL and is joined into a
        # filesystem path below: reject anything that could escape the
        # config storage directory (path traversal via '..', '/', etc.).
        if not re.fullmatch(r"[A-Za-z0-9_.-]+", client_name) or ".." in client_name:
            return jsonify({"status": "error", "message": "invalid client name"}), 400

        server_ip = request.host.split(":")[0]  # host serving this request
        config_content = openvpn_manager.generate_client_config(client_name, server_ip)
        config_filename = f"{client_name}.ovpn"
        config_path = os.path.join(openvpn_manager.config_storage_path, config_filename)

        with open(config_path, "w") as f:
            f.write(config_content)

        if logger:
            logger.log(LogLevel.INFO, LogCategory.SYSTEM, "api", f"Serving client config for download: {client_name}")

        return send_file(config_path, as_attachment=True, download_name=config_filename)

    except Exception as e:
        if logger:
            logger.log(LogLevel.ERROR, LogCategory.SYSTEM, "api", f"Failed to download client config: {str(e)}")
        return jsonify({"status": "error", "message": str(e)}), 500
@vpn_client_bp.route('/vpn-clients', methods=['GET'])
@cross_origin()
@token_required
def list_vpn_clients(current_user):
    """List user's VPN clients"""
    try:
        records = VPNClient.query.filter_by(user_id=current_user.id).all()

        payload = {
            'clients': [record.to_dict() for record in records],
            'total': len(records),
            'can_create_more': current_user.can_create_vpn_client(),
        }
        return jsonify(payload), 200

    except Exception as e:
        logger.error(f"VPN client listing error: {e}")
        return jsonify({'error': 'Failed to retrieve VPN clients'}), 500
@vpn_client_bp.route('/vpn-clients', methods=['POST'])
@cross_origin()
@token_required
def create_vpn_client(current_user):
    """Create new VPN client configuration.

    Expects JSON with client_name and protocol (openvpn/ikev2/wireguard);
    optional device_type.  Persists the client row, then generates its
    certificates/config.  On generation failure the freshly committed row
    is deleted again — a plain rollback after commit is a no-op and left
    an orphaned client that blocked the name and counted against quota.
    """
    try:
        data = request.get_json()

        if not data:
            return jsonify({'error': 'No data provided'}), 400

        # Validate required fields
        required_fields = ['client_name', 'protocol']
        for field in required_fields:
            if field not in data or not data[field]:
                return jsonify({'error': f'{field} is required'}), 400

        client_name = data['client_name'].strip()
        protocol = data['protocol'].lower()
        device_type = data.get('device_type', 'unknown')

        # Validate protocol
        supported_protocols = ['openvpn', 'ikev2', 'wireguard']
        if protocol not in supported_protocols:
            return jsonify({
                'error': f'Unsupported protocol. Supported: {", ".join(supported_protocols)}'
            }), 400

        # Validate client name
        if len(client_name) < 3 or len(client_name) > 100:
            return jsonify({'error': 'Client name must be 3-100 characters'}), 400

        # Check if user can create more clients
        if not current_user.can_create_vpn_client():
            return jsonify({
                'error': 'Maximum number of VPN clients reached for your subscription'
            }), 403

        # Check if client name already exists for this user
        existing_client = VPNClient.query.filter_by(
            user_id=current_user.id,
            client_name=client_name
        ).first()

        if existing_client:
            return jsonify({'error': 'Client name already exists'}), 409

        # Create VPN client
        vpn_client = VPNClient(
            user_id=current_user.id,
            client_name=client_name,
            protocol=protocol,
            device_type=device_type
        )

        db.session.add(vpn_client)
        db.session.commit()

        # Generate certificates and configuration
        try:
            config_result = generate_client_configuration(vpn_client, current_user)

            vpn_client.certificate_serial = config_result.get('certificate_serial')
            vpn_client.certificate_path = config_result.get('certificate_path')
            vpn_client.private_key_path = config_result.get('private_key_path')
            vpn_client.config_file_path = config_result.get('config_file_path')

            if protocol == 'wireguard':
                vpn_client.public_key = config_result.get('public_key')

            db.session.commit()

        except Exception as e:
            logger.error(f"Configuration generation error: {e}")
            # The client row was committed above, so rollback alone cannot
            # remove it; delete the orphan explicitly.
            db.session.rollback()
            db.session.delete(vpn_client)
            db.session.commit()
            return jsonify({'error': 'Failed to generate client configuration'}), 500

        logger.info(f"VPN client created: {client_name} ({protocol}) for user {current_user.username}")

        return jsonify({
            'message': 'VPN client created successfully',
            'client': vpn_client.to_dict(),
            'download_url': f'/api/vpn-clients/{vpn_client.id}/download'
        }), 201

    except Exception as e:
        logger.error(f"VPN client creation error: {e}")
        db.session.rollback()
        return jsonify({'error': 'Failed to create VPN client'}), 500
@vpn_client_bp.route('/vpn-clients/<int:client_id>', methods=['GET'])
@cross_origin()
@token_required
def get_vpn_client(current_user, client_id):
    """Get VPN client details"""
    try:
        record = VPNClient.query.filter_by(
            id=client_id,
            user_id=current_user.id
        ).first()

        if record is None:
            return jsonify({'error': 'VPN client not found'}), 404

        return jsonify({'client': record.to_dict()}), 200

    except Exception as e:
        logger.error(f"VPN client retrieval error: {e}")
        return jsonify({'error': 'Failed to retrieve VPN client'}), 500
@vpn_client_bp.route('/vpn-clients/<int:client_id>', methods=['DELETE'])
@cross_origin()
@token_required
def delete_vpn_client(current_user, client_id):
    """Delete VPN client"""
    try:
        target = VPNClient.query.filter_by(
            id=client_id,
            user_id=current_user.id
        ).first()

        if target is None:
            return jsonify({'error': 'VPN client not found'}), 404

        # End any live sessions before the row disappears.
        for session in [s for s in target.sessions if s.is_active()]:
            session.disconnect('Client deleted')

        # Best-effort removal of on-disk certificate material.
        try:
            for path in (target.certificate_path, target.private_key_path, target.config_file_path):
                if path and os.path.exists(path):
                    os.remove(path)
        except Exception as e:
            logger.warning(f"Failed to remove certificate files: {e}")

        db.session.delete(target)
        db.session.commit()

        logger.info(f"VPN client deleted: {target.client_name} for user {current_user.username}")

        return jsonify({'message': 'VPN client deleted successfully'}), 200

    except Exception as e:
        logger.error(f"VPN client deletion error: {e}")
        db.session.rollback()
        return jsonify({'error': 'Failed to delete VPN client'}), 500
@vpn_client_bp.route('/vpn-clients/<int:client_id>/download', methods=['GET'])
@cross_origin()
@token_required
def download_client_config(current_user, client_id):
    """Download VPN client configuration"""
    try:
        # Get VPN client
        vpn_client = VPNClient.query.filter_by(
            id=client_id,
            user_id=current_user.id
        ).first()

        if not vpn_client:
            return jsonify({'error': 'VPN client not found'}), 404

        # Regenerate only when no stored configuration exists.  Default to an
        # empty dict so the lookups below never touch an unbound name — the
        # original referenced config_result even when regeneration was
        # skipped, raising UnboundLocalError for clients with cached configs.
        config_result = {}
        if not vpn_client.config_data:
            config_result = generate_client_configuration(vpn_client, current_user)
            if not config_result.get('success'):
                return jsonify({'error': config_result.get('error', 'Failed to generate configuration')}), 500

        # Create response with configuration file
        from flask import make_response

        filename = config_result.get('filename') or f'{vpn_client.client_name}-{vpn_client.protocol}.conf'
        config_content = config_result.get('config', vpn_client.config_data)

        response = make_response(config_content)
        response.headers['Content-Type'] = 'application/octet-stream'
        # Interpolate the actual filename (the original header carried a
        # literal placeholder instead of the client's file name).
        response.headers['Content-Disposition'] = f'attachment; filename="{filename}"'

        logger.info(f"Configuration downloaded for client {client_id} by user {current_user.username}")

        return response

    except Exception as e:
        logger.error(f"Configuration download error: {e}")
        return jsonify({'error': 'Failed to download configuration'}), 500
os.path.exists(client.config_file_path): + os.remove(client.config_file_path) + except Exception as e: + logger.warning(f"Failed to remove old certificate files: {e}") + + # Generate new configuration + config_result = generate_client_configuration(client, current_user) + + # Update client with new certificate information + client.certificate_serial = config_result.get('certificate_serial') + client.certificate_path = config_result.get('certificate_path') + client.private_key_path = config_result.get('private_key_path') + client.config_file_path = config_result.get('config_file_path') + + if client.protocol == 'wireguard': + client.public_key = config_result.get('public_key') + + db.session.commit() + + logger.info(f"Configuration regenerated: {client.client_name} for user {current_user.username}") + + return jsonify({ + 'message': 'Configuration regenerated successfully', + 'client': client.to_dict(), + 'download_url': f'/api/vpn-clients/{client.id}/download' + }), 200 + + except Exception as e: + logger.error(f"Configuration regeneration error: {e}") + db.session.rollback() + return jsonify({'error': 'Failed to regenerate configuration'}), 500 + +@vpn_client_bp.route('/vpn-clients//sessions', methods=['GET']) +@cross_origin() +@token_required +def get_client_sessions(current_user, client_id): + """Get VPN client session history""" + try: + client = VPNClient.query.filter_by( + id=client_id, + user_id=current_user.id + ).first() + + if not client: + return jsonify({'error': 'VPN client not found'}), 404 + + page = request.args.get('page', 1, type=int) + per_page = request.args.get('per_page', 20, type=int) + + sessions = VPNSession.query.filter_by(client_id=client_id).order_by( + VPNSession.connected_at.desc() + ).paginate( + page=page, + per_page=per_page, + error_out=False + ) + + return jsonify({ + 'sessions': [session.to_dict() for session in sessions.items], + 'total': sessions.total, + 'pages': sessions.pages, + 'current_page': page, + 'per_page': per_page, 
def generate_client_configuration(vpn_client, user):
    """Generate VPN client configuration and certificates.

    Returns a dict with 'success': True plus config/server details, or
    'success': False and an 'error' message.  Side effect: updates the
    VPNClient row (server info, config blob, status) and commits.
    """
    try:
        # Imported here: this module's top-level imports do not bring
        # VPNServerManager into scope, so the original raised NameError the
        # first time any route called this helper.
        from services.vpn_server_manager import VPNServerManager

        # Initialize VPN server manager
        vpn_manager = VPNServerManager()

        # Generate client configuration for the specified protocol
        config_data = vpn_manager.generate_client_config(
            username=user.username,
            protocol=vpn_client.protocol,
            user_data={
                'email': user.email,
                'subscription_type': user.subscription_type
            }
        )

        # Update VPN client record
        vpn_client.server_ip = config_data.get('server_ip')
        vpn_client.server_port = config_data.get('server_port')
        vpn_client.config_data = config_data.get('config')
        vpn_client.status = 'active'
        vpn_client.created_at = datetime.utcnow()

        db.session.commit()

        logger.info(f"Generated {vpn_client.protocol} configuration for user {user.username}")

        return {
            'success': True,
            'config': config_data.get('config'),
            'filename': config_data.get('filename'),
            'protocol': config_data.get('protocol'),
            'server_info': {
                'ip': config_data.get('server_ip'),
                'port': config_data.get('server_port')
            },
            'additional_data': {
                'ca_certificate': config_data.get('ca_certificate'),
                'client_certificate': config_data.get('client_certificate'),
                'client_private_key': config_data.get('client_private_key')
            }
        }

    except Exception as e:
        logger.error(f"Failed to generate client configuration: {e}")
        db.session.rollback()
        return {
            'success': False,
            'error': str(e)
        }
def token_required(f):
    """Decorator to require valid JWT token"""
    @wraps(f)
    def decorated(*args, **kwargs):
        auth_header = request.headers.get('Authorization')

        if not auth_header:
            return jsonify({'error': 'Token is missing'}), 401

        # Accept either a bare token or the usual "Bearer <token>" form.
        raw_token = auth_header[7:] if auth_header.startswith('Bearer ') else auth_header

        try:
            payload = jwt.decode(raw_token, current_app.config['SECRET_KEY'], algorithms=['HS256'])
            current_user = User.query.get(payload['user_id'])

            if not current_user:
                return jsonify({'error': 'Invalid token'}), 401

        except jwt.ExpiredSignatureError:
            return jsonify({'error': 'Token has expired'}), 401
        except jwt.InvalidTokenError:
            return jsonify({'error': 'Invalid token'}), 401

        return f(current_user, *args, **kwargs)

    return decorated
@vpn_server_bp.route('/server/stop/<protocol>', methods=['POST'])
@token_required
def stop_server(current_user, protocol):
    """Stop VPN server for specified protocol"""
    try:
        vpn_manager = VPNServerManager()
        proto = protocol.lower()

        if proto == 'openvpn':
            stopped = vpn_manager.stop_openvpn_server()
        elif proto in ['ikev2', 'ipsec']:
            stopped = vpn_manager.stop_ipsec_server()
        elif proto == 'wireguard':
            stopped = vpn_manager.stop_wireguard_server()
        else:
            return jsonify({'error': 'Unsupported protocol'}), 400

        if not stopped:
            return jsonify({
                'success': False,
                'error': f'Failed to stop {protocol.upper()} server'
            }), 500

        return jsonify({
            'success': True,
            'message': f'{protocol.upper()} server stopped successfully'
        })

    except Exception as e:
        logger.error(f"Server stop error: {e}")
        return jsonify({'error': 'Failed to stop server'}), 500
@vpn_server_bp.route('/server/clients/<protocol>', methods=['GET'])
@token_required
def get_connected_clients(current_user, protocol):
    """Get connected clients for specified protocol"""
    try:
        all_status = VPNServerManager().get_server_status()

        info = all_status.get(protocol.lower())
        if not info:
            return jsonify({'error': 'Unsupported protocol'}), 400

        return jsonify({
            'success': True,
            'protocol': protocol.upper(),
            'clients': info.get('clients', []),
            'client_count': info.get('client_count', 0),
            'running': info.get('running', False)
        })

    except Exception as e:
        logger.error(f"Connected clients error: {e}")
        return jsonify({'error': 'Failed to get connected clients'}), 500
@vpn_server_bp.route('/server/logs/<protocol>', methods=['GET'])
@token_required
def get_server_logs(current_user, protocol):
    """Get server logs for specified protocol.

    Query params: lines (default 50, clamped to 1..1000).  OpenVPN has its
    own log file; IKEv2/IPSec and WireGuard entries are filtered out of
    syslog.  Returns an empty log list on any read failure.
    """
    try:
        lines = request.args.get('lines', 50, type=int)
        lines = max(1, min(lines, 1000))  # clamp: tail/slice with <=0 lines is meaningless

        log_files = {
            'openvpn': '/var/log/openvpn/openvpn.log',
            'ikev2': '/var/log/syslog',  # strongSwan logs to syslog
            'ipsec': '/var/log/syslog',
            'wireguard': '/var/log/syslog'  # WireGuard logs to syslog
        }

        log_file = log_files.get(protocol.lower())
        if not log_file:
            return jsonify({'error': 'Unsupported protocol'}), 400

        try:
            import subprocess

            if protocol.lower() == 'openvpn':
                # Get OpenVPN specific logs
                result = subprocess.run(
                    ['tail', '-n', str(lines), log_file],
                    capture_output=True, text=True
                )
                out = result.stdout if result.returncode == 0 else ''
                logs = out.strip().split('\n') if out.strip() else []
            else:
                # Filter syslog for protocol-specific entries
                if protocol.lower() in ['ikev2', 'ipsec']:
                    grep_pattern = 'charon'
                else:  # wireguard
                    grep_pattern = 'wireguard'

                # Argument-list invocation instead of a shell pipeline:
                # no shell=True, so nothing here is shell-interpreted.
                # The tail is done as a Python slice.
                result = subprocess.run(
                    ['grep', grep_pattern, log_file],
                    capture_output=True, text=True
                )
                if result.returncode == 0 and result.stdout.strip():
                    logs = result.stdout.strip().split('\n')[-lines:]
                else:
                    logs = []

        except Exception:
            logs = []

        return jsonify({
            'success': True,
            'protocol': protocol.upper(),
            'logs': logs,
            'line_count': len(logs)
        })

    except Exception as e:
        logger.error(f"Server logs error: {e}")
        return jsonify({'error': 'Failed to get server logs'}), 500
"""
Certificate Authority Service

Provides PKI infrastructure for VPN client certificate management
"""

import os
import subprocess
import logging
from cryptography import x509
from cryptography.x509.oid import NameOID, ExtensionOID
from cryptography.hazmat.primitives import hashes, serialization
from cryptography.hazmat.primitives.asymmetric import rsa
from datetime import datetime, timedelta

logger = logging.getLogger(__name__)

class CertificateAuthority:
    """Certificate Authority for VPN client certificates.

    Owns a self-signed root CA on local disk (``ca_dir``) and tracks issued
    certificates through an OpenSSL-style ``index.txt`` plus a ``serial``
    file holding the next serial number in hex.
    """

    def __init__(self, ca_dir='/etc/vpn-ca'):
        # Filesystem layout of the CA; every artifact lives under ca_dir.
        self.ca_dir = ca_dir
        self.ca_cert_path = os.path.join(ca_dir, 'ca.crt')
        self.ca_key_path = os.path.join(ca_dir, 'ca.key')
        self.crl_path = os.path.join(ca_dir, 'crl.pem')
        self.serial_file = os.path.join(ca_dir, 'serial')
        self.index_file = os.path.join(ca_dir, 'index.txt')

        # Ensure CA directory exists (0o700: private key material lives here)
        os.makedirs(ca_dir, mode=0o700, exist_ok=True)

        # Initialize CA if not exists
        if not os.path.exists(self.ca_cert_path):
            self._create_root_ca()

        # Initialize index file for certificate tracking
        if not os.path.exists(self.index_file):
            with open(self.index_file, 'w') as f:
                f.write('')  # Empty index file

    def _create_root_ca(self):
        """Create root CA certificate and private key.

        Generates a 4096-bit RSA key and a self-signed X.509 certificate
        valid for 10 years, writes both under ``ca_dir`` with restrictive
        permissions, and seeds the serial-number file at '02'.
        """
        try:
            logger.info("Creating root CA certificate")

            # Generate private key
            private_key = rsa.generate_private_key(
                public_exponent=65537,
                key_size=4096
            )

            # Self-signed: subject and issuer are the same distinguished name.
            subject = issuer = x509.Name([
                x509.NameAttribute(NameOID.COUNTRY_NAME, "US"),
                x509.NameAttribute(NameOID.STATE_OR_PROVINCE_NAME, "Virtual"),
                x509.NameAttribute(NameOID.LOCALITY_NAME, "Internet"),
                x509.NameAttribute(NameOID.ORGANIZATION_NAME, "VPN Service CA"),
                x509.NameAttribute(NameOID.ORGANIZATIONAL_UNIT_NAME, "Certificate Authority"),
                x509.NameAttribute(NameOID.COMMON_NAME, "VPN Root CA"),
            ])

            cert = x509.CertificateBuilder().subject_name(
                subject
            ).issuer_name(
                issuer
            ).public_key(
                private_key.public_key()
            ).serial_number(
                1
            ).not_valid_before(
                datetime.utcnow()
            ).not_valid_after(
                datetime.utcnow() + timedelta(days=3650)  # 10 years
            ).add_extension(
                # CA certificate: may sign certificates and CRLs, nothing else.
                x509.BasicConstraints(ca=True, path_length=None),
                critical=True,
            ).add_extension(
                x509.KeyUsage(
                    key_cert_sign=True,
                    crl_sign=True,
                    digital_signature=False,
                    key_encipherment=False,
                    key_agreement=False,
                    data_encipherment=False,
                    content_commitment=False,
                    encipher_only=False,
                    decipher_only=False
                ),
                critical=True,
            ).add_extension(
                x509.SubjectKeyIdentifier.from_public_key(private_key.public_key()),
                critical=False,
            ).sign(private_key, hashes.SHA256())

            # Save certificate and private key
            with open(self.ca_cert_path, 'wb') as f:
                f.write(cert.public_bytes(serialization.Encoding.PEM))

            with open(self.ca_key_path, 'wb') as f:
                # NOTE(review): key is written unencrypted; acceptable only
                # because the file is chmod 0o600 below — confirm this
                # matches the deployment's threat model.
                f.write(private_key.private_bytes(
                    encoding=serialization.Encoding.PEM,
                    format=serialization.PrivateFormat.PKCS8,
                    encryption_algorithm=serialization.NoEncryption()
                ))

            # Set secure permissions
            os.chmod(self.ca_key_path, 0o600)
            os.chmod(self.ca_cert_path, 0o644)

            # Serial file holds the NEXT serial in hex; the root CA used
            # serial 1, so client issuance starts at 02.
            with open(self.serial_file, 'w') as f:
                f.write('02')

            logger.info("Root CA certificate created successfully")
except Exception as e: + logger.error(f"Failed to create root CA: {e}") + raise + + def generate_client_certificate(self, username, email, validity_days=365): + """Generate client certificate for VPN authentication""" + try: + logger.info(f"Generating client certificate for {username}") + + # Load CA certificate and private key + with open(self.ca_cert_path, 'rb') as f: + ca_cert = x509.load_pem_x509_certificate(f.read()) + + with open(self.ca_key_path, 'rb') as f: + ca_private_key = serialization.load_pem_private_key(f.read(), password=None) + + # Generate client private key + client_private_key = rsa.generate_private_key( + public_exponent=65537, + key_size=2048 + ) + + # Get next serial number + serial_number = self._get_next_serial() + + # Create client certificate + subject = x509.Name([ + x509.NameAttribute(NameOID.COUNTRY_NAME, "US"), + x509.NameAttribute(NameOID.STATE_OR_PROVINCE_NAME, "Virtual"), + x509.NameAttribute(NameOID.LOCALITY_NAME, "Internet"), + x509.NameAttribute(NameOID.ORGANIZATION_NAME, "VPN Service"), + x509.NameAttribute(NameOID.ORGANIZATIONAL_UNIT_NAME, "VPN Clients"), + x509.NameAttribute(NameOID.COMMON_NAME, username), + x509.NameAttribute(NameOID.EMAIL_ADDRESS, email), + ]) + + cert = x509.CertificateBuilder().subject_name( + subject + ).issuer_name( + ca_cert.subject + ).public_key( + client_private_key.public_key() + ).serial_number( + serial_number + ).not_valid_before( + datetime.utcnow() + ).not_valid_after( + datetime.utcnow() + timedelta(days=validity_days) + ).add_extension( + x509.BasicConstraints(ca=False, path_length=None), + critical=True, + ).add_extension( + x509.KeyUsage( + key_cert_sign=False, + crl_sign=False, + digital_signature=True, + key_encipherment=True, + key_agreement=False, + data_encipherment=False, + content_commitment=False, + encipher_only=False, + decipher_only=False + ), + critical=True, + ).add_extension( + x509.ExtendedKeyUsage([ + x509.oid.ExtendedKeyUsageOID.CLIENT_AUTH, + ]), + critical=True, + 
).add_extension( + x509.SubjectKeyIdentifier.from_public_key(client_private_key.public_key()), + critical=False, + ).add_extension( + x509.AuthorityKeyIdentifier.from_issuer_public_key(ca_cert.public_key()), + critical=False, + ).sign(ca_private_key, hashes.SHA256()) + + # Update certificate index + self._update_certificate_index(cert, 'V') # V = Valid + + logger.info(f"Client certificate generated successfully for {username} (Serial: {serial_number})") + + return { + 'certificate': cert.public_bytes(serialization.Encoding.PEM), + 'private_key': client_private_key.private_bytes( + encoding=serialization.Encoding.PEM, + format=serialization.PrivateFormat.PKCS8, + encryption_algorithm=serialization.NoEncryption() + ), + 'serial_number': serial_number, + 'not_valid_before': cert.not_valid_before, + 'not_valid_after': cert.not_valid_after + } + + except Exception as e: + logger.error(f"Failed to generate client certificate: {e}") + raise + + def revoke_certificate(self, serial_number, reason='unspecified'): + """Revoke a client certificate""" + try: + logger.info(f"Revoking certificate with serial {serial_number}") + + # Update certificate index + self._update_certificate_index_status(serial_number, 'R', reason) + + # Generate new CRL + self._generate_crl() + + logger.info(f"Certificate {serial_number} revoked successfully") + + except Exception as e: + logger.error(f"Failed to revoke certificate {serial_number}: {e}") + raise + + def get_certificate_status(self, serial_number): + """Get certificate status from index""" + try: + with open(self.index_file, 'r') as f: + for line in f: + if line.strip(): + parts = line.strip().split('\t') + if len(parts) >= 3 and parts[3] == str(serial_number): + status = parts[0] + if status == 'V': + return 'valid' + elif status == 'R': + return 'revoked' + elif status == 'E': + return 'expired' + + return 'unknown' + + except Exception as e: + logger.error(f"Failed to get certificate status: {e}") + return 'unknown' + + def 
list_certificates(self): + """List all certificates in the index""" + try: + certificates = [] + + with open(self.index_file, 'r') as f: + for line in f: + if line.strip(): + parts = line.strip().split('\t') + if len(parts) >= 6: + cert_info = { + 'status': parts[0], + 'expiry_date': parts[1], + 'revocation_date': parts[2] if parts[2] else None, + 'serial_number': parts[3], + 'filename': parts[4], + 'subject': parts[5] + } + certificates.append(cert_info) + + return certificates + + except Exception as e: + logger.error(f"Failed to list certificates: {e}") + return [] + + def _get_next_serial(self): + """Get next serial number for certificate""" + try: + with open(self.serial_file, 'r') as f: + serial = int(f.read().strip(), 16) + except (FileNotFoundError, ValueError): + serial = 2 + + # Update serial file + with open(self.serial_file, 'w') as f: + f.write(f'{serial + 1:02X}') + + return serial + + def _update_certificate_index(self, cert, status): + """Update certificate index with new certificate""" + try: + # Format: status \t expiry_date \t revocation_date \t serial \t filename \t subject + expiry_date = cert.not_valid_after.strftime('%y%m%d%H%M%SZ') + serial_hex = f'{cert.serial_number:02X}' + subject_str = cert.subject.rfc4514_string() + + index_line = f"{status}\t{expiry_date}\t\t{serial_hex}\tunknown\t{subject_str}\n" + + with open(self.index_file, 'a') as f: + f.write(index_line) + + except Exception as e: + logger.error(f"Failed to update certificate index: {e}") + raise + + def _update_certificate_index_status(self, serial_number, new_status, reason=None): + """Update certificate status in index""" + try: + lines = [] + updated = False + + with open(self.index_file, 'r') as f: + lines = f.readlines() + + for i, line in enumerate(lines): + if line.strip(): + parts = line.strip().split('\t') + if len(parts) >= 4 and parts[3] == str(serial_number): + # Update status + parts[0] = new_status + if new_status == 'R' and reason: + # Add revocation date + 
parts[2] = datetime.utcnow().strftime('%y%m%d%H%M%SZ') + + lines[i] = '\t'.join(parts) + '\n' + updated = True + break + + if updated: + with open(self.index_file, 'w') as f: + f.writelines(lines) + + except Exception as e: + logger.error(f"Failed to update certificate status: {e}") + raise + + def _generate_crl(self): + """Generate Certificate Revocation List""" + try: + logger.info("Generating Certificate Revocation List") + + # Load CA certificate and private key + with open(self.ca_cert_path, 'rb') as f: + ca_cert = x509.load_pem_x509_certificate(f.read()) + + with open(self.ca_key_path, 'rb') as f: + ca_private_key = serialization.load_pem_private_key(f.read(), password=None) + + # Get revoked certificates + revoked_certs = [] + + with open(self.index_file, 'r') as f: + for line in f: + if line.strip(): + parts = line.strip().split('\t') + if len(parts) >= 4 and parts[0] == 'R': + serial_number = int(parts[3], 16) + revocation_date = datetime.strptime(parts[2], '%y%m%d%H%M%SZ') + + revoked_cert = x509.RevokedCertificateBuilder().serial_number( + serial_number + ).revocation_date( + revocation_date + ).build() + + revoked_certs.append(revoked_cert) + + # Build CRL + crl_builder = x509.CertificateRevocationListBuilder().issuer_name( + ca_cert.subject + ).last_update( + datetime.utcnow() + ).next_update( + datetime.utcnow() + timedelta(days=30) + ) + + for revoked_cert in revoked_certs: + crl_builder = crl_builder.add_revoked_certificate(revoked_cert) + + crl = crl_builder.sign(ca_private_key, hashes.SHA256()) + + # Save CRL + with open(self.crl_path, 'wb') as f: + f.write(crl.public_bytes(serialization.Encoding.PEM)) + + logger.info("CRL generated successfully") + + except Exception as e: + logger.error(f"Failed to generate CRL: {e}") + raise + + def get_ca_certificate(self): + """Get CA certificate in PEM format""" + try: + with open(self.ca_cert_path, 'rb') as f: + return f.read() + except Exception as e: + logger.error(f"Failed to read CA certificate: {e}") + 
return None + + def get_crl(self): + """Get Certificate Revocation List""" + try: + if os.path.exists(self.crl_path): + with open(self.crl_path, 'rb') as f: + return f.read() + else: + # Generate CRL if it doesn't exist + self._generate_crl() + with open(self.crl_path, 'rb') as f: + return f.read() + except Exception as e: + logger.error(f"Failed to read CRL: {e}") + return None + + def verify_certificate(self, cert_pem): + """Verify a certificate against the CA""" + try: + # Load certificates + cert = x509.load_pem_x509_certificate(cert_pem) + + with open(self.ca_cert_path, 'rb') as f: + ca_cert = x509.load_pem_x509_certificate(f.read()) + + # Verify signature + ca_public_key = ca_cert.public_key() + ca_public_key.verify( + cert.signature, + cert.tbs_certificate_bytes, + cert.signature_algorithm_oid._name + ) + + # Check validity period + now = datetime.utcnow() + if now < cert.not_valid_before or now > cert.not_valid_after: + return False, "Certificate expired or not yet valid" + + # Check revocation status + status = self.get_certificate_status(cert.serial_number) + if status == 'revoked': + return False, "Certificate revoked" + + return True, "Certificate valid" + + except Exception as e: + logger.error(f"Certificate verification failed: {e}") + return False, str(e) + + def cleanup_expired_certificates(self): + """Clean up expired certificates from the index""" + try: + logger.info("Cleaning up expired certificates") + + lines = [] + updated_count = 0 + + with open(self.index_file, 'r') as f: + lines = f.readlines() + + now = datetime.utcnow() + + for i, line in enumerate(lines): + if line.strip(): + parts = line.strip().split('\t') + if len(parts) >= 2 and parts[0] == 'V': + try: + expiry_date = datetime.strptime(parts[1], '%y%m%d%H%M%SZ') + if now > expiry_date: + # Mark as expired + parts[0] = 'E' + lines[i] = '\t'.join(parts) + '\n' + updated_count += 1 + except ValueError: + continue + + if updated_count > 0: + with open(self.index_file, 'w') as f: + 
f.writelines(lines) + + logger.info(f"Marked {updated_count} certificates as expired") + + except Exception as e: + logger.error(f"Failed to cleanup expired certificates: {e}") + + def get_statistics(self): + """Get CA statistics""" + try: + stats = { + 'total_certificates': 0, + 'valid_certificates': 0, + 'revoked_certificates': 0, + 'expired_certificates': 0 + } + + with open(self.index_file, 'r') as f: + for line in f: + if line.strip(): + parts = line.strip().split('\t') + if len(parts) >= 1: + stats['total_certificates'] += 1 + status = parts[0] + if status == 'V': + stats['valid_certificates'] += 1 + elif status == 'R': + stats['revoked_certificates'] += 1 + elif status == 'E': + stats['expired_certificates'] += 1 + + return stats + + except Exception as e: + logger.error(f"Failed to get CA statistics: {e}") + return { + 'total_certificates': 0, + 'valid_certificates': 0, + 'revoked_certificates': 0, + 'expired_certificates': 0 + } + diff --git a/services/vpn_server_manager.py b/services/vpn_server_manager.py new file mode 100644 index 0000000000000000000000000000000000000000..b4e6bfcd9d4dec61b979ec064f6a36289448b89b --- /dev/null +++ b/services/vpn_server_manager.py @@ -0,0 +1,969 @@ +""" +VPN Server Manager + +Manages multiple VPN protocols including OpenVPN, IKEv2/IPSec, and WireGuard +""" + +import os +import subprocess +import logging +import json +import tempfile +import shutil +from pathlib import Path +from typing import Dict, List, Optional, Tuple +from datetime import datetime, timedelta + +logger = logging.getLogger(__name__) + +class VPNServerManager: + """Manages VPN server configurations and operations""" + + def __init__(self, config_dir='/etc/vpn-server'): + self.config_dir = config_dir + self.openvpn_dir = os.path.join(config_dir, 'openvpn') + self.ipsec_dir = os.path.join(config_dir, 'ipsec') + self.wireguard_dir = os.path.join(config_dir, 'wireguard') + + # Create directories + for directory in [self.config_dir, self.openvpn_dir, 
self.ipsec_dir, self.wireguard_dir]: + os.makedirs(directory, mode=0o755, exist_ok=True) + + # Server configuration + self.server_ip = self._get_server_ip() + self.openvpn_port = 1194 + self.ipsec_port = 500 + self.wireguard_port = 51820 + + # Initialize servers + self._initialize_servers() + + def _get_server_ip(self): + """Get server public IP address""" + try: + # Try to get public IP + result = subprocess.run(['curl', '-s', 'ifconfig.me'], + capture_output=True, text=True, timeout=10) + if result.returncode == 0 and result.stdout.strip(): + return result.stdout.strip() + except: + pass + + # Fallback to local IP + try: + result = subprocess.run(['hostname', '-I'], + capture_output=True, text=True) + if result.returncode == 0: + return result.stdout.strip().split()[0] + except: + pass + + return '127.0.0.1' + + def _initialize_servers(self): + """Initialize VPN server configurations""" + try: + logger.info("Initializing VPN servers") + + # Initialize OpenVPN + self._setup_openvpn_server() + + # Initialize IKEv2/IPSec + self._setup_ipsec_server() + + # Initialize WireGuard + self._setup_wireguard_server() + + logger.info("VPN servers initialized successfully") + + except Exception as e: + logger.error(f"Failed to initialize VPN servers: {e}") + raise + + def _setup_openvpn_server(self): + """Set up OpenVPN server configuration""" + try: + logger.info("Setting up OpenVPN server") + + # Create server configuration + server_conf = f""" +# OpenVPN Server Configuration +port {self.openvpn_port} +proto udp +dev tun + +# Certificates and keys +ca /etc/vpn-ca/ca.crt +cert /etc/vpn-server/openvpn/server.crt +key /etc/vpn-server/openvpn/server.key +dh /etc/vpn-server/openvpn/dh.pem + +# Network configuration +server 10.8.0.0 255.255.255.0 +ifconfig-pool-persist /var/log/openvpn/ipp.txt + +# Client configuration +client-config-dir /etc/vpn-server/openvpn/ccd +client-to-client + +# Security +tls-auth /etc/vpn-server/openvpn/ta.key 0 +cipher AES-256-GCM +auth SHA256 
+tls-version-min 1.2 + +# Networking +push "redirect-gateway def1 bypass-dhcp" +push "dhcp-option DNS 8.8.8.8" +push "dhcp-option DNS 8.8.4.4" + +# Connection settings +keepalive 10 120 +comp-lzo +persist-key +persist-tun + +# Logging +status /var/log/openvpn/openvpn-status.log +log-append /var/log/openvpn/openvpn.log +verb 3 +mute 20 + +# Management interface +management 127.0.0.1 7505 + +# User/group +user nobody +group nogroup +""" + + server_conf_path = os.path.join(self.openvpn_dir, 'server.conf') + with open(server_conf_path, 'w') as f: + f.write(server_conf) + + # Create client config directory + ccd_dir = os.path.join(self.openvpn_dir, 'ccd') + os.makedirs(ccd_dir, mode=0o755, exist_ok=True) + + # Create log directory + os.makedirs('/var/log/openvpn', mode=0o755, exist_ok=True) + + # Generate server certificates if they don't exist + self._generate_openvpn_certificates() + + logger.info("OpenVPN server configuration created") + + except Exception as e: + logger.error(f"Failed to setup OpenVPN server: {e}") + raise + + def _generate_openvpn_certificates(self): + """Generate OpenVPN server certificates""" + try: + server_cert_path = os.path.join(self.openvpn_dir, 'server.crt') + server_key_path = os.path.join(self.openvpn_dir, 'server.key') + dh_path = os.path.join(self.openvpn_dir, 'dh.pem') + ta_key_path = os.path.join(self.openvpn_dir, 'ta.key') + + # Generate server certificate using our CA + from services.certificate_authority import CertificateAuthority + ca = CertificateAuthority() + + if not os.path.exists(server_cert_path): + logger.info("Generating OpenVPN server certificate") + + cert_data = ca.generate_client_certificate( + username='openvpn-server', + email='server@vpn.local', + validity_days=3650 + ) + + with open(server_cert_path, 'wb') as f: + f.write(cert_data['certificate']) + + with open(server_key_path, 'wb') as f: + f.write(cert_data['private_key']) + + os.chmod(server_key_path, 0o600) + + # Generate DH parameters + if not 
os.path.exists(dh_path): + logger.info("Generating DH parameters (this may take a while)") + subprocess.run(['openssl', 'dhparam', '-out', dh_path, '2048'], + check=True) + + # Generate TLS auth key + if not os.path.exists(ta_key_path): + logger.info("Generating TLS auth key") + subprocess.run(['openvpn', '--genkey', '--secret', ta_key_path], + check=True) + os.chmod(ta_key_path, 0o600) + + except Exception as e: + logger.error(f"Failed to generate OpenVPN certificates: {e}") + raise + + def _setup_ipsec_server(self): + """Set up IKEv2/IPSec server configuration""" + try: + logger.info("Setting up IKEv2/IPSec server") + + # strongSwan configuration + ipsec_conf = f""" +# strongSwan IPSec configuration +config setup + charondebug="ike 1, knl 1, cfg 0" + uniqueids=no + +conn ikev2-vpn + auto=add + compress=no + type=tunnel + keyexchange=ikev2 + fragmentation=yes + forceencaps=yes + + # Server side + left=%any + leftid=@vpn.server.local + leftcert=server.crt + leftsendcert=always + leftsubnet=0.0.0.0/0 + + # Client side + right=%any + rightid=%any + rightsourceip=10.10.10.0/24 + rightdns=8.8.8.8,8.8.4.4 + + # Security + ike=chacha20poly1305-sha256-curve25519-prfsha256,aes256gcm16-sha384-prfsha384-ecp384,aes256-sha1-modp1024,aes128-sha1-modp1024,3des-sha1-modp1024! + esp=chacha20poly1305-sha256,aes256gcm16-ecp384,aes256-sha256,aes256-sha1,3des-sha1! 
+ + # Authentication + leftauth=pubkey + rightauth=eap-mschapv2 + rightsendcert=never + eap_identity=%identity + + # Other settings + dpdaction=clear + dpddelay=300s + rekey=no +""" + + # strongSwan configuration + ipsec_conf_path = os.path.join(self.ipsec_dir, 'ipsec.conf') + with open(ipsec_conf_path, 'w') as f: + f.write(ipsec_conf) + + # strongSwan secrets configuration + ipsec_secrets = """ +# strongSwan IPSec secrets +: RSA "server.key" +""" + + ipsec_secrets_path = os.path.join(self.ipsec_dir, 'ipsec.secrets') + with open(ipsec_secrets_path, 'w') as f: + f.write(ipsec_secrets) + + os.chmod(ipsec_secrets_path, 0o600) + + # Generate IPSec certificates + self._generate_ipsec_certificates() + + logger.info("IKEv2/IPSec server configuration created") + + except Exception as e: + logger.error(f"Failed to setup IKEv2/IPSec server: {e}") + raise + + def _generate_ipsec_certificates(self): + """Generate IKEv2/IPSec server certificates""" + try: + server_cert_path = os.path.join(self.ipsec_dir, 'server.crt') + server_key_path = os.path.join(self.ipsec_dir, 'server.key') + ca_cert_path = os.path.join(self.ipsec_dir, 'ca.crt') + + # Copy CA certificate + if not os.path.exists(ca_cert_path): + shutil.copy('/etc/vpn-ca/ca.crt', ca_cert_path) + + # Generate server certificate using our CA + if not os.path.exists(server_cert_path): + logger.info("Generating IKEv2 server certificate") + + from services.certificate_authority import CertificateAuthority + ca = CertificateAuthority() + + cert_data = ca.generate_client_certificate( + username='ipsec-server', + email='ipsec@vpn.local', + validity_days=3650 + ) + + with open(server_cert_path, 'wb') as f: + f.write(cert_data['certificate']) + + with open(server_key_path, 'wb') as f: + f.write(cert_data['private_key']) + + os.chmod(server_key_path, 0o600) + + except Exception as e: + logger.error(f"Failed to generate IKEv2 certificates: {e}") + raise + + def _setup_wireguard_server(self): + """Set up WireGuard server 
configuration""" + try: + logger.info("Setting up WireGuard server") + + # Generate server keys if they don't exist + server_private_key_path = os.path.join(self.wireguard_dir, 'server_private.key') + server_public_key_path = os.path.join(self.wireguard_dir, 'server_public.key') + + if not os.path.exists(server_private_key_path): + # Generate private key + result = subprocess.run(['wg', 'genkey'], capture_output=True, text=True) + if result.returncode == 0: + private_key = result.stdout.strip() + + with open(server_private_key_path, 'w') as f: + f.write(private_key) + os.chmod(server_private_key_path, 0o600) + + # Generate public key + result = subprocess.run(['wg', 'pubkey'], + input=private_key, + capture_output=True, text=True) + if result.returncode == 0: + public_key = result.stdout.strip() + + with open(server_public_key_path, 'w') as f: + f.write(public_key) + + # Read keys + with open(server_private_key_path, 'r') as f: + server_private_key = f.read().strip() + + with open(server_public_key_path, 'r') as f: + server_public_key = f.read().strip() + + # WireGuard server configuration + wg_conf = f""" +[Interface] +PrivateKey = {server_private_key} +Address = 10.13.13.1/24 +ListenPort = {self.wireguard_port} +PostUp = iptables -A FORWARD -i %i -j ACCEPT; iptables -A FORWARD -o %i -j ACCEPT; iptables -t nat -A POSTROUTING -o eth0 -j MASQUERADE +PostDown = iptables -D FORWARD -i %i -j ACCEPT; iptables -D FORWARD -o %i -j ACCEPT; iptables -t nat -D POSTROUTING -o eth0 -j MASQUERADE + +# Client configurations will be added here +""" + + wg_conf_path = os.path.join(self.wireguard_dir, 'wg0.conf') + with open(wg_conf_path, 'w') as f: + f.write(wg_conf) + + logger.info("WireGuard server configuration created") + + except Exception as e: + logger.error(f"Failed to setup WireGuard server: {e}") + # WireGuard might not be available, continue without it + logger.warning("WireGuard setup failed, continuing without WireGuard support") + + def start_openvpn_server(self): + 
"""Start OpenVPN server""" + try: + logger.info("Starting OpenVPN server") + + # Enable IP forwarding + subprocess.run(['sudo', 'sysctl', '-w', 'net.ipv4.ip_forward=1'], check=True) + + # Start OpenVPN + config_path = os.path.join(self.openvpn_dir, 'server.conf') + cmd = ['sudo', 'openvpn', '--config', config_path, '--daemon'] + + result = subprocess.run(cmd, capture_output=True, text=True) + if result.returncode == 0: + logger.info("OpenVPN server started successfully") + return True + else: + logger.error(f"Failed to start OpenVPN server: {result.stderr}") + return False + + except Exception as e: + logger.error(f"Error starting OpenVPN server: {e}") + return False + + def start_ipsec_server(self): + """Start IKEv2/IPSec server""" + try: + logger.info("Starting IKEv2/IPSec server") + + # Enable IP forwarding + subprocess.run(['sudo', 'sysctl', '-w', 'net.ipv4.ip_forward=1'], check=True) + + # Start strongSwan + result = subprocess.run(['sudo', 'systemctl', 'start', 'strongswan-starter'], + capture_output=True, text=True) + if result.returncode == 0: + logger.info("IKEv2/IPSec server started successfully") + return True + else: + logger.error(f"Failed to start IKEv2/IPSec server: {result.stderr}") + return False + + except Exception as e: + logger.error(f"Error starting IKEv2/IPSec server: {e}") + return False + + def start_wireguard_server(self): + """Start WireGuard server""" + try: + logger.info("Starting WireGuard server") + + # Enable IP forwarding + subprocess.run(['sudo', 'sysctl', '-w', 'net.ipv4.ip_forward=1'], check=True) + + # Start WireGuard + config_path = os.path.join(self.wireguard_dir, 'wg0.conf') + result = subprocess.run(['sudo', 'wg-quick', 'up', config_path], + capture_output=True, text=True) + if result.returncode == 0: + logger.info("WireGuard server started successfully") + return True + else: + logger.error(f"Failed to start WireGuard server: {result.stderr}") + return False + + except Exception as e: + logger.error(f"Error starting 
WireGuard server: {e}") + return False + + def stop_openvpn_server(self): + """Stop OpenVPN server""" + try: + subprocess.run(['sudo', 'pkill', '-f', 'openvpn.*server.conf'], check=False) + logger.info("OpenVPN server stopped") + return True + except Exception as e: + logger.error(f"Error stopping OpenVPN server: {e}") + return False + + def stop_ipsec_server(self): + """Stop IKEv2/IPSec server""" + try: + subprocess.run(['sudo', 'systemctl', 'stop', 'strongswan-starter'], check=False) + logger.info("IKEv2/IPSec server stopped") + return True + except Exception as e: + logger.error(f"Error stopping IKEv2/IPSec server: {e}") + return False + + def stop_wireguard_server(self): + """Stop WireGuard server""" + try: + config_path = os.path.join(self.wireguard_dir, 'wg0.conf') + subprocess.run(['sudo', 'wg-quick', 'down', config_path], check=False) + logger.info("WireGuard server stopped") + return True + except Exception as e: + logger.error(f"Error stopping WireGuard server: {e}") + return False + + def get_server_status(self): + """Get status of all VPN servers""" + status = { + 'openvpn': self._check_openvpn_status(), + 'ipsec': self._check_ipsec_status(), + 'wireguard': self._check_wireguard_status() + } + return status + + def _check_openvpn_status(self): + """Check OpenVPN server status""" + try: + result = subprocess.run(['pgrep', '-f', 'openvpn.*server.conf'], + capture_output=True, text=True) + if result.returncode == 0: + # Get connected clients + try: + with open('/var/log/openvpn/openvpn-status.log', 'r') as f: + content = f.read() + # Parse client connections + clients = [] + in_client_section = False + for line in content.split('\n'): + if line.startswith('Common Name,Real Address'): + in_client_section = True + continue + elif line.startswith('ROUTING TABLE'): + in_client_section = False + break + elif in_client_section and line.strip(): + parts = line.split(',') + if len(parts) >= 4: + clients.append({ + 'common_name': parts[0], + 'real_address': 
parts[1], + 'bytes_received': parts[2], + 'bytes_sent': parts[3], + 'connected_since': parts[4] if len(parts) > 4 else 'Unknown' + }) + + return { + 'running': True, + 'port': self.openvpn_port, + 'clients': clients, + 'client_count': len(clients) + } + except: + return { + 'running': True, + 'port': self.openvpn_port, + 'clients': [], + 'client_count': 0 + } + else: + return { + 'running': False, + 'port': self.openvpn_port, + 'clients': [], + 'client_count': 0 + } + except Exception as e: + logger.error(f"Error checking OpenVPN status: {e}") + return { + 'running': False, + 'port': self.openvpn_port, + 'clients': [], + 'client_count': 0, + 'error': str(e) + } + + def _check_ipsec_status(self): + """Check IKEv2/IPSec server status""" + try: + result = subprocess.run(['sudo', 'systemctl', 'is-active', 'strongswan-starter'], + capture_output=True, text=True) + running = result.stdout.strip() == 'active' + + clients = [] + if running: + # Get connected clients + try: + result = subprocess.run(['sudo', 'swanctl', '--list-sas'], + capture_output=True, text=True) + if result.returncode == 0: + # Parse strongSwan output for connected clients + # This is a simplified parser + for line in result.stdout.split('\n'): + if 'ESTABLISHED' in line: + clients.append({ + 'connection': line.strip(), + 'status': 'ESTABLISHED' + }) + except: + pass + + return { + 'running': running, + 'port': self.ipsec_port, + 'clients': clients, + 'client_count': len(clients) + } + except Exception as e: + logger.error(f"Error checking IKEv2/IPSec status: {e}") + return { + 'running': False, + 'port': self.ipsec_port, + 'clients': [], + 'client_count': 0, + 'error': str(e) + } + + def _check_wireguard_status(self): + """Check WireGuard server status""" + try: + result = subprocess.run(['sudo', 'wg', 'show'], + capture_output=True, text=True) + if result.returncode == 0 and 'wg0' in result.stdout: + # Parse WireGuard output for connected clients + clients = [] + current_peer = None + + for line in 
result.stdout.split('\n'): + line = line.strip() + if line.startswith('peer:'): + if current_peer: + clients.append(current_peer) + current_peer = {'public_key': line.split(':', 1)[1].strip()} + elif current_peer and line.startswith('endpoint:'): + current_peer['endpoint'] = line.split(':', 1)[1].strip() + elif current_peer and line.startswith('allowed ips:'): + current_peer['allowed_ips'] = line.split(':', 1)[1].strip() + elif current_peer and line.startswith('latest handshake:'): + current_peer['latest_handshake'] = line.split(':', 1)[1].strip() + + if current_peer: + clients.append(current_peer) + + return { + 'running': True, + 'port': self.wireguard_port, + 'clients': clients, + 'client_count': len(clients) + } + else: + return { + 'running': False, + 'port': self.wireguard_port, + 'clients': [], + 'client_count': 0 + } + except Exception as e: + logger.error(f"Error checking WireGuard status: {e}") + return { + 'running': False, + 'port': self.wireguard_port, + 'clients': [], + 'client_count': 0, + 'error': str(e) + } + + def generate_client_config(self, username: str, protocol: str, user_data: dict) -> dict: + """Generate client configuration for specified protocol""" + try: + if protocol.lower() == 'openvpn': + return self._generate_openvpn_client_config(username, user_data) + elif protocol.lower() in ['ikev2', 'ipsec']: + return self._generate_ipsec_client_config(username, user_data) + elif protocol.lower() == 'wireguard': + return self._generate_wireguard_client_config(username, user_data) + else: + raise ValueError(f"Unsupported protocol: {protocol}") + + except Exception as e: + logger.error(f"Failed to generate client config for {username}: {e}") + raise + + def _generate_openvpn_client_config(self, username: str, user_data: dict) -> dict: + """Generate OpenVPN client configuration""" + try: + # Generate client certificate + from services.certificate_authority import CertificateAuthority + ca = CertificateAuthority() + + cert_data = 
ca.generate_client_certificate( + username=username, + email=user_data.get('email', f'{username}@vpn.local') + ) + + # Read CA certificate and TLS auth key + with open('/etc/vpn-ca/ca.crt', 'r') as f: + ca_cert = f.read() + + with open(os.path.join(self.openvpn_dir, 'ta.key'), 'r') as f: + ta_key = f.read() + + # Generate client configuration + client_config = f""" +# OpenVPN Client Configuration for {username} +client +dev tun +proto udp +remote {self.server_ip} {self.openvpn_port} +resolv-retry infinite +nobind +persist-key +persist-tun +remote-cert-tls server +cipher AES-256-GCM +auth SHA256 +tls-version-min 1.2 +key-direction 1 +verb 3 + + +{ca_cert} + + + +{cert_data['certificate'].decode('utf-8')} + + + +{cert_data['private_key'].decode('utf-8')} + + + +{ta_key} + +""" + + return { + 'config': client_config, + 'filename': f'{username}-openvpn.ovpn', + 'protocol': 'OpenVPN', + 'server_ip': self.server_ip, + 'server_port': self.openvpn_port + } + + except Exception as e: + logger.error(f"Failed to generate OpenVPN client config: {e}") + raise + + def _generate_ipsec_client_config(self, username: str, user_data: dict) -> dict: + """Generate IKEv2/IPSec client configuration""" + try: + # Read CA certificate + with open('/etc/vpn-ca/ca.crt', 'r') as f: + ca_cert = f.read() + + # Generate client certificate + from services.certificate_authority import CertificateAuthority + ca = CertificateAuthority() + + cert_data = ca.generate_client_certificate( + username=username, + email=user_data.get('email', f'{username}@vpn.local') + ) + + # For mobile clients, we'll provide instructions and certificates + config_instructions = f""" +IKEv2/IPSec VPN Configuration for {username} + +Server Details: +- Server Address: {self.server_ip} +- Remote ID: vpn.server.local +- Local ID: {username}@vpn.local + +Authentication: +- Type: Certificate +- Install the CA certificate and client certificate on your device + +For iOS: +1. Install the CA certificate first +2. 
Install the client certificate +3. Go to Settings > General > VPN & Device Management +4. Add VPN Configuration > IKEv2 +5. Enter server details above + +For Android: +1. Install the CA certificate in Settings > Security > Encryption & credentials +2. Install the client certificate +3. Go to Settings > Network & internet > VPN +4. Add VPN profile with IKEv2/IPSec PSK + +For Windows: +1. Install certificates in Certificate Manager +2. Add VPN connection with IKEv2 protocol +3. Use certificate authentication + +For macOS: +1. Install certificates in Keychain Access +2. Add VPN configuration in Network preferences +3. Select IKEv2 and certificate authentication +""" + + return { + 'config': config_instructions, + 'ca_certificate': ca_cert, + 'client_certificate': cert_data['certificate'].decode('utf-8'), + 'client_private_key': cert_data['private_key'].decode('utf-8'), + 'filename': f'{username}-ikev2-config.txt', + 'protocol': 'IKEv2/IPSec', + 'server_ip': self.server_ip, + 'server_port': self.ipsec_port + } + + except Exception as e: + logger.error(f"Failed to generate IKEv2 client config: {e}") + raise + + def _generate_wireguard_client_config(self, username: str, user_data: dict) -> dict: + """Generate WireGuard client configuration""" + try: + # Generate client keys + result = subprocess.run(['wg', 'genkey'], capture_output=True, text=True) + if result.returncode != 0: + raise Exception("Failed to generate WireGuard private key") + + client_private_key = result.stdout.strip() + + result = subprocess.run(['wg', 'pubkey'], + input=client_private_key, + capture_output=True, text=True) + if result.returncode != 0: + raise Exception("Failed to generate WireGuard public key") + + client_public_key = result.stdout.strip() + + # Read server public key + with open(os.path.join(self.wireguard_dir, 'server_public.key'), 'r') as f: + server_public_key = f.read().strip() + + # Assign client IP (simple allocation) + client_ip = f"10.13.13.{hash(username) % 200 + 10}" + + # 
Generate client configuration + client_config = f""" +[Interface] +PrivateKey = {client_private_key} +Address = {client_ip}/32 +DNS = 8.8.8.8 + +[Peer] +PublicKey = {server_public_key} +Endpoint = {self.server_ip}:{self.wireguard_port} +AllowedIPs = 0.0.0.0/0 +PersistentKeepalive = 25 +""" + + # Add client to server configuration + self._add_wireguard_peer(username, client_public_key, client_ip) + + return { + 'config': client_config, + 'filename': f'{username}-wireguard.conf', + 'protocol': 'WireGuard', + 'server_ip': self.server_ip, + 'server_port': self.wireguard_port, + 'client_public_key': client_public_key + } + + except Exception as e: + logger.error(f"Failed to generate WireGuard client config: {e}") + raise + + def _add_wireguard_peer(self, username: str, client_public_key: str, client_ip: str): + """Add WireGuard peer to server configuration""" + try: + wg_conf_path = os.path.join(self.wireguard_dir, 'wg0.conf') + + peer_config = f""" +# Client: {username} +[Peer] +PublicKey = {client_public_key} +AllowedIPs = {client_ip}/32 +""" + + with open(wg_conf_path, 'a') as f: + f.write(peer_config) + + # If WireGuard is running, add peer dynamically + try: + subprocess.run(['sudo', 'wg', 'set', 'wg0', 'peer', client_public_key, + 'allowed-ips', f'{client_ip}/32'], check=True) + except: + # Server might not be running, that's okay + pass + + except Exception as e: + logger.error(f"Failed to add WireGuard peer: {e}") + raise + + def revoke_client_access(self, username: str, protocol: str): + """Revoke client access for specified protocol""" + try: + if protocol.lower() == 'openvpn': + return self._revoke_openvpn_client(username) + elif protocol.lower() in ['ikev2', 'ipsec']: + return self._revoke_ipsec_client(username) + elif protocol.lower() == 'wireguard': + return self._revoke_wireguard_client(username) + else: + raise ValueError(f"Unsupported protocol: {protocol}") + + except Exception as e: + logger.error(f"Failed to revoke client access for {username}: {e}") 
+ raise + + def _revoke_openvpn_client(self, username: str): + """Revoke OpenVPN client certificate""" + try: + from services.certificate_authority import CertificateAuthority + ca = CertificateAuthority() + + # Find certificate by username and revoke it + certificates = ca.list_certificates() + for cert in certificates: + if username in cert.get('subject', ''): + ca.revoke_certificate(cert['serial_number']) + logger.info(f"Revoked OpenVPN certificate for {username}") + return True + + logger.warning(f"No OpenVPN certificate found for {username}") + return False + + except Exception as e: + logger.error(f"Failed to revoke OpenVPN client: {e}") + raise + + def _revoke_ipsec_client(self, username: str): + """Revoke IKEv2/IPSec client certificate""" + try: + # Same as OpenVPN since we use the same CA + return self._revoke_openvpn_client(username) + + except Exception as e: + logger.error(f"Failed to revoke IKEv2 client: {e}") + raise + + def _revoke_wireguard_client(self, username: str): + """Remove WireGuard client from server configuration""" + try: + wg_conf_path = os.path.join(self.wireguard_dir, 'wg0.conf') + + # Read current configuration + with open(wg_conf_path, 'r') as f: + lines = f.readlines() + + # Remove client section + new_lines = [] + skip_section = False + + for line in lines: + if line.strip() == f'# Client: {username}': + skip_section = True + continue + elif line.strip().startswith('# Client:') and skip_section: + skip_section = False + elif line.strip().startswith('[Peer]') and not skip_section: + new_lines.append(line) + elif not skip_section: + new_lines.append(line) + + # Write updated configuration + with open(wg_conf_path, 'w') as f: + f.writelines(new_lines) + + logger.info(f"Removed WireGuard client {username}") + return True + + except Exception as e: + logger.error(f"Failed to revoke WireGuard client: {e}") + raise + + def get_server_info(self): + """Get server information for client configuration""" + return { + 'server_ip': 
self.server_ip, + 'protocols': { + 'openvpn': { + 'port': self.openvpn_port, + 'protocol': 'UDP' + }, + 'ikev2': { + 'port': self.ipsec_port, + 'protocol': 'UDP' + }, + 'wireguard': { + 'port': self.wireguard_port, + 'protocol': 'UDP' + } + } + } + diff --git a/static/app.js b/static/app.js new file mode 100644 index 0000000000000000000000000000000000000000..fd4c453479196626a25a6af1a638ce454f2ccf2f --- /dev/null +++ b/static/app.js @@ -0,0 +1,1573 @@ +/** + * Virtual ISP Stack Frontend Application + * Native JavaScript implementation for managing the Virtual ISP Stack + */ + +class VirtualISPApp { + constructor() { + this.apiBase = +'/api +'; + this.currentSection = +'dashboard +'; + this.refreshInterval = null; + this.charts = {}; + + this.init(); + } + + async init() { + this.setupEventListeners(); + this.setupNavigation(); + this.setupCharts(); + await this.loadInitialData(); + this.startAutoRefresh(); + + // Hide loading overlay + this.hideLoading(); + + console.log( +'Virtual ISP Stack App initialized +'); + } + + setupEventListeners() { + // Navigation + document.querySelectorAll( +'.nav-item +').forEach(item => { + item.addEventListener( +'click +', (e) => { + const section = e.currentTarget.dataset.section; + this.navigateToSection(section); + }); + }); + + // Tab buttons + document.querySelectorAll( +'.tab-btn +').forEach(btn => { + btn.addEventListener( +'click +', (e) => { + const tab = e.currentTarget.dataset.tab; + this.switchTab(tab); + }); + }); + + // Modal close buttons + document.querySelectorAll( +'.close +').forEach(btn => { + btn.addEventListener( +'click +', (e) => { + const modal = e.currentTarget.closest( +'.modal +'); + this.closeModal(modal.id); + }); + }); + + // Click outside modal to close + document.querySelectorAll( +'.modal +').forEach(modal => { + modal.addEventListener( +'click +', (e) => { + if (e.target === modal) { + this.closeModal(modal.id); + } + }); + }); + + // Form submissions + document.getElementById( +'addRuleForm 
+')?.addEventListener( +'submit +', (e) => { + e.preventDefault(); + this.addFirewallRule(); + }); + } + + setupNavigation() { + // Set initial active section + this.navigateToSection( +'dashboard +'); + } + + navigateToSection(section) { + // Update navigation + document.querySelectorAll( +'.nav-item +').forEach(item => { + item.classList.remove( +'active +'); + }); + document.querySelector(`[data-section=\n'${section}\n']`).classList.add( +'active +'); + + // Update content + document.querySelectorAll( +'.content-section +').forEach(sec => { + sec.classList.remove( +'active +'); + }); + document.getElementById(section).classList.add( +'active +'); + + this.currentSection = section; + + // Load section-specific data + this.loadSectionData(section); + } + + switchTab(tab) { + const container = event.target.closest( +'.router-tabs +'); + + // Update tab buttons + container.querySelectorAll( +'.tab-btn +').forEach(btn => { + btn.classList.remove( +'active +'); + }); + event.target.classList.add( +'active +'); + + // Update tab content + container.querySelectorAll( +'.tab-pane +').forEach(pane => { + pane.classList.remove( +'active +'); + }); + container.querySelector(`#${tab}`).classList.add( +'active +'); + + // Load tab-specific data + this.loadTabData(tab); + } + + async loadInitialData() { + try { + await Promise.all([ + this.loadSystemStatus(), + this.loadDashboardData(), + this.loadConfiguration() + ]); + } catch (error) { + console.error( +'Error loading initial data: +', error); + this.showToast( +'Error loading initial data +', +'error +'); + } + } + + async loadSectionData(section) { + try { + switch (section) { + case +'dashboard +': + await this.loadDashboardData(); + break; + case +'dhcp +': + await this.loadDHCPData(); + break; + case +'nat +': + await this.loadNATData(); + break; + case +'firewall +': + await this.loadFirewallData(); + break; + case +'router +': + await this.loadRouterData(); + break; + case +'bridge +': + await this.loadBridgeData(); 
+ break; + case +'sessions +': + await this.loadSessionsData(); + break; + case +'logs +': + await this.loadLogsData(); + break; + case +'vpn +': + await this.loadVPNData(); + break; + case +'config +': + await this.loadConfiguration(); + break; + } + } catch (error) { + console.error(`Error loading ${section} data:`, error); + this.showToast(`Error loading ${section} data`, +'error +'); + } + } + + async loadTabData(tab) { + try { + switch (tab) { + case +'routes +': + await this.loadRoutingTable(); + break; + case +'interfaces +': + await this.loadInterfaces(); + break; + case +'arp +': + await this.loadARPTable(); + break; + } + } catch (error) { + console.error(`Error loading ${tab} data:`, error); + } + } + + // API Methods + async apiCall(endpoint, options = {}) { + const url = `${this.apiBase}${endpoint}`; + const defaultOptions = { + headers: { + +'Content-Type +': +'application/json +', + }, + }; + + const response = await fetch(url, { ...defaultOptions, ...options }); + + if (!response.ok) { + throw new Error(`API call failed: ${response.status} ${response.statusText}`); + } + + return await response.json(); + } + + // System Status + async loadSystemStatus() { + try { + const response = await this.apiCall( +'/status +'); + this.updateSystemStatus(response.system_status); + } catch (error) { + console.error( +'Error loading system status: +', error); + this.updateSystemStatusOffline(); + } + } + + updateSystemStatus(status) { + const indicator = document.getElementById( +'systemStatus +'); + const components = status.components; + + // Update header status + const allOnline = Object.values(components).every(c => c === true); + indicator.className = `status-indicator ${allOnline ? +'online +' : +'offline +'}`; + indicator.querySelector( +'span +').textContent = allOnline ? 
+'All Systems Online +' : +'System Issues +'; + + // Update component status + this.updateComponentStatus(components); + } + + updateSystemStatusOffline() { + const indicator = document.getElementById( +'systemStatus +'); + indicator.className = +'status-indicator offline +'; + indicator.querySelector( +'span +').textContent = +'System Offline +'; + } + + updateComponentStatus(components) { + const container = document.getElementById( +'componentStatus +'); + container.innerHTML = +'' +; + + Object.entries(components).forEach(([name, status]) => { + const item = document.createElement( +'div +'); + item.className = `component-item ${status ? +'online +' : +'offline +'}`; + item.innerHTML = ` + ${this.formatComponentName(name)} + + ${status ? +'Online +' : +'Offline +'} + + `; + container.appendChild(item); + }); + } + + formatComponentName(name) { + return name.replace(/_/g, +' +').replace(/\b\w/g, l => l.toUpperCase()); + } + + // Dashboard Data + async loadDashboardData() { + try { + const [statusResponse, statsResponse] = await Promise.all([ + this.apiCall( +'/status +'), + this.apiCall( +'/stats +') + ]); + + this.updateDashboardStats(statusResponse.system_status.stats); + this.updateCharts(statsResponse.stats); + } catch (error) { + console.error( +'Error loading dashboard data: +', error); + } + } + + updateDashboardStats(stats) { + document.getElementById( +'dhcpLeaseCount +').textContent = stats.dhcp_leases || 0; + document.getElementById( +'natSessionCount +').textContent = stats.nat_sessions || 0; + document.getElementById( +'firewallRuleCount +').textContent = stats.firewall_rules || 0; + document.getElementById( +'bridgeClientCount +').textContent = stats.bridge_clients || 0; + } + + // DHCP Data + async loadDHCPData() { + try { + const response = await this.apiCall( +'/dhcp/leases +'); + this.updateDHCPTable(response.leases); + } catch (error) { + console.error( +'Error loading DHCP data: +', error); + this.updateDHCPTable([]); + } + } + + 
updateDHCPTable(leases) { + const tbody = document.getElementById( +'dhcpTableBody +'); + tbody.innerHTML = +'' +; + + leases.forEach(lease => { + const row = document.createElement( +'tr +'); + const remaining = Math.max(0, lease.lease_time - (Date.now() / 1000 - lease.lease_start)); + + row.innerHTML = ` + ${lease.mac_address} + ${lease.ip_address} + ${this.formatDuration(lease.lease_time)} + ${this.formatDuration(remaining)} + ${lease.state} + + + + `; + tbody.appendChild(row); + }); + } + + async releaseDHCPLease(macAddress) { + try { + await this.apiCall(`/dhcp/leases/${macAddress}`, { method: +'DELETE +' }); + this.showToast( +'DHCP lease released successfully +', +'success +'); + await this.loadDHCPData(); + } catch (error) { + console.error( +'Error releasing DHCP lease: +', error); + this.showToast( +'Error releasing DHCP lease +', +'error +'); + } + } + + // NAT Data + async loadNATData() { + try { + const [sessionsResponse, statsResponse] = await Promise.all([ + this.apiCall( +'/nat/sessions +'), + this.apiCall( +'/nat/stats +') + ]); + + this.updateNATStats(statsResponse.stats); + this.updateNATTable(sessionsResponse.sessions); + } catch (error) { + console.error( +'Error loading NAT data: +', error); + this.updateNATStats({}); + this.updateNATTable([]); + } + } + + updateNATStats(stats) { + document.getElementById( +'natActiveSessions +').textContent = stats.active_sessions || 0; + document.getElementById( +'natPortUtilization +').textContent = + `${Math.round((stats.ports_used / stats.total_ports) * 100) || 0}%`; + document.getElementById( +'natBytesTranslated +').textContent = + this.formatBytes(stats.bytes_translated || 0); + } + + updateNATTable(sessions) { + const tbody = document.getElementById( +'natTableBody +'); + tbody.innerHTML = +'' +; + + sessions.forEach(session => { + const row = document.createElement( +'tr +'); + row.innerHTML = ` + ${session.virtual_ip}:${session.virtual_port} + ${session.real_ip}:${session.real_port} + 
${session.host_ip}:${session.host_port} + ${session.protocol} + ${this.formatDuration(session.duration)} + ${this.formatBytes(session.bytes_in)} / ${this.formatBytes(session.bytes_out)} + + + + `; + tbody.appendChild(row); + }); + } + + // Firewall Data + async loadFirewallData() { + try { + const [rulesResponse, logsResponse, statsResponse] = await Promise.all([ + this.apiCall( +'/firewall/rules +'), + this.apiCall( +'/firewall/logs?limit=50 +'), + this.apiCall( +'/firewall/stats +') + ]); + + this.updateFirewallTable(rulesResponse.rules); + } catch (error) { + console.error( +'Error loading firewall data: +', error); + this.updateFirewallTable([]); + } + } + + updateFirewallTable(rules) { + const tbody = document.getElementById( +'firewallTableBody +'); + tbody.innerHTML = +'' +; + + rules.forEach(rule => { + const row = document.createElement( +'tr +'); + row.innerHTML = ` + ${rule.priority} + ${rule.rule_id} + ${rule.action} + ${rule.direction} + ${rule.source_ip || +'Any +'}${rule.source_port ? +': +' + rule.source_port : +'' +} + ${rule.dest_ip || +'Any +'}${rule.dest_port ? +': +' + rule.dest_port : +'' +} + ${rule.protocol || +'Any +'} + + ${rule.hit_count || 0} + ${rule.enabled ? 
+'Enabled +' : +'Disabled +'} + + + + `; + tbody.appendChild(row); + }); + } + + async deleteFirewallRule(ruleId) { + try { + await this.apiCall(`/firewall/rules/${ruleId}`, { method: +'DELETE +' }); + this.showToast( +'Firewall rule deleted successfully +', +'success +'); + await this.loadFirewallData(); + } catch (error) { + console.error( +'Error deleting firewall rule: +', error); + this.showToast( +'Error deleting firewall rule +', +'error +'); + } + } + + // Router Data + async loadRouterData() { + await Promise.all([ + this.loadRoutingTable(), + this.loadInterfaces(), + this.loadARPTable() + ]); + } + + async loadRoutingTable() { + try { + const response = await this.apiCall( +'/router/routes +'); + this.updateRoutingTable(response.routes); + } catch (error) { + console.error( +'Error loading routing table: +', error); + this.updateRoutingTable([]); + } + } + + updateRoutingTable(routes) { + const tbody = document.getElementById( +'routesTableBody +'); + tbody.innerHTML = +'' +; + + routes.forEach(route => { + const row = document.createElement( +'tr +'); + row.innerHTML = ` + ${route.destination} + ${route.gateway || +'Direct +'} + ${route.interface} + ${route.metric} + ${route.type} + ${route.use_count || 0} + ${route.last_used ? 
new Date(route.last_used * 1000).toLocaleString() : +'Never +'} + `; + tbody.appendChild(row); + }); + } + + async loadInterfaces() { + try { + const response = await this.apiCall( +'/router/interfaces +'); + this.updateInterfacesTable(response.interfaces); + } catch (error) { + console.error( +'Error loading interfaces: +', error); + this.updateInterfacesTable([]); + } + } + + updateInterfacesTable(interfaces) { + const tbody = document.getElementById( +'interfacesTableBody +'); + tbody.innerHTML = +'' +; + + interfaces.forEach(iface => { + const row = document.createElement( +'tr +'); + row.innerHTML = ` + ${iface.name} + ${iface.ip_address || +'N/A +'} + ${iface.network || +'N/A +'} + ${iface.mtu || +'N/A +'} + ${iface.status} + + + + `; + tbody.appendChild(row); + }); + } + + async toggleInterfaceStatus(interfaceName) { + try { + const response = await this.apiCall(`/router/interfaces/${interfaceName}/toggle`, { method: +'POST +' }); + if (response.status === +'success +') { + this.showToast(`Interface ${interfaceName} status toggled successfully`, +'success +'); + await this.loadInterfaces(); + } else { + this.showToast(`Failed to toggle interface ${interfaceName} status: ` + response.message, +'error +'); + } + } catch (error) { + console.error(`Error toggling interface ${interfaceName} status:`, error); + this.showToast(`Error toggling interface ${interfaceName} status`, +'error +'); + } + } + + async loadARPTable() { + try { + const response = await this.apiCall( +'/router/arp +'); + this.updateARPTable(response.arp_entries); + } catch (error) { + console.error( +'Error loading ARP table: +', error); + this.updateARPTable([]); + } + } + + updateARPTable(arpEntries) { + const tbody = document.getElementById( +'arpTableBody +'); + tbody.innerHTML = +'' +; + + arpEntries.forEach(entry => { + const row = document.createElement( +'tr +'); + row.innerHTML = ` + ${entry.ip_address} + ${entry.mac_address} + + + + `; + tbody.appendChild(row); + }); + } + + async 
deleteARPEntry(ipAddress) { + try { + await this.apiCall(`/router/arp/${ipAddress}`, { method: +'DELETE +' }); + this.showToast( +'ARP entry deleted successfully +', +'success +'); + await this.loadARPTable(); + } catch (error) { + console.error( +'Error deleting ARP entry: +', error); + this.showToast( +'Error deleting ARP entry +', +'error +'); + } + } + + // Bridge Data + async loadBridgeData() { + try { + const response = await this.apiCall( +'/bridge/status +'); + this.updateBridgeStatus(response.status); + this.updateBridgeClientsTable(response.clients); + } catch (error) { + console.error( +'Error loading bridge data: +', error); + this.updateBridgeStatusOffline(); + this.updateBridgeClientsTable([]); + } + } + + updateBridgeStatus(status) { + document.getElementById( +'websocketPort +').textContent = status.websocket_port; + document.getElementById( +'websocketStatus +').textContent = status.websocket_status; + document.getElementById( +'tcpPort +').textContent = status.tcp_port; + document.getElementById( +'tcpStatus +').textContent = status.tcp_status; + } + + updateBridgeStatusOffline() { + document.getElementById( +'websocketPort +').textContent = +'N/A +'; + document.getElementById( +'websocketStatus +').textContent = +'Offline +'; + document.getElementById( +'tcpPort +').textContent = +'N/A +'; + document.getElementById( +'tcpStatus +').textContent = +'Offline +'; + } + + updateBridgeClientsTable(clients) { + const tbody = document.getElementById( +'bridgeClientsTableBody +'); + tbody.innerHTML = +'' +; + + clients.forEach(client => { + const row = document.createElement( +'tr +'); + row.innerHTML = ` + ${client.client_id} + ${client.type} + ${client.remote_address} + ${new Date(client.connected_time * 1000).toLocaleString()} + ${this.formatBytes(client.packets_in)} / ${this.formatBytes(client.packets_out)} + ${this.formatBytes(client.bytes_in)} / ${this.formatBytes(client.bytes_out)} + + + + `; + tbody.appendChild(row); + }); + } + + async 
disconnectBridgeClient(clientId) { + try { + await this.apiCall(`/bridge/clients/${clientId}`, { method: +'DELETE +' }); + this.showToast( +'Bridge client disconnected successfully +', +'success +'); + await this.loadBridgeData(); + } catch (error) { + console.error( +'Error disconnecting bridge client: +', error); + this.showToast( +'Error disconnecting bridge client +', +'error +'); + } + } + + // Sessions Data + async loadSessionsData() { + try { + const response = await this.apiCall( +'/sessions +'); + this.updateSessionsTable(response.sessions); + } catch (error) { + console.error( +'Error loading sessions data: +', error); + this.updateSessionsTable([]); + } + } + + updateSessionsTable(sessions) { + const tbody = document.getElementById( +'sessionsTableBody +'); + tbody.innerHTML = +'' +; + + sessions.forEach(session => { + const row = document.createElement( +'tr +'); + row.innerHTML = ` + ${session.session_id} + ${session.type} + ${session.state} + ${session.virtual_ip}:${session.virtual_port} + ${session.real_ip}:${session.real_port} + ${session.protocol} + ${this.formatDuration(session.duration)} + ${this.formatDuration(session.idle_time)} + ${JSON.stringify(session.metrics)} + `; + tbody.appendChild(row); + }); + } + + // Logs Data + async loadLogsData() { + try { + const response = await this.apiCall( +'/logs +'); + this.updateLogsTable(response.logs); + } catch (error) { + console.error( +'Error loading logs data: +', error); + this.updateLogsTable([]); + } + } + + updateLogsTable(logs) { + const container = document.getElementById( +'logContainer +'); + container.innerHTML = +'' +; + + logs.forEach(log => { + const entry = document.createElement( +'div +'); + entry.className = +'log-entry +'; + entry.innerHTML = ` + ${log.level} +
+
${new Date(log.timestamp * 1000).toLocaleString()}
+
${log.message}
+ ${log.metadata ? `
${JSON.stringify(log.metadata)}
` : +'' +} +
+ `; + container.appendChild(entry); + }); + } + + async filterLogs() { + const level = document.getElementById( +'logLevelFilter +').value; + const search = document.getElementById( +'logSearch +').value; + try { + const response = await this.apiCall(`/logs?level=${level}&search=${search}`); + this.updateLogsTable(response.logs); + } catch (error) { + console.error( +'Error filtering logs: +', error); + this.showToast( +'Error filtering logs +', +'error +'); + } + } + + async clearLogs() { + try { + await this.apiCall( +'/logs/clear +', { method: +'POST +' }); + this.showToast( +'Logs cleared successfully +', +'success +'); + await this.loadLogsData(); + } catch (error) { + console.error( +'Error clearing logs: +', error); + this.showToast( +'Error clearing logs +', +'error +'); + } + } + + // VPN Management Functions + async loadVPNData() { + try { + await Promise.all([ + this.loadVPNStatus(), + this.loadVPNClients() + ]); + } catch (error) { + console.error( +'Error loading VPN data: +', error); + } + } + + async loadVPNStatus() { + try { + const response = await this.apiCall( +'/openvpn/status +'); + this.updateVPNStatus(response.status); + } catch (error) { + console.error( +'Error loading VPN status: +', error); + this.updateVPNStatusOffline(); + } + } + + updateVPNStatus(status) { + document.getElementById( +'vpnServerStatus +').textContent = status.is_running ? +'Running +' : +'Stopped +'; + document.getElementById( +'vpnServerIp +').textContent = status.server_ip || +'- +'; + document.getElementById( +'vpnServerPort +').textContent = status.server_port || +'- +'; + document.getElementById( +'vpnConnectedClients +').textContent = status.connected_clients || 0; + document.getElementById( +'vpnUptime +').textContent = status.uptime ? 
this.formatDuration(status.uptime) : +'- +'; + document.getElementById( +'vpnBytesReceived +').textContent = this.formatBytes(status.total_bytes_received || 0); + document.getElementById( +'vpnBytesSent +').textContent = this.formatBytes(status.total_bytes_sent || 0); + + // Update button states + const startBtn = document.getElementById( +'startVpnBtn +'); + const stopBtn = document.getElementById( +'stopVpnBtn +'); + + if (status.is_running) { + startBtn.disabled = true; + stopBtn.disabled = false; + startBtn.classList.add( +'disabled +'); + stopBtn.classList.remove( +'disabled +'); + } else { + startBtn.disabled = false; + stopBtn.disabled = true; + startBtn.classList.remove( +'disabled +'); + stopBtn.classList.add( +'disabled +'); + } + } + + updateVPNStatusOffline() { + document.getElementById( +'vpnServerStatus +').textContent = +'Unknown +'; + document.getElementById( +'vpnServerIp +').textContent = +'- +'; + document.getElementById( +'vpnServerPort +').textContent = +'- +'; + document.getElementById( +'vpnConnectedClients +').textContent = +'0 +'; + document.getElementById( +'vpnUptime +').textContent = +'- +'; + document.getElementById( +'vpnBytesReceived +').textContent = +'0 +'; + document.getElementById( +'vpnBytesSent +').textContent = +'0 +'; + + // Enable both buttons when status is unknown + const startBtn = document.getElementById( +'startVpnBtn +'); + const stopBtn = document.getElementById( +'stopVpnBtn +'); + startBtn.disabled = false; + stopBtn.disabled = false; + startBtn.classList.remove( +'disabled +'); + stopBtn.classList.remove( +'disabled +'); + } + + async loadVPNClients() { + try { + const response = await this.apiCall( +'/openvpn/clients +'); + this.updateVPNClientsTable(response.clients); + } catch (error) { + console.error( +'Error loading VPN clients: +', error); + this.updateVPNClientsTable([]); + } + } + + updateVPNClientsTable(clients) { + const tbody = document.getElementById( +'vpnClientsTableBody +'); + tbody.innerHTML = +'' 
+; + + clients.forEach(client => { + const row = document.createElement( +'tr +'); + const connectedSince = new Date(client.connected_at * 1000).toLocaleString(); + + row.innerHTML = ` + ${client.client_id} + ${client.common_name} + ${client.ip_address} + ${connectedSince} + ${this.formatBytes(client.bytes_received)} + ${this.formatBytes(client.bytes_sent)} + ${client.status} + + + + `; + tbody.appendChild(row); + }); + } + + async startVpnServer() { + try { + this.showLoading(); + const response = await this.apiCall( +'/openvpn/start +', { method: +'POST +' }); + + if (response.status === +'success +') { + this.showToast( +'VPN server started successfully +', +'success +'); + await this.loadVPNStatus(); + } else { + this.showToast( +'Failed to start VPN server: +' + response.message, +'error +'); + } + } catch (error) { + console.error( +'Error starting VPN server: +', error); + this.showToast( +'Error starting VPN server +', +'error +'); + } finally { + this.hideLoading(); + } + } + + async stopVpnServer() { + try { + this.showLoading(); + const response = await this.apiCall( +'/openvpn/stop +', { method: +'POST +' }); + + if (response.status === +'success +') { + this.showToast( +'VPN server stopped successfully +', +'success +'); + await this.loadVPNStatus(); + await this.loadVPNClients(); // Refresh clients list + } else { + this.showToast( +'Failed to stop VPN server: +' + response.message, +'error +'); + } + } catch (error) { + console.error( +'Error stopping VPN server: +', error); + this.showToast( +'Error stopping VPN server +', +'error +'); + } + } + + async disconnectVPNClient(clientId) { + try { + const response = await this.apiCall(`/openvpn/clients/${clientId}`, { method: +'DELETE +' }); + + if (response.status === +'success +') { + this.showToast( +'VPN client disconnected successfully +', +'success +'); + await this.loadVPNClients(); + await this.loadVPNStatus(); // Update client count + } else { + this.showToast( +'Failed to disconnect VPN client: 
+' + response.message, +'error +'); + } + } catch (error) { + console.error( +'Error disconnecting VPN client: +', error); + this.showToast( +'Error disconnecting VPN client +', +'error +'); + } + } + + async generateClientConfig() { + try { + const clientName = document.getElementById( +'clientName +').value; + const serverIp = document.getElementById( +'serverIp +').value; + + if (!clientName || !serverIp) { + this.showToast( +'Please fill in all fields +', +'error +'); + return; + } + + this.showLoading(); + const response = await this.apiCall( +'/openvpn/generate-config +', { + method: +'POST +', + body: JSON.stringify({ + client_name: clientName, + server_ip: serverIp + }) + }); + + if (response.status === +'success +') { + // Create and download the config file + const blob = new Blob([response.config], { type: +'text/plain +' }); + const url = window.URL.createObjectURL(blob); + const a = document.createElement( +'a +'); + a.href = url; + a.download = `${clientName}.ovpn`; + document.body.appendChild(a); + a.click(); + document.body.removeChild(a); + window.URL.revokeObjectURL(url); + + this.showToast( +'VPN client configuration generated and downloaded +', +'success +'); + this.closeModal( +'generateConfigModal +'); + + // Clear form + document.getElementById( +'generateConfigForm +').reset(); + } else { + this.showToast( +'Failed to generate VPN config: +' + response.message, +'error +'); + } + } catch (error) { + console.error( +'Error generating VPN config: +', error); + this.showToast( +'Error generating VPN config +', +'error +'); + } finally { + this.hideLoading(); + } + } + + refreshVpnStatus() { + this.loadVPNStatus(); + } + + refreshVpnClients() { + this.loadVPNClients(); + } + + showGenerateConfigModal() { + this.showModal( +'generateConfigModal +'); + } + +} + +document.addEventListener("DOMContentLoaded", () => { + window.app = new VirtualISPApp(); +}); + +// Global functions for direct HTML calls +function refreshData() { + 
app.loadInitialData(); +} + +function refreshDHCPLeases() { + app.loadDHCPData(); +} + +function releaseDHCPLease(macAddress) { + app.releaseDHCPLease(macAddress); +} + +function refreshNATSessions() { + app.loadNATData(); +} + +function closeNATSession(sessionId) { + app.closeNATSession(sessionId); +} + +function showAddRuleModal() { + app.showModal( +'addRuleModal +'); +} + +function refreshFirewallRules() { + app.loadFirewallData(); +} + +function deleteFirewallRule(ruleId) { + app.deleteFirewallRule(ruleId); +} + +function startVpnServer() { + app.startVpnServer(); +} + +function stopVpnServer() { + app.stopVpnServer(); +} + +function disconnectVPNClient(clientId) { + app.disconnectVPNClient(clientId); +} + +function generateClientConfig() { + app.generateClientConfig(); +} + +function refreshVpnStatus() { + app.refreshVpnStatus(); +} + +function refreshVpnClients() { + app.refreshVpnClients(); +} + +function showGenerateConfigModal() { + app.showGenerateConfigModal(); +} + +function filterLogs() { + app.filterLogs(); +} + +function searchLogs() { + app.searchLogs(); +} + +function clearLogs() { + app.clearLogs(); +} + diff --git a/static/auth.css b/static/auth.css new file mode 100644 index 0000000000000000000000000000000000000000..9f619f81a137575fb5e95fa23a42aaf6e8c6e871 --- /dev/null +++ b/static/auth.css @@ -0,0 +1,716 @@ +/* Authentication Page Styles */ + +* { + margin: 0; + padding: 0; + box-sizing: border-box; +} + +body { + font-family: 'Segoe UI', Tahoma, Geneva, Verdana, sans-serif; + background: linear-gradient(135deg, #667eea 0%, #764ba2 100%); + min-height: 100vh; + overflow-x: hidden; +} + +/* Background Animation */ +.background-animation { + position: fixed; + top: 0; + left: 0; + width: 100%; + height: 100%; + z-index: -1; + overflow: hidden; +} + +.floating-shapes { + position: relative; + width: 100%; + height: 100%; +} + +.shape { + position: absolute; + background: rgba(255, 255, 255, 0.1); + border-radius: 50%; + animation: float 20s 
infinite linear; +} + +.shape-1 { + width: 80px; + height: 80px; + top: 20%; + left: 10%; + animation-delay: 0s; +} + +.shape-2 { + width: 120px; + height: 120px; + top: 60%; + left: 80%; + animation-delay: 5s; +} + +.shape-3 { + width: 60px; + height: 60px; + top: 80%; + left: 20%; + animation-delay: 10s; +} + +.shape-4 { + width: 100px; + height: 100px; + top: 30%; + left: 70%; + animation-delay: 15s; +} + +.shape-5 { + width: 140px; + height: 140px; + top: 10%; + left: 50%; + animation-delay: 8s; +} + +@keyframes float { + 0% { + transform: translateY(0px) rotate(0deg); + opacity: 0.7; + } + 50% { + transform: translateY(-100px) rotate(180deg); + opacity: 0.3; + } + 100% { + transform: translateY(0px) rotate(360deg); + opacity: 0.7; + } +} + +/* Container */ +.auth-container { + min-height: 100vh; + display: flex; + flex-direction: column; +} + +/* Header */ +.auth-header { + display: flex; + justify-content: space-between; + align-items: center; + padding: 20px 40px; + background: rgba(255, 255, 255, 0.1); + backdrop-filter: blur(10px); + border-bottom: 1px solid rgba(255, 255, 255, 0.2); +} + +.logo { + display: flex; + align-items: center; + gap: 12px; + color: white; +} + +.logo i { + font-size: 32px; + color: #ffd700; +} + +.logo h1 { + font-size: 28px; + font-weight: 700; + margin: 0; +} + +.auth-nav { + display: flex; + gap: 12px; +} + +.nav-btn { + padding: 10px 20px; + border: 2px solid rgba(255, 255, 255, 0.3); + background: transparent; + color: white; + border-radius: 25px; + cursor: pointer; + font-weight: 500; + transition: all 0.3s ease; +} + +.nav-btn:hover, +.nav-btn.active { + background: rgba(255, 255, 255, 0.2); + border-color: rgba(255, 255, 255, 0.5); + transform: translateY(-2px); +} + +/* Main Content */ +.auth-main { + flex: 1; + display: flex; + justify-content: center; + align-items: center; + padding: 40px 20px; +} + +/* Form Container */ +.auth-form-container { + width: 100%; + max-width: 450px; + animation: slideIn 0.5s ease-out; +} 
+ +@keyframes slideIn { + from { + opacity: 0; + transform: translateY(30px); + } + to { + opacity: 1; + transform: translateY(0); + } +} + +.auth-form { + background: rgba(255, 255, 255, 0.95); + backdrop-filter: blur(20px); + border-radius: 20px; + padding: 40px; + box-shadow: 0 20px 40px rgba(0, 0, 0, 0.1); + border: 1px solid rgba(255, 255, 255, 0.3); +} + +/* Form Header */ +.form-header { + text-align: center; + margin-bottom: 30px; +} + +.form-header h2 { + font-size: 28px; + color: #333; + margin-bottom: 8px; + font-weight: 700; +} + +.form-header p { + color: #666; + font-size: 16px; +} + +/* Form Groups */ +.form-group { + margin-bottom: 20px; +} + +.form-group label { + display: block; + margin-bottom: 8px; + color: #333; + font-weight: 500; + font-size: 14px; +} + +.input-group { + position: relative; + display: flex; + align-items: center; +} + +.input-group i { + position: absolute; + left: 15px; + color: #666; + z-index: 2; +} + +.input-group input { + width: 100%; + padding: 15px 15px 15px 45px; + border: 2px solid #e1e5e9; + border-radius: 12px; + font-size: 16px; + transition: all 0.3s ease; + background: white; +} + +.input-group input:focus { + outline: none; + border-color: #667eea; + box-shadow: 0 0 0 3px rgba(102, 126, 234, 0.1); +} + +.password-toggle { + position: absolute; + right: 15px; + background: none; + border: none; + color: #666; + cursor: pointer; + padding: 5px; + z-index: 2; +} + +.password-toggle:hover { + color: #333; +} + +/* Field Hints */ +.field-hint { + font-size: 12px; + color: #666; + margin-top: 5px; +} + +/* Password Strength */ +.password-strength { + margin-top: 8px; +} + +.strength-bar { + height: 4px; + background: #e1e5e9; + border-radius: 2px; + overflow: hidden; + margin-bottom: 5px; +} + +.strength-fill { + height: 100%; + width: 0%; + background: #e74c3c; + transition: all 0.3s ease; + border-radius: 2px; +} + +.strength-text { + font-size: 12px; + color: #666; +} + +/* Form Options */ +.form-options { + 
display: flex; + justify-content: space-between; + align-items: center; + margin-bottom: 25px; +} + +.checkbox-container { + display: flex; + align-items: center; + cursor: pointer; + font-size: 14px; + color: #333; +} + +.checkbox-container input { + display: none; +} + +.checkmark { + width: 18px; + height: 18px; + border: 2px solid #e1e5e9; + border-radius: 4px; + margin-right: 8px; + position: relative; + transition: all 0.3s ease; +} + +.checkbox-container input:checked + .checkmark { + background: #667eea; + border-color: #667eea; +} + +.checkbox-container input:checked + .checkmark::after { + content: '✓'; + position: absolute; + top: 50%; + left: 50%; + transform: translate(-50%, -50%); + color: white; + font-size: 12px; + font-weight: bold; +} + +.forgot-password { + color: #667eea; + text-decoration: none; + font-size: 14px; + font-weight: 500; +} + +.forgot-password:hover { + text-decoration: underline; +} + +/* Subscription Options */ +.subscription-options { + display: flex; + flex-direction: column; + gap: 12px; +} + +.radio-option { + display: flex; + align-items: center; + padding: 15px; + border: 2px solid #e1e5e9; + border-radius: 12px; + cursor: pointer; + transition: all 0.3s ease; +} + +.radio-option:hover { + border-color: #667eea; + background: rgba(102, 126, 234, 0.05); +} + +.radio-option input { + display: none; +} + +.radio-custom { + width: 20px; + height: 20px; + border: 2px solid #e1e5e9; + border-radius: 50%; + margin-right: 12px; + position: relative; + transition: all 0.3s ease; +} + +.radio-option input:checked + .radio-custom { + border-color: #667eea; + background: #667eea; +} + +.radio-option input:checked + .radio-custom::after { + content: ''; + position: absolute; + top: 50%; + left: 50%; + transform: translate(-50%, -50%); + width: 8px; + height: 8px; + background: white; + border-radius: 50%; +} + +.option-content { + display: flex; + flex-direction: column; +} + +.option-content strong { + color: #333; + font-weight: 600; 
+ margin-bottom: 2px; +} + +.option-content span { + color: #666; + font-size: 12px; +} + +/* Buttons */ +.auth-btn { + width: 100%; + padding: 15px; + border: none; + border-radius: 12px; + font-size: 16px; + font-weight: 600; + cursor: pointer; + transition: all 0.3s ease; + position: relative; + overflow: hidden; +} + +.auth-btn.primary { + background: linear-gradient(135deg, #667eea 0%, #764ba2 100%); + color: white; +} + +.auth-btn.primary:hover { + transform: translateY(-2px); + box-shadow: 0 10px 25px rgba(102, 126, 234, 0.3); +} + +.auth-btn.primary:active { + transform: translateY(0); +} + +.auth-btn:disabled { + opacity: 0.7; + cursor: not-allowed; + transform: none !important; +} + +.btn-loader { + position: absolute; + top: 50%; + left: 50%; + transform: translate(-50%, -50%); +} + +/* Form Footer */ +.form-footer { + text-align: center; + margin-top: 25px; + padding-top: 20px; + border-top: 1px solid #e1e5e9; +} + +.form-footer p { + color: #666; + font-size: 14px; +} + +.form-footer a { + color: #667eea; + text-decoration: none; + font-weight: 500; +} + +.form-footer a:hover { + text-decoration: underline; +} + +/* Alert Messages */ +.alert-container { + position: fixed; + top: 20px; + right: 20px; + z-index: 1000; + max-width: 400px; +} + +.alert { + padding: 15px 20px; + border-radius: 12px; + margin-bottom: 10px; + display: flex; + align-items: center; + gap: 12px; + animation: slideInRight 0.3s ease-out; + box-shadow: 0 10px 25px rgba(0, 0, 0, 0.1); +} + +@keyframes slideInRight { + from { + opacity: 0; + transform: translateX(100%); + } + to { + opacity: 1; + transform: translateX(0); + } +} + +.alert.success { + background: #d4edda; + color: #155724; + border-left: 4px solid #28a745; +} + +.alert.error { + background: #f8d7da; + color: #721c24; + border-left: 4px solid #dc3545; +} + +.alert.warning { + background: #fff3cd; + color: #856404; + border-left: 4px solid #ffc107; +} + +.alert.info { + background: #d1ecf1; + color: #0c5460; + 
border-left: 4px solid #17a2b8; +} + +.alert i { + font-size: 18px; +} + +.alert-close { + margin-left: auto; + background: none; + border: none; + font-size: 18px; + cursor: pointer; + opacity: 0.7; + color: inherit; +} + +.alert-close:hover { + opacity: 1; +} + +/* Features Section */ +.features-section { + background: rgba(255, 255, 255, 0.1); + backdrop-filter: blur(10px); + padding: 60px 20px; + border-top: 1px solid rgba(255, 255, 255, 0.2); +} + +.features-container { + max-width: 1200px; + margin: 0 auto; + text-align: center; +} + +.features-container h3 { + color: white; + font-size: 32px; + margin-bottom: 40px; + font-weight: 700; +} + +.features-grid { + display: grid; + grid-template-columns: repeat(auto-fit, minmax(250px, 1fr)); + gap: 30px; +} + +.feature-item { + background: rgba(255, 255, 255, 0.1); + backdrop-filter: blur(10px); + padding: 30px 20px; + border-radius: 15px; + border: 1px solid rgba(255, 255, 255, 0.2); + transition: all 0.3s ease; +} + +.feature-item:hover { + transform: translateY(-5px); + background: rgba(255, 255, 255, 0.15); +} + +.feature-item i { + font-size: 48px; + color: #ffd700; + margin-bottom: 20px; +} + +.feature-item h4 { + color: white; + font-size: 20px; + margin-bottom: 10px; + font-weight: 600; +} + +.feature-item p { + color: rgba(255, 255, 255, 0.8); + font-size: 14px; + line-height: 1.5; +} + +/* Responsive Design */ +@media (max-width: 768px) { + .auth-header { + padding: 15px 20px; + flex-direction: column; + gap: 15px; + } + + .logo h1 { + font-size: 24px; + } + + .auth-form { + padding: 30px 25px; + margin: 0 10px; + } + + .form-header h2 { + font-size: 24px; + } + + .form-options { + flex-direction: column; + gap: 15px; + align-items: flex-start; + } + + .features-container h3 { + font-size: 28px; + } + + .features-grid { + grid-template-columns: 1fr; + gap: 20px; + } + + .alert-container { + left: 20px; + right: 20px; + max-width: none; + } +} + +@media (max-width: 480px) { + .auth-main { + padding: 20px 
10px; + } + + .auth-form { + padding: 25px 20px; + } + + .input-group input { + padding: 12px 12px 12px 40px; + font-size: 14px; + } + + .auth-btn { + padding: 12px; + font-size: 14px; + } +} + +/* Loading States */ +.loading .btn-text { + opacity: 0; +} + +.loading .btn-loader { + display: block !important; +} + +/* Validation States */ +.input-group.error input { + border-color: #dc3545; + box-shadow: 0 0 0 3px rgba(220, 53, 69, 0.1); +} + +.input-group.success input { + border-color: #28a745; + box-shadow: 0 0 0 3px rgba(40, 167, 69, 0.1); +} + +.error-message { + color: #dc3545; + font-size: 12px; + margin-top: 5px; + display: flex; + align-items: center; + gap: 5px; +} + +.error-message i { + font-size: 12px; +} + diff --git a/static/auth.html b/static/auth.html new file mode 100644 index 0000000000000000000000000000000000000000..67fed1c9fd1d8b5b460dd13d48be54f1d3e31d1c --- /dev/null +++ b/static/auth.html @@ -0,0 +1,269 @@ + + + + + + VPN Service - Authentication + + + + +
+ +
+
+
+
+
+
+
+
+
+ + +
+ + +
+ + +
+ +
+
+
+

Welcome Back

+

Sign in to your VPN account

+
+ +
+
+ +
+ + +
+
+ +
+ +
+ + + +
+
+ +
+ + Forgot password? +
+ + +
+ + +
+
+ + + + + + +
+ + +
+ + +
+
+

Why Choose SecureVPN?

+
+
+ +

Military-Grade Encryption

+

AES-256 encryption protects your data

+
+
+ +

Global Servers

+

Connect from anywhere in the world

+
+
+ +

Lightning Fast

+

Optimized for speed and performance

+
+
+ +

No Logs Policy

+

Your privacy is our priority

+
+
+
+
+
+ + + + + diff --git a/static/auth.js b/static/auth.js new file mode 100644 index 0000000000000000000000000000000000000000..0fa6e06319f1cc8fa1a802a3458dcebd17bfbb4b --- /dev/null +++ b/static/auth.js @@ -0,0 +1,552 @@ +// Authentication JavaScript + +// API Configuration +const API_BASE_URL = '/api'; + +// DOM Elements +let currentForm = 'login'; + +// Initialize page +document.addEventListener('DOMContentLoaded', function() { + initializeAuth(); + setupPasswordStrength(); + setupFormValidation(); +}); + +function initializeAuth() { + // Set initial form state + showLogin(); +} + +function showLogin() { + hideAllForms(); + document.getElementById('loginForm').style.display = 'block'; + setActiveNavButton('loginNavBtn'); + currentForm = 'login'; +} + +function showRegister() { + hideAllForms(); + document.getElementById('registerForm').style.display = 'block'; + setActiveNavButton('registerNavBtn'); + currentForm = 'register'; +} + +function showForgotPassword() { + hideAllForms(); + document.getElementById('forgotPasswordForm').style.display = 'block'; + clearActiveNavButtons(); + currentForm = 'forgot'; +} + +function hideAllForms() { + const forms = ['loginForm', 'registerForm', 'forgotPasswordForm']; + forms.forEach(formId => { + document.getElementById(formId).style.display = 'none'; + }); +} + +function setActiveNavButton(activeId) { + clearActiveNavButtons(); + document.getElementById(activeId).classList.add('active'); +} + +function clearActiveNavButtons() { + document.querySelectorAll('.nav-btn').forEach(btn => { + btn.classList.remove('active'); + }); +} + +// Password visibility toggle +function togglePassword(inputId) { + const input = document.getElementById(inputId); + const button = input.parentElement.querySelector('.password-toggle i'); + + if (input.type === 'password') { + input.type = 'text'; + button.classList.remove('fa-eye'); + button.classList.add('fa-eye-slash'); + } else { + input.type = 'password'; + 
button.classList.remove('fa-eye-slash'); + button.classList.add('fa-eye'); + } +} + +// Password strength checker +function setupPasswordStrength() { + const passwordInput = document.getElementById('registerPassword'); + const strengthBar = document.querySelector('.strength-fill'); + const strengthText = document.querySelector('.strength-text'); + + if (passwordInput) { + passwordInput.addEventListener('input', function() { + const password = this.value; + const strength = calculatePasswordStrength(password); + updatePasswordStrength(strength, strengthBar, strengthText); + }); + } +} + +function calculatePasswordStrength(password) { + let score = 0; + let feedback = []; + + // Length check + if (password.length >= 8) score += 20; + else feedback.push('At least 8 characters'); + + // Uppercase check + if (/[A-Z]/.test(password)) score += 20; + else feedback.push('One uppercase letter'); + + // Lowercase check + if (/[a-z]/.test(password)) score += 20; + else feedback.push('One lowercase letter'); + + // Number check + if (/\d/.test(password)) score += 20; + else feedback.push('One number'); + + // Special character check + if (/[!@#$%^&*(),.?":{}|<>]/.test(password)) score += 20; + else feedback.push('One special character'); + + return { score, feedback }; +} + +function updatePasswordStrength(strength, strengthBar, strengthText) { + const { score, feedback } = strength; + + strengthBar.style.width = score + '%'; + + if (score < 40) { + strengthBar.style.background = '#e74c3c'; + strengthText.textContent = 'Weak password'; + strengthText.style.color = '#e74c3c'; + } else if (score < 80) { + strengthBar.style.background = '#f39c12'; + strengthText.textContent = 'Medium password'; + strengthText.style.color = '#f39c12'; + } else { + strengthBar.style.background = '#27ae60'; + strengthText.textContent = 'Strong password'; + strengthText.style.color = '#27ae60'; + } + + if (feedback.length > 0 && score < 100) { + strengthText.textContent += ' - Missing: ' + 
feedback.join(', '); + } +} + +// Form validation +function setupFormValidation() { + // Real-time validation for username + const usernameInput = document.getElementById('registerUsername'); + if (usernameInput) { + usernameInput.addEventListener('input', validateUsername); + usernameInput.addEventListener('blur', validateUsername); + } + + // Real-time validation for email + const emailInputs = document.querySelectorAll('input[type="email"]'); + emailInputs.forEach(input => { + input.addEventListener('input', validateEmail); + input.addEventListener('blur', validateEmail); + }); + + // Password confirmation validation + const confirmPasswordInput = document.getElementById('confirmPassword'); + if (confirmPasswordInput) { + confirmPasswordInput.addEventListener('input', validatePasswordConfirmation); + confirmPasswordInput.addEventListener('blur', validatePasswordConfirmation); + } +} + +function validateUsername() { + const input = document.getElementById('registerUsername'); + const value = input.value.trim(); + const inputGroup = input.parentElement; + + clearValidationState(inputGroup); + + if (value.length === 0) return; + + if (value.length < 3 || value.length > 80) { + setValidationState(inputGroup, 'error', 'Username must be 3-80 characters'); + return false; + } + + if (!/^[a-zA-Z0-9_-]+$/.test(value)) { + setValidationState(inputGroup, 'error', 'Only letters, numbers, hyphens, and underscores allowed'); + return false; + } + + setValidationState(inputGroup, 'success'); + return true; +} + +function validateEmail() { + const input = this; + const value = input.value.trim(); + const inputGroup = input.parentElement; + + clearValidationState(inputGroup); + + if (value.length === 0) return; + + const emailRegex = /^[a-zA-Z0-9._%+-]+@[a-zA-Z0-9.-]+\.[a-zA-Z]{2,}$/; + + if (!emailRegex.test(value)) { + setValidationState(inputGroup, 'error', 'Please enter a valid email address'); + return false; + } + + setValidationState(inputGroup, 'success'); + return true; 
+} + +function validatePasswordConfirmation() { + const passwordInput = document.getElementById('registerPassword'); + const confirmInput = document.getElementById('confirmPassword'); + const inputGroup = confirmInput.parentElement; + + clearValidationState(inputGroup); + + if (confirmInput.value.length === 0) return; + + if (passwordInput.value !== confirmInput.value) { + setValidationState(inputGroup, 'error', 'Passwords do not match'); + return false; + } + + setValidationState(inputGroup, 'success'); + return true; +} + +function setValidationState(inputGroup, state, message = '') { + inputGroup.classList.remove('error', 'success'); + inputGroup.classList.add(state); + + // Remove existing error message + const existingError = inputGroup.parentElement.querySelector('.error-message'); + if (existingError) { + existingError.remove(); + } + + // Add error message if provided + if (message && state === 'error') { + const errorDiv = document.createElement('div'); + errorDiv.className = 'error-message'; + errorDiv.innerHTML = ` ${message}`; + inputGroup.parentElement.appendChild(errorDiv); + } +} + +function clearValidationState(inputGroup) { + inputGroup.classList.remove('error', 'success'); + const existingError = inputGroup.parentElement.querySelector('.error-message'); + if (existingError) { + existingError.remove(); + } +} + +// Form submission handlers +async function handleLogin(event) { + event.preventDefault(); + + const form = event.target; + const formData = new FormData(form); + const loginData = { + login: formData.get('login'), + password: formData.get('password') + }; + + const button = document.getElementById('loginBtn'); + setButtonLoading(button, true); + + try { + const response = await fetch(`${API_BASE_URL}/login`, { + method: 'POST', + headers: { + 'Content-Type': 'application/json', + }, + body: JSON.stringify(loginData) + }); + + const result = await response.json(); + + if (response.ok) { + // Store tokens + localStorage.setItem('auth_token', 
result.auth_token); + localStorage.setItem('refresh_token', result.refresh_token); + localStorage.setItem('user_data', JSON.stringify(result.user)); + + showAlert('success', 'Login successful! Redirecting to dashboard...'); + + // Redirect to dashboard + setTimeout(() => { + window.location.href = '/dashboard.html'; + }, 1500); + + } else { + showAlert('error', result.error || 'Login failed'); + } + + } catch (error) { + console.error('Login error:', error); + showAlert('error', 'Network error. Please try again.'); + } finally { + setButtonLoading(button, false); + } +} + +async function handleRegister(event) { + event.preventDefault(); + + const form = event.target; + const formData = new FormData(form); + + // Validate form + if (!validateRegistrationForm(form)) { + showAlert('error', 'Please fix the errors in the form'); + return; + } + + const registerData = { + username: formData.get('username'), + email: formData.get('email'), + password: formData.get('password'), + subscription_type: formData.get('subscriptionType') + }; + + const button = document.getElementById('registerBtn'); + setButtonLoading(button, true); + + try { + const response = await fetch(`${API_BASE_URL}/register`, { + method: 'POST', + headers: { + 'Content-Type': 'application/json', + }, + body: JSON.stringify(registerData) + }); + + const result = await response.json(); + + if (response.ok) { + // Store tokens + localStorage.setItem('auth_token', result.auth_token); + localStorage.setItem('refresh_token', result.refresh_token); + localStorage.setItem('user_data', JSON.stringify(result.user)); + + showAlert('success', 'Account created successfully! Redirecting to dashboard...'); + + // Redirect to dashboard + setTimeout(() => { + window.location.href = '/dashboard.html'; + }, 1500); + + } else { + showAlert('error', result.error || 'Registration failed'); + } + + } catch (error) { + console.error('Registration error:', error); + showAlert('error', 'Network error. 
Please try again.'); + } finally { + setButtonLoading(button, false); + } +} + +async function handleForgotPassword(event) { + event.preventDefault(); + + const form = event.target; + const formData = new FormData(form); + const email = formData.get('email'); + + const button = document.getElementById('forgotPasswordBtn'); + setButtonLoading(button, true); + + try { + const response = await fetch(`${API_BASE_URL}/forgot-password`, { + method: 'POST', + headers: { + 'Content-Type': 'application/json', + }, + body: JSON.stringify({ email }) + }); + + const result = await response.json(); + + if (response.ok) { + showAlert('success', 'Password reset instructions sent to your email'); + + // Show reset token for development (remove in production) + if (result.reset_token) { + showAlert('info', `Development: Reset token is ${result.reset_token}`); + } + + // Switch back to login form + setTimeout(() => { + showLogin(); + }, 3000); + + } else { + showAlert('error', result.error || 'Failed to send reset instructions'); + } + + } catch (error) { + console.error('Forgot password error:', error); + showAlert('error', 'Network error. 
Please try again.'); + } finally { + setButtonLoading(button, false); + } +} + +function validateRegistrationForm(form) { + let isValid = true; + + // Validate username + const usernameInput = form.querySelector('#registerUsername'); + if (!validateUsername.call(usernameInput)) { + isValid = false; + } + + // Validate email + const emailInput = form.querySelector('#registerEmail'); + if (!validateEmail.call(emailInput)) { + isValid = false; + } + + // Validate password strength + const passwordInput = form.querySelector('#registerPassword'); + const strength = calculatePasswordStrength(passwordInput.value); + if (strength.score < 80) { + const inputGroup = passwordInput.parentElement; + setValidationState(inputGroup, 'error', 'Password is too weak'); + isValid = false; + } + + // Validate password confirmation + if (!validatePasswordConfirmation()) { + isValid = false; + } + + // Validate terms agreement + const termsCheckbox = form.querySelector('#agreeTerms'); + if (!termsCheckbox.checked) { + showAlert('error', 'You must agree to the Terms of Service and Privacy Policy'); + isValid = false; + } + + return isValid; +} + +// Utility functions +function setButtonLoading(button, loading) { + const btnText = button.querySelector('.btn-text'); + const btnLoader = button.querySelector('.btn-loader'); + + if (loading) { + button.classList.add('loading'); + button.disabled = true; + btnText.style.opacity = '0'; + btnLoader.style.display = 'block'; + } else { + button.classList.remove('loading'); + button.disabled = false; + btnText.style.opacity = '1'; + btnLoader.style.display = 'none'; + } +} + +function showAlert(type, message, duration = 5000) { + const alertContainer = document.getElementById('alertContainer'); + + const alert = document.createElement('div'); + alert.className = `alert ${type}`; + + const icon = getAlertIcon(type); + + alert.innerHTML = ` + + ${message} + + `; + + alertContainer.appendChild(alert); + + // Auto-remove alert after duration + 
setTimeout(() => { + if (alert.parentElement) { + closeAlert(alert.querySelector('.alert-close')); + } + }, duration); +} + +function getAlertIcon(type) { + const icons = { + success: 'fas fa-check-circle', + error: 'fas fa-exclamation-circle', + warning: 'fas fa-exclamation-triangle', + info: 'fas fa-info-circle' + }; + return icons[type] || icons.info; +} + +function closeAlert(button) { + const alert = button.parentElement; + alert.style.animation = 'slideOutRight 0.3s ease-out forwards'; + setTimeout(() => { + if (alert.parentElement) { + alert.parentElement.removeChild(alert); + } + }, 300); +} + +async function verifyTokenAndRedirect(token) { + try { + const response = await fetch(`${API_BASE_URL}/profile`, { + headers: { + 'Authorization': `Bearer ${token}` + } + }); + + if (response.ok) { + // Token is valid, redirect to dashboard + window.location.href = '/dashboard.html'; + } else { + // Token is invalid, remove from storage + localStorage.removeItem('auth_token'); + localStorage.removeItem('refresh_token'); + localStorage.removeItem('user_data'); + } + } catch (error) { + console.error('Token verification error:', error); + // Remove invalid tokens + localStorage.removeItem('auth_token'); + localStorage.removeItem('refresh_token'); + localStorage.removeItem('user_data'); + } +} + +// Add CSS animation for slide out +const style = document.createElement('style'); +style.textContent = ` + @keyframes slideOutRight { + from { + opacity: 1; + transform: translateX(0); + } + to { + opacity: 0; + transform: translateX(100%); + } + } +`; +document.head.appendChild(style); + diff --git a/static/dashboard.css b/static/dashboard.css new file mode 100644 index 0000000000000000000000000000000000000000..d09ec3e1ebd3b1e0d8c750a2b65cf02996298d16 --- /dev/null +++ b/static/dashboard.css @@ -0,0 +1,1126 @@ +/* Dashboard Styles */ +* { + margin: 0; + padding: 0; + box-sizing: border-box; +} + +body { + font-family: 'Segoe UI', Tahoma, Geneva, Verdana, sans-serif; + 
background-color: #f5f7fa; + color: #333; + line-height: 1.6; +} + +/* Header */ +.dashboard-header { + background: linear-gradient(135deg, #667eea 0%, #764ba2 100%); + color: white; + padding: 1rem 2rem; + display: flex; + justify-content: space-between; + align-items: center; + box-shadow: 0 2px 10px rgba(0,0,0,0.1); + position: sticky; + top: 0; + z-index: 1000; +} + +.header-left .logo { + display: flex; + align-items: center; + gap: 0.5rem; +} + +.header-left .logo i { + font-size: 1.5rem; +} + +.header-left .logo h1 { + font-size: 1.5rem; + font-weight: 600; +} + +.user-info { + display: flex; + align-items: center; + gap: 1rem; + position: relative; +} + +.user-name { + font-weight: 500; +} + +.user-menu-btn { + background: none; + border: none; + color: white; + font-size: 1.5rem; + cursor: pointer; + padding: 0.5rem; + border-radius: 50%; + transition: background-color 0.3s; +} + +.user-menu-btn:hover { + background-color: rgba(255,255,255,0.1); +} + +.user-dropdown { + position: absolute; + top: 100%; + right: 0; + background: white; + color: #333; + border-radius: 8px; + box-shadow: 0 4px 20px rgba(0,0,0,0.15); + min-width: 150px; + display: none; + z-index: 1001; +} + +.user-dropdown.show { + display: block; +} + +.user-dropdown a { + display: block; + padding: 0.75rem 1rem; + text-decoration: none; + color: #333; + transition: background-color 0.3s; +} + +.user-dropdown a:hover { + background-color: #f8f9fa; +} + +.user-dropdown a i { + margin-right: 0.5rem; + width: 16px; +} + +/* Dashboard Container */ +.dashboard-container { + display: flex; + min-height: calc(100vh - 80px); +} + +/* Sidebar */ +.sidebar { + width: 250px; + background: white; + box-shadow: 2px 0 10px rgba(0,0,0,0.1); + position: sticky; + top: 80px; + height: calc(100vh - 80px); + overflow-y: auto; +} + +.sidebar-nav ul { + list-style: none; + padding: 1rem 0; +} + +.nav-item a { + display: flex; + align-items: center; + gap: 0.75rem; + padding: 1rem 1.5rem; + text-decoration: none; 
+ color: #666; + transition: all 0.3s; + border-left: 3px solid transparent; +} + +.nav-item a:hover, +.nav-item.active a { + background-color: #f8f9fa; + color: #667eea; + border-left-color: #667eea; +} + +.nav-item a i { + width: 20px; + text-align: center; +} + +/* Main Content */ +.main-content { + flex: 1; + padding: 2rem; + overflow-y: auto; +} + +.content-section { + display: none; +} + +.content-section.active { + display: block; +} + +.section-header { + margin-bottom: 2rem; +} + +.section-header h2 { + font-size: 2rem; + color: #333; + margin-bottom: 0.5rem; +} + +.section-header p { + color: #666; + font-size: 1.1rem; +} + +.section-header .btn { + float: right; + margin-top: -3rem; +} + +/* Stats Grid */ +.stats-grid { + display: grid; + grid-template-columns: repeat(auto-fit, minmax(250px, 1fr)); + gap: 1.5rem; + margin-bottom: 2rem; +} + +.stat-card { + background: white; + padding: 1.5rem; + border-radius: 12px; + box-shadow: 0 2px 10px rgba(0,0,0,0.1); + display: flex; + align-items: center; + gap: 1rem; + transition: transform 0.3s, box-shadow 0.3s; +} + +.stat-card:hover { + transform: translateY(-2px); + box-shadow: 0 4px 20px rgba(0,0,0,0.15); +} + +.stat-icon { + width: 60px; + height: 60px; + border-radius: 12px; + display: flex; + align-items: center; + justify-content: center; + font-size: 1.5rem; + color: white; + background: linear-gradient(135deg, #667eea 0%, #764ba2 100%); +} + +.stat-content h3 { + font-size: 1.8rem; + font-weight: 600; + color: #333; + margin-bottom: 0.25rem; +} + +.stat-content p { + color: #666; + font-size: 0.9rem; +} + +/* Quick Actions */ +.quick-actions { + background: white; + padding: 1.5rem; + border-radius: 12px; + box-shadow: 0 2px 10px rgba(0,0,0,0.1); + margin-bottom: 2rem; +} + +.quick-actions h3 { + margin-bottom: 1rem; + color: #333; +} + +.action-buttons { + display: flex; + gap: 1rem; + flex-wrap: wrap; +} + +.action-btn { + padding: 0.75rem 1.5rem; + border: none; + border-radius: 8px; + cursor: 
pointer; + font-weight: 500; + text-decoration: none; + display: inline-flex; + align-items: center; + gap: 0.5rem; + transition: all 0.3s; +} + +.action-btn.primary { + background: linear-gradient(135deg, #667eea 0%, #764ba2 100%); + color: white; +} + +.action-btn.primary:hover { + transform: translateY(-1px); + box-shadow: 0 4px 15px rgba(102, 126, 234, 0.4); +} + +.action-btn.secondary { + background: #f8f9fa; + color: #666; + border: 1px solid #e9ecef; +} + +.action-btn.secondary:hover { + background: #e9ecef; + color: #333; +} + +/* Recent Activity */ +.recent-activity { + background: white; + padding: 1.5rem; + border-radius: 12px; + box-shadow: 0 2px 10px rgba(0,0,0,0.1); +} + +.recent-activity h3 { + margin-bottom: 1rem; + color: #333; +} + +.activity-list { + max-height: 300px; + overflow-y: auto; +} + +.activity-item { + display: flex; + align-items: center; + gap: 1rem; + padding: 0.75rem 0; + border-bottom: 1px solid #f1f3f4; +} + +.activity-item:last-child { + border-bottom: none; +} + +.activity-item i { + color: #667eea; + width: 20px; +} + +.activity-item span { + flex: 1; + color: #333; +} + +.activity-item time { + color: #666; + font-size: 0.9rem; +} + +/* Client Filters */ +.client-filters { + background: white; + padding: 1.5rem; + border-radius: 12px; + box-shadow: 0 2px 10px rgba(0,0,0,0.1); + margin-bottom: 1.5rem; + display: flex; + gap: 1.5rem; + flex-wrap: wrap; + align-items: end; +} + +.filter-group { + display: flex; + flex-direction: column; + gap: 0.5rem; +} + +.filter-group label { + font-weight: 500; + color: #333; + font-size: 0.9rem; +} + +.filter-group select, +.filter-group input { + padding: 0.5rem; + border: 1px solid #ddd; + border-radius: 6px; + font-size: 0.9rem; + min-width: 150px; +} + +.filter-group input[type="text"] { + min-width: 200px; +} + +/* Clients Table */ +.clients-table-container { + background: white; + border-radius: 12px; + box-shadow: 0 2px 10px rgba(0,0,0,0.1); + overflow: hidden; +} + +.clients-table { 
+ width: 100%; + border-collapse: collapse; +} + +.clients-table th, +.clients-table td { + padding: 1rem; + text-align: left; + border-bottom: 1px solid #f1f3f4; +} + +.clients-table th { + background: #f8f9fa; + font-weight: 600; + color: #333; +} + +.clients-table tbody tr:hover { + background: #f8f9fa; +} + +.status-badge { + padding: 0.25rem 0.75rem; + border-radius: 20px; + font-size: 0.8rem; + font-weight: 500; + text-transform: uppercase; +} + +.status-badge.active { + background: #d4edda; + color: #155724; +} + +.status-badge.inactive { + background: #f8d7da; + color: #721c24; +} + +.status-badge.revoked { + background: #fff3cd; + color: #856404; +} + +.client-actions { + display: flex; + gap: 0.5rem; +} + +.client-actions .btn { + padding: 0.25rem 0.5rem; + font-size: 0.8rem; +} + +/* Server Grid */ +.server-grid { + display: grid; + grid-template-columns: repeat(auto-fit, minmax(300px, 1fr)); + gap: 1.5rem; + margin-bottom: 2rem; +} + +.server-card { + background: white; + padding: 1.5rem; + border-radius: 12px; + box-shadow: 0 2px 10px rgba(0,0,0,0.1); + transition: transform 0.3s, box-shadow 0.3s; +} + +.server-card:hover { + transform: translateY(-2px); + box-shadow: 0 4px 20px rgba(0,0,0,0.15); +} + +.server-header { + display: flex; + justify-content: space-between; + align-items: center; + margin-bottom: 1rem; +} + +.server-header h3 { + color: #333; + font-size: 1.2rem; +} + +.server-status { + display: flex; + align-items: center; + gap: 0.5rem; + font-size: 0.9rem; + font-weight: 500; +} + +.status-dot { + width: 8px; + height: 8px; + border-radius: 50%; + background: #dc3545; +} + +.server-status.online .status-dot { + background: #28a745; +} + +.server-status.online { + color: #28a745; +} + +.server-status.offline { + color: #dc3545; +} + +.server-info { + margin-bottom: 1rem; +} + +.server-info p { + margin-bottom: 0.5rem; + color: #666; +} + +.server-actions { + display: flex; + gap: 0.5rem; + flex-wrap: wrap; +} + +/* Server Logs */ 
+.server-logs { + background: white; + padding: 1.5rem; + border-radius: 12px; + box-shadow: 0 2px 10px rgba(0,0,0,0.1); +} + +.server-logs h3 { + margin-bottom: 1rem; + color: #333; +} + +.logs-controls { + display: flex; + gap: 1rem; + margin-bottom: 1rem; + align-items: center; +} + +.logs-controls select { + padding: 0.5rem; + border: 1px solid #ddd; + border-radius: 6px; +} + +.logs-container { + background: #1e1e1e; + color: #f8f8f2; + padding: 1rem; + border-radius: 8px; + font-family: 'Courier New', monospace; + font-size: 0.9rem; + max-height: 400px; + overflow-y: auto; +} + +.logs-content { + white-space: pre-wrap; + word-break: break-all; +} + +/* Protocols Grid */ +.protocols-grid { + display: grid; + grid-template-columns: repeat(auto-fit, minmax(350px, 1fr)); + gap: 1.5rem; +} + +.protocol-card { + background: white; + padding: 2rem; + border-radius: 12px; + box-shadow: 0 2px 10px rgba(0,0,0,0.1); + transition: transform 0.3s, box-shadow 0.3s; +} + +.protocol-card:hover { + transform: translateY(-2px); + box-shadow: 0 4px 20px rgba(0,0,0,0.15); +} + +.protocol-header { + display: flex; + align-items: center; + gap: 1rem; + margin-bottom: 1rem; +} + +.protocol-icon { + width: 50px; + height: 50px; + border-radius: 10px; + display: flex; + align-items: center; + justify-content: center; + font-size: 1.5rem; + color: white; + background: linear-gradient(135deg, #667eea 0%, #764ba2 100%); +} + +.protocol-header h3 { + color: #333; + font-size: 1.3rem; +} + +.protocol-description { + color: #666; + margin-bottom: 1.5rem; + line-height: 1.6; +} + +.protocol-details { + display: grid; + grid-template-columns: 1fr 1fr; + gap: 1rem; + margin-bottom: 1.5rem; +} + +.protocol-detail { + display: flex; + justify-content: space-between; + padding: 0.5rem 0; + border-bottom: 1px solid #f1f3f4; +} + +.protocol-detail:last-child { + border-bottom: none; +} + +.protocol-features { + margin-bottom: 1.5rem; +} + +.protocol-features h4 { + color: #333; + margin-bottom: 
0.75rem; + font-size: 1rem; +} + +.protocol-features ul { + list-style: none; + padding: 0; +} + +.protocol-features li { + padding: 0.25rem 0; + color: #666; + display: flex; + align-items: center; + gap: 0.5rem; +} + +.protocol-features li::before { + content: '✓'; + color: #28a745; + font-weight: bold; +} + +/* Usage Stats */ +.usage-stats { + display: grid; + grid-template-columns: repeat(auto-fit, minmax(400px, 1fr)); + gap: 1.5rem; +} + +.usage-card { + background: white; + padding: 1.5rem; + border-radius: 12px; + box-shadow: 0 2px 10px rgba(0,0,0,0.1); +} + +.usage-card h3 { + margin-bottom: 1rem; + color: #333; +} + +.usage-chart { + height: 300px; + display: flex; + align-items: center; + justify-content: center; + background: #f8f9fa; + border-radius: 8px; + color: #666; +} + +.time-range-selector { + float: right; + margin-top: -3rem; +} + +.time-range-selector select { + padding: 0.5rem; + border: 1px solid #ddd; + border-radius: 6px; +} + +/* Security Cards */ +.security-cards { + display: grid; + grid-template-columns: repeat(auto-fit, minmax(300px, 1fr)); + gap: 1.5rem; +} + +.security-card { + background: white; + padding: 2rem; + border-radius: 12px; + box-shadow: 0 2px 10px rgba(0,0,0,0.1); + transition: transform 0.3s, box-shadow 0.3s; +} + +.security-card:hover { + transform: translateY(-2px); + box-shadow: 0 4px 20px rgba(0,0,0,0.15); +} + +.security-card h3 { + color: #333; + margin-bottom: 0.5rem; +} + +.security-card p { + color: #666; + margin-bottom: 1.5rem; +} + +.security-actions { + display: flex; + gap: 0.5rem; + flex-wrap: wrap; +} + +/* Buttons */ +.btn { + padding: 0.75rem 1.5rem; + border: none; + border-radius: 8px; + cursor: pointer; + font-weight: 500; + text-decoration: none; + display: inline-flex; + align-items: center; + gap: 0.5rem; + transition: all 0.3s; + font-size: 0.9rem; +} + +.btn.primary { + background: linear-gradient(135deg, #667eea 0%, #764ba2 100%); + color: white; +} + +.btn.primary:hover { + transform: 
translateY(-1px); + box-shadow: 0 4px 15px rgba(102, 126, 234, 0.4); +} + +.btn.secondary { + background: #f8f9fa; + color: #666; + border: 1px solid #e9ecef; +} + +.btn.secondary:hover { + background: #e9ecef; + color: #333; +} + +.btn.danger { + background: #dc3545; + color: white; +} + +.btn.danger:hover { + background: #c82333; + transform: translateY(-1px); +} + +.btn.small { + padding: 0.5rem 1rem; + font-size: 0.8rem; +} + +.btn:disabled { + opacity: 0.6; + cursor: not-allowed; + transform: none !important; +} + +/* Modal */ +.modal { + display: none; + position: fixed; + top: 0; + left: 0; + width: 100%; + height: 100%; + background: rgba(0,0,0,0.5); + z-index: 2000; + align-items: center; + justify-content: center; +} + +.modal.show { + display: flex; +} + +.modal-content { + background: white; + border-radius: 12px; + box-shadow: 0 10px 40px rgba(0,0,0,0.3); + max-width: 500px; + width: 90%; + max-height: 90vh; + overflow-y: auto; +} + +.modal-header { + padding: 1.5rem; + border-bottom: 1px solid #f1f3f4; + display: flex; + justify-content: space-between; + align-items: center; +} + +.modal-header h3 { + color: #333; + margin: 0; +} + +.modal-close { + background: none; + border: none; + font-size: 1.2rem; + cursor: pointer; + color: #666; + padding: 0.5rem; + border-radius: 50%; + transition: background-color 0.3s; +} + +.modal-close:hover { + background: #f8f9fa; +} + +.modal-body { + padding: 1.5rem; +} + +/* Form Styles */ +.form-group { + margin-bottom: 1.5rem; +} + +.form-group label { + display: block; + margin-bottom: 0.5rem; + font-weight: 500; + color: #333; +} + +.form-group input, +.form-group select, +.form-group textarea { + width: 100%; + padding: 0.75rem; + border: 1px solid #ddd; + border-radius: 6px; + font-size: 1rem; + transition: border-color 0.3s; +} + +.form-group input:focus, +.form-group select:focus, +.form-group textarea:focus { + outline: none; + border-color: #667eea; + box-shadow: 0 0 0 3px rgba(102, 126, 234, 0.1); +} + 
+.form-group textarea { + resize: vertical; + min-height: 80px; +} + +.form-actions { + display: flex; + gap: 1rem; + justify-content: flex-end; + margin-top: 2rem; +} + +/* Alert Styles */ +.alert-container { + position: fixed; + top: 100px; + right: 20px; + z-index: 3000; + max-width: 400px; +} + +.alert { + background: white; + border-radius: 8px; + box-shadow: 0 4px 20px rgba(0,0,0,0.15); + padding: 1rem 1.5rem; + margin-bottom: 1rem; + display: flex; + align-items: center; + gap: 1rem; + border-left: 4px solid #667eea; + animation: slideIn 0.3s ease-out; +} + +.alert.success { + border-left-color: #28a745; +} + +.alert.error { + border-left-color: #dc3545; +} + +.alert.warning { + border-left-color: #ffc107; +} + +.alert-icon { + font-size: 1.2rem; +} + +.alert.success .alert-icon { + color: #28a745; +} + +.alert.error .alert-icon { + color: #dc3545; +} + +.alert.warning .alert-icon { + color: #ffc107; +} + +.alert-content { + flex: 1; +} + +.alert-title { + font-weight: 600; + margin-bottom: 0.25rem; + color: #333; +} + +.alert-message { + color: #666; + font-size: 0.9rem; +} + +.alert-close { + background: none; + border: none; + font-size: 1.2rem; + cursor: pointer; + color: #666; + padding: 0.25rem; +} + +@keyframes slideIn { + from { + transform: translateX(100%); + opacity: 0; + } + to { + transform: translateX(0); + opacity: 1; + } +} + +/* Loading Styles */ +.loading-overlay { + display: none; + position: fixed; + top: 0; + left: 0; + width: 100%; + height: 100%; + background: rgba(255,255,255,0.9); + z-index: 4000; + align-items: center; + justify-content: center; + flex-direction: column; + gap: 1rem; +} + +.loading-overlay.show { + display: flex; +} + +.loading-spinner { + width: 40px; + height: 40px; + border: 4px solid #f3f3f3; + border-top: 4px solid #667eea; + border-radius: 50%; + animation: spin 1s linear infinite; +} + +@keyframes spin { + 0% { transform: rotate(0deg); } + 100% { transform: rotate(360deg); } +} + +.loading-row 
.loading-spinner { + width: 20px; + height: 20px; + border-width: 2px; + margin-right: 0.5rem; + display: inline-block; +} + +/* Responsive Design */ +@media (max-width: 768px) { + .dashboard-container { + flex-direction: column; + } + + .sidebar { + width: 100%; + height: auto; + position: static; + } + + .main-content { + padding: 1rem; + } + + .stats-grid { + grid-template-columns: 1fr; + } + + .server-grid { + grid-template-columns: 1fr; + } + + .protocols-grid { + grid-template-columns: 1fr; + } + + .usage-stats { + grid-template-columns: 1fr; + } + + .security-cards { + grid-template-columns: 1fr; + } + + .client-filters { + flex-direction: column; + align-items: stretch; + } + + .filter-group { + width: 100%; + } + + .filter-group select, + .filter-group input { + min-width: auto; + width: 100%; + } + + .section-header .btn { + float: none; + margin-top: 1rem; + display: block; + width: 100%; + } + + .time-range-selector { + float: none; + margin-top: 1rem; + margin-bottom: 1rem; + } + + .action-buttons { + flex-direction: column; + } + + .action-btn { + justify-content: center; + } + + .server-actions { + justify-content: center; + } + + .security-actions { + justify-content: center; + } + + .form-actions { + flex-direction: column; + } + + .alert-container { + left: 20px; + right: 20px; + max-width: none; + } +} + +@media (max-width: 480px) { + .dashboard-header { + padding: 1rem; + } + + .header-left .logo h1 { + font-size: 1.2rem; + } + + .main-content { + padding: 0.5rem; + } + + .section-header h2 { + font-size: 1.5rem; + } + + .modal-content { + width: 95%; + } + + .modal-header, + .modal-body { + padding: 1rem; + } +} + diff --git a/static/dashboard.html b/static/dashboard.html new file mode 100644 index 0000000000000000000000000000000000000000..65af808a0d737c312da870f3a5bca132603642ec --- /dev/null +++ b/static/dashboard.html @@ -0,0 +1,755 @@ + + + + + + Virtual ISP Stack - Network Management Dashboard + + + + +
+ +
+
+ +
+
+ + System Status +
+
+ +
+
+
+
+ + + + + +
+ +
+
+

System Dashboard

+

Overview of Virtual ISP Stack components and performance

+
+ + +
+
+
+ +
+
+

0

+

DHCP Leases

+
+
+ +
+
+ +
+
+

0

+

NAT Sessions

+
+
+ +
+
+ +
+
+

0

+

Firewall Rules

+
+
+ +
+
+ +
+
+

0

+

Bridge Clients

+
+
+
+ + +
+

Component Status

+
+ +
+
+ + +
+
+

Network Traffic

+ +
+
+

Connection Distribution

+ +
+
+
+ + +
+
+

DHCP Management

+

Manage DHCP leases and configuration

+
+ +
+
+

Active Leases

+ +
+
+ + + + + + + + + + + + + + +
MAC AddressIP AddressLease TimeRemainingStateActions
+
+
+
+ + +
+
+

NAT Management

+

Network Address Translation sessions and statistics

+
+ +
+
+
+ Active Sessions: + 0 +
+
+ Port Utilization: + 0% +
+
+ Bytes Translated: + 0 +
+
+
+ +
+
+

NAT Sessions

+ +
+
+ + + + + + + + + + + + + + + +
Virtual IP:PortReal IP:PortHost IP:PortProtocolDurationBytes In/OutActions
+
+
+
+ + +
+
+

Firewall Management

+

Configure firewall rules and monitor traffic

+
+ +
+ + +
+ +
+
+

Firewall Rules

+
+
+ + + + + + + + + + + + + + + + + + +
PriorityRule IDActionDirectionSourceDestinationProtocolHitsStatusActions
+
+
+
+ + +
+
+

Router Management

+

Routing table and network interfaces

+
+ +
+
+ + + +
+ +
+
+
+ + + + + + + + + + + + + + + +
DestinationGatewayInterfaceMetricTypeUse CountLast Used
+
+
+ +
+
+ + + + + + + + + + + + + + +
NameIP AddressNetworkMTUStatusActions
+
+
+ +
+
+ + + + + + + + + + + +
IP AddressMAC AddressActions
+
+
+
+
+
+ + +
+
+

Packet Bridge

+

Connected clients and bridge statistics

+
+ +
+
+

WebSocket Server

+

Port: 8765

+

Status: Active

+
+
+

TCP Server

+

Port: 8766

+

Status: Active

+
+
+ +
+
+

Connected Clients

+ +
+
+ + + + + + + + + + + + + + + +
Client IDTypeRemote AddressConnected TimePackets In/OutBytes In/OutActions
+
+
+
+ + +
+
+

Session Tracking

+

Unified view of all network sessions

+
+ +
+ +
+ +
+
+

Active Sessions

+
+ + +
+
+
+ + + + + + + + + + + + + + + + + +
Session IDTypeStateVirtual IP:PortReal IP:PortProtocolDurationIdle TimeMetrics
+
+
+
+ + +
+
+

System Logs

+

Monitor system events and troubleshoot issues

+
+ +
+
+ + + +
+
+ + +
+
+ +
+ +
+
+ + +
+
+

VPN Management

+

OpenVPN server management and client connections

+
+ + +
+
+
+

OpenVPN Server

+
+ + + +
+
+
+
+ Status: + Unknown +
+
+ Server IP: + - +
+
+ Port: + - +
+
+ Connected Clients: + 0 +
+
+ Uptime: + - +
+
+
+
+ + +
+
+ Total Bytes Received: + 0 +
+
+ Total Bytes Sent: + 0 +
+
+ + +
+
+

Connected VPN Clients

+
+ + +
+
+
+ + + + + + + + + + + + + + + + +
Client IDCommon NameVPN IP AddressConnected SinceBytes ReceivedBytes SentStatusActions
+
+
+
+ + +
+
+

System Configuration

+

Configure system parameters and settings

+
+ +
+
+

DHCP Configuration

+
+ +
+
+ +
+

NAT Configuration

+
+ +
+
+ +
+

Firewall Configuration

+
+ +
+
+
+ +
+ + +
+
+
+
+ + + + + + + + +
+
+ +

Loading...

+
+
+ + +
+ + + + + + + diff --git a/static/dashboard.js b/static/dashboard.js new file mode 100644 index 0000000000000000000000000000000000000000..3a286828828ccf94feeb1ff04f097a2e95e8c44e --- /dev/null +++ b/static/dashboard.js @@ -0,0 +1,730 @@ +// Dashboard JavaScript +class VPNDashboard { + constructor() { + this.token = localStorage.getItem('auth_token'); + this.currentUser = null; + this.clients = []; + this.serverStatus = {}; + this.protocols = []; + + this.init(); + } + async init() { + const token = localStorage.getItem("auth_token"); + if (!token) { + window.location.href = + '/auth.html'; + return; + } + this.token = token; + + try { + await this.loadUserInfo(); + await this.loadInitialData(); + this.setupEventListeners(); + this.startAutoRefresh(); + } catch (error) { + console.error("Dashboard initialization failed:", error); + this.showAlert("error", "Failed to load dashboard", error.message); + // If loading fails, it might be due to an invalid token, redirect to login + localStorage.removeItem("auth_token"); + localStorage.removeItem("refresh_token"); + localStorage.removeItem("user_data"); + window.location.href = "/auth.html"; + } + } async loadUserInfo() { + try { + const response = await this.apiCall("/api/profile"); + this.currentUser = response.user; + document.getElementById('userName').textContent = this.currentUser.username; + } catch (error) { + console.error('Failed to load user info:', error); + throw error; + } + } + + async loadInitialData() { + this.showLoading(true); + + try { + await Promise.all([ + this.loadClients(), + this.loadServerStatus(), + this.loadProtocols(), + this.loadServerStatistics() + ]); + + this.updateOverviewStats(); + this.addActivity('Dashboard loaded successfully'); + } catch (error) { + console.error('Failed to load initial data:', error); + this.showAlert('error', 'Failed to load data', error.message); + } finally { + this.showLoading(false); + } + } + + async loadClients() { + try { + const response = await 
this.apiCall('/api/vpn-clients'); + this.clients = response.clients || []; + this.renderClientsTable(); + } catch (error) { + console.error('Failed to load clients:', error); + this.clients = []; + } + } + + async loadServerStatus() { + try { + const response = await this.apiCall('/api/server/status'); + this.serverStatus = response.status || {}; + this.serverInfo = response.server_info || {}; + this.renderServerStatus(); + } catch (error) { + console.error('Failed to load server status:', error); + this.serverStatus = {}; + } + } + + async loadProtocols() { + try { + const response = await fetch('/api/server/protocols'); + const data = await response.json(); + this.protocols = data.protocols || []; + this.renderProtocols(); + } catch (error) { + console.error('Failed to load protocols:', error); + this.protocols = []; + } + } + + async loadServerStatistics() { + try { + const response = await this.apiCall('/api/server/statistics'); + this.statistics = response.statistics || {}; + } catch (error) { + console.error('Failed to load server statistics:', error); + this.statistics = {}; + } + } + + updateOverviewStats() { + const totalClients = this.clients.length; + const runningServers = Object.values(this.serverStatus).filter(s => s.running).length; + const serverLocation = this.serverInfo?.server_ip || 'Unknown'; + + document.getElementById('totalClients').textContent = totalClients; + document.getElementById('runningServers').textContent = runningServers; + document.getElementById('serverLocation').textContent = serverLocation; + } + + renderClientsTable() { + const tbody = document.getElementById('clientsTableBody'); + + if (this.clients.length === 0) { + tbody.innerHTML = ` + + + + No VPN clients found. Create your first client to get started. + + + `; + return; + } + + tbody.innerHTML = this.clients.map(client => ` + + + ${this.escapeHtml(client.client_name)} + ${client.description ? `
${this.escapeHtml(client.description)}` : ''} + + + + ${client.protocol.toUpperCase()} + + + + + ${client.status} + + + ${this.formatDate(client.created_at)} + ${client.last_connected ? this.formatDate(client.last_connected) : 'Never'} + +
+ + + +
+ + + `).join(''); + } + + renderServerStatus() { + const protocols = ['openvpn', 'ikev2', 'wireguard']; + + protocols.forEach(protocol => { + const status = this.serverStatus[protocol] || { running: false, client_count: 0, port: 0 }; + const statusElement = document.getElementById(`${protocol}Status`); + const clientsElement = document.getElementById(`${protocol}Clients`); + const portElement = document.getElementById(`${protocol}Port`); + + if (statusElement) { + statusElement.className = `server-status ${status.running ? 'online' : 'offline'}`; + statusElement.innerHTML = ` + + ${status.running ? 'Online' : 'Offline'} + `; + } + + if (clientsElement) { + clientsElement.textContent = status.client_count || 0; + } + + if (portElement) { + portElement.textContent = status.port || 0; + } + }); + } + + renderProtocols() { + const container = document.getElementById('protocolsGrid'); + + container.innerHTML = this.protocols.map(protocol => ` +
+
+
+ +
+

${protocol.name}

+
+

${protocol.description}

+
+
+ Port: + ${protocol.port} +
+
+ Transport: + ${protocol.transport} +
+
+
+

Features:

+
    + ${protocol.features.map(feature => `
  • ${feature}
  • `).join('')} +
+
+ +
+ `).join(''); + } + + setupEventListeners() { + // Close modals when clicking outside + document.addEventListener('click', (e) => { + if (e.target.classList.contains('modal')) { + this.closeModal(e.target.id); + } + }); + + // Close user dropdown when clicking outside + document.addEventListener('click', (e) => { + if (!e.target.closest('.user-menu')) { + document.getElementById('userDropdown').classList.remove('show'); + } + }); + } + + startAutoRefresh() { + // Refresh server status every 30 seconds + setInterval(() => { + this.loadServerStatus(); + }, 30000); + + // Refresh clients every 60 seconds + setInterval(() => { + this.loadClients(); + }, 60000); + } + + // API Helper + async apiCall(endpoint, options = {}) { + const defaultOptions = { + headers: { + 'Authorization': `Bearer ${this.token}`, + 'Content-Type': 'application/json' + } + }; + + const response = await fetch(endpoint, { ...defaultOptions, ...options }); + + if (response.status === 401) { + localStorage.removeItem('auth_token'); + window.location.href = '/auth.html'; + return; + } + + if (!response.ok) { + const error = await response.json().catch(() => ({ error: 'Network error' })); + throw new Error(error.error || 'Request failed'); + } + + return await response.json(); + } + + // Navigation + showSection(sectionId) { + // Hide all sections + document.querySelectorAll('.content-section').forEach(section => { + section.classList.remove('active'); + }); + + // Remove active class from nav items + document.querySelectorAll('.nav-item').forEach(item => { + item.classList.remove('active'); + }); + + // Show selected section + document.getElementById(sectionId).classList.add('active'); + + // Add active class to nav item + document.querySelector(`[data-section="${sectionId}"]`).classList.add('active'); + + // Load section-specific data + this.loadSectionData(sectionId); + } + + async loadSectionData(sectionId) { + switch (sectionId) { + case 'server-status': + await this.loadServerStatus(); + 
break; + case 'vpn-clients': + await this.loadClients(); + break; + case 'protocols': + await this.loadProtocols(); + break; + } + } + + // Client Management + showCreateClientModal() { + document.getElementById('createClientModal').classList.add('show'); + document.getElementById('createClientForm').reset(); + } + + createClientWithProtocol(protocol) { + this.showCreateClientModal(); + document.getElementById('clientProtocol').value = protocol; + } + + async createVPNClient(event) { + event.preventDefault(); + + const formData = new FormData(event.target); + const clientData = { + client_name: formData.get('clientName'), + protocol: formData.get('protocol'), + description: formData.get('description') + }; + + try { + this.showLoading(true); + + const response = await this.apiCall('/api/vpn-clients', { + method: 'POST', + body: JSON.stringify(clientData) + }); + + this.showAlert('success', 'Client Created', 'VPN client created successfully'); + this.closeModal('createClientModal'); + await this.loadClients(); + this.addActivity(`Created VPN client: ${clientData.client_name}`); + + } catch (error) { + console.error('Failed to create client:', error); + this.showAlert('error', 'Creation Failed', error.message); + } finally { + this.showLoading(false); + } + } + + async downloadClientConfig(clientId) { + try { + const response = await fetch(`/api/vpn-clients/${clientId}/download`, { + headers: { + 'Authorization': `Bearer ${this.token}` + } + }); + + if (!response.ok) { + throw new Error('Download failed'); + } + + const blob = await response.blob(); + const url = window.URL.createObjectURL(blob); + const a = document.createElement('a'); + a.href = url; + a.download = `vpn-client-${clientId}.zip`; + document.body.appendChild(a); + a.click(); + window.URL.revokeObjectURL(url); + document.body.removeChild(a); + + this.addActivity(`Downloaded configuration for client ${clientId}`); + + } catch (error) { + console.error('Download failed:', error); + 
this.showAlert('error', 'Download Failed', error.message); + } + } + + async revokeClient(clientId) { + if (!confirm('Are you sure you want to revoke this client? This action cannot be undone.')) { + return; + } + + try { + await this.apiCall(`/api/vpn-clients/${clientId}/revoke`, { + method: 'POST' + }); + + this.showAlert('success', 'Client Revoked', 'Client access has been revoked'); + await this.loadClients(); + this.addActivity(`Revoked client ${clientId}`); + + } catch (error) { + console.error('Failed to revoke client:', error); + this.showAlert('error', 'Revocation Failed', error.message); + } + } + + // Server Management + async startServer(protocol) { + try { + this.showLoading(true); + + const response = await this.apiCall(`/api/server/start/${protocol}`, { + method: 'POST' + }); + + this.showAlert('success', 'Server Started', response.message); + await this.loadServerStatus(); + this.addActivity(`Started ${protocol.toUpperCase()} server`); + + } catch (error) { + console.error('Failed to start server:', error); + this.showAlert('error', 'Start Failed', error.message); + } finally { + this.showLoading(false); + } + } + + async stopServer(protocol) { + if (!confirm(`Are you sure you want to stop the ${protocol.toUpperCase()} server?`)) { + return; + } + + try { + this.showLoading(true); + + const response = await this.apiCall(`/api/server/stop/${protocol}`, { + method: 'POST' + }); + + this.showAlert('success', 'Server Stopped', response.message); + await this.loadServerStatus(); + this.addActivity(`Stopped ${protocol.toUpperCase()} server`); + + } catch (error) { + console.error('Failed to stop server:', error); + this.showAlert('error', 'Stop Failed', error.message); + } finally { + this.showLoading(false); + } + } + + async restartServer(protocol) { + if (!confirm(`Are you sure you want to restart the ${protocol.toUpperCase()} server?`)) { + return; + } + + try { + this.showLoading(true); + + const response = await 
this.apiCall(`/api/server/restart/${protocol}`, { + method: 'POST' + }); + + this.showAlert('success', 'Server Restarted', response.message); + await this.loadServerStatus(); + this.addActivity(`Restarted ${protocol.toUpperCase()} server`); + + } catch (error) { + console.error('Failed to restart server:', error); + this.showAlert('error', 'Restart Failed', error.message); + } finally { + this.showLoading(false); + } + } + + async refreshServerStatus() { + try { + this.showLoading(true); + await this.loadServerStatus(); + this.showAlert('success', 'Status Refreshed', 'Server status updated'); + } catch (error) { + this.showAlert('error', 'Refresh Failed', error.message); + } finally { + this.showLoading(false); + } + } + + async loadServerLogs() { + const protocol = document.getElementById('logProtocol').value; + + try { + const response = await this.apiCall(`/api/server/logs/${protocol}?lines=100`); + const logsContainer = document.getElementById('logsContainer'); + + if (response.logs && response.logs.length > 0) { + logsContainer.innerHTML = ` +
+ ${response.logs.join('\\n')} +
+ `; + } else { + logsContainer.innerHTML = ` +
+ No logs available for ${protocol.toUpperCase()} +
+ `; + } + + // Scroll to bottom + logsContainer.scrollTop = logsContainer.scrollHeight; + + } catch (error) { + console.error('Failed to load logs:', error); + this.showAlert('error', 'Logs Failed', error.message); + } + } + + // Filtering + filterClients() { + const protocolFilter = document.getElementById('protocolFilter').value; + const statusFilter = document.getElementById('statusFilter').value; + const searchFilter = document.getElementById('searchFilter').value.toLowerCase(); + + let filteredClients = this.clients; + + if (protocolFilter) { + filteredClients = filteredClients.filter(client => client.protocol === protocolFilter); + } + + if (statusFilter) { + filteredClients = filteredClients.filter(client => client.status === statusFilter); + } + + if (searchFilter) { + filteredClients = filteredClients.filter(client => + client.client_name.toLowerCase().includes(searchFilter) || + (client.description && client.description.toLowerCase().includes(searchFilter)) + ); + } + + // Temporarily store original clients and render filtered + const originalClients = this.clients; + this.clients = filteredClients; + this.renderClientsTable(); + this.clients = originalClients; + } + + // UI Helpers + showModal(modalId) { + document.getElementById(modalId).classList.add('show'); + } + + closeModal(modalId) { + document.getElementById(modalId).classList.remove('show'); + } + + showLoading(show) { + const overlay = document.getElementById('loadingOverlay'); + if (show) { + overlay.classList.add('show'); + } else { + overlay.classList.remove('show'); + } + } + + showAlert(type, title, message) { + const container = document.getElementById('alertContainer'); + const alertId = 'alert-' + Date.now(); + + const alertHtml = ` +
+
+ +
+
+
${title}
+
${message}
+
+ +
+ `; + + container.insertAdjacentHTML('beforeend', alertHtml); + + // Auto-remove after 5 seconds + setTimeout(() => { + this.closeAlert(alertId); + }, 5000); + } + + closeAlert(alertId) { + const alert = document.getElementById(alertId); + if (alert) { + alert.remove(); + } + } + + addActivity(message) { + const activityList = document.getElementById('activityList'); + const activityHtml = ` +
+ + ${this.escapeHtml(message)} + +
+ `; + + activityList.insertAdjacentHTML('afterbegin', activityHtml); + + // Keep only last 10 activities + const activities = activityList.querySelectorAll('.activity-item'); + if (activities.length > 10) { + activities[activities.length - 1].remove(); + } + } + + // Utility functions + escapeHtml(text) { + const div = document.createElement('div'); + div.textContent = text; + return div.innerHTML; + } + + formatDate(dateString) { + if (!dateString) return 'Never'; + + const date = new Date(dateString); + const now = new Date(); + const diff = now - date; + + if (diff < 60000) return 'Just now'; + if (diff < 3600000) return `${Math.floor(diff / 60000)} minutes ago`; + if (diff < 86400000) return `${Math.floor(diff / 3600000)} hours ago`; + + return date.toLocaleDateString(); + } + + // User menu + toggleUserMenu() { + document.getElementById('userDropdown').classList.toggle('show'); + } + + showProfile() { + this.showAlert('info', 'Profile', 'Profile management coming soon'); + } + + showSettings() { + this.showAlert('info', 'Settings', 'Settings management coming soon'); + } + + logout() { + localStorage.removeItem('auth_token'); + window.location.href = '/auth.html'; + } +} + +// Global functions for onclick handlers +let dashboard; + +function showSection(sectionId) { + dashboard.showSection(sectionId); +} + +function showCreateClientModal() { + dashboard.showCreateClientModal(); +} + +function createVPNClient(event) { + dashboard.createVPNClient(event); +} + +function closeModal(modalId) { + dashboard.closeModal(modalId); +} + +function toggleUserMenu() { + dashboard.toggleUserMenu(); +} + +function showProfile() { + dashboard.showProfile(); +} + +function showSettings() { + dashboard.showSettings(); +} + +function logout() { + dashboard.logout(); +} + +function startServer(protocol) { + dashboard.startServer(protocol); +} + +function stopServer(protocol) { + dashboard.stopServer(protocol); +} + +function restartServer(protocol) { + 
dashboard.restartServer(protocol); +} + +function refreshServerStatus() { + dashboard.refreshServerStatus(); +} + +function loadServerLogs() { + dashboard.loadServerLogs(); +} + +function filterClients() { + dashboard.filterClients(); +} + +// Initialize dashboard when page loads +document.addEventListener('DOMContentLoaded', () => { + dashboard = new VPNDashboard(); +}); + diff --git a/static/favicon.ico b/static/favicon.ico new file mode 100644 index 0000000000000000000000000000000000000000..755a9d6aef15aca9cd5a74d7b39a88efe2a75142 Binary files /dev/null and b/static/favicon.ico differ diff --git a/static/index.html b/static/index.html new file mode 100644 index 0000000000000000000000000000000000000000..67e9c112b9b068c99451ec2be8e9b5f3b8ec1dbb --- /dev/null +++ b/static/index.html @@ -0,0 +1,23 @@ + + + + + + Redirecting... + + + +

If you are not redirected automatically, please click here.

+ + + diff --git a/static/styles.css b/static/styles.css new file mode 100644 index 0000000000000000000000000000000000000000..1a76fd4321e3453cfd81fe1adcaf0ae81fcc5710 --- /dev/null +++ b/static/styles.css @@ -0,0 +1,1121 @@ +/* Reset and Base Styles */ +* { + margin: 0; + padding: 0; + box-sizing: border-box; +} + +body { + font-family: 'Segoe UI', Tahoma, Geneva, Verdana, sans-serif; + background: linear-gradient(135deg, #667eea 0%, #764ba2 100%); + color: #333; + line-height: 1.6; + overflow-x: hidden; +} + +/* App Container */ +.app-container { + display: flex; + min-height: 100vh; + background: rgba(255, 255, 255, 0.95); + backdrop-filter: blur(10px); + margin: 10px; + border-radius: 15px; + box-shadow: 0 20px 40px rgba(0, 0, 0, 0.1); + overflow: hidden; +} + +/* Header */ +.header { + position: fixed; + top: 10px; + left: 270px; + right: 10px; + height: 70px; + background: linear-gradient(135deg, #4facfe 0%, #00f2fe 100%); + border-radius: 15px 15px 0 0; + z-index: 1000; + box-shadow: 0 4px 20px rgba(0, 0, 0, 0.1); +} + +.header-content { + display: flex; + justify-content: space-between; + align-items: center; + height: 100%; + padding: 0 30px; + color: white; +} + +.logo { + display: flex; + align-items: center; + gap: 15px; +} + +.logo i { + font-size: 2rem; + color: #fff; +} + +.logo h1 { + font-size: 1.8rem; + font-weight: 600; + margin: 0; +} + +.header-status { + display: flex; + align-items: center; + gap: 20px; +} + +.status-indicator { + display: flex; + align-items: center; + gap: 8px; + padding: 8px 16px; + background: rgba(255, 255, 255, 0.2); + border-radius: 25px; + transition: all 0.3s ease; +} + +.status-indicator.online i { + color: #4ade80; +} + +.status-indicator.offline i { + color: #ef4444; +} + +.refresh-btn { + padding: 10px; + background: rgba(255, 255, 255, 0.2); + border-radius: 50%; + cursor: pointer; + transition: all 0.3s ease; +} + +.refresh-btn:hover { + background: rgba(255, 255, 255, 0.3); + transform: rotate(180deg); +} + +/* 
Sidebar */ +.sidebar { + width: 260px; + background: linear-gradient(180deg, #2d3748 0%, #1a202c 100%); + border-radius: 15px 0 0 15px; + padding: 90px 0 20px 0; + position: fixed; + height: calc(100vh - 20px); + overflow-y: auto; +} + +.nav-menu { + padding: 0 20px; +} + +.nav-item { + display: flex; + align-items: center; + gap: 15px; + padding: 15px 20px; + margin: 5px 0; + color: #a0aec0; + cursor: pointer; + border-radius: 10px; + transition: all 0.3s ease; + position: relative; +} + +.nav-item:hover { + background: rgba(255, 255, 255, 0.1); + color: #fff; + transform: translateX(5px); +} + +.nav-item.active { + background: linear-gradient(135deg, #4facfe 0%, #00f2fe 100%); + color: #fff; + box-shadow: 0 4px 15px rgba(79, 172, 254, 0.3); +} + +.nav-item i { + font-size: 1.2rem; + width: 20px; + text-align: center; +} + +/* Main Content */ +.main-content { + flex: 1; + margin-left: 260px; + padding: 90px 30px 30px 30px; + background: #f8fafc; + min-height: calc(100vh - 20px); + border-radius: 0 15px 15px 0; +} + +.content-section { + display: none; + animation: fadeIn 0.5s ease-in-out; +} + +.content-section.active { + display: block; +} + +@keyframes fadeIn { + from { opacity: 0; transform: translateY(20px); } + to { opacity: 1; transform: translateY(0); } +} + +.section-header { + margin-bottom: 30px; +} + +.section-header h2 { + font-size: 2rem; + color: #2d3748; + margin-bottom: 8px; +} + +.section-header p { + color: #718096; + font-size: 1.1rem; +} + +/* Stats Grid */ +.stats-grid { + display: grid; + grid-template-columns: repeat(auto-fit, minmax(250px, 1fr)); + gap: 20px; + margin-bottom: 40px; +} + +.stat-card { + background: linear-gradient(135deg, #fff 0%, #f8fafc 100%); + padding: 25px; + border-radius: 15px; + box-shadow: 0 4px 20px rgba(0, 0, 0, 0.08); + display: flex; + align-items: center; + gap: 20px; + transition: all 0.3s ease; +} + +.stat-card:hover { + transform: translateY(-5px); + box-shadow: 0 8px 30px rgba(0, 0, 0, 0.12); +} + 
/* ==========================================================================
   Dashboard widgets: stat icons, component-status panel, charts, data
   tables, buttons, status badges, NAT stats, router tabs, bridge info,
   and the log viewer list.
   ========================================================================== */

.stat-icon {
    width: 60px;
    height: 60px;
    border-radius: 15px;
    display: flex;
    align-items: center;
    justify-content: center;
    background: linear-gradient(135deg, #4facfe 0%, #00f2fe 100%);
    color: white;
    font-size: 1.5rem;
}

.stat-content h3 {
    font-size: 2rem;
    font-weight: 700;
    color: #2d3748;
    margin-bottom: 5px;
}

.stat-content p {
    color: #718096;
    font-size: 0.9rem;
    text-transform: uppercase;
    letter-spacing: 0.5px;
}

/* Component Status — left border colour signals online/offline */
.component-status {
    background: white;
    padding: 25px;
    border-radius: 15px;
    box-shadow: 0 4px 20px rgba(0, 0, 0, 0.08);
    margin-bottom: 30px;
}

.component-status h3 {
    color: #2d3748;
    margin-bottom: 20px;
    font-size: 1.3rem;
}

.component-grid {
    display: grid;
    grid-template-columns: repeat(auto-fit, minmax(200px, 1fr));
    gap: 15px;
}

.component-item {
    display: flex;
    justify-content: space-between;
    align-items: center;
    padding: 15px;
    background: #f8fafc;
    border-radius: 10px;
    border-left: 4px solid #e2e8f0;
}

.component-item.online {
    border-left-color: #4ade80;
}

.component-item.offline {
    border-left-color: #ef4444;
}

.component-name {
    font-weight: 600;
    color: #2d3748;
}

.component-status-badge {
    padding: 4px 12px;
    border-radius: 20px;
    font-size: 0.8rem;
    font-weight: 600;
    text-transform: uppercase;
}

.component-status-badge.online {
    background: #dcfce7;
    color: #166534;
}

.component-status-badge.offline {
    background: #fee2e2;
    color: #991b1b;
}

/* Charts Container */
.charts-container {
    display: grid;
    grid-template-columns: repeat(auto-fit, minmax(400px, 1fr));
    gap: 20px;
    margin-top: 30px;
}

.chart-card {
    background: white;
    padding: 25px;
    border-radius: 15px;
    box-shadow: 0 4px 20px rgba(0, 0, 0, 0.08);
}

.chart-card h3 {
    color: #2d3748;
    margin-bottom: 20px;
    font-size: 1.2rem;
}

/* Tables */
.table-container {
    background: white;
    border-radius: 15px;
    box-shadow: 0 4px 20px rgba(0, 0, 0, 0.08);
    overflow: hidden;
    margin-bottom: 20px;
}

.table-header {
    display: flex;
    justify-content: space-between;
    align-items: center;
    padding: 20px 25px;
    background: #f8fafc;
    border-bottom: 1px solid #e2e8f0;
}

.table-header h3 {
    color: #2d3748;
    font-size: 1.2rem;
}

.table-wrapper {
    overflow-x: auto;
}

table {
    width: 100%;
    border-collapse: collapse;
}

thead {
    background: #f8fafc;
}

th, td {
    padding: 15px;
    text-align: left;
    border-bottom: 1px solid #e2e8f0;
}

th {
    font-weight: 600;
    color: #4a5568;
    text-transform: uppercase;
    font-size: 0.8rem;
    letter-spacing: 0.5px;
}

td {
    color: #2d3748;
}

tbody tr:hover {
    background: #f8fafc;
}

/* Buttons */
.btn {
    padding: 10px 20px;
    border: none;
    border-radius: 8px;
    cursor: pointer;
    font-weight: 600;
    text-decoration: none;
    display: inline-flex;
    align-items: center;
    gap: 8px;
    transition: all 0.3s ease;
    font-size: 0.9rem;
}

.btn-primary {
    background: linear-gradient(135deg, #4facfe 0%, #00f2fe 100%);
    color: white;
}

.btn-primary:hover {
    transform: translateY(-2px);
    box-shadow: 0 4px 15px rgba(79, 172, 254, 0.3);
}

.btn-secondary {
    background: #e2e8f0;
    color: #4a5568;
}

.btn-secondary:hover {
    background: #cbd5e0;
    transform: translateY(-2px);
}

.btn-danger {
    background: linear-gradient(135deg, #ff6b6b 0%, #ee5a52 100%);
    color: white;
}

.btn-danger:hover {
    transform: translateY(-2px);
    box-shadow: 0 4px 15px rgba(255, 107, 107, 0.3);
}

.btn-success {
    background: linear-gradient(135deg, #51cf66 0%, #40c057 100%);
    color: white;
}

.btn-success:hover {
    transform: translateY(-2px);
    box-shadow: 0 4px 15px rgba(81, 207, 102, 0.3);
}

/* Status Badges */
.status-badge {
    padding: 4px 12px;
    border-radius: 20px;
    font-size: 0.8rem;
    font-weight: 600;
    text-transform: uppercase;
}

.status-active {
    background: #dcfce7;
    color: #166534;
}

.status-inactive {
    background: #fee2e2;
    color: #991b1b;
}

.status-pending {
    background: #fef3c7;
    color: #92400e;
}

/* NAT Stats */
.nat-stats {
    background: white;
    padding: 25px;
    border-radius: 15px;
    box-shadow: 0 4px 20px rgba(0, 0, 0, 0.08);
    margin-bottom: 30px;
}

.stat-row {
    display: flex;
    justify-content: space-around;
    align-items: center;
    flex-wrap: wrap;
    gap: 20px;
}

.stat-item {
    text-align: center;
}

.stat-label {
    display: block;
    color: #718096;
    font-size: 0.9rem;
    margin-bottom: 5px;
}

.stat-value {
    display: block;
    font-size: 1.5rem;
    font-weight: 700;
    color: #2d3748;
}

/* Firewall Controls */
.firewall-controls {
    display: flex;
    gap: 15px;
    margin-bottom: 20px;
}

/* Router Tabs — JS toggles .active on buttons and panes */
.router-tabs {
    background: white;
    border-radius: 15px;
    box-shadow: 0 4px 20px rgba(0, 0, 0, 0.08);
    overflow: hidden;
}

.tab-buttons {
    display: flex;
    background: #f8fafc;
    border-bottom: 1px solid #e2e8f0;
}

.tab-btn {
    flex: 1;
    padding: 15px 20px;
    border: none;
    background: transparent;
    cursor: pointer;
    font-weight: 600;
    color: #718096;
    transition: all 0.3s ease;
}

.tab-btn.active {
    background: linear-gradient(135deg, #4facfe 0%, #00f2fe 100%);
    color: white;
}

.tab-btn:hover:not(.active) {
    background: #e2e8f0;
    color: #2d3748;
}

.tab-content {
    padding: 25px;
}

.tab-pane {
    display: none;
}

.tab-pane.active {
    display: block;
}

/* Bridge Info */
.bridge-info {
    display: grid;
    grid-template-columns: repeat(auto-fit, minmax(250px, 1fr));
    gap: 20px;
    margin-bottom: 30px;
}

.info-card {
    background: white;
    padding: 20px;
    border-radius: 15px;
    box-shadow: 0 4px 20px rgba(0, 0, 0, 0.08);
    text-align: center;
}

.info-card h4 {
    color: #2d3748;
    margin-bottom: 10px;
    font-size: 1.1rem;
}

.info-card p {
    color: #718096;
    margin: 5px 0;
}

/* Session Summary */
.session-summary {
    background: white;
    padding: 25px;
    border-radius: 15px;
    box-shadow: 0 4px 20px rgba(0, 0, 0, 0.08);
    margin-bottom: 30px;
}

/* Table Controls */
.table-controls {
    display: flex;
    gap: 15px;
    align-items: center;
}

.table-controls select {
    padding: 8px 12px;
    border: 1px solid #e2e8f0;
    border-radius: 6px;
    background: white;
    color: #2d3748;
}

/* Log Controls */
.log-controls {
    display: flex;
    justify-content: space-between;
    align-items: center;
    margin-bottom: 20px;
    flex-wrap: wrap;
    gap: 15px;
}

.log-filters {
    display: flex;
    gap: 15px;
    align-items: center;
    flex-wrap: wrap;
}

.log-filters select,
.log-filters input {
    padding: 8px 12px;
    border: 1px solid #e2e8f0;
    border-radius: 6px;
    background: white;
    color: #2d3748;
}

.log-filters input {
    min-width: 200px;
}

.log-actions {
    display: flex;
    gap: 10px;
}

/* Log Container — level classes match Python logging level names */
.log-container {
    background: white;
    border-radius: 15px;
    box-shadow: 0 4px 20px rgba(0, 0, 0, 0.08);
    max-height: 600px;
    overflow-y: auto;
}

.log-entry {
    padding: 15px 20px;
    border-bottom: 1px solid #e2e8f0;
    display: flex;
    align-items: flex-start;
    gap: 15px;
}

.log-entry:last-child {
    border-bottom: none;
}

.log-entry:hover {
    background: #f8fafc;
}

.log-level {
    padding: 4px 8px;
    border-radius: 4px;
    font-size: 0.7rem;
    font-weight: 600;
    text-transform: uppercase;
    min-width: 60px;
    text-align: center;
}

.log-level.DEBUG {
    background: #e2e8f0;
    color: #4a5568;
}

.log-level.INFO {
    background: #bee3f8;
    color: #2b6cb0;
}

.log-level.WARNING {
    background: #fef3c7;
    color: #92400e;
}

.log-level.ERROR {
    background: #fed7d7;
    color: #c53030;
}

.log-level.CRITICAL {
    background: #fed7d7;
    color: #742a2a;
}

.log-content {
    flex: 1;
}

.log-timestamp {
    color: #718096;
    font-size: 0.8rem;
    margin-bottom: 5px;
}

.log-message {
    color: #2d3748;
    line-height: 1.5;
}
/* ==========================================================================
   Configuration forms, modal dialogs, loading overlay, toast notifications,
   responsive breakpoints (1200 / 768 / 480px), scrollbar skin, and shared
   animation helpers.
   ========================================================================== */

.log-metadata {
    color: #718096;
    font-size: 0.8rem;
    margin-top: 5px;
}

/* Configuration */
.config-container {
    display: grid;
    gap: 30px;
    margin-bottom: 30px;
}

.config-section {
    background: white;
    padding: 25px;
    border-radius: 15px;
    box-shadow: 0 4px 20px rgba(0, 0, 0, 0.08);
}

.config-section h3 {
    color: #2d3748;
    margin-bottom: 20px;
    font-size: 1.2rem;
}

.config-form {
    display: grid;
    grid-template-columns: repeat(auto-fit, minmax(250px, 1fr));
    gap: 20px;
}

.form-group {
    display: flex;
    flex-direction: column;
    gap: 5px;
}

.form-group label {
    color: #4a5568;
    font-weight: 600;
    font-size: 0.9rem;
}

.form-group input,
.form-group select {
    padding: 10px 12px;
    border: 1px solid #e2e8f0;
    border-radius: 6px;
    background: white;
    color: #2d3748;
    transition: border-color 0.3s ease;
}

.form-group input:focus,
.form-group select:focus {
    outline: none;
    border-color: #4facfe;
    box-shadow: 0 0 0 3px rgba(79, 172, 254, 0.1);
}

.config-actions {
    display: flex;
    gap: 15px;
    justify-content: center;
}

/* Modals — hidden by default, shown by JS */
.modal {
    display: none;
    position: fixed;
    z-index: 2000;
    left: 0;
    top: 0;
    width: 100%;
    height: 100%;
    background: rgba(0, 0, 0, 0.5);
    /* FIX: Safari still requires the -webkit- prefix for backdrop-filter */
    -webkit-backdrop-filter: blur(5px);
    backdrop-filter: blur(5px);
}

.modal-content {
    background: white;
    margin: 5% auto;
    padding: 0;
    border-radius: 15px;
    width: 90%;
    max-width: 600px;
    box-shadow: 0 20px 40px rgba(0, 0, 0, 0.2);
    animation: modalSlideIn 0.3s ease-out;
}

@keyframes modalSlideIn {
    from {
        opacity: 0;
        transform: translateY(-50px);
    }
    to {
        opacity: 1;
        transform: translateY(0);
    }
}

.modal-header {
    display: flex;
    justify-content: space-between;
    align-items: center;
    padding: 20px 25px;
    border-bottom: 1px solid #e2e8f0;
    background: #f8fafc;
    border-radius: 15px 15px 0 0;
}

.modal-header h3 {
    color: #2d3748;
    margin: 0;
}

.close {
    color: #718096;
    font-size: 28px;
    font-weight: bold;
    cursor: pointer;
    transition: color 0.3s ease;
}

.close:hover {
    color: #2d3748;
}

.modal-body {
    padding: 25px;
}

.modal-footer {
    display: flex;
    justify-content: flex-end;
    gap: 15px;
    padding: 20px 25px;
    border-top: 1px solid #e2e8f0;
    background: #f8fafc;
    border-radius: 0 0 15px 15px;
}

/* Loading Overlay */
.loading-overlay {
    display: none;
    position: fixed;
    z-index: 3000;
    left: 0;
    top: 0;
    width: 100%;
    height: 100%;
    background: rgba(255, 255, 255, 0.9);
    /* FIX: Safari still requires the -webkit- prefix for backdrop-filter */
    -webkit-backdrop-filter: blur(5px);
    backdrop-filter: blur(5px);
}

.loading-spinner {
    position: absolute;
    top: 50%;
    left: 50%;
    transform: translate(-50%, -50%);
    text-align: center;
    color: #4facfe;
}

.loading-spinner i {
    font-size: 3rem;
    margin-bottom: 20px;
}

.loading-spinner p {
    font-size: 1.2rem;
    font-weight: 600;
}

/* Toast Notifications — stacked below the fixed header */
.toast-container {
    position: fixed;
    top: 100px;
    right: 30px;
    z-index: 2500;
    display: flex;
    flex-direction: column;
    gap: 10px;
}

.toast {
    padding: 15px 20px;
    border-radius: 10px;
    color: white;
    font-weight: 600;
    box-shadow: 0 4px 15px rgba(0, 0, 0, 0.2);
    animation: toastSlideIn 0.3s ease-out;
    max-width: 350px;
}

@keyframes toastSlideIn {
    from {
        opacity: 0;
        transform: translateX(100%);
    }
    to {
        opacity: 1;
        transform: translateX(0);
    }
}

.toast.success {
    background: linear-gradient(135deg, #51cf66 0%, #40c057 100%);
}

.toast.error {
    background: linear-gradient(135deg, #ff6b6b 0%, #ee5a52 100%);
}

.toast.warning {
    background: linear-gradient(135deg, #ffd43b 0%, #fab005 100%);
}

.toast.info {
    background: linear-gradient(135deg, #4facfe 0%, #00f2fe 100%);
}

/* Responsive Design */
@media (max-width: 1200px) {
    .stats-grid {
        grid-template-columns: repeat(2, 1fr);
    }

    .charts-container {
        grid-template-columns: 1fr;
    }
}

@media (max-width: 768px) {
    .app-container {
        margin: 5px;
        border-radius: 10px;
    }

    /* Sidebar and header revert to in-flow stacking on small screens */
    .sidebar {
        width: 100%;
        height: auto;
        position: relative;
        border-radius: 10px 10px 0 0;
        padding: 20px 0;
    }

    .header {
        position: relative;
        left: 0;
        right: 0;
        top: 0;
        border-radius: 0;
    }

    .main-content {
        margin-left: 0;
        padding: 20px;
        border-radius: 0 0 10px 10px;
    }

    .stats-grid {
        grid-template-columns: 1fr;
    }

    .header-content {
        padding: 0 20px;
    }

    .logo h1 {
        font-size: 1.4rem;
    }

    .section-header h2 {
        font-size: 1.5rem;
    }

    .log-controls {
        flex-direction: column;
        align-items: stretch;
    }

    .log-filters {
        /* FIX: was `justify-content: stretch`, which is not a meaningful
           value for a flex container (it behaves as flex-start per the CSS
           Box Alignment spec); spell out the equivalent explicitly. The
           actual stretching is done by `.log-filters input { flex: 1 }`. */
        justify-content: flex-start;
    }

    .log-filters input {
        min-width: auto;
        flex: 1;
    }

    .modal-content {
        width: 95%;
        margin: 10% auto;
    }

    .config-form {
        grid-template-columns: 1fr;
    }

    .stat-row {
        flex-direction: column;
        gap: 15px;
    }

    .table-wrapper {
        font-size: 0.9rem;
    }

    th, td {
        padding: 10px 8px;
    }
}

@media (max-width: 480px) {
    .header-content {
        padding: 0 15px;
    }

    .logo h1 {
        display: none;
    }

    .main-content {
        padding: 15px;
    }

    .section-header h2 {
        font-size: 1.3rem;
    }

    .stat-card {
        padding: 20px;
    }

    .stat-icon {
        width: 50px;
        height: 50px;
        font-size: 1.2rem;
    }

    .stat-content h3 {
        font-size: 1.5rem;
    }

    .btn {
        padding: 8px 16px;
        font-size: 0.8rem;
    }

    th, td {
        padding: 8px 6px;
        font-size: 0.8rem;
    }
}

/* Scrollbar Styling (WebKit/Blink only) */
::-webkit-scrollbar {
    width: 8px;
    height: 8px;
}

::-webkit-scrollbar-track {
    background: #f1f1f1;
    border-radius: 4px;
}

::-webkit-scrollbar-thumb {
    background: linear-gradient(135deg, #4facfe 0%, #00f2fe 100%);
    border-radius: 4px;
}

::-webkit-scrollbar-thumb:hover {
    background: linear-gradient(135deg, #3b82f6 0%, #1e40af 100%);
}

/* Animation Classes */
.fade-in {
    animation: fadeIn 0.5s ease-in-out;
}

.slide-up {
    animation: slideUp 0.3s ease-out;
}

@keyframes slideUp {
    from {
        opacity: 0;
        transform: translateY(20px);
    }
    to {
        opacity: 1;
        transform: translateY(0);
    }
}

.pulse {
    animation: pulse 2s infinite;
}

@keyframes pulse {
    0% {
        box-shadow: 0 0 0 0 rgba(79, 172, 254, 0.7);
    }
    70% {
        box-shadow: 0 0 0 10px rgba(79, 172, 254, 0);
    }
    100% {
        box-shadow: 0 0 0 0 rgba(79, 172, 254, 0);
    }
}