# Hugging Face Space app: FastAPI HTTP proxy in front of github-mcp-server (stdio transport).
# NOTE(review): the Space build log reported errors; the extraction also mangled
# formatting (pipe-wrapped lines, lost indentation) — reconstructed below.
| import asyncio | |
| import os | |
| import logging | |
| import requests | |
| import json | |
| from fastapi import FastAPI, Request, Response | |
# Set up logging with more detailed configuration
# DEBUG level so subprocess stderr and per-request traces are all visible.
logging.basicConfig(
    level=logging.DEBUG,
    format='%(asctime)s - %(name)s - %(levelname)s - %(message)s'
)
# Module-level logger named after this module, per logging convention.
logger = logging.getLogger(__name__)
# ASGI application object; the handlers below proxy requests to the subprocess.
app = FastAPI()
# Handle to the github-mcp-server child process; populated by startup_event,
# read by log_stderr/proxy, and torn down by shutdown_event.
proc = None
# NOTE(review): decorator restored — without it FastAPI never invokes this
# handler and the subprocess is never started. Confirm against the original
# (pre-garbling) file.
@app.on_event("startup")
async def startup_event():
    """Launch the github-mcp-server subprocess this app proxies to.

    Reads GITHUB_PERSONAL_ACCESS_TOKEN from the environment, starts the
    server in stdio mode with all three pipes attached, and spawns a
    background task that drains the child's stderr into our logger.

    Raises:
        Exception: re-raised if the subprocess cannot be created, so the
            application fails fast instead of serving a broken proxy.
    """
    global proc
    logger.info("=== STARTUP EVENT ===")
    # Get the token from an environment variable on the Hugging Face Space
    token = os.environ.get("GITHUB_PERSONAL_ACCESS_TOKEN")
    logger.info("Attempting to start subprocess...")
    logger.info("Token environment variable exists: %s", token is not None)
    if not token:
        logger.error("GITHUB_PERSONAL_ACCESS_TOKEN environment variable not set")
        return
    # Do NOT log any part of the token: even a prefix of a GitHub PAT is
    # sensitive and would persist in the Space's logs.
    logger.info("Starting subprocess (token redacted)")
    try:
        # Create the subprocess to run the github-mcp-server.
        # Inherit the parent environment (PATH, HOME, ...) and overlay the
        # token — passing only the token would strip variables the server
        # binary may depend on.
        proc = await asyncio.create_subprocess_exec(
            '/usr/local/bin/github-mcp-server', 'stdio',
            stdin=asyncio.subprocess.PIPE,
            stdout=asyncio.subprocess.PIPE,
            stderr=asyncio.subprocess.PIPE,
            env={**os.environ, "GITHUB_PERSONAL_ACCESS_TOKEN": token},
        )
        logger.info("Subprocess created successfully with PID: %s", proc.pid)
        # returncode stays None while the child is alive; anything else means
        # it died immediately (bad binary, bad token, missing file, ...).
        if proc.returncode is None:
            logger.info("Subprocess appears to be running")
        else:
            logger.warning("Subprocess may have exited immediately with code: %s", proc.returncode)
        # Start the background task that forwards the child's stderr to our log.
        asyncio.create_task(log_stderr())
        logger.info("=== STARTUP COMPLETE ===")
    except Exception as e:
        logger.error("Failed to create subprocess: %s", str(e))
        # Re-raise to crash the application startup, as the proxy cannot
        # function without the subprocess.
        raise
async def log_stderr():
    """Reads and logs output from the subprocess's stderr stream."""
    # Guard clause: nothing to do until the subprocess (and its stderr pipe)
    # exists.
    if not (proc and proc.stderr):
        return
    logger.info("Starting stderr logging task")
    # Drain stderr line by line until the child closes the pipe.
    while not proc.stderr.at_eof():
        try:
            raw_line = await proc.stderr.readline()
            if raw_line:
                logger.debug("github-mcp-server stderr: %s", raw_line.decode().strip())
            else:
                logger.debug("Empty line from stderr")
        except Exception as e:
            # Any read/decode failure ends the drain loop; the task is
            # best-effort diagnostics only.
            logger.error("Error reading stderr: %s", str(e))
            break
async def _verify_github_token():
    """Best-effort debug check that the configured GitHub token is valid.

    Logs the outcome only; never raises. Runs the blocking HTTP call in a
    worker thread so the event loop (and the subprocess pipes) stay live.
    """
    token = os.environ.get("GITHUB_PERSONAL_ACCESS_TOKEN")
    if not token:
        logger.warning("No GitHub token found in environment")
        return
    try:
        logger.info("Testing GitHub token with API call...")
        # requests.get is blocking; asyncio.to_thread keeps it off the loop.
        response_gh = await asyncio.to_thread(
            requests.get,
            "https://api.github.com/user",
            headers={"Authorization": f"token {token}"},
            timeout=5,
        )
        logger.info("Token verification response status: %s", response_gh.status_code)
        if response_gh.status_code == 200:
            logger.info("GitHub token verification successful")
        else:
            logger.error("GitHub token verification failed with status code: %s", response_gh.status_code)
    except Exception as e:
        logger.error("Error verifying GitHub token: %s", str(e))


def _looks_like_complete_response(buffer):
    """Return True once *buffer* holds a complete JSON-RPC response object.

    Partial UTF-8 sequences and partial JSON are expected while streaming;
    both simply mean "keep reading" and return False.
    """
    try:
        text = bytes(buffer).decode("utf-8")
    except UnicodeDecodeError as e:
        # A chunk boundary split a multi-byte character; wait for more data.
        logger.warning("Decoding failed (partial multi-byte char): %s", str(e))
        return False
    if not text.strip():
        logger.debug("Empty response, continuing to read")
        return False
    try:
        json_data = json.loads(text)
    except json.JSONDecodeError as e:
        # Expected when reading partial JSON data. Continue reading.
        logger.debug("JSON parsing failed (partial read): %s", str(e))
        return False
    # A final JSON-RPC response carries "jsonrpc" plus "id" or "error".
    if isinstance(json_data, dict) and "jsonrpc" in json_data:
        if "id" in json_data or "error" in json_data:
            logger.info("Complete JSON-RPC response detected")
            # Log only a preview of the full response.
            logger.info("Full response preview: %s", text[:200] + "..." if len(text) > 200 else text)
            return True
        logger.debug("JSON-RPC object found, but not a final response (missing 'id' or 'error'). Continuing to read.")
    return False


# NOTE(review): route decorator restored — the handler was unreachable
# without one. The exact path could not be recovered; confirm whether the
# original mounted this at "/" or a catch-all.
@app.post("/")
async def proxy(request: Request):
    """Forward one JSON-RPC request body to the subprocess; return its reply.

    Returns:
        Response: the subprocess's raw reply bytes, or status 500 when the
        subprocess is missing or a pipe operation fails.
    """
    logger.info("=== NEW REQUEST RECEIVED ===")
    logger.info("Request method: %s", request.method)
    logger.info("Request headers: %s", dict(request.headers))
    # 1. Check subprocess health.
    if not proc or not proc.stdin or not proc.stdout:
        logger.error("Subprocess not running - returning 500")
        return Response(status_code=500, content="Subprocess not running")
    # 2. Log incoming request body (preview only, to avoid console spam).
    body = await request.body()
    logger.info("Received request body preview: %s", body.decode()[:200] + "..." if len(body) > 200 else body.decode())
    # 3. Token verification for debugging (non-blocking now).
    await _verify_github_token()
    # 4. Send request to subprocess.
    logger.info("Sending request to subprocess...")
    try:
        proc.stdin.write(body)
        # stdio JSON-RPC servers read newline-delimited messages; an
        # unterminated message makes the server wait forever.
        # NOTE(review): original wrote the raw body only — confirm against
        # github-mcp-server's stdio framing.
        if not body.endswith(b"\n"):
            proc.stdin.write(b"\n")
        await proc.stdin.drain()
        logger.info("Successfully wrote request body to subprocess stdin")
    except Exception as e:
        logger.error("Error writing to subprocess stdin: %s", str(e))
        return Response(status_code=500, content=f"Error writing to subprocess: {str(e)}")
    # 5. Read response with an overall deadline and robust JSON detection.
    logger.info("Reading response from subprocess with timeout...")
    response = bytearray()
    timeout_seconds = 60
    start_time = asyncio.get_event_loop().time()
    try:
        while True:
            if asyncio.get_event_loop().time() - start_time > timeout_seconds:
                logger.error("Timeout reading response from subprocess after %d seconds", timeout_seconds)
                break
            try:
                # Per-read timeout so the outer deadline check can fire.
                chunk = await asyncio.wait_for(proc.stdout.read(1024), timeout=5.0)
            except asyncio.TimeoutError:
                logger.warning("Timeout waiting for chunk from subprocess, continuing loop.")
                continue
            except Exception as e:
                logger.error("Error reading chunk from subprocess: %s", str(e))
                break
            logger.debug("Received chunk of size: %d", len(chunk))
            if not chunk:
                logger.info("EOF reached from subprocess")
                break
            response.extend(chunk)
            logger.debug("Extended response with chunk, total size: %d", len(response))
            if _looks_like_complete_response(response):
                break
    except Exception as e:
        logger.error("Error in primary response reading loop: %s", str(e))
        return Response(status_code=500, content=f"Error reading response: {str(e)}")
    # 6. Final response check and return.
    logger.info("Final response size: %d bytes", len(response))
    if len(response) == 0:
        logger.error("Final response is empty after reading loop.")
    logger.info("=== REQUEST PROCESSING COMPLETE ===")
    return Response(content=bytes(response))
# NOTE(review): decorator restored — without it FastAPI never runs this
# handler and the child process is leaked on shutdown.
@app.on_event("shutdown")
async def shutdown_event():
    """Terminate the proxied subprocess when the application stops."""
    logger.info("=== SHUTDOWN EVENT ===")
    if proc:
        logger.info("Shutting down subprocess...")
        try:
            # kill() raises ProcessLookupError if the child already exited;
            # that is a clean state, not an error.
            proc.kill()
        except ProcessLookupError:
            logger.info("Subprocess already exited")
        # wait() reaps the child so no zombie is left in the container.
        await proc.wait()
        logger.info("Subprocess shut down")
    logger.info("=== SHUTDOWN COMPLETE ===")