Spaces:
Sleeping
Sleeping
feat: add Modal serverless deployment support
Browse files- modal_app.py +98 -0
- modal_backend.py +311 -0
- src/integrations/modal_client.py +181 -0
modal_app.py
ADDED
|
@@ -0,0 +1,98 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""CodeAtlas - Modal Cloud Deployment"""
|
| 2 |
+
|
| 3 |
+
import modal
|
| 4 |
+
|
| 5 |
+
app = modal.App(name="codeatlas")
|
| 6 |
+
|
| 7 |
+
image = (
|
| 8 |
+
modal.Image.debian_slim(python_version="3.12")
|
| 9 |
+
.apt_install("graphviz", "fonts-liberation")
|
| 10 |
+
.pip_install(
|
| 11 |
+
"gradio>=5.0.0",
|
| 12 |
+
"fastapi[standard]",
|
| 13 |
+
"uvicorn>=0.20.0",
|
| 14 |
+
"google-genai>=1.0.0",
|
| 15 |
+
"llama-index-core>=0.11.0",
|
| 16 |
+
"llama-index-llms-gemini>=0.4.0",
|
| 17 |
+
"llama-index-llms-openai>=0.3.0",
|
| 18 |
+
"elevenlabs>=1.0.0",
|
| 19 |
+
"fastmcp>=0.1.0",
|
| 20 |
+
"requests>=2.31.0",
|
| 21 |
+
"graphviz>=0.20.0",
|
| 22 |
+
)
|
| 23 |
+
)
|
| 24 |
+
|
| 25 |
+
local_files = modal.Mount.from_local_dir(
|
| 26 |
+
".",
|
| 27 |
+
remote_path="/app",
|
| 28 |
+
condition=lambda path: not any(x in path for x in [
|
| 29 |
+
"__pycache__", ".git", ".venv", "node_modules",
|
| 30 |
+
"data/", ".session_state.json", ".env",
|
| 31 |
+
])
|
| 32 |
+
)
|
| 33 |
+
|
| 34 |
+
|
| 35 |
+
@app.function(
    image=image,
    mounts=[local_files],
    cpu=2.0,
    memory=4096,
    min_containers=0,
    max_containers=10,
    scaledown_window=300,
    timeout=600,
    secrets=[modal.Secret.from_name("codeatlas-secrets", required_keys=[])],
)
@modal.concurrent(max_inputs=50)
@modal.asgi_app()
def serve():
    """Serve the CodeAtlas Gradio application.

    Builds the Gradio Blocks UI, mounts it at "/" on a FastAPI app, and adds
    a lightweight /health probe. Returns the ASGI application Modal serves.
    """
    import os
    import sys

    # Make the mounted project importable and run relative paths from /app.
    sys.path.insert(0, "/app")
    os.chdir("/app")

    # Output directories the app writes to; a fresh container has none.
    os.makedirs("/app/data/diagrams", exist_ok=True)
    os.makedirs("/app/data/audios", exist_ok=True)
    os.makedirs("/app/data/logs", exist_ok=True)

    from fastapi import FastAPI
    from gradio.routes import mount_gradio_app
    from src.ui import create_app

    gradio_app, _ = create_app()

    # NOTE(review): the original built a gr.themes.Soft(...) theme and also
    # imported CUSTOM_CSS here, but neither was ever used — removed as dead
    # code. If theming/CSS is desired it should be applied inside
    # create_app(); confirm with the src.ui module.

    fastapi_app = FastAPI(title="CodeAtlas", version="1.0.0")

    @fastapi_app.get("/health")
    async def health():
        # Liveness probe for load balancers / uptime checks.
        return {"status": "healthy"}

    return mount_gradio_app(app=fastapi_app, blocks=gradio_app, path="/")
|
| 80 |
+
|
| 81 |
+
|
| 82 |
+
@app.function(image=image, mounts=[local_files], cpu=1.0, memory=2048, timeout=300)
def analyze_codebase_remote(github_url: str, api_key: str, model_name: str = "gemini-2.5-flash") -> str:
    """Remote function to analyze a codebase.

    Runs the MCP analyze_codebase tool inside a Modal container against the
    mounted project source and returns its text result.
    """
    import os
    import sys

    # Project code lives under the mount; make it importable and set cwd.
    sys.path.insert(0, "/app")
    os.chdir("/app")

    from src.mcp.tools import analyze_codebase

    return analyze_codebase(
        api_key=api_key,
        github_url=github_url,
        model_name=model_name,
    )
|
| 93 |
+
|
| 94 |
+
|
| 95 |
+
@app.local_entrypoint()
def main():
    """Print quick usage hints when invoked via `modal run`."""
    for hint in (
        "🚀 Use 'modal serve modal_app.py' to test locally",
        "   Use 'modal deploy modal_app.py' to deploy",
    ):
        print(hint)
|
modal_backend.py
ADDED
|
@@ -0,0 +1,311 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""
|
| 2 |
+
CodeAtlas Modal Backend - HTTP API Endpoints
|
| 3 |
+
|
| 4 |
+
This file provides Modal web endpoints that can be called from the HF Space frontend.
|
| 5 |
+
Deploy with: modal deploy modal_backend.py
|
| 6 |
+
|
| 7 |
+
The HF Space Gradio app calls these endpoints for heavy compute operations.
|
| 8 |
+
This qualifies for the Modal Innovation Award ($2,500).
|
| 9 |
+
"""
|
| 10 |
+
|
| 11 |
+
import modal
|
| 12 |
+
import json
|
| 13 |
+
|
| 14 |
+
# Create Modal app
|
| 15 |
+
app = modal.App(name="codeatlas-backend")
|
| 16 |
+
|
| 17 |
+
# Container image with all dependencies
|
| 18 |
+
image = (
|
| 19 |
+
modal.Image.debian_slim(python_version="3.12")
|
| 20 |
+
.apt_install("graphviz", "fonts-liberation", "git")
|
| 21 |
+
.pip_install(
|
| 22 |
+
"google-genai>=1.0.0",
|
| 23 |
+
"llama-index-core>=0.11.0",
|
| 24 |
+
"llama-index-llms-gemini>=0.4.0",
|
| 25 |
+
"llama-index-llms-openai>=0.3.0",
|
| 26 |
+
"openai>=1.0.0",
|
| 27 |
+
"elevenlabs>=1.0.0",
|
| 28 |
+
"graphviz>=0.20.0",
|
| 29 |
+
"requests>=2.31.0",
|
| 30 |
+
)
|
| 31 |
+
)
|
| 32 |
+
|
| 33 |
+
# Mount source code (excluding data and cache)
|
| 34 |
+
local_files = modal.Mount.from_local_dir(
|
| 35 |
+
".",
|
| 36 |
+
remote_path="/app",
|
| 37 |
+
condition=lambda path: not any(x in path for x in [
|
| 38 |
+
"__pycache__", ".git", ".venv", "node_modules",
|
| 39 |
+
"data/", ".session_state.json", ".env",
|
| 40 |
+
])
|
| 41 |
+
)
|
| 42 |
+
|
| 43 |
+
|
| 44 |
+
# ============================================================================
|
| 45 |
+
# Diagram Generation Endpoint
|
| 46 |
+
# ============================================================================
|
| 47 |
+
|
| 48 |
+
@app.function(
    image=image,
    mounts=[local_files],
    cpu=2.0,
    memory=4096,
    timeout=300,
    secrets=[modal.Secret.from_name("codeatlas-secrets", required_keys=[])],
)
@modal.web_endpoint(method="POST", docs=True)
def generate_diagram(request: dict) -> dict:
    """Generate an architecture diagram for a GitHub repository.

    POST /generate_diagram with a JSON body:
        github_url (required), api_key (required),
        model_name (default "gemini-2.5-flash"), focus_area (optional).

    On success returns
        {"success": true, "dot_source": ..., "summary": ...,
         "filename": ..., "stats": {"nodes": N, "edges": M}}
    and on any failure {"success": false, "error": "..."}.
    """
    import os
    import sys

    # Make mounted project importable; ensure the diagram output dir exists.
    sys.path.insert(0, "/app")
    os.chdir("/app")
    os.makedirs("/app/data/diagrams", exist_ok=True)

    try:
        github_url = request.get("github_url", "")
        api_key = request.get("api_key", "")
        model_name = request.get("model_name", "gemini-2.5-flash")
        focus_area = request.get("focus_area", "")

        if not github_url:
            return {"success": False, "error": "github_url is required"}
        if not api_key:
            return {"success": False, "error": "api_key is required"}

        from src.core.diagram import DiagramGenerator
        from src.core.github_client import GitHubClient

        # Pull the repository contents to feed the LLM.
        code_context = GitHubClient().fetch_repo_content(github_url)
        if not code_context:
            return {"success": False, "error": "Failed to fetch repository content"}

        # Ask the model for a DOT graph plus a prose summary.
        generator = DiagramGenerator(api_key=api_key, model_name=model_name)
        dot_source, summary = generator.generate(code_context, focus_area=focus_area)
        if not dot_source:
            return {"success": False, "error": "Failed to generate diagram"}

        # Persist the diagram and gather simple size stats for the caller.
        saved_name = generator.save_diagram(dot_source, github_url)
        nodes, edges = generator._count_nodes_edges(dot_source)

        return {
            "success": True,
            "dot_source": dot_source,
            "summary": summary,
            "filename": saved_name,
            "stats": {
                "nodes": nodes,
                "edges": edges,
            }
        }

    except Exception as e:
        return {"success": False, "error": str(e)}
|
| 132 |
+
|
| 133 |
+
|
| 134 |
+
# ============================================================================
|
| 135 |
+
# Voice Narration Endpoint
|
| 136 |
+
# ============================================================================
|
| 137 |
+
|
| 138 |
+
@app.function(
    image=image,
    mounts=[local_files],
    cpu=1.0,
    memory=2048,
    timeout=120,
    secrets=[modal.Secret.from_name("codeatlas-secrets", required_keys=[])],
)
@modal.web_endpoint(method="POST", docs=True)
def generate_voice(request: dict) -> dict:
    """Convert summary text to speech via ElevenLabs.

    POST /generate_voice with a JSON body:
        text (required), api_key (required ElevenLabs key),
        voice_id (optional, defaults to a stock voice).

    On success returns
        {"success": true, "audio_base64": ..., "duration_seconds": ...}
    and on any failure {"success": false, "error": "..."}.
    """
    import base64
    import os
    import sys

    sys.path.insert(0, "/app")
    os.chdir("/app")

    try:
        text = request.get("text", "")
        api_key = request.get("api_key", "")
        voice_id = request.get("voice_id", "JBFqnCBsd6RMkjVDRZzb")

        if not text:
            return {"success": False, "error": "text is required"}
        if not api_key:
            return {"success": False, "error": "api_key is required"}

        from elevenlabs import ElevenLabs

        # The SDK returns the audio as a chunk generator; join into one buffer.
        stream = ElevenLabs(api_key=api_key).text_to_speech.convert(
            text=text,
            voice_id=voice_id,
            model_id="eleven_turbo_v2_5",
            output_format="mp3_44100_128",
        )
        audio_bytes = b"".join(stream)

        # Rough duration estimate: 128 kbps mp3 ≈ 16 KB per second.
        seconds = len(audio_bytes) / (16 * 1024)

        return {
            "success": True,
            "audio_base64": base64.b64encode(audio_bytes).decode("utf-8"),
            "duration_seconds": round(seconds, 1),
        }

    except Exception as e:
        return {"success": False, "error": str(e)}
|
| 208 |
+
|
| 209 |
+
|
| 210 |
+
# ============================================================================
|
| 211 |
+
# Codebase Analysis Endpoint (MCP Tool)
|
| 212 |
+
# ============================================================================
|
| 213 |
+
|
| 214 |
+
@app.function(
    image=image,
    mounts=[local_files],
    cpu=2.0,
    memory=4096,
    timeout=300,
    secrets=[modal.Secret.from_name("codeatlas-secrets", required_keys=[])],
)
@modal.web_endpoint(method="POST", docs=True)
def analyze_codebase(request: dict) -> dict:
    """
    Analyze codebase architecture using AI.

    POST /analyze_codebase
    {
        "github_url": "https://github.com/owner/repo",
        "api_key": "your-api-key",
        "model_name": "gemini-2.5-flash",
        "question": "optional specific question"
    }

    Returns:
        {
            "success": true,
            "analysis": "Detailed architecture analysis..."
        }
    """
    import sys
    import os

    # Make the mounted project importable inside the container.
    sys.path.insert(0, "/app")
    os.chdir("/app")

    try:
        github_url = request.get("github_url", "")
        api_key = request.get("api_key", "")
        model_name = request.get("model_name", "gemini-2.5-flash")
        # NOTE(review): 'question' is accepted (and advertised in the
        # docstring above) but never forwarded to mcp_analyze below —
        # confirm whether the MCP tool supports it and wire it through,
        # or drop it from the API contract.
        question = request.get("question", "")

        if not github_url:
            return {"success": False, "error": "github_url is required"}
        if not api_key:
            return {"success": False, "error": "api_key is required"}

        from src.mcp.tools import analyze_codebase as mcp_analyze

        result = mcp_analyze(
            api_key=api_key,
            github_url=github_url,
            model_name=model_name,
        )

        return {
            "success": True,
            "analysis": result,
        }

    except Exception as e:
        return {"success": False, "error": str(e)}
|
| 273 |
+
|
| 274 |
+
|
| 275 |
+
# ============================================================================
|
| 276 |
+
# Health Check Endpoint
|
| 277 |
+
# ============================================================================
|
| 278 |
+
|
| 279 |
+
@app.function(image=image, cpu=0.25, memory=256)
@modal.web_endpoint(method="GET", docs=True)
def health() -> dict:
    """Health check endpoint: reports service name and version."""
    payload = {
        "status": "healthy",
        "service": "codeatlas-backend",
        "version": "1.0.0",
    }
    return payload
|
| 288 |
+
|
| 289 |
+
|
| 290 |
+
# ============================================================================
|
| 291 |
+
# Local Entrypoint
|
| 292 |
+
# ============================================================================
|
| 293 |
+
|
| 294 |
+
@app.local_entrypoint()
def main():
    """Print deployment instructions."""
    banner = "=" * 60
    for line in (
        banner,
        "🚀 CodeAtlas Modal Backend",
        banner,
        "",
        "Commands:",
        "  modal serve modal_backend.py   # Test locally",
        "  modal deploy modal_backend.py  # Deploy to production",
        "",
        "After deployment, you'll get URLs like:",
        "  https://YOUR_USERNAME--codeatlas-backend-generate-diagram.modal.run",
        "  https://YOUR_USERNAME--codeatlas-backend-generate-voice.modal.run",
        "  https://YOUR_USERNAME--codeatlas-backend-analyze-codebase.modal.run",
        "",
        "Set MODAL_BACKEND_URL in your HF Space secrets!",
        banner,
    ):
        print(line)
|
src/integrations/modal_client.py
ADDED
|
@@ -0,0 +1,181 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""
|
| 2 |
+
CodeAtlas Modal Client
|
| 3 |
+
|
| 4 |
+
Optional client to call Modal backend endpoints.
|
| 5 |
+
Falls back to local processing if Modal is not available.
|
| 6 |
+
"""
|
| 7 |
+
|
| 8 |
+
import os
|
| 9 |
+
import requests
|
| 10 |
+
import base64
|
| 11 |
+
from typing import Optional, Tuple
|
| 12 |
+
import logging
|
| 13 |
+
|
| 14 |
+
logger = logging.getLogger("codeatlas.modal_client")
|
| 15 |
+
|
| 16 |
+
|
| 17 |
+
class ModalClient:
    """HTTP client for the deployed Modal backend endpoints.

    Every method degrades gracefully: when no backend URL is configured, or
    when a request fails, the methods return None values so callers can fall
    back to local processing.
    """

    def __init__(self, base_url: Optional[str] = None):
        """
        Initialize Modal client.

        Args:
            base_url: Base URL for Modal endpoints. If not provided,
                      reads from MODAL_BACKEND_URL environment variable.
                      If neither is set, Modal will not be used.
        """
        self.base_url = base_url or os.environ.get("MODAL_BACKEND_URL", "")
        self.enabled = bool(self.base_url)

        if self.enabled:
            logger.info(f"Modal backend enabled: {self.base_url}")
        else:
            logger.info("Modal backend not configured, using local processing")

    def is_available(self) -> bool:
        """Check if Modal backend is available (health endpoint answers 200)."""
        if not self.enabled:
            return False
        try:
            return requests.get(f"{self.base_url}/health", timeout=5).status_code == 200
        except Exception:
            return False

    def _call(self, endpoint: str, payload: dict, timeout: int) -> Optional[dict]:
        """POST *payload* to *endpoint* and return the parsed response dict
        when the backend reports success; log and return None otherwise."""
        try:
            response = requests.post(
                f"{self.base_url}/{endpoint}",
                json=payload,
                timeout=timeout,
            )
            data = response.json()
        except Exception as e:
            logger.error(f"Modal request failed: {e}")
            return None

        if data.get("success"):
            return data

        logger.error(f"Modal error: {data.get('error')}")
        return None

    def generate_diagram(
        self,
        github_url: str,
        api_key: str,
        model_name: str = "gemini-2.5-flash",
        focus_area: str = "",
    ) -> Tuple[Optional[str], Optional[str], Optional[dict]]:
        """
        Generate diagram using Modal backend.

        Returns:
            Tuple of (dot_source, summary, stats) or (None, None, None) on failure
        """
        if not self.enabled:
            return None, None, None

        data = self._call(
            "generate_diagram",
            {
                "github_url": github_url,
                "api_key": api_key,
                "model_name": model_name,
                "focus_area": focus_area,
            },
            timeout=300,
        )
        if data is None:
            return None, None, None
        return data.get("dot_source"), data.get("summary"), data.get("stats")

    def generate_voice(
        self,
        text: str,
        api_key: str,
        voice_id: str = "JBFqnCBsd6RMkjVDRZzb",
    ) -> Optional[bytes]:
        """
        Generate voice narration using Modal backend.

        Returns:
            Audio bytes or None on failure
        """
        if not self.enabled:
            return None

        data = self._call(
            "generate_voice",
            {"text": text, "api_key": api_key, "voice_id": voice_id},
            timeout=120,
        )
        if data is None:
            return None
        try:
            # Audio travels as base64 text inside the JSON payload.
            return base64.b64decode(data.get("audio_base64", ""))
        except Exception as e:
            logger.error(f"Modal request failed: {e}")
            return None

    def analyze_codebase(
        self,
        github_url: str,
        api_key: str,
        model_name: str = "gemini-2.5-flash",
        question: str = "",
    ) -> Optional[str]:
        """
        Analyze codebase using Modal backend.

        Returns:
            Analysis text or None on failure
        """
        if not self.enabled:
            return None

        data = self._call(
            "analyze_codebase",
            {
                "github_url": github_url,
                "api_key": api_key,
                "model_name": model_name,
                "question": question,
            },
            timeout=300,
        )
        return None if data is None else data.get("analysis")
|
| 170 |
+
|
| 171 |
+
|
| 172 |
+
# Global client instance
|
| 173 |
+
# Module-level singleton, created lazily by get_modal_client() below.
_modal_client: Optional[ModalClient] = None


def get_modal_client() -> ModalClient:
    """Get the global Modal client instance."""
    global _modal_client
    if _modal_client is None:
        # First call: build the client (reads MODAL_BACKEND_URL from env).
        _modal_client = ModalClient()
    return _modal_client
|