"""
Remote GPU Execution Server v2.2 (Streaming Support)
=====================================================
Questo script usa FastAPI + Gradio per ricevere codice Python ed eseguirlo sulla GPU remota.
Supporta sia esecuzione sincrona che streaming in tempo reale.
"""
from fastapi import FastAPI
from fastapi.middleware.cors import CORSMiddleware
from fastapi.responses import StreamingResponse
import gradio as gr
import subprocess
import tempfile
import base64
import glob
import shutil
import os
import sys
app = FastAPI()
# Allow cross-origin requests from any host so browser-based clients can
# call the API directly.
app.add_middleware(
    CORSMiddleware,
    allow_origins=["*"],  # NOTE(review): wide-open CORS — acceptable only for trusted/private deployments
    allow_credentials=True,
    allow_methods=["*"],
    allow_headers=["*"],
)
def execute_code(python_code: str) -> dict:
    """
    Execute Python code in an isolated temporary directory and collect results.

    The code is written to a script file and run with the current interpreter.
    Files it creates with a recognized extension (images, csv, json, model
    checkpoints) are returned base64-encoded.

    Args:
        python_code: Source code to execute.

    Returns:
        dict with:
            "output": combined stdout/stderr plus a status banner (str)
            "files": list of {"name", "data" (base64 str), "mime"} dicts
    """
    if not python_code or not python_code.strip():
        return {"output": "❌ Errore: Nessun codice ricevuto.", "files": []}

    # Unique temp directory per execution so concurrent runs cannot clobber
    # each other's generated files.
    work_dir = tempfile.mkdtemp()
    script_path = os.path.join(work_dir, "script.py")
    with open(script_path, 'w', encoding='utf-8') as f:
        f.write(python_code)

    output_parts = []
    generated_files = []
    try:
        output_parts.append("🚀 Avvio esecuzione...")
        output_parts.append(f"📁 Directory: {work_dir}")
        output_parts.append("=" * 50)

        # SECURITY NOTE: this executes arbitrary client-supplied code; the
        # server must only be reachable by trusted clients.
        result = subprocess.run(
            [sys.executable, script_path],
            capture_output=True,
            text=True,
            cwd=work_dir,   # generated files land in the temp dir
            timeout=3600,   # 1 hour timeout for long training runs
            env={**os.environ, "PYTHONUNBUFFERED": "1"}
        )

        if result.stdout:
            output_parts.append(result.stdout)
        if result.stderr:
            output_parts.append("\n--- STDERR ---")
            output_parts.append(result.stderr)

        output_parts.append("=" * 50)
        status = "✅ Successo" if result.returncode == 0 else f"❌ Errore ({result.returncode})"
        output_parts.append(status)

        # Collect generated artifacts. *.txt is deliberately excluded to
        # avoid re-reading plain log files.
        for ext in ('*.png', '*.jpg', '*.svg', '*.csv', '*.json', '*.pth', '*.pt'):
            for filepath in glob.glob(os.path.join(work_dir, ext)):
                filename = os.path.basename(filepath)
                try:
                    # Check the size BEFORE opening/reading: skip anything
                    # over 10 MB to keep the JSON response bounded.
                    if os.path.getsize(filepath) > 10 * 1024 * 1024:
                        output_parts.append(f"⚠️ File '{filename}' troppo grande (>10MB), saltato.")
                        continue
                    with open(filepath, "rb") as f:
                        encoded = base64.b64encode(f.read()).decode('utf-8')
                    generated_files.append({
                        "name": filename,
                        "data": encoded,
                        "mime": "application/octet-stream"
                    })
                    output_parts.append(f"📦 File generato: {filename}")
                except Exception as e:
                    output_parts.append(f"❌ Errore lettura {filename}: {e}")
    except Exception as e:
        # Catches TimeoutExpired and anything else; report instead of crash.
        import traceback
        output_parts.append(f"❌ Eccezione: {str(e)}\n{traceback.format_exc()}")
    finally:
        # Always remove the temp dir, even on timeout/exception.
        shutil.rmtree(work_dir, ignore_errors=True)

    return {
        "output": "\n".join(output_parts),
        "files": generated_files
    }
def get_system_info():
    """Return a human-readable summary of the runtime: Python version,
    PyTorch version and GPU/CUDA availability (if torch is installed)."""
    lines = ["System Info:", f"Python: {sys.version}"]
    try:
        import torch
    except ImportError:
        lines.append("PyTorch not installed")
    else:
        lines.append(f"PyTorch: {torch.__version__}")
        lines.append(f"CUDA Available: {torch.cuda.is_available()}")
        if torch.cuda.is_available():
            lines.append(f"GPU: {torch.cuda.get_device_name(0)}")
    return "\n".join(lines)
@app.get("/")
def read_root():
    """Health-check endpoint: confirms the server is up and responding."""
    payload = dict(status="running", message="Remote GPU Execution Server")
    return payload
@app.post("/execute")
async def execute(data: dict):
    """
    POST endpoint: execute Python code synchronously.

    Expects a JSON body like {"code": "..."} and returns the dict produced
    by execute_code: {"output": str, "files": [...]}.
    """
    try:
        code = data.get("code", "")
        return execute_code(code)
    except Exception as e:
        import traceback
        # Never let an exception escape — always return the standard
        # response shape so clients can parse it.
        return {"output": f"❌ Server Error: {str(e)}\n\n{traceback.format_exc()}", "files": []}
def stream_generator(python_code: str):
    """
    Execute Python code and yield its output line by line in real time.

    Args:
        python_code: Source code to execute.

    Yields:
        str: header lines, then each line of process output as produced
        (stderr interleaved with stdout), then a final status banner.
    """
    if not python_code or not python_code.strip():
        yield "❌ Errore: Nessun codice ricevuto.\n"
        return

    work_dir = tempfile.mkdtemp()
    script_path = os.path.join(work_dir, "script.py")
    with open(script_path, 'w', encoding='utf-8') as f:
        f.write(python_code)

    yield "🚀 Avvio esecuzione...\n"
    yield f"📁 Directory: {work_dir}\n"
    yield "=" * 50 + "\n"
    try:
        # SECURITY NOTE: executes arbitrary client code — trusted clients only.
        process = subprocess.Popen(
            [sys.executable, "-u", script_path],  # -u for unbuffered output
            stdout=subprocess.PIPE,
            stderr=subprocess.STDOUT,  # interleave stderr with stdout
            text=True,
            cwd=work_dir,
            env={**os.environ, "PYTHONUNBUFFERED": "1"}
        )
        try:
            # Relay each line to the client as soon as it is available.
            for line in process.stdout:
                yield line
        finally:
            process.stdout.close()  # don't leak the pipe fd
            process.wait()
        yield "\n" + "=" * 50 + "\n"
        status = "✅ Successo" if process.returncode == 0 else f"❌ Errore ({process.returncode})"
        yield f"{status}\n"
    except Exception as e:
        import traceback
        yield f"❌ Eccezione: {str(e)}\n{traceback.format_exc()}\n"
    finally:
        # Clean up the temp dir even if the client disconnects mid-stream.
        shutil.rmtree(work_dir, ignore_errors=True)
@app.post("/execute_stream")
async def execute_stream(data: dict):
    """POST endpoint: execute Python code and stream its output as plain text
    while it is being produced (real-time streaming)."""
    source = data.get("code", "")
    body = stream_generator(source)
    return StreamingResponse(body, media_type="text/plain")
# Gradio interface (thin wrapper around the core executor)
def gradio_wrapper(code):
    """Adapter for Gradio: run the code and return only the text output
    (generated files are available through the JSON API only)."""
    return execute_code(code)["output"]
# Gradio UI: one code textbox in, plain-text execution output out.
demo = gr.Interface(
    fn=gradio_wrapper,
    inputs=gr.Textbox(
        label="π Codice Python",
        placeholder="Incolla qui il tuo codice Python...",
        lines=15
    ),
    # Generated files are only returned by the JSON API, not shown here.
    outputs=gr.Textbox(label="π€ Output Esecuzione (i file non sono visualizzati qui, usa l'API)"),
    title="π Remote GPU Execution Server",
    # Evaluated once at import time; shows Python/PyTorch/GPU availability.
    description=f"Server attivo. {get_system_info()}"
)
# Mount the Gradio app on the FastAPI server under /gradio.
app = gr.mount_gradio_app(app, demo, path="/gradio")
if __name__ == "__main__":
    import uvicorn
    # Listen on all interfaces; 7860 is the conventional Hugging Face
    # Spaces port.
    uvicorn.run(app, host="0.0.0.0", port=7860)