# Hugging Face Space — runs on ZeroGPU hardware.
import sys
import os

# --- ⚔️ THE SOVEREIGN PATCH ---
# Newer huggingface_hub releases removed the deprecated ``HfFolder`` helper,
# but libraries imported further down may still reference it.  Install a
# minimal shim *before* those imports so their lookups succeed.
import huggingface_hub

if not hasattr(huggingface_hub, "HfFolder"):
    class SovereignHfFolder:
        """Minimal stand-in for the removed ``HfFolder`` token helper."""

        @staticmethod
        def get_token():
            # No cached HF token is available in this environment.
            return None

    huggingface_hub.HfFolder = SovereignHfFolder
    # NOTE(review): registering a *class* under a module path in sys.modules
    # is unusual, but preserved from the original patch so that
    # ``huggingface_hub.HfFolder``-style module lookups also resolve.
    sys.modules['huggingface_hub.HfFolder'] = SovereignHfFolder

# These imports intentionally come AFTER the patch above.
import subprocess
import gradio as gr
import spaces
import time  # noqa: F401 -- kept from the original import list

# All GPU commands run inside the cloned framework repository.
REPO_DIR = os.path.join(os.getcwd(), "autoresearch")
@spaces.GPU(duration=300)  # Maximum ZeroGPU burst (5 minutes)
def execute_on_h200(command, openrouter_key):
    """Run *command* inside REPO_DIR on the ZeroGPU worker, streaming output.

    This is a generator: it yields the *accumulated* terminal log after each
    line of subprocess output so the Gradio Textbox updates live.

    Args:
        command: Shell command string to execute.  An empty/falsy value
            yields an error message and returns immediately.
        openrouter_key: Optional API key; when given, any OpenAI-compatible
            client in the subprocess is redirected to OpenRouter via env vars.
    """
    if not command:
        yield "❌ Please enter a command."
        return

    env = os.environ.copy()
    if openrouter_key:
        env["OPENAI_API_KEY"] = openrouter_key
        env["OPENAI_BASE_URL"] = "https://openrouter.ai/api/v1"
    # Force Python subprocesses to stream logs instantly instead of buffering.
    env["PYTHONUNBUFFERED"] = "1"

    output_log = f"[*] Attaching H200 and executing: {command}\n\n"
    yield output_log
    try:
        # NOTE(review): shell=True with a user-supplied string is by design
        # here -- this app *is* a remote terminal -- but it grants full shell
        # semantics to whoever can reach the UI.
        # The context manager guarantees the stdout pipe is closed even if
        # streaming raises (the original leaked it), and text=True already
        # implies universal newlines, so the redundant flag is dropped.
        with subprocess.Popen(
            command,
            shell=True,
            cwd=REPO_DIR,
            env=env,
            stdout=subprocess.PIPE,
            stderr=subprocess.STDOUT,  # interleave stderr into the live log
            text=True,
            bufsize=1,  # line-buffered: each line reaches the UI promptly
        ) as process:
            # Stream the output line-by-line to the Gradio UI.
            for line in process.stdout:
                output_log += line
                yield output_log
            process.wait()
        output_log += f"\n--- EXIT CODE: {process.returncode} ---"
        yield output_log
    except Exception as e:
        # Surface the failure in the UI instead of crashing the Space.
        yield output_log + f"\n❌ CRASH: {str(e)}"
def get_readme(path=None):
    """Return the framework README text, or a placeholder if unreadable.

    Args:
        path: Optional explicit path to the README file.  Defaults to
            ``README.md`` inside ``REPO_DIR`` (backward compatible with the
            original zero-argument call).

    Returns:
        The file's contents, or ``"README not found."`` when the file is
        missing or unreadable.
    """
    if path is None:
        path = os.path.join(REPO_DIR, "README.md")
    try:
        # Explicit UTF-8: the README may contain emoji/non-ASCII characters.
        with open(path, "r", encoding="utf-8") as f:
            return f.read()
    # Narrowed from a bare ``except:`` -- only I/O failures mean "no README";
    # programming errors should not be silently swallowed.
    except OSError:
        return "README not found."
# ----- Gradio UI -----
# Spaces expects a module-level ``demo`` object, so the Blocks context stays
# at top level; only ``launch()`` is guarded for direct execution.
with gr.Blocks(theme=gr.themes.Monochrome()) as demo:
    gr.Markdown("# 🧬 Dex Sovereign H200 Terminal (Live Stream Edition)")
    gr.Markdown("Direct command-line execution on the Hugging Face ZeroGPU. Output streams live.")
    with gr.Row():
        with gr.Column(scale=2):
            cmd_input = gr.Textbox(
                value="ls -la && python prepare.py",
                label="Execute Command on H200",
            )
            or_key = gr.Textbox(label="OpenRouter Key (Optional, bypasses OpenAI)", type="password")
            btn = gr.Button("🚀 Run Command on GPU", variant="primary")
        with gr.Column(scale=3):
            output = gr.Textbox(label="Live Terminal Output", lines=20, max_lines=40)
    gr.Markdown("### 📖 Framework Documentation (README.md)")
    gr.Markdown(get_readme())
    # Generator endpoint: each value yielded by execute_on_h200 replaces the
    # output Textbox, producing the live-stream effect.
    btn.click(fn=execute_on_h200, inputs=[cmd_input, or_key], outputs=[output])

if __name__ == "__main__":
    demo.launch()