#!/usr/bin/env python3
"""
Hugging Face Spaces entrypoint for CognitiveKernel-Launchpad.
Defines a Gradio demo object at module import time as required by Spaces.
Environment variables are used for credentials when not provided in config.toml:
- OPENAI_API_BASE -> used as call_target when missing in TOML
- OPENAI_API_KEY -> used as api_key when missing in TOML
- OPENAI_API_MODEL -> used as model when missing in TOML
Note: Although variable names say OPENAI_*, they are generic in this project and
can point to other providers such as ModelScope.
Additionally, we proactively ensure Playwright browsers are installed to avoid
runtime failures in Spaces by running a lightweight readiness check and, if
needed, invoking `python -m playwright install chrome`.
"""
import os
import sys
import platform
import traceback
import subprocess
# Best-effort bootstrap: run Setup.sh for diagnostics and Playwright
# browser preparation. check=False means a non-zero exit from the script
# is tolerated; we only guard the subprocess invocation itself.
try:
    subprocess.run(["bash", "Setup.sh"], check=False)
except OSError as exc:
    # bash missing or not executable (e.g. non-Linux host). Setup is
    # optional, so log and continue instead of aborting Space startup.
    print(f"warning: could not run Setup.sh: {exc}", file=sys.stderr)
import gradio as gr
from ck_pro.config.settings import Settings
from ck_pro.core import CognitiveKernel
from ck_pro.gradio_app import create_interface
# Build settings: prefer config.toml if present; otherwise env-first
# (per the module docstring, Settings.load presumably falls back to
# OPENAI_API_BASE / OPENAI_API_KEY / OPENAI_API_MODEL when the TOML is
# missing — behavior lives in ck_pro.config.settings; TODO confirm).
settings = Settings.load("config.toml")
# Initialize kernel and create the Gradio Blocks app
# NOTE: `demo` must be bound at module scope — Hugging Face Spaces
# imports this module and serves the object named `demo` automatically.
kernel = CognitiveKernel(settings)
demo = create_interface(kernel)
if __name__ == "__main__":
    # Convenience path for running locally; Hugging Face Spaces imports
    # the module and serves `demo` itself, so this branch never fires there.
    launch_options = {
        "server_name": "0.0.0.0",
        "server_port": 7860,
        "show_error": True,
    }
    demo.launch(**launch_options)