File size: 1,562 Bytes
1ea26af
 
 
 
50d0fa6
1ea26af
 
 
 
50d0fa6
1ea26af
 
50d0fa6
1ea26af
 
 
 
50d0fa6
1ea26af
 
 
 
 
50d0fa6
 
1ea26af
 
 
 
 
50d0fa6
1ea26af
 
 
 
50d0fa6
1ea26af
 
50d0fa6
1ea26af
 
 
50d0fa6
 
1ea26af
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
#!/usr/bin/env python3
"""
Hugging Face Spaces entrypoint for CognitiveKernel-Launchpad.
Defines a Gradio demo object at module import time as required by Spaces.

Environment variables are used for credentials when not provided in config.toml:
- OPENAI_API_BASE   -> used as call_target when missing in TOML
- OPENAI_API_KEY    -> used as api_key when missing in TOML
- OPENAI_API_MODEL  -> used as model when missing in TOML

Note: Although variable names say OPENAI_*, they are generic in this project and
can point to other providers such as ModelScope.

Additionally, we proactively prepare the runtime environment by invoking
`Setup.sh` at import time (best-effort), which is expected to perform
diagnostics and install Playwright browsers if they are missing.
"""

import os
import sys
import platform
import traceback
import subprocess


# Best-effort environment preparation: run Setup.sh (diagnostics and Playwright
# browser installation) before the heavy project imports below.
#
# check=False: a non-zero exit from Setup.sh must not abort the Space — the app
# may still be usable if setup is only partially successful.
# We deliberately catch only OSError (e.g. `bash` missing, exec failure) rather
# than a blanket `except Exception: pass`, which would silently hide real bugs;
# the failure is logged to stderr so it is visible in the Spaces build log.
try:
    subprocess.run(["bash", "Setup.sh"], check=False)
except OSError as exc:
    print(f"[app] warning: could not run Setup.sh: {exc}", file=sys.stderr)

import gradio as gr
from ck_pro.config.settings import Settings
from ck_pro.core import CognitiveKernel
from ck_pro.gradio_app import create_interface

# Build settings: Settings.load prefers config.toml when present; per the module
# docstring, OPENAI_API_BASE/KEY/MODEL env vars fill in any values missing from
# the TOML file.
settings = Settings.load("config.toml")

# Initialize the kernel and build the Gradio Blocks app.
# NOTE: the module-level name `demo` is significant — Hugging Face Spaces
# discovers and serves this object automatically at import time (see module
# docstring), so it must not be renamed.
kernel = CognitiveKernel(settings)
demo = create_interface(kernel)

if __name__ == "__main__":
    # Local run convenience only (Spaces ignores this guard and serves `demo`
    # itself). 0.0.0.0 binds all interfaces; 7860 is Gradio's default port.
    demo.launch(server_name="0.0.0.0", server_port=7860, show_error=True)