#!/usr/bin/env python3
# NOTICE: This file is adapted from Tencent's CognitiveKernel-Pro (https://github.com/Tencent/CognitiveKernel-Pro).
# Modifications in this fork (2025) are for academic research and educational use only; no commercial use.
# Original rights belong to the original authors and Tencent; see upstream license for details.
"""
CognitiveKernel-Pro Gradio Interface
Simple, direct implementation following Linus Torvalds principles.
No defensive programming, maximum reuse of existing logic.
NOTE:
The CognitiveKernel system previously used signal-based timeouts which had threading
issues. This has been fixed by replacing signal-based timeouts with thread-safe
threading.Timer mechanisms in the CodeExecutor class.
"""
import time

import gradio as gr

from .config.settings import Settings
from .core import CognitiveKernel


def create_interface(kernel):
    """Create a modern Gradio chat interface with a sidebar layout, inspired by the smolagents design."""
    with gr.Blocks(theme="ocean", fill_height=True) as interface:
        # Session state management
        session_state = gr.State({})

        # Add Hugging Face OAuth login button
        login_button = gr.LoginButton()
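        # Note: the OAuth flow behind gr.LoginButton is only active when the
        # app runs on a Hugging Face Space with OAuth enabled in its metadata.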
        with gr.Sidebar():
            # Header with branding
            gr.Markdown(
                "# 🧠 CognitiveKernel Pro"
                "\n> Advanced AI reasoning system with three-stage cognitive architecture"
                "\n\n🔒 **Authentication Required**: Please sign in with Hugging Face to use this service."
            )

            # Example questions section
            with gr.Group():
                gr.Markdown("**💡 Try These Examples**")
                example1_btn = gr.Button("📊 什么是机器学习?", size="sm")
                example2_btn = gr.Button("🌐 What is artificial intelligence?", size="sm")
                example3_btn = gr.Button("🔍 帮我搜索最新的AI发展趋势", size="sm")
                example4_btn = gr.Button("📝 Explain quantum computing", size="sm")

            # Input section with modern grouping
            with gr.Group():
                gr.Markdown("**💬 Your Request**")
                query_input = gr.Textbox(
                    lines=4,
                    label="Chat Message",
                    container=False,
                    placeholder="Enter your question here and press Shift+Enter or click Submit...",
                    show_label=False,
                )
                with gr.Row():
                    submit_btn = gr.Button("🚀 Submit", variant="primary", scale=2)
                    clear_btn = gr.Button("🗑️ Clear", scale=1)

            # System info section
            with gr.Group():
                gr.Markdown("**⚙️ System Status**")
                status_display = gr.Textbox(
                    value="Ready for reasoning tasks",
                    label="Status",
                    interactive=False,
                    container=False,
                    show_label=False,
                )

            # Branding footer
            gr.HTML(
                "<br><h4><center>Powered by <a target='_blank' href='https://github.com/charSLee013/CognitiveKernel-Launchpad'>"
                "<b>🧠 CognitiveKernel-Launchpad</b></a></center></h4>"
            )

        # Main chat interface with enhanced features
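        # latex_delimiters below lets assistant replies render both inline math
        # ($...$ and \(...\)) and display math ($$...$$ and \[...\]).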
        chatbot = gr.Chatbot(
            label="CognitiveKernel Assistant",
            type="messages",
            avatar_images=(
                "https://cdn-icons-png.flaticon.com/512/1077/1077114.png",  # User avatar
                "https://cdn-icons-png.flaticon.com/512/4712/4712027.png",  # AI avatar
            ),
            show_copy_button=True,
            resizeable=True,
            scale=1,
            latex_delimiters=[
                {"left": r"$$", "right": r"$$", "display": True},
                {"left": r"$", "right": r"$", "display": False},
                {"left": r"\[", "right": r"\]", "display": True},
                {"left": r"\(", "right": r"\)", "display": False},
            ],
            height=600,
        )
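
        # The chat flow is split across two handlers: user_enter() echoes the
        # user's message and disables Submit, then ai_response() streams the
        # kernel's reasoning steps back into the same history as a generator.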
        def user_enter(question, history, session_state):
            """Handle user input: append it to the history, clear the textbox, and update the status."""
            if not question or not question.strip():
                return "", history, "Ready for reasoning tasks", gr.Button(interactive=True)
            history = history + [{"role": "user", "content": question.strip()}]
            return "", history, "🤔 Processing your request...", gr.Button(interactive=False)
        def ai_response(history, session_state):
            """Stream the AI response, updating the status display as each step completes."""
            if not history:
                yield history, "Ready for reasoning tasks", gr.Button(interactive=True)
                return

            # Get the last user message
            user_messages = [msg for msg in history if msg["role"] == "user"]
            if not user_messages:
                yield history, "Ready for reasoning tasks", gr.Button(interactive=True)
                return
            question = user_messages[-1]["content"]
            if not question or not question.strip():
                yield history, "Ready for reasoning tasks", gr.Button(interactive=True)
                return

            try:
                # Check kernel state
                if not hasattr(kernel, 'settings') or not kernel.settings:
                    error_msg = "❌ Kernel configuration error: Settings not loaded"
                    history = history + [{"role": "assistant", "content": error_msg}]
                    yield history, "❌ Configuration error", gr.Button(interactive=True)
                    return

                # Check the API key
                api_key = kernel.settings.ck.model.api_key
                if not api_key or api_key == "your-api-key-here":
                    error_msg = "❌ API Key not configured. Please set OPENAI_API_KEY environment variable."
                    history = history + [{"role": "assistant", "content": error_msg}]
                    yield history, "❌ API Key missing", gr.Button(interactive=True)
                    return

                # Phase 2: process reasoning steps sequentially with status updates
                streaming_generator = kernel.reason(question.strip(), stream=True)
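                # Each step_update is a dict of the form {"type": ..., "result": ...};
                # the type values handled below are "start", "intermediate", and
                # "complete", with anything else treated as a generic step.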
                step_count = 0
                generator_empty = True
                for step_update in streaming_generator:
                    generator_empty = False
                    step_type = step_update.get("type", "unknown")
                    result = step_update.get("result")
                    step_count += 1

                    # Update the status based on the step type
                    if step_type == "start":
                        status = "🎯 Planning approach..."
                    elif step_type == "intermediate":
                        status = f"⚡ Executing step {step_count}..."
                    elif step_type == "complete":
                        status = "✅ Task completed successfully!"
                    else:
                        status = f"🔄 Processing step {step_count}..."

                    if result and result.success:
                        if step_type == "complete":
                            # Final step: build the complete response with cleaner formatting
                            final_content = ""
                            if result.answer and result.answer.strip():
                                final_content = result.answer.strip()

                            # Append the explanation when the end template asks for it
                            end_style = kernel.settings.ck.end_template if kernel and kernel.settings and kernel.settings.ck else None
                            if end_style in ("medium", "more") and getattr(result, "explanation", None):
                                # Center an " Explanation " header within a 50-character separator line
                                separator_length = 50
                                explanation_header = " Explanation "
                                padding_left = (separator_length - len(explanation_header)) // 2
                                padding_right = separator_length - len(explanation_header) - padding_left
                                formatted_explanation = (
                                    "\n\n" +
                                    ("─" * padding_left) + explanation_header + ("─" * padding_right) +
                                    "\n" + result.explanation.strip()
                                )
                                final_content += formatted_explanation
                            content = final_content
                        else:
                            # Intermediate steps: show the reasoning so far
                            if result.reasoning_steps_content and len(result.reasoning_steps_content.strip()) > 0:
                                content = result.reasoning_steps_content.strip()
                            else:
                                content = "Processing..."

                        # Add the assistant message
                        history = history + [{"role": "assistant", "content": content}]
                        yield history, status, gr.Button(interactive=False)

                        # Phase 4: insert an empty user turn as a visual separator between steps
                        if step_type != "complete":
                            history = history + [{"role": "user", "content": ""}]
                            yield history, status, gr.Button(interactive=False)
                            time.sleep(0.3)  # Brief pause so streamed steps render distinctly

                # Detect an empty generator (no steps were yielded at all)
                if generator_empty:
                    error_msg = "❌ No reasoning steps generated. This might indicate an API or configuration issue."
                    history = history + [{"role": "assistant", "content": error_msg}]
                    yield history, "❌ No response generated", gr.Button(interactive=True)
                    return

                # Phase 5: strip trailing separator turns and re-enable input
                while history and history[-1]["role"] == "user" and history[-1]["content"] == "":
                    history.pop()
                yield history, "✅ Ready for next question", gr.Button(interactive=True)

            except Exception as e:
                # Error handling with complete error information
                error_content = f"""🚨 **Critical Processing Error**

I encountered a critical issue while processing your request.

**Error Details:** {str(e)}

**Debug Info:**
- Question: {question[:100]}...
- API Key configured: {'Yes' if hasattr(kernel, 'settings') and kernel.settings.ck.model.api_key and kernel.settings.ck.model.api_key != 'your-api-key-here' else 'No'}
- Model: {kernel.settings.ck.model.model if hasattr(kernel, 'settings') else 'Unknown'}

The reasoning pipeline encountered an unexpected error. Please check the logs and try again."""
                history = history + [{"role": "assistant", "content": error_content}]
                yield history, "❌ Error occurred - Ready for retry", gr.Button(interactive=True)
        # Enhanced event handlers with status updates
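        # .then() chains ai_response after user_enter on both the Submit click
        # and the textbox's Enter submit, so the user's turn appears at once
        # while the assistant's steps stream in afterwards.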
        submit_btn.click(
            fn=user_enter,
            inputs=[query_input, chatbot, session_state],
            outputs=[query_input, chatbot, status_display, submit_btn],
        ).then(
            fn=ai_response,
            inputs=[chatbot, session_state],
            outputs=[chatbot, status_display, submit_btn],
        )
        query_input.submit(
            fn=user_enter,
            inputs=[query_input, chatbot, session_state],
            outputs=[query_input, chatbot, status_display, submit_btn],
        ).then(
            fn=ai_response,
            inputs=[chatbot, session_state],
            outputs=[chatbot, status_display, submit_btn],
        )
        clear_btn.click(
            fn=lambda: ([], "🗑️ Chat cleared - Ready for new conversation", gr.Button(interactive=True)),
            inputs=[],
            outputs=[chatbot, status_display, submit_btn],
        )

        # Example button event handlers
        example1_btn.click(fn=lambda: "什么是机器学习?", inputs=[], outputs=[query_input])
        example2_btn.click(fn=lambda: "What is artificial intelligence?", inputs=[], outputs=[query_input])
        example3_btn.click(fn=lambda: "帮我搜索最新的AI发展趋势", inputs=[], outputs=[query_input])
        example4_btn.click(fn=lambda: "Explain quantum computing", inputs=[], outputs=[query_input])
    return interface

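# create_interface() only builds the Blocks app. Besides interface.launch() in
# main() below, the app could also be mounted into an existing FastAPI server
# (illustrative sketch; assumes a FastAPI `app` object):
#     app = gr.mount_gradio_app(app, create_interface(kernel), path="/")
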
def main():
    """Simple CLI entry point."""
    import argparse

    parser = argparse.ArgumentParser(description="CognitiveKernel-Pro Gradio Interface")
    parser.add_argument("--config", "-c", default="config.toml",
                        help="Config file path (optional; environment variables supported)")
    parser.add_argument("--host", default="0.0.0.0", help="Host to bind to")
    parser.add_argument("--port", type=int, default=7860, help="Port to bind to")
    args = parser.parse_args()

    # Build settings: Settings.load falls back to environment variables when
    # the config file is absent, so a single call covers both cases.
    settings = Settings.load(args.config)
    kernel = CognitiveKernel(settings)
    interface = create_interface(kernel)

    # Launch directly
    interface.launch(
        server_name=args.host,
        server_port=args.port,
        show_error=True,
    )


if __name__ == "__main__":
    main()
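
# This module uses relative imports (.config.settings, .core), so run it as a
# module from the package root; the package name below is illustrative:
#     python -m your_package.gradio_app --config config.toml --port 7860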