# AXON-TRINITY / app.py
# (Hugging Face Space — uploaded by AIencoder, revision b2ea920 verified)
import warnings
import sys
# Suppress asyncio warnings (Python 3.12/3.13 known issue with Gradio)
warnings.filterwarnings('ignore', message='.*Invalid file descriptor.*')
if sys.version_info >= (3, 12):
import logging
logging.getLogger('asyncio').setLevel(logging.CRITICAL)
import gradio as gr
from src.chimera_core import Chimera
# Initialize the Chimera backend. If construction fails (e.g. missing API
# keys), keep `chimera` as None so the UI can still launch and report the
# problem to the user instead of crashing at import time.
chimera = None
try:
    chimera = Chimera()
except Exception as exc:
    print(f"Startup Error: {exc}")
def chat_logic(message, image_file, history, mode):
    """Handle one chat turn for the AXON UI.

    Args:
        message: Text typed by the user (may be empty/None).
        image_file: Filepath of an uploaded image, or None.
        history: Chat history in Gradio "messages" format
            (list of {"role", "content"} dicts), or None on first turn.
        mode: Selected routing mode from the dropdown.

    Returns:
        (history, "", None) — the updated history plus empty values that
        clear the textbox and the image-upload component.
    """
    if history is None:
        history = []

    # Nothing to do on a completely empty submission.
    if not message and not image_file:
        return history, "", None

    # Backend failed to construct at startup (see module-level init).
    if not chimera:
        user_msg = message or "[Image uploaded]"
        history.append({"role": "user", "content": user_msg})
        history.append({"role": "assistant", "content": "❌ Error: API Keys missing."})
        return history, "", None

    # Flatten messages-format entries into the simpler list form that
    # chimera.process_request consumes; pass non-dict entries through.
    simple_history = []
    for msg in history:
        if isinstance(msg, dict):
            simple_history.append([msg.get("content", "")])
        else:
            simple_history.append(msg)

    try:
        response_data, active_module = chimera.process_request(
            message or "",
            simple_history,
            mode,
            image_file,
        )
    except Exception as e:
        response_data = f"Processing Error: {str(e)}"
        active_module = "ERR"

    # Image-generation responses come back as (text, image_path);
    # plain text responses are a bare string.
    if isinstance(response_data, tuple):
        response_text, image_path = response_data
    else:
        response_text, image_path = response_data, None
    # Hoisted out of both branches — previously this f-string was built
    # identically twice.
    final_response = f"**[{active_module} Active]**\n\n{response_text}"

    # Echo the user's turn, flagging image uploads.
    if image_file:
        user_msg = f"🖼️ [Image Uploaded]\n\n{message or 'Analyze this image'}"
    else:
        user_msg = message
    history.append({"role": "user", "content": user_msg})

    # Attach a generated image to the assistant turn when present.
    if image_path:
        history.append({
            "role": "assistant",
            "content": {
                "text": final_response,
                "files": [image_path],
            },
        })
    else:
        history.append({"role": "assistant", "content": final_response})

    return history, "", None
# --- UI Layout ---
custom_css = """
body {
background-color: #0b0f19;
color: #c9d1d9;
}
.gradio-container {
font-family: 'IBM Plex Mono', monospace;
}
#chatbot {
border-radius: 10px;
}
"""
# Custom CSS must be supplied to the gr.Blocks constructor — Blocks.launch()
# has no `css` parameter.
with gr.Blocks(title="⚡ AXON: GOD MODE", css=custom_css) as demo:
    gr.Markdown("# ⚡ AXON: GOD MODE")
    gr.Markdown("*> Modules: VIM (Vision) | NET (Web) | IGM (Art) | ASM (Code)*")

    with gr.Row():
        # type="messages" matches the {"role", "content"} dicts that
        # chat_logic appends to the history.
        chatbot = gr.Chatbot(
            height=500,
            elem_id="chatbot",
            type="messages",
        )

    with gr.Row():
        with gr.Column(scale=4):
            msg = gr.Textbox(
                placeholder="Ask anything, or upload an image...",
                show_label=False,
                container=False,
            )
            btn_upload = gr.Image(
                type="filepath",
                label="📸 Upload for Vision (VIM)",
                height=100,
            )
        with gr.Column(scale=1):
            mode = gr.Dropdown(
                choices=["Auto", "ASM (Code)", "IGM (Generate Image)", "NET (Search)", "VIM (Vision)"],
                value="Auto",
                label="Mode",
                show_label=True,
            )
            submit = gr.Button("🚀 EXECUTE", variant="primary", size="lg")

    # Button click and Enter in the textbox run the same handler; the
    # handler's return values clear the textbox and the image upload.
    submit.click(
        chat_logic,
        inputs=[msg, btn_upload, chatbot, mode],
        outputs=[chatbot, msg, btn_upload],
    )
    msg.submit(
        chat_logic,
        inputs=[msg, btn_upload, chatbot, mode],
        outputs=[chatbot, msg, btn_upload],
    )
if __name__ == "__main__":
    # NOTE: Blocks.launch() does not accept a `css` kwarg (passing one
    # raises TypeError); CSS must be given to gr.Blocks(css=...) instead.
    # ssr_mode=False works around SSR-related issues on Spaces.
    demo.launch(ssr_mode=False)