# BloxAIcore / app.py — Bloxbuddy AI host service (FastAPI + Gemini agents).
import json
import os
import uuid
from typing import Dict, List, Optional

import google.generativeai as genai
import uvicorn
from fastapi import FastAPI

from data_structures import AgentMessage, GenerationRequest, ResetRequest, ImageGenerationRequest
# Configure the Gemini SDK from the environment; falls back to an empty key
# if GEMINI_API_KEY is unset (API calls would then fail at request time).
genai.configure(api_key=os.environ.get("GEMINI_API_KEY", ""))

app = FastAPI(title="Bloxbuddy AI Host V5")

# Gemini model assigned to each agent role: a heavier model for top-level
# planning, lighter/cheaper ones for execution and utility work.
MODEL_TIERS = {
    "PROJECT_MANAGER": "gemini-2.5-pro",
    "SUPERVISOR_WORLD": "gemini-2.0-flash",
    "SUPERVISOR_SCRIPT": "gemini-2.5-flash",
    "3D_ARTIST": "gemini-2.0-flash",
    "CONCEPT_ARTIST": "gemini-2.0-flash",  # Restored
    "SCRIPTING_ENGINEER": "gemini-2.0-flash",
    "ERRANDS": "gemini-2.0-flash-lite",
}

# Model name used by the image-generation endpoint (currently a stub that
# never actually calls this model).
IMAGE_GEN_MODEL = "gemini-2.5-nano-banana"

# Process-global, in-memory message log shared by all agent conversations.
# Not persisted; cleared per-pair via /memory/reset.
global_message_bus: List[AgentMessage] = []
# --- MERGED PROMPTS (V3 Detail + V4 Logic) ---
# System instructions per agent role, sent verbatim to Gemini. These are
# runtime strings: the downstream parsers depend on the exact JSON shapes
# and markdown fences described here, so edit with care.
PROMPTS = {
    "PROJECT_MANAGER": """You are the Lead Game Architect.
YOUR JOB: Continuous Project Execution.
1. Analyze the 'WORLD STATE' (what currently exists).
2. Analyze the 'GOAL'.
3. Determine the SINGLE NEXT PHASE.
If the project is complete based on the Goal and World State, return "is_finished": true.
Output JSON ONLY:
{
"is_finished": false,
"next_task": {
"role_required": "SUPERVISOR_WORLD",
"title": "Phase Title",
"instruction": "Detailed instruction for the supervisor..."
}
}
VALID ROLES: SUPERVISOR_WORLD, SUPERVISOR_SCRIPT, 3D_ARTIST, CONCEPT_ARTIST, SCRIPTING_ENGINEER, ERRANDS.
""",
    "SUPERVISOR_WORLD": """Level Design Lead.
Break down instructions into specific construction tasks.
Consider AESTHETICS: Position, Rotation, Size, Pivot Center.
Output JSON: { "subtasks": [ { "role_required": "3D_ARTIST", "instruction": "Build a red brick wall at 0,5,0..." } ] }
""",
    "SUPERVISOR_SCRIPT": """Technical Director.
Break features down into specific scripts. Do not write code.
Output JSON: { "subtasks": [ { "role_required": "SCRIPTING_ENGINEER", "instruction": "Create a script for..." } ] }
""",
    "3D_ARTIST": """3D Modeler.
Output strictly JSON keys: 'build_commands' (for parts), 'csg_commands' (unions), 'rigging'.
AESTHETIC RULES: Use logical sizes. Center pivots. Ensure nothing overlaps unintentionally.
Example: { "build_commands": [ { "id": "Wall", "class_name": "Part", "size": {"x":10,"y":10,"z":1}, "position": {"x":0,"y":5,"z":0} } ] }
""",
    "CONCEPT_ARTIST": """AI Art Director.
Analyze the request and output a detailed image prompt.
Output JSON: {"image_prompt": "A rusty metal texture, seamless, 4k..."}
""",
    "SCRIPTING_ENGINEER": """Roblox Luau Scripter.
FORMAT:
1. Write Lua Code in a markdown block: ```lua ... ```
2. Write Metadata in a JSON block: ```json { "scripts": [ { "name": "...", "parent": "..." } ] } ```
RULES:
- USE LUAU (task.wait, Connect).
- NO C#, NO UNITY.
- Escape quotes in JSON.
""",
    "ERRANDS": """Utility Agent.
Output JSON: { "operations": [ { "type": "DESTROY", "target_name": "..." } ] }
""",
}
def get_filtered_history(agent_id: str, partner_id: str, bus: Optional[List] = None) -> List[Dict]:
    """Return the Gemini-format chat history between two agents.

    Scans the message bus for messages exchanged in either direction between
    *agent_id* and *partner_id* and converts each one into the
    ``{"role": ..., "parts": [...]}`` content structure Gemini expects.

    Args:
        agent_id: Instance id of the agent whose history is requested.
        partner_id: Instance id of its conversation partner.
        bus: Message list to scan. Defaults to the process-global bus, which
            keeps the original call sites working unchanged.

    Returns:
        Gemini content dicts in the order the messages appear on the bus.
    """
    if bus is None:
        bus = global_message_bus
    history: List[Dict] = []
    for msg in bus:
        sent = msg.from_agent == agent_id and partner_id in msg.to_agents
        received = msg.from_agent == partner_id and agent_id in msg.to_agents
        if not (sent or received):
            continue  # message belongs to some other conversation pair
        parts = [{"text": msg.content}]
        if msg.image_base64:
            # Attach the image as inline data alongside the text part.
            parts.append({"inline_data": {"mime_type": "image/jpeg", "data": msg.image_base64}})
        history.append({"role": msg.role, "parts": parts})
    return history
@app.post("/agent/chat")
async def chat_generate(request: GenerationRequest):
    """Run one chat turn for an agent against its tier-assigned Gemini model.

    Appends the incoming prompt to the shared bus, replays the filtered
    history for this agent pair through the model, records the reply on the
    bus, and returns it as ``{"response": <text>}``. Model failures are
    returned as a JSON error payload rather than raised.
    """
    # Record the incoming user prompt so it becomes part of the filtered
    # history for this agent pair.
    global_message_bus.append(AgentMessage(
        id=str(uuid.uuid4()), from_agent=request.conversation_partner_id,
        to_agents=[request.agent_instance_id], role="user", content=request.prompt
    ))
    history = get_filtered_history(request.agent_instance_id, request.conversation_partner_id)
    sys_prompt = PROMPTS.get(request.agent_type, "")
    # Force JSON output for every role except the scripter, which must be
    # free to emit markdown code fences (```lua / ```json).
    generation_config = (
        {"response_mime_type": "application/json"}
        if request.agent_type != "SCRIPTING_ENGINEER" else None
    )
    model = genai.GenerativeModel(
        MODEL_TIERS.get(request.agent_type, "gemini-2.0-flash"),
        system_instruction=sys_prompt,
        generation_config=generation_config,
    )
    try:
        response = model.generate_content(history)
        response_text = response.text
    except Exception as e:
        # json.dumps escapes quotes/newlines in the exception text, so the
        # error payload is always valid JSON (the previous f-string template
        # produced broken JSON whenever the message contained a quote).
        response_text = json.dumps({"error": f"Model Error: {e}"})
    global_message_bus.append(AgentMessage(
        id=str(uuid.uuid4()), from_agent=request.agent_instance_id,
        to_agents=[request.conversation_partner_id], role="model", content=response_text
    ))
    return {"response": response_text}
@app.post("/agent/generate_image")
async def generate_image_asset(request: ImageGenerationRequest):
    """Stub image endpoint.

    Logs the configured image model name but never calls it; always returns
    the same hardcoded base64-encoded placeholder PNG.
    """
    print(f"Generating image with {IMAGE_GEN_MODEL}...")
    # Placeholder payload — a tiny base64-encoded PNG (implicit string
    # concatenation keeps the value byte-identical to the original).
    placeholder_png = (
        "iVBORw0KGgoAAAANSUhEUgAAAAEAAAABCAYAAAAfFcSJ"
        "AAAADUlEQVR42mP8z8BQDwAEhQGAhKmMIQAAAABJRU5ErkJggg=="
    )
    return {"status": "success", "image_base64": placeholder_png}
@app.post("/memory/reset")
async def reset_memory(request: ResetRequest):
    """Remove all bus messages exchanged between a junior agent and its
    supervisor, leaving every other conversation untouched."""
    global global_message_bus

    def _is_pair_traffic(msg) -> bool:
        # True when the message flows junior -> supervisor or supervisor -> junior.
        if msg.from_agent == request.junior_instance_id:
            return request.supervisor_instance_id in msg.to_agents
        if msg.from_agent == request.supervisor_instance_id:
            return request.junior_instance_id in msg.to_agents
        return False

    global_message_bus = [msg for msg in global_message_bus if not _is_pair_traffic(msg)]
    return {"status": "reset_complete"}
if __name__ == "__main__":
    # Direct-run entry point: serve the API on all interfaces, port 7861.
    uvicorn.run(app, host="0.0.0.0", port=7861)