import os
import os
import asyncio
import httpx
import asyncio
import traceback
from typing import Optional, List, Dict, Union
from fastapi import FastAPI, UploadFile, File, Form, Body, Header
from fastapi.responses import JSONResponse
from pydantic import BaseModel
from core.mvi_ai_full import MVI_AI
from reasoning.mvi_instruction_engine import build_system_prompt
from generation.responder import Responder
# -------- APP INIT --------
app = FastAPI(title="MVI-AI Multimodal Engine")

# -------- SAFE MVI_AI STARTUP --------
# Model construction can fail (missing weights, OOM, bad config). Keep the API
# process alive and let request handlers return 503 instead of crashing at import.
try:
    mvi_ai = MVI_AI()
except Exception as e:
    print(f"[STARTUP WARNING] MVI_AI failed to initialize: {e}")
    mvi_ai = None  # handlers check for None and respond with 503

# -------- RESPONDER --------
responder = Responder()

# -------- SESSION MEMORY --------
# In-process rolling history of user texts, keyed by session id; each list is
# trimmed to the most recent MAX_HISTORY entries by handle_ask.
# NOTE(review): not persisted and not shared across worker processes — confirm
# single-worker deployment if history matters.
SESSION_MEMORY: Dict[str, List[str]] = {}
MAX_HISTORY = 10
def get_session_id(x_session_id: Optional[str] = Header(None)) -> str:
    """Resolve the session id from the X-Session-Id header, defaulting when absent."""
    if x_session_id:
        return x_session_id
    return "default_session"
# -------- DATA MODEL --------
class UserInput(BaseModel):
    """JSON request body for text input; text may be omitted when a media file is supplied."""
    text: Optional[str] = None
# -------- SYSTEM PROMPT BUILDER --------
def prepare_system_prompt(user_input: str, runtime_prompt: Optional[str] = None) -> str:
    """Merge the fixed MVI-AI policy preamble with the caller's prompt.

    A non-empty runtime_prompt takes precedence over user_input.
    """
    base_policy = """
You are MVI-AI.
Maintain safety, clarity, and ethical behavior.
"""
    effective_prompt = runtime_prompt if runtime_prompt else user_input
    return build_system_prompt(external_prompt=base_policy, user_prompt=effective_prompt)
# -------- ROOT & HEALTH --------
@app.get("/")
async def root():
    """Root endpoint: a static banner pointing clients at /health."""
    banner = "MVI-AI Engine is running. Use /health to check status."
    return {"message": banner}
@app.get("/health")
async def health():
    """Liveness probe that also reports whether the MVI_AI model loaded at startup."""
    model_ready = bool(mvi_ai)
    return {"status": "ok", "mvi_ai_ready": model_ready}
# -------- SELF-PING / KEEP-ALIVE --------
# This Space's own /health URL; periodic self-requests keep the HF Space from sleeping.
SELF_URL = "https://musombi-mvi-ai-engine.hf.space/health"
PING_INTERVAL = 5 * 60  # seconds between self-pings (every 5 minutes)
async def keep_alive():
    """Loop forever, GETing SELF_URL every PING_INTERVAL seconds to keep the host awake."""
    async with httpx.AsyncClient() as client:
        while True:
            try:
                resp = await client.get(SELF_URL)
            except Exception as e:
                # Network hiccups are expected; log and keep looping.
                print(f"[KEEP-ALIVE] Ping error: {e}")
            else:
                if resp.status_code == 200:
                    print("[KEEP-ALIVE] Ping successful")
                else:
                    print(f"[KEEP-ALIVE] Ping failed: {resp.status_code}")
            await asyncio.sleep(PING_INTERVAL)
@app.on_event("startup")
async def startup_event():
    # NOTE(review): @app.on_event is deprecated in newer FastAPI in favor of
    # lifespan handlers — confirm the pinned FastAPI version before migrating.
    # Fire-and-forget: the Task reference is not kept, so exceptions in
    # keep_alive surface only through the event loop's default handler.
    asyncio.create_task(keep_alive())
# -------- CORE HANDLER --------
async def handle_ask(
    text: Optional[str] = None,
    input_data: Optional[UserInput] = None,
    audio_file: Optional[UploadFile] = None,
    image_file: Optional[UploadFile] = None,
    video_file: Optional[UploadFile] = None,
    system_prompt: Optional[str] = None,
    session_id: str = "default_session",
):
    """Shared handler behind /ask and /predict.

    Validates that some input exists, records the user text in per-session
    history, builds the system prompt, queries MVI_AI, then humanizes the
    result via the Responder and persists any generated code to disk.

    Returns a success dict on the happy path, or a JSONResponse with an
    error status (503 model unavailable, 400 no input, 500 internal failure).
    """
    # MVI_AI failed to load at startup (see the guarded MVI_AI() construction).
    if mvi_ai is None:
        return JSONResponse(
            status_code=503,
            content={"error": "MVI_AI not available. Check logs for model load errors."}
        )

    # Prefer the explicit text argument; fall back to the JSON body model.
    actual_text = text or (input_data.text if input_data else "")
    if not actual_text and not any([audio_file, image_file, video_file]):
        return JSONResponse(status_code=400, content={"error": "No input provided"})

    # Session memory: append this message and keep only the newest MAX_HISTORY.
    session_history = SESSION_MEMORY.get(session_id, [])
    if actual_text:
        session_history.append(actual_text)
    SESSION_MEMORY[session_id] = session_history[-MAX_HISTORY:]

    # NOTE(review): dropped the dead modality-detection block from the original —
    # it computed a "modality" string that was never consumed by any call below.

    # Build the final system prompt; a runtime system_prompt overrides user text.
    final_prompt = prepare_system_prompt(actual_text, runtime_prompt=system_prompt)

    # Run MVI_AI; convert model failures into a logged 500 instead of crashing.
    try:
        mvi_response = await mvi_ai.ask(
            text=actual_text,
            audio=audio_file,
            image=image_file,
            video=video_file,
            system_prompt=final_prompt
        )
    except Exception:
        traceback.print_exc()
        return JSONResponse(status_code=500, content={"error": "MVI_AI internal error (see logs)"})

    # Humanize the raw model output; save generated code to a file when present.
    try:
        emotion_label = mvi_response.get("emotion", {"label": "neutral"}).get("label", "neutral")
        response_data = responder.generate(
            plan={
                "emotion": emotion_label,
                "reasoning_depth": "high"
            }
        )
        code_file_info = None
        generated_code = mvi_response.get("generated_code")
        if generated_code:
            lang, filename = responder.detect_language(generated_code)
            if lang:
                path = responder.save_code_file(filename, generated_code)
                code_file_info = {"language": lang, "file_path": path}
        return {
            "success": True,
            "response": response_data,
            "raw_mvi_ai": mvi_response,
            "code_file": code_file_info
        }
    except Exception:
        traceback.print_exc()
        return JSONResponse(status_code=500, content={"error": "Responder failed to generate response"})
# -------- /ASK ENDPOINT --------
@app.post("/ask")
async def ask(
    input_data: Optional[UserInput] = Body(None),
    message: Optional[str] = Body(None),
    text: Optional[str] = Form(None),
    system_prompt: Optional[str] = Form(None),
    audio_file: Optional[UploadFile] = File(None),
    image_file: Optional[UploadFile] = File(None),
    video_file: Optional[UploadFile] = File(None),
    x_session_id: Optional[str] = Header(None)
):
    """Multimodal ask endpoint: accepts a JSON body, form fields, and/or file uploads."""
    # Text source precedence here: input_data.text > message > text.
    # NOTE(review): /predict resolves the same three sources in a different
    # order (message > text > input_data.text) — confirm which precedence is
    # intended and align the two endpoints.
    final_text = (input_data.text if input_data else None) or message or text
    session_id = get_session_id(x_session_id)
    return await handle_ask(
        text=final_text,
        input_data=input_data,
        audio_file=audio_file,
        image_file=image_file,
        video_file=video_file,
        system_prompt=system_prompt,
        session_id=session_id
    )
# -------- /PREDICT ENDPOINT --------
@app.post("/predict")
async def predict(
    input_data: Optional[UserInput] = Body(None),
    message: Optional[str] = Body(None),
    text: Optional[str] = Form(None),
    system_prompt: Optional[str] = Form(None),
    audio_file: Optional[UploadFile] = File(None),
    image_file: Optional[UploadFile] = File(None),
    video_file: Optional[UploadFile] = File(None),
    x_session_id: Optional[str] = Header(None)
):
    """Alias of /ask with the same parameters, delegating to handle_ask."""
    # Text source precedence here: message > text > input_data.text.
    # NOTE(review): /ask uses the opposite order (input_data.text first) —
    # confirm which precedence is intended and align the two endpoints.
    final_text = message or text or (input_data.text if input_data else None)
    session_id = get_session_id(x_session_id)
    return await handle_ask(
        text=final_text,
        input_data=input_data,
        audio_file=audio_file,
        image_file=image_file,
        video_file=video_file,
        system_prompt=system_prompt,
        session_id=session_id
    )
@app.get("/health")
async def health_check():
    # NOTE(review): duplicate route — GET "/health" is already registered above
    # with a richer handler (it also reports mvi_ai_ready). Starlette matches
    # routes in registration order, so this handler is effectively dead code;
    # consider removing it.
    return {"status": "ok"}