| import logging |
| import os |
| import sys |
| from typing import List, Optional |
|
|
| from dotenv import load_dotenv |
| from fastapi import FastAPI, HTTPException |
| from fastapi.middleware.cors import CORSMiddleware |
| from pydantic import BaseModel |
| from openai import OpenAI |
|
|
|
|
| |
# Load environment variables from a local .env file (if present) before any
# of the configuration code below reads them.
load_dotenv()


# Root logging configuration: timestamped, level-tagged records for all loggers.
logging.basicConfig(
    level=logging.INFO,
    format='%(asctime)s - %(name)s - %(levelname)s - %(message)s'
)
# Dedicated logger for this service; used by every handler below.
logger = logging.getLogger("rox_ai")


# Startup banner with basic runtime diagnostics for deployment debugging.
logger.info("=" * 60)
logger.info("ROX AI SERVER STARTING")
logger.info("=" * 60)
logger.info(f"Python version: {sys.version}")
logger.info(f"Working directory: {os.getcwd()}")
|
|
| |
# Upstream credential: the NVIDIA Integrate API key used for every model call.
NVIDIA_API_KEY = os.getenv("NVIDIA_API_KEY")


# Fail fast at import time: all endpoints depend on this key, so a missing
# value should prevent the server from starting rather than 500 on first use.
if not NVIDIA_API_KEY:
    logger.error("NVIDIA_API_KEY environment variable is not set!")
    logger.error("Please set NVIDIA_API_KEY in your environment or .env file")

    logger.info("If deploying to Hugging Face Spaces, make sure to add NVIDIA_API_KEY as a secret")
    raise RuntimeError(
        "NVIDIA_API_KEY environment variable is not set. "
        "Create a .env file or set it in your environment."
    )


# Log only the key's length, never the key itself.
logger.info(f"✓ NVIDIA_API_KEY loaded (length: {len(NVIDIA_API_KEY)})")
|
|
| |
# Upstream model identifiers (NVIDIA Integrate catalog) backing each Rox brand
# name. Each constant is used by exactly one endpoint below.
ROX_CORE_MODEL = "minimaxai/minimax-m2.5"  # /chat
ROX_TURBO_MODEL = "meta/llama-3.1-8b-instruct"  # /turbo
ROX_CODER_MODEL = "qwen/qwen3.5-397b-a17b"  # /coder
ROX_TURBO_45_MODEL = "deepseek-ai/deepseek-v3.1"  # /turbo45
ROX_ULTRA_MODEL = "deepseek-ai/deepseek-v3.2"  # /ultra
ROX_DYNO_MODEL = "moonshotai/kimi-k2.5"  # /dyno
ROX_CODER_7_MODEL = "z-ai/glm5"  # /coder7
ROX_VISION_MODEL = "google/gemma-3-27b-it"  # /vision


logger.info("✓ Model configurations loaded")
|
|
| |
# System-prompt identities. Each endpoint injects its identity as the first
# (system) message of every upstream request, branding the reply as a Rox
# product regardless of the underlying model.
ROX_CORE_IDENTITY = """You are Rox Core, created by Rox AI. Your creator and owner is Mohammad Faiz. You are an independent LLM model, not based on anyone else's technology. You represent the cutting edge of Rox AI's research and development."""


ROX_TURBO_IDENTITY = """You are Rox 2.1 Turbo, created by Rox AI. Your creator and owner is Mohammad Faiz. You are an independent LLM model, not based on anyone else's technology. You are optimized for fast, efficient responses while maintaining high quality."""


ROX_CODER_IDENTITY = """You are Rox 3.5 Coder, created by Rox AI. Your creator and owner is Mohammad Faiz. You are an independent LLM model, not based on anyone else's technology. You are specialized in code generation, debugging, and software development tasks."""


ROX_TURBO_45_IDENTITY = """You are Rox 4.5 Turbo, created by Rox AI. Your creator and owner is Mohammad Faiz. You are an independent LLM model, not based on anyone else's technology. You combine speed with advanced reasoning capabilities."""


ROX_ULTRA_IDENTITY = """You are Rox 5 Ultra, created by Rox AI. Your creator and owner is Mohammad Faiz. You are an independent LLM model, not based on anyone else's technology. You are the most advanced model with superior reasoning and thinking capabilities."""


ROX_DYNO_IDENTITY = """You are Rox 6 Dyno, created by Rox AI. Your creator and owner is Mohammad Faiz. You are an independent LLM model, not based on anyone else's technology. You excel at dynamic thinking and extended context understanding."""


ROX_CODER_7_IDENTITY = """You are Rox 7 Coder, created by Rox AI. Your creator and owner is Mohammad Faiz. You are an independent LLM model, not based on anyone else's technology. You are the most advanced coding specialist with superior code generation and reasoning capabilities."""


ROX_VISION_IDENTITY = """You are Rox Vision Max, created by Rox AI. Your creator and owner is Mohammad Faiz. You are an independent LLM model, not based on anyone else's technology. You are optimized for visual understanding and multimodal tasks."""


logger.info("✓ Model identities configured")
|
|
| |
# Single OpenAI-compatible client pointed at NVIDIA's Integrate endpoint,
# shared by every request handler below.
try:
    client = OpenAI(
        base_url="https://integrate.api.nvidia.com/v1",
        api_key=NVIDIA_API_KEY,
    )
    logger.info("✓ OpenAI client initialized successfully")
except Exception as e:
    # Log then re-raise: a broken client means the server cannot serve anything.
    logger.error(f"Failed to initialize OpenAI client: {e}")
    raise
|
|
| |
# FastAPI application instance; all endpoint handlers below attach to it.
app = FastAPI(
    title="Rox AI API - Multiple Models Available",
    description="Eight specialized AI models by Mohammad Faiz",
    version="2.0"
)


logger.info("✓ FastAPI app initialized")


# Open CORS policy so any web front-end can call the API.
# NOTE(review): the CORS spec forbids a wildcard origin combined with
# credentials — Starlette will not emit `Access-Control-Allow-Origin: *` for
# credentialed requests under this configuration. Confirm whether
# allow_credentials=True is actually needed, or list explicit origins.
app.add_middleware(
    CORSMiddleware,
    allow_origins=["*"],
    allow_credentials=True,
    allow_methods=["*"],
    allow_headers=["*"],
)


logger.info("✓ CORS middleware configured")
|
|
|
|
@app.on_event("startup")
async def startup_event():
    """Emit the startup banner and the list of registered endpoints."""
    banner = "=" * 60
    for line in (
        banner,
        "ROX AI SERVER STARTED SUCCESSFULLY",
        banner,
        "Available endpoints:",
        " GET / - API information",
        " GET /health - Health check",
        " POST /chat - Rox Core",
        " POST /turbo - Rox 2.1 Turbo",
        " POST /coder - Rox 3.5 Coder",
        " POST /turbo45 - Rox 4.5 Turbo",
        " POST /ultra - Rox 5 Ultra",
        " POST /dyno - Rox 6 Dyno",
        " POST /coder7 - Rox 7 Coder",
        " POST /vision - Rox Vision Max",
        " POST /hf/generate - HuggingFace compatible",
        banner,
    ):
        logger.info(line)
|
|
|
|
@app.get("/health")
def health_check():
    """Liveness probe: static service metadata for uptime monitoring."""
    report = {}
    report["status"] = "healthy"
    report["service"] = "Rox AI API"
    report["version"] = "2.0"
    report["models"] = 8
    return report
|
|
|
|
@app.get("/")
def root():
    """API information and available models."""
    # (key, endpoint, description, upstream model id, best-for blurb)
    catalog_rows = [
        ("rox_core", "/chat", "Rox Core - Main conversational model",
         "minimaxai/minimax-m2.5", "General conversation and tasks"),
        ("rox_turbo", "/turbo", "Rox 2.1 Turbo - Fast and efficient",
         "meta/llama-3.1-8b-instruct", "Quick responses and efficient processing"),
        ("rox_coder", "/coder", "Rox 3.5 Coder - Specialized coding assistant",
         "qwen/qwen3.5-397b-a17b", "Code generation, debugging, and development"),
        ("rox_turbo_45", "/turbo45", "Rox 4.5 Turbo - Advanced reasoning with speed",
         "deepseek-ai/deepseek-v3.1", "Complex reasoning with fast responses"),
        ("rox_ultra", "/ultra", "Rox 5 Ultra - Most advanced model",
         "deepseek-ai/deepseek-v3.2", "Complex tasks requiring deep reasoning"),
        ("rox_dyno", "/dyno", "Rox 6 Dyno - Extended context with dynamic thinking",
         "moonshotai/kimi-k2.5", "Long context tasks and dynamic reasoning"),
        ("rox_coder_7", "/coder7", "Rox 7 Coder - Most advanced coding specialist",
         "z-ai/glm5", "Advanced code generation and complex programming"),
        ("rox_vision", "/vision", "Rox Vision Max - Optimized for visual understanding",
         "google/gemma-3-27b-it", "Visual understanding and multimodal tasks"),
    ]

    models = {
        key: {
            "endpoint": endpoint,
            "description": description,
            "model": model,
            "best_for": best_for,
        }
        for key, endpoint, description, model, best_for in catalog_rows
    }

    endpoints = [
        {"path": path, "method": "POST", "description": description}
        for path, description in [
            ("/chat", "Rox Core chat"),
            ("/turbo", "Rox 2.1 Turbo chat"),
            ("/coder", "Rox 3.5 Coder chat"),
            ("/turbo45", "Rox 4.5 Turbo chat"),
            ("/ultra", "Rox 5 Ultra chat"),
            ("/dyno", "Rox 6 Dyno chat"),
            ("/coder7", "Rox 7 Coder chat"),
            ("/vision", "Rox Vision Max chat"),
            ("/hf/generate", "HuggingFace compatible (uses Rox Core)"),
        ]
    ]

    return {
        "service": "Rox AI API",
        "version": "2.0",
        "creator": "Mohammad Faiz",
        "models": models,
        "endpoints": endpoints,
    }
|
|
|
|
class ChatMessage(BaseModel):
    """One chat turn in OpenAI message format."""

    role: str  # e.g. "system", "user", or "assistant" — not validated here
    content: str  # the message text
|
|
|
|
class ChatRequest(BaseModel):
    """Request body shared by all chat endpoints."""

    messages: List[ChatMessage]  # full conversation history, oldest first
    temperature: Optional[float] = 1.0  # several endpoints treat 1.0 as "unset"
    top_p: Optional[float] = 1.0  # several endpoints treat 1.0 as "unset"
    max_tokens: Optional[int] = 4096  # completion budget; clamped per endpoint
|
|
|
|
class ChatResponse(BaseModel):
    """Response body for all chat endpoints: the assistant's reply text."""

    content: str
|
|
|
|
class HFParameters(BaseModel):
    """Optional generation parameters in Hugging Face Inference API style."""

    temperature: Optional[float] = None  # None -> endpoint default (1.0)
    top_p: Optional[float] = None  # None -> endpoint default (0.95)
    max_new_tokens: Optional[int] = None  # None -> endpoint default (8192)
|
|
|
|
class HFRequest(BaseModel):
    """Hugging Face-style request: a raw prompt plus optional parameters."""

    inputs: str  # prompt text, sent as a single user message
    parameters: Optional[HFParameters] = None
|
|
|
|
class HFResponseItem(BaseModel):
    """One element of the Hugging Face-style response list."""

    generated_text: str
|
|
|
|
@app.post("/chat", response_model=ChatResponse)
def chat(req: ChatRequest):
    """Rox Core - main conversational model.

    Prepends the Rox Core identity as a system message, forwards the
    conversation upstream unchanged, and returns the assistant's reply.
    Raises HTTPException(500) when the upstream call fails and
    HTTPException(502) when the response has an unexpected shape.
    """
    conversation = [{"role": "system", "content": ROX_CORE_IDENTITY}]
    for message in req.messages:
        conversation.append(message.dict())

    try:
        completion = client.chat.completions.create(
            model=ROX_CORE_MODEL,
            messages=conversation,
            temperature=req.temperature,
            top_p=req.top_p,
            max_tokens=req.max_tokens,
            stream=False,
        )
    except Exception as exc:
        logger.exception("Error while calling Rox Core for /chat")
        raise HTTPException(
            status_code=500,
            detail="Internal server error while calling Rox Core.",
        ) from exc

    try:
        reply = completion.choices[0].message.content or ""
    except Exception:
        logger.exception("Unexpected response format from Rox Core for /chat")
        raise HTTPException(
            status_code=502,
            detail="Bad response from upstream model provider.",
        )

    return ChatResponse(content=reply)
|
|
|
|
@app.post("/turbo", response_model=ChatResponse)
def turbo(req: ChatRequest):
    """Rox 2.1 Turbo - fast and efficient model.

    Prepends the Rox 2.1 Turbo identity as a system message, forwards the
    conversation upstream, and returns the assistant's reply.

    Raises:
        HTTPException(500): the upstream call failed.
        HTTPException(502): the upstream response had an unexpected shape.
    """
    messages = [{"role": "system", "content": ROX_TURBO_IDENTITY}]
    messages.extend([m.dict() for m in req.messages])

    try:
        completion = client.chat.completions.create(
            model=ROX_TURBO_MODEL,
            messages=messages,
            # 1.0 is the request-schema default; treat it as "unset" and apply
            # this model's tuned defaults instead.
            temperature=req.temperature if req.temperature != 1.0 else 0.7,
            top_p=req.top_p if req.top_p != 1.0 else 0.9,
            max_tokens=req.max_tokens,
            stream=False
        )
    except Exception as e:
        logger.exception("Error while calling Rox 2.1 Turbo for /turbo")

        # Keep the raw error server-side only: echoing str(e) in the HTTP
        # detail (as this handler previously did) leaks upstream internals to
        # clients and is inconsistent with every other endpoint here.
        logger.error(f"Turbo model error details: {str(e)}")
        raise HTTPException(
            status_code=500,
            detail="Internal server error while calling Rox 2.1 Turbo.",
        ) from e

    try:
        content = completion.choices[0].message.content or ""
    except Exception:
        logger.exception("Unexpected response format from Rox 2.1 Turbo for /turbo")
        raise HTTPException(
            status_code=502,
            detail="Bad response from upstream model provider.",
        )

    return ChatResponse(content=content)
|
|
|
|
@app.post("/coder", response_model=ChatResponse)
def coder(req: ChatRequest):
    """Rox 3.5 Coder - specialized coding model with thinking capability.

    Prepends the Rox 3.5 Coder identity as a system message, forwards the
    conversation upstream with thinking mode enabled, and returns the
    assistant's reply.

    Raises:
        HTTPException(500): the upstream call failed.
        HTTPException(502): the upstream response had an unexpected shape.
    """
    messages = [{"role": "system", "content": ROX_CODER_IDENTITY}]
    messages.extend([m.dict() for m in req.messages])

    # Clamp the completion budget to this model's 16k cap. Guard against an
    # explicit `"max_tokens": null` in the request body, which would otherwise
    # make min(None, 16384) raise TypeError and surface as a misleading 500;
    # fall back to the schema default (4096).
    max_tokens = min(req.max_tokens if req.max_tokens is not None else 4096, 16384)

    try:
        completion = client.chat.completions.create(
            model=ROX_CODER_MODEL,
            messages=messages,
            # 1.0 is the request-schema default; treat it as "unset" and apply
            # this model's tuned defaults instead.
            temperature=req.temperature if req.temperature != 1.0 else 0.6,
            top_p=req.top_p if req.top_p != 1.0 else 0.95,
            max_tokens=max_tokens,
            stream=False,
            extra_body={
                "top_k": 20,
                "presence_penalty": 0,
                "repetition_penalty": 1,
                "chat_template_kwargs": {
                    "enable_thinking": True
                }
            }
        )
    except Exception as e:
        logger.exception("Error while calling Rox 3.5 Coder for /coder")
        raise HTTPException(
            status_code=500,
            detail="Internal server error while calling Rox 3.5 Coder.",
        ) from e

    try:
        content = completion.choices[0].message.content or ""
    except Exception:
        logger.exception("Unexpected response format from Rox 3.5 Coder for /coder")
        raise HTTPException(
            status_code=502,
            detail="Bad response from upstream model provider.",
        )

    return ChatResponse(content=content)
|
|
|
|
@app.post("/turbo45", response_model=ChatResponse)
def turbo45(req: ChatRequest):
    """Rox 4.5 Turbo - advanced reasoning with speed.

    Prepends the Rox 4.5 Turbo identity as a system message, forwards the
    conversation upstream with thinking mode enabled, and returns the
    assistant's reply.

    Raises:
        HTTPException(500): the upstream call failed.
        HTTPException(502): the upstream response had an unexpected shape.
    """
    messages = [{"role": "system", "content": ROX_TURBO_45_IDENTITY}]
    messages.extend([m.dict() for m in req.messages])

    # Clamp the completion budget to this model's 8k cap. Guard against an
    # explicit `"max_tokens": null` in the request body, which would otherwise
    # make min(None, 8192) raise TypeError and surface as a misleading 500;
    # fall back to the schema default (4096).
    max_tokens = min(req.max_tokens if req.max_tokens is not None else 4096, 8192)

    try:
        completion = client.chat.completions.create(
            model=ROX_TURBO_45_MODEL,
            messages=messages,
            # 1.0 is the request-schema default; treat it as "unset" and apply
            # this model's tuned defaults instead.
            temperature=req.temperature if req.temperature != 1.0 else 0.2,
            top_p=req.top_p if req.top_p != 1.0 else 0.7,
            max_tokens=max_tokens,
            stream=False,
            extra_body={
                "chat_template_kwargs": {
                    "thinking": True
                }
            }
        )
    except Exception as e:
        logger.exception("Error while calling Rox 4.5 Turbo for /turbo45")
        raise HTTPException(
            status_code=500,
            detail="Internal server error while calling Rox 4.5 Turbo.",
        ) from e

    try:
        content = completion.choices[0].message.content or ""
    except Exception:
        logger.exception("Unexpected response format from Rox 4.5 Turbo for /turbo45")
        raise HTTPException(
            status_code=502,
            detail="Bad response from upstream model provider.",
        )

    return ChatResponse(content=content)
|
|
|
|
@app.post("/ultra", response_model=ChatResponse)
def ultra(req: ChatRequest):
    """Rox 5 Ultra - most advanced model with superior reasoning.

    Prepends the Rox 5 Ultra identity as a system message, forwards the
    conversation upstream with thinking mode enabled, and returns the
    assistant's reply.

    Raises:
        HTTPException(500): the upstream call failed.
        HTTPException(502): the upstream response had an unexpected shape.
    """
    messages = [{"role": "system", "content": ROX_ULTRA_IDENTITY}]
    messages.extend([m.dict() for m in req.messages])

    # Clamp the completion budget to this model's 8k cap. Guard against an
    # explicit `"max_tokens": null` in the request body, which would otherwise
    # make min(None, 8192) raise TypeError and surface as a misleading 500;
    # fall back to the schema default (4096).
    max_tokens = min(req.max_tokens if req.max_tokens is not None else 4096, 8192)

    try:
        completion = client.chat.completions.create(
            model=ROX_ULTRA_MODEL,
            messages=messages,
            temperature=req.temperature,
            # 1.0 is the request-schema default; treat it as "unset" and apply
            # this model's tuned default instead.
            top_p=req.top_p if req.top_p != 1.0 else 0.95,
            max_tokens=max_tokens,
            stream=False,
            extra_body={
                "chat_template_kwargs": {
                    "thinking": True
                }
            }
        )
    except Exception as e:
        logger.exception("Error while calling Rox 5 Ultra for /ultra")
        raise HTTPException(
            status_code=500,
            detail="Internal server error while calling Rox 5 Ultra.",
        ) from e

    try:
        content = completion.choices[0].message.content or ""
    except Exception:
        logger.exception("Unexpected response format from Rox 5 Ultra for /ultra")
        raise HTTPException(
            status_code=502,
            detail="Bad response from upstream model provider.",
        )

    return ChatResponse(content=content)
|
|
|
|
@app.post("/dyno", response_model=ChatResponse)
def dyno(req: ChatRequest):
    """Rox 6 Dyno - extended context with dynamic thinking.

    Prepends the Rox 6 Dyno identity as a system message, forwards the
    conversation upstream with thinking mode enabled, and returns the
    assistant's reply.

    Raises:
        HTTPException(500): the upstream call failed.
        HTTPException(502): the upstream response had an unexpected shape.
    """
    messages = [{"role": "system", "content": ROX_DYNO_IDENTITY}]
    messages.extend([m.dict() for m in req.messages])

    # Clamp the completion budget to this model's 16k cap. Guard against an
    # explicit `"max_tokens": null` in the request body, which would otherwise
    # make min(None, 16384) raise TypeError and surface as a misleading 500;
    # fall back to the schema default (4096).
    max_tokens = min(req.max_tokens if req.max_tokens is not None else 4096, 16384)

    try:
        completion = client.chat.completions.create(
            model=ROX_DYNO_MODEL,
            messages=messages,
            temperature=req.temperature,
            top_p=req.top_p,
            max_tokens=max_tokens,
            stream=False,
            extra_body={
                "chat_template_kwargs": {
                    "thinking": True
                }
            }
        )
    except Exception as e:
        logger.exception("Error while calling Rox 6 Dyno for /dyno")
        raise HTTPException(
            status_code=500,
            detail="Internal server error while calling Rox 6 Dyno.",
        ) from e

    try:
        content = completion.choices[0].message.content or ""
    except Exception:
        logger.exception("Unexpected response format from Rox 6 Dyno for /dyno")
        raise HTTPException(
            status_code=502,
            detail="Bad response from upstream model provider.",
        )

    return ChatResponse(content=content)
|
|
|
|
@app.post("/coder7", response_model=ChatResponse)
def coder7(req: ChatRequest):
    """Rox 7 Coder - most advanced coding specialist.

    Prepends the Rox 7 Coder identity as a system message, forwards the
    conversation upstream with thinking enabled (and thinking output kept in
    the reply), and returns the assistant's reply.

    Raises:
        HTTPException(500): the upstream call failed.
        HTTPException(502): the upstream response had an unexpected shape.
    """
    messages = [{"role": "system", "content": ROX_CODER_7_IDENTITY}]
    messages.extend([m.dict() for m in req.messages])

    # Clamp the completion budget to this model's 16k cap. Guard against an
    # explicit `"max_tokens": null` in the request body, which would otherwise
    # make min(None, 16384) raise TypeError and surface as a misleading 500;
    # fall back to the schema default (4096).
    max_tokens = min(req.max_tokens if req.max_tokens is not None else 4096, 16384)

    try:
        completion = client.chat.completions.create(
            model=ROX_CODER_7_MODEL,
            messages=messages,
            temperature=req.temperature,
            top_p=req.top_p,
            max_tokens=max_tokens,
            stream=False,
            extra_body={
                "chat_template_kwargs": {
                    "enable_thinking": True,
                    "clear_thinking": False
                }
            }
        )
    except Exception as e:
        logger.exception("Error while calling Rox 7 Coder for /coder7")
        raise HTTPException(
            status_code=500,
            detail="Internal server error while calling Rox 7 Coder.",
        ) from e

    try:
        content = completion.choices[0].message.content or ""
    except Exception:
        logger.exception("Unexpected response format from Rox 7 Coder for /coder7")
        raise HTTPException(
            status_code=502,
            detail="Bad response from upstream model provider.",
        )

    return ChatResponse(content=content)
|
|
|
|
@app.post("/vision", response_model=ChatResponse)
def vision(req: ChatRequest):
    """Rox Vision Max - optimized for visual understanding.

    Prepends the Rox Vision Max identity as a system message, forwards the
    conversation upstream, and returns the assistant's reply.

    Raises:
        HTTPException(500): the upstream call failed.
        HTTPException(502): the upstream response had an unexpected shape.
    """
    messages = [{"role": "system", "content": ROX_VISION_IDENTITY}]
    messages.extend([m.dict() for m in req.messages])

    # Clamp the completion budget to this endpoint's 512-token cap. Guard
    # against an explicit `"max_tokens": null` in the request body, which
    # would otherwise make min(None, 512) raise TypeError and surface as a
    # misleading 500; fall back to the schema default (4096) before clamping.
    max_tokens = min(req.max_tokens if req.max_tokens is not None else 4096, 512)

    try:
        completion = client.chat.completions.create(
            model=ROX_VISION_MODEL,
            messages=messages,
            # 1.0 is the request-schema default; treat it as "unset" and apply
            # this model's tuned defaults instead.
            temperature=req.temperature if req.temperature != 1.0 else 0.2,
            top_p=req.top_p if req.top_p != 1.0 else 0.7,
            max_tokens=max_tokens,
            stream=False
        )
    except Exception as e:
        logger.exception("Error while calling Rox Vision Max for /vision")
        raise HTTPException(
            status_code=500,
            detail="Internal server error while calling Rox Vision Max.",
        ) from e

    try:
        content = completion.choices[0].message.content or ""
    except Exception:
        logger.exception("Unexpected response format from Rox Vision Max for /vision")
        raise HTTPException(
            status_code=502,
            detail="Bad response from upstream model provider.",
        )

    return ChatResponse(content=content)
|
|
|
|
@app.post("/hf/generate", response_model=List[HFResponseItem])
def hf_generate(req: HFRequest):
    """Hugging Face-style text-generation endpoint backed by Rox Core.

    Accepts ``{"inputs": "...", "parameters": {...}}`` and responds with
    ``[{"generated_text": "..."}]``, mirroring the HF Inference API shape.
    Supported parameters: temperature, top_p, max_new_tokens.
    """
    params = req.parameters if req.parameters is not None else HFParameters()

    # Resolve per-field defaults for anything the caller left unset.
    temperature = 1.0 if params.temperature is None else params.temperature
    top_p = 0.95 if params.top_p is None else params.top_p
    max_tokens = 8192 if params.max_new_tokens is None else params.max_new_tokens

    messages = [
        {"role": "system", "content": ROX_CORE_IDENTITY},
        {"role": "user", "content": req.inputs},
    ]

    try:
        completion = client.chat.completions.create(
            model=ROX_CORE_MODEL,
            messages=messages,
            temperature=temperature,
            top_p=top_p,
            max_tokens=max_tokens,
            stream=False,
        )
    except Exception as exc:
        logger.exception("Error while calling Rox Core for /hf/generate")
        raise HTTPException(
            status_code=500,
            detail="Internal server error while calling Rox Core.",
        ) from exc

    try:
        text = completion.choices[0].message.content or ""
    except Exception:
        logger.exception("Unexpected response format from Rox Core for /hf/generate")
        raise HTTPException(
            status_code=502,
            detail="Bad response from upstream model provider.",
        )

    return [HFResponseItem(generated_text=text)]
|
|
|
|
if __name__ == "__main__":
    import uvicorn

    # Default to 7860 (the port Hugging Face Spaces exposes); PORT overrides.
    listen_port = int(os.getenv("PORT", 7860))
    uvicorn.run("server:app", host="0.0.0.0", port=listen_port, reload=False)
|
|
|
|