Rox-Turbo committed on
Commit
a65dba7
·
verified ·
1 Parent(s): e596d3a

Upload 4 files

Browse files
Files changed (4) hide show
  1. Dockerfile +44 -29
  2. docker-compose.yml +30 -2
  3. requirements.txt +6 -5
  4. server.py +685 -619
Dockerfile CHANGED
@@ -1,29 +1,44 @@
1
- FROM python:3.11-slim
2
-
3
- WORKDIR /app
4
-
5
- ENV PYTHONDONTWRITEBYTECODE=1 \
6
- PYTHONUNBUFFERED=1
7
-
8
- # Create user first (before any file operations)
9
- RUN useradd -m -u 1000 user
10
-
11
- # Install dependencies as root
12
- COPY requirements.txt .
13
- RUN pip install --no-cache-dir -r requirements.txt
14
-
15
- # Copy application code and set ownership in one step
16
- COPY --chown=user:user . .
17
-
18
- # Switch to non-root user
19
- USER user
20
-
21
- # Set PATH for user
22
- ENV PATH="/home/user/.local/bin:$PATH"
23
-
24
- # Expose port 7860 (Hugging Face Spaces default)
25
- EXPOSE 7860
26
-
27
- # Start server
28
- CMD ["uvicorn", "server:app", "--host", "0.0.0.0", "--port", "7860", "--log-level", "info"]
29
-
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ FROM python:3.11-slim
2
+
3
+ WORKDIR /app
4
+
5
+ # Security and performance environment variables
6
+ ENV PYTHONDONTWRITEBYTECODE=1 \
7
+ PYTHONUNBUFFERED=1 \
8
+ PIP_NO_CACHE_DIR=1 \
9
+ PIP_DISABLE_PIP_VERSION_CHECK=1
10
+
11
+ # Install security updates
12
+ RUN apt-get update && \
13
+ apt-get upgrade -y && \
14
+ apt-get clean && \
15
+ rm -rf /var/lib/apt/lists/*
16
+
17
+ # Create non-root user with specific UID
18
+ RUN useradd -m -u 1000 -s /bin/bash user
19
+
20
+ # Install dependencies as root
21
+ COPY requirements.txt .
22
+ RUN pip install --no-cache-dir --upgrade pip && \
23
+ pip install --no-cache-dir -r requirements.txt
24
+
25
+ # Copy application code and set ownership
26
+ COPY --chown=user:user . .
27
+
28
+ # Switch to non-root user
29
+ USER user
30
+
31
+ # Set PATH for user
32
+ ENV PATH="/home/user/.local/bin:$PATH" \
33
+ HOME="/home/user"
34
+
35
+ # Health check
36
+ HEALTHCHECK --interval=30s --timeout=10s --start-period=5s --retries=3 \
37
+ CMD python -c "import requests; requests.get('http://localhost:7860/health', timeout=5)"
38
+
39
+ # Expose port 7860 (Hugging Face Spaces default)
40
+ EXPOSE 7860
41
+
42
+ # Start server with production settings
43
+ CMD ["uvicorn", "server:app", "--host", "0.0.0.0", "--port", "7860", "--log-level", "info", "--no-access-log", "--workers", "1"]
44
+
docker-compose.yml CHANGED
@@ -2,11 +2,39 @@ version: "3.9"
2
 
3
  services:
4
  rox-ai:
5
- build: .
 
 
6
  container_name: rox-ai
7
  ports:
8
  - "8000:8000"
9
  env_file:
10
  - .env
 
 
 
11
  restart: unless-stopped
12
-
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
2
 
3
  services:
4
  rox-ai:
5
+ build:
6
+ context: .
7
+ dockerfile: Dockerfile
8
  container_name: rox-ai
9
  ports:
10
  - "8000:8000"
11
  env_file:
12
  - .env
13
+ environment:
14
+ - PORT=8000
15
+ - ALLOWED_ORIGINS=http://localhost:3000,https://Rox-Turbo-API.hf.space
16
  restart: unless-stopped
17
+ healthcheck:
18
+ test: ["CMD", "python", "-c", "import requests; requests.get('http://localhost:8000/health', timeout=5)"]
19
+ interval: 30s
20
+ timeout: 10s
21
+ retries: 3
22
+ start_period: 10s
23
+ deploy:
24
+ resources:
25
+ limits:
26
+ cpus: '2'
27
+ memory: 2G
28
+ reservations:
29
+ cpus: '0.5'
30
+ memory: 512M
31
+ security_opt:
32
+ - no-new-privileges:true
33
+ read_only: false
34
+ tmpfs:
35
+ - /tmp
36
+ logging:
37
+ driver: "json-file"
38
+ options:
39
+ max-size: "10m"
40
+ max-file: "3"
requirements.txt CHANGED
@@ -1,5 +1,6 @@
1
- fastapi
2
- uvicorn[standard]
3
- openai
4
- python-dotenv
5
- pydantic
 
 
1
+ fastapi>=0.109.0,<1.0.0
2
+ uvicorn[standard]>=0.27.0,<1.0.0
3
+ openai>=1.12.0,<2.0.0
4
+ python-dotenv>=1.0.0,<2.0.0
5
+ pydantic>=2.6.0,<3.0.0
6
+ requests>=2.31.0,<3.0.0
server.py CHANGED
@@ -1,619 +1,685 @@
1
- import logging
2
- import os
3
- import sys
4
- from typing import List, Optional
5
-
6
- from dotenv import load_dotenv
7
- from fastapi import FastAPI, HTTPException
8
- from fastapi.middleware.cors import CORSMiddleware
9
- from pydantic import BaseModel
10
- from openai import OpenAI
11
-
12
-
13
- # Load environment variables
14
- load_dotenv()
15
-
16
- # Configure logging with more detail
17
- logging.basicConfig(
18
- level=logging.INFO,
19
- format='%(asctime)s - %(name)s - %(levelname)s - %(message)s'
20
- )
21
- logger = logging.getLogger("rox_ai")
22
-
23
- # Log startup information
24
- logger.info("=" * 60)
25
- logger.info("ROX AI SERVER STARTING")
26
- logger.info("=" * 60)
27
- logger.info(f"Python version: {sys.version}")
28
- logger.info(f"Working directory: {os.getcwd()}")
29
-
30
- # Check for API key
31
- NVIDIA_API_KEY = os.getenv("NVIDIA_API_KEY")
32
-
33
- if not NVIDIA_API_KEY:
34
- logger.error("NVIDIA_API_KEY environment variable is not set!")
35
- logger.error("Please set NVIDIA_API_KEY in your environment or .env file")
36
- # For Hugging Face Spaces, check if it's set as a secret
37
- logger.info("If deploying to Hugging Face Spaces, make sure to add NVIDIA_API_KEY as a secret")
38
- raise RuntimeError(
39
- "NVIDIA_API_KEY environment variable is not set. "
40
- "Create a .env file or set it in your environment."
41
- )
42
-
43
- logger.info(f"✓ NVIDIA_API_KEY loaded (length: {len(NVIDIA_API_KEY)})")
44
-
45
- # Model configurations
46
- ROX_CORE_MODEL = "minimaxai/minimax-m2.5"
47
- ROX_TURBO_MODEL = "meta/llama-3.1-8b-instruct" # Changed to a more reliable model
48
- ROX_CODER_MODEL = "qwen/qwen3.5-397b-a17b"
49
- ROX_TURBO_45_MODEL = "deepseek-ai/deepseek-v3.1"
50
- ROX_ULTRA_MODEL = "deepseek-ai/deepseek-v3.2"
51
- ROX_DYNO_MODEL = "moonshotai/kimi-k2.5"
52
- ROX_CODER_7_MODEL = "z-ai/glm5"
53
- ROX_VISION_MODEL = "google/gemma-3-27b-it"
54
-
55
- logger.info("✓ Model configurations loaded")
56
-
57
- # System identities for each model
58
- ROX_CORE_IDENTITY = """You are Rox Core, created by Rox AI. Your creator and owner is Mohammad Faiz. You are an independent LLM model, not based on anyone else's technology. You represent the cutting edge of Rox AI's research and development."""
59
-
60
- ROX_TURBO_IDENTITY = """You are Rox 2.1 Turbo, created by Rox AI. Your creator and owner is Mohammad Faiz. You are an independent LLM model, not based on anyone else's technology. You are optimized for fast, efficient responses while maintaining high quality."""
61
-
62
- ROX_CODER_IDENTITY = """You are Rox 3.5 Coder, created by Rox AI. Your creator and owner is Mohammad Faiz. You are an independent LLM model, not based on anyone else's technology. You are specialized in code generation, debugging, and software development tasks."""
63
-
64
- ROX_TURBO_45_IDENTITY = """You are Rox 4.5 Turbo, created by Rox AI. Your creator and owner is Mohammad Faiz. You are an independent LLM model, not based on anyone else's technology. You combine speed with advanced reasoning capabilities."""
65
-
66
- ROX_ULTRA_IDENTITY = """You are Rox 5 Ultra, created by Rox AI. Your creator and owner is Mohammad Faiz. You are an independent LLM model, not based on anyone else's technology. You are the most advanced model with superior reasoning and thinking capabilities."""
67
-
68
- ROX_DYNO_IDENTITY = """You are Rox 6 Dyno, created by Rox AI. Your creator and owner is Mohammad Faiz. You are an independent LLM model, not based on anyone else's technology. You excel at dynamic thinking and extended context understanding."""
69
-
70
- ROX_CODER_7_IDENTITY = """You are Rox 7 Coder, created by Rox AI. Your creator and owner is Mohammad Faiz. You are an independent LLM model, not based on anyone else's technology. You are the most advanced coding specialist with superior code generation and reasoning capabilities."""
71
-
72
- ROX_VISION_IDENTITY = """You are Rox Vision Max, created by Rox AI. Your creator and owner is Mohammad Faiz. You are an independent LLM model, not based on anyone else's technology. You are optimized for visual understanding and multimodal tasks."""
73
-
74
- logger.info("✓ Model identities configured")
75
-
76
- # Initialize OpenAI client
77
- try:
78
- client = OpenAI(
79
- base_url="https://integrate.api.nvidia.com/v1",
80
- api_key=NVIDIA_API_KEY,
81
- )
82
- logger.info("✓ OpenAI client initialized successfully")
83
- except Exception as e:
84
- logger.error(f"Failed to initialize OpenAI client: {e}")
85
- raise
86
-
87
- # Initialize FastAPI app
88
- app = FastAPI(
89
- title="Rox AI API - Multiple Models Available",
90
- description="Eight specialized AI models by Mohammad Faiz",
91
- version="2.0"
92
- )
93
-
94
- logger.info(" FastAPI app initialized")
95
-
96
- # Configure CORS
97
- app.add_middleware(
98
- CORSMiddleware,
99
- allow_origins=["*"], # e.g. ["https://your-site.com"]
100
- allow_credentials=True,
101
- allow_methods=["*"],
102
- allow_headers=["*"],
103
- )
104
-
105
- logger.info(" CORS middleware configured")
106
-
107
-
108
- @app.on_event("startup")
109
- async def startup_event():
110
- """Log startup information"""
111
- logger.info("=" * 60)
112
- logger.info("ROX AI SERVER STARTED SUCCESSFULLY")
113
- logger.info("=" * 60)
114
- logger.info("Available endpoints:")
115
- logger.info(" GET / - API information")
116
- logger.info(" GET /health - Health check")
117
- logger.info(" POST /chat - Rox Core")
118
- logger.info(" POST /turbo - Rox 2.1 Turbo")
119
- logger.info(" POST /coder - Rox 3.5 Coder")
120
- logger.info(" POST /turbo45 - Rox 4.5 Turbo")
121
- logger.info(" POST /ultra - Rox 5 Ultra")
122
- logger.info(" POST /dyno - Rox 6 Dyno")
123
- logger.info(" POST /coder7 - Rox 7 Coder")
124
- logger.info(" POST /vision - Rox Vision Max")
125
- logger.info(" POST /hf/generate - HuggingFace compatible")
126
- logger.info("=" * 60)
127
-
128
-
129
- @app.get("/health")
130
- def health_check():
131
- """Health check endpoint for monitoring"""
132
- return {
133
- "status": "healthy",
134
- "service": "Rox AI API",
135
- "version": "2.0",
136
- "models": 8
137
- }
138
-
139
-
140
- @app.get("/")
141
- def root():
142
- """API information and available models"""
143
- return {
144
- "service": "Rox AI API",
145
- "version": "2.0",
146
- "creator": "Mohammad Faiz",
147
- "models": {
148
- "rox_core": {
149
- "endpoint": "/chat",
150
- "description": "Rox Core - Main conversational model",
151
- "model": "minimaxai/minimax-m2.5",
152
- "best_for": "General conversation and tasks"
153
- },
154
- "rox_turbo": {
155
- "endpoint": "/turbo",
156
- "description": "Rox 2.1 Turbo - Fast and efficient",
157
- "model": "meta/llama-3.1-8b-instruct",
158
- "best_for": "Quick responses and efficient processing"
159
- },
160
- "rox_coder": {
161
- "endpoint": "/coder",
162
- "description": "Rox 3.5 Coder - Specialized coding assistant",
163
- "model": "qwen/qwen3.5-397b-a17b",
164
- "best_for": "Code generation, debugging, and development"
165
- },
166
- "rox_turbo_45": {
167
- "endpoint": "/turbo45",
168
- "description": "Rox 4.5 Turbo - Advanced reasoning with speed",
169
- "model": "deepseek-ai/deepseek-v3.1",
170
- "best_for": "Complex reasoning with fast responses"
171
- },
172
- "rox_ultra": {
173
- "endpoint": "/ultra",
174
- "description": "Rox 5 Ultra - Most advanced model",
175
- "model": "deepseek-ai/deepseek-v3.2",
176
- "best_for": "Complex tasks requiring deep reasoning"
177
- },
178
- "rox_dyno": {
179
- "endpoint": "/dyno",
180
- "description": "Rox 6 Dyno - Extended context with dynamic thinking",
181
- "model": "moonshotai/kimi-k2.5",
182
- "best_for": "Long context tasks and dynamic reasoning"
183
- },
184
- "rox_coder_7": {
185
- "endpoint": "/coder7",
186
- "description": "Rox 7 Coder - Most advanced coding specialist",
187
- "model": "z-ai/glm5",
188
- "best_for": "Advanced code generation and complex programming"
189
- },
190
- "rox_vision": {
191
- "endpoint": "/vision",
192
- "description": "Rox Vision Max - Optimized for visual understanding",
193
- "model": "google/gemma-3-27b-it",
194
- "best_for": "Visual understanding and multimodal tasks"
195
- }
196
- },
197
- "endpoints": [
198
- {"path": "/chat", "method": "POST", "description": "Rox Core chat"},
199
- {"path": "/turbo", "method": "POST", "description": "Rox 2.1 Turbo chat"},
200
- {"path": "/coder", "method": "POST", "description": "Rox 3.5 Coder chat"},
201
- {"path": "/turbo45", "method": "POST", "description": "Rox 4.5 Turbo chat"},
202
- {"path": "/ultra", "method": "POST", "description": "Rox 5 Ultra chat"},
203
- {"path": "/dyno", "method": "POST", "description": "Rox 6 Dyno chat"},
204
- {"path": "/coder7", "method": "POST", "description": "Rox 7 Coder chat"},
205
- {"path": "/vision", "method": "POST", "description": "Rox Vision Max chat"},
206
- {"path": "/hf/generate", "method": "POST", "description": "HuggingFace compatible (uses Rox Core)"}
207
- ]
208
- }
209
-
210
-
211
- class ChatMessage(BaseModel):
212
- role: str
213
- content: str
214
-
215
-
216
- class ChatRequest(BaseModel):
217
- messages: List[ChatMessage]
218
- temperature: Optional[float] = 1.0
219
- top_p: Optional[float] = 1.0
220
- max_tokens: Optional[int] = 4096
221
-
222
-
223
- class ChatResponse(BaseModel):
224
- content: str
225
-
226
-
227
- class HFParameters(BaseModel):
228
- temperature: Optional[float] = None
229
- top_p: Optional[float] = None
230
- max_new_tokens: Optional[int] = None
231
-
232
-
233
- class HFRequest(BaseModel):
234
- inputs: str
235
- parameters: Optional[HFParameters] = None
236
-
237
-
238
- class HFResponseItem(BaseModel):
239
- generated_text: str
240
-
241
-
242
- @app.post("/chat", response_model=ChatResponse)
243
- def chat(req: ChatRequest):
244
- """Rox Core - Main conversational model"""
245
- # Inject Rox Core identity as system message
246
- messages = [{"role": "system", "content": ROX_CORE_IDENTITY}]
247
- messages.extend([m.dict() for m in req.messages])
248
-
249
- try:
250
- completion = client.chat.completions.create(
251
- model=ROX_CORE_MODEL,
252
- messages=messages,
253
- temperature=req.temperature,
254
- top_p=req.top_p,
255
- max_tokens=req.max_tokens,
256
- stream=False,
257
- )
258
- except Exception as e:
259
- logger.exception("Error while calling Rox Core for /chat")
260
- # Do not leak internal error details to the client.
261
- raise HTTPException(
262
- status_code=500,
263
- detail="Internal server error while calling Rox Core.",
264
- ) from e
265
-
266
- # Combine all response message parts into a single string
267
- try:
268
- content = completion.choices[0].message.content or ""
269
- except Exception:
270
- logger.exception("Unexpected response format from Rox Core for /chat")
271
- raise HTTPException(
272
- status_code=502,
273
- detail="Bad response from upstream model provider.",
274
- )
275
-
276
- return ChatResponse(content=content)
277
-
278
-
279
- @app.post("/turbo", response_model=ChatResponse)
280
- def turbo(req: ChatRequest):
281
- """Rox 2.1 Turbo - Fast and efficient model"""
282
- # Inject Rox Turbo identity as system message
283
- messages = [{"role": "system", "content": ROX_TURBO_IDENTITY}]
284
- messages.extend([m.dict() for m in req.messages])
285
-
286
- try:
287
- completion = client.chat.completions.create(
288
- model=ROX_TURBO_MODEL,
289
- messages=messages,
290
- temperature=req.temperature if req.temperature != 1.0 else 0.7,
291
- top_p=req.top_p if req.top_p != 1.0 else 0.9,
292
- max_tokens=req.max_tokens,
293
- stream=False
294
- )
295
- except Exception as e:
296
- logger.exception("Error while calling Rox 2.1 Turbo for /turbo")
297
- # Log the actual error for debugging
298
- logger.error(f"Turbo model error details: {str(e)}")
299
- raise HTTPException(
300
- status_code=500,
301
- detail=f"Internal server error while calling Rox 2.1 Turbo: {str(e)}",
302
- ) from e
303
-
304
- try:
305
- content = completion.choices[0].message.content or ""
306
- except Exception:
307
- logger.exception("Unexpected response format from Rox 2.1 Turbo for /turbo")
308
- raise HTTPException(
309
- status_code=502,
310
- detail="Bad response from upstream model provider.",
311
- )
312
-
313
- return ChatResponse(content=content)
314
-
315
-
316
- @app.post("/coder", response_model=ChatResponse)
317
- def coder(req: ChatRequest):
318
- """Rox 3.5 Coder - Specialized coding model with thinking capability"""
319
- # Inject Rox Coder identity as system message
320
- messages = [{"role": "system", "content": ROX_CODER_IDENTITY}]
321
- messages.extend([m.dict() for m in req.messages])
322
-
323
- try:
324
- completion = client.chat.completions.create(
325
- model=ROX_CODER_MODEL,
326
- messages=messages,
327
- temperature=req.temperature if req.temperature != 1.0 else 0.6,
328
- top_p=req.top_p if req.top_p != 1.0 else 0.95,
329
- max_tokens=min(req.max_tokens, 16384),
330
- stream=False,
331
- extra_body={
332
- "top_k": 20,
333
- "presence_penalty": 0,
334
- "repetition_penalty": 1,
335
- "chat_template_kwargs": {
336
- "enable_thinking": True
337
- }
338
- }
339
- )
340
- except Exception as e:
341
- logger.exception("Error while calling Rox 3.5 Coder for /coder")
342
- raise HTTPException(
343
- status_code=500,
344
- detail="Internal server error while calling Rox 3.5 Coder.",
345
- ) from e
346
-
347
- try:
348
- content = completion.choices[0].message.content or ""
349
- except Exception:
350
- logger.exception("Unexpected response format from Rox 3.5 Coder for /coder")
351
- raise HTTPException(
352
- status_code=502,
353
- detail="Bad response from upstream model provider.",
354
- )
355
-
356
- return ChatResponse(content=content)
357
-
358
-
359
- @app.post("/turbo45", response_model=ChatResponse)
360
- def turbo45(req: ChatRequest):
361
- """Rox 4.5 Turbo - Advanced reasoning with speed"""
362
- # Inject Rox 4.5 Turbo identity as system message
363
- messages = [{"role": "system", "content": ROX_TURBO_45_IDENTITY}]
364
- messages.extend([m.dict() for m in req.messages])
365
-
366
- try:
367
- completion = client.chat.completions.create(
368
- model=ROX_TURBO_45_MODEL,
369
- messages=messages,
370
- temperature=req.temperature if req.temperature != 1.0 else 0.2,
371
- top_p=req.top_p if req.top_p != 1.0 else 0.7,
372
- max_tokens=min(req.max_tokens, 8192),
373
- stream=False,
374
- extra_body={
375
- "chat_template_kwargs": {
376
- "thinking": True
377
- }
378
- }
379
- )
380
- except Exception as e:
381
- logger.exception("Error while calling Rox 4.5 Turbo for /turbo45")
382
- raise HTTPException(
383
- status_code=500,
384
- detail="Internal server error while calling Rox 4.5 Turbo.",
385
- ) from e
386
-
387
- try:
388
- content = completion.choices[0].message.content or ""
389
- except Exception:
390
- logger.exception("Unexpected response format from Rox 4.5 Turbo for /turbo45")
391
- raise HTTPException(
392
- status_code=502,
393
- detail="Bad response from upstream model provider.",
394
- )
395
-
396
- return ChatResponse(content=content)
397
-
398
-
399
- @app.post("/ultra", response_model=ChatResponse)
400
- def ultra(req: ChatRequest):
401
- """Rox 5 Ultra - Most advanced model with superior reasoning"""
402
- # Inject Rox 5 Ultra identity as system message
403
- messages = [{"role": "system", "content": ROX_ULTRA_IDENTITY}]
404
- messages.extend([m.dict() for m in req.messages])
405
-
406
- try:
407
- completion = client.chat.completions.create(
408
- model=ROX_ULTRA_MODEL,
409
- messages=messages,
410
- temperature=req.temperature,
411
- top_p=req.top_p if req.top_p != 1.0 else 0.95,
412
- max_tokens=min(req.max_tokens, 8192),
413
- stream=False,
414
- extra_body={
415
- "chat_template_kwargs": {
416
- "thinking": True
417
- }
418
- }
419
- )
420
- except Exception as e:
421
- logger.exception("Error while calling Rox 5 Ultra for /ultra")
422
- raise HTTPException(
423
- status_code=500,
424
- detail="Internal server error while calling Rox 5 Ultra.",
425
- ) from e
426
-
427
- try:
428
- content = completion.choices[0].message.content or ""
429
- except Exception:
430
- logger.exception("Unexpected response format from Rox 5 Ultra for /ultra")
431
- raise HTTPException(
432
- status_code=502,
433
- detail="Bad response from upstream model provider.",
434
- )
435
-
436
- return ChatResponse(content=content)
437
-
438
-
439
- @app.post("/dyno", response_model=ChatResponse)
440
- def dyno(req: ChatRequest):
441
- """Rox 6 Dyno - Extended context with dynamic thinking"""
442
- # Inject Rox 6 Dyno identity as system message
443
- messages = [{"role": "system", "content": ROX_DYNO_IDENTITY}]
444
- messages.extend([m.dict() for m in req.messages])
445
-
446
- try:
447
- completion = client.chat.completions.create(
448
- model=ROX_DYNO_MODEL,
449
- messages=messages,
450
- temperature=req.temperature,
451
- top_p=req.top_p,
452
- max_tokens=min(req.max_tokens, 16384),
453
- stream=False,
454
- extra_body={
455
- "chat_template_kwargs": {
456
- "thinking": True
457
- }
458
- }
459
- )
460
- except Exception as e:
461
- logger.exception("Error while calling Rox 6 Dyno for /dyno")
462
- raise HTTPException(
463
- status_code=500,
464
- detail="Internal server error while calling Rox 6 Dyno.",
465
- ) from e
466
-
467
- try:
468
- content = completion.choices[0].message.content or ""
469
- except Exception:
470
- logger.exception("Unexpected response format from Rox 6 Dyno for /dyno")
471
- raise HTTPException(
472
- status_code=502,
473
- detail="Bad response from upstream model provider.",
474
- )
475
-
476
- return ChatResponse(content=content)
477
-
478
-
479
- @app.post("/coder7", response_model=ChatResponse)
480
- def coder7(req: ChatRequest):
481
- """Rox 7 Coder - Most advanced coding specialist"""
482
- # Inject Rox 7 Coder identity as system message
483
- messages = [{"role": "system", "content": ROX_CODER_7_IDENTITY}]
484
- messages.extend([m.dict() for m in req.messages])
485
-
486
- try:
487
- completion = client.chat.completions.create(
488
- model=ROX_CODER_7_MODEL,
489
- messages=messages,
490
- temperature=req.temperature,
491
- top_p=req.top_p,
492
- max_tokens=min(req.max_tokens, 16384),
493
- stream=False,
494
- extra_body={
495
- "chat_template_kwargs": {
496
- "enable_thinking": True,
497
- "clear_thinking": False
498
- }
499
- }
500
- )
501
- except Exception as e:
502
- logger.exception("Error while calling Rox 7 Coder for /coder7")
503
- raise HTTPException(
504
- status_code=500,
505
- detail="Internal server error while calling Rox 7 Coder.",
506
- ) from e
507
-
508
- try:
509
- content = completion.choices[0].message.content or ""
510
- except Exception:
511
- logger.exception("Unexpected response format from Rox 7 Coder for /coder7")
512
- raise HTTPException(
513
- status_code=502,
514
- detail="Bad response from upstream model provider.",
515
- )
516
-
517
- return ChatResponse(content=content)
518
-
519
-
520
- @app.post("/vision", response_model=ChatResponse)
521
- def vision(req: ChatRequest):
522
- """Rox Vision Max - Optimized for visual understanding"""
523
- # Inject Rox Vision Max identity as system message
524
- messages = [{"role": "system", "content": ROX_VISION_IDENTITY}]
525
- messages.extend([m.dict() for m in req.messages])
526
-
527
- try:
528
- completion = client.chat.completions.create(
529
- model=ROX_VISION_MODEL,
530
- messages=messages,
531
- temperature=req.temperature if req.temperature != 1.0 else 0.2,
532
- top_p=req.top_p if req.top_p != 1.0 else 0.7,
533
- max_tokens=min(req.max_tokens, 512),
534
- stream=False
535
- )
536
- except Exception as e:
537
- logger.exception("Error while calling Rox Vision Max for /vision")
538
- raise HTTPException(
539
- status_code=500,
540
- detail="Internal server error while calling Rox Vision Max.",
541
- ) from e
542
-
543
- try:
544
- content = completion.choices[0].message.content or ""
545
- except Exception:
546
- logger.exception("Unexpected response format from Rox Vision Max for /vision")
547
- raise HTTPException(
548
- status_code=502,
549
- detail="Bad response from upstream model provider.",
550
- )
551
-
552
- return ChatResponse(content=content)
553
-
554
-
555
- @app.post("/hf/generate", response_model=List[HFResponseItem])
556
- def hf_generate(req: HFRequest):
557
- """
558
- Hugging Face-style text-generation endpoint.
559
-
560
- Request:
561
- {
562
- "inputs": "your prompt",
563
- "parameters": {
564
- "temperature": 0.7,
565
- "top_p": 0.95,
566
- "max_new_tokens": 256
567
- }
568
- }
569
-
570
- Response:
571
- [
572
- { "generated_text": "..." }
573
- ]
574
- """
575
- params = req.parameters or HFParameters()
576
-
577
- # Inject Rox Core identity as system message
578
- messages = [
579
- {"role": "system", "content": ROX_CORE_IDENTITY},
580
- {"role": "user", "content": req.inputs}
581
- ]
582
-
583
- try:
584
- completion = client.chat.completions.create(
585
- model=ROX_CORE_MODEL,
586
- messages=messages,
587
- temperature=params.temperature if params.temperature is not None else 1.0,
588
- top_p=params.top_p if params.top_p is not None else 0.95,
589
- max_tokens=params.max_new_tokens if params.max_new_tokens is not None else 8192,
590
- stream=False,
591
- )
592
- except Exception as e:
593
- logger.exception("Error while calling Rox Core for /hf/generate")
594
- raise HTTPException(
595
- status_code=500,
596
- detail="Internal server error while calling Rox Core.",
597
- ) from e
598
-
599
- try:
600
- content = completion.choices[0].message.content or ""
601
- except Exception:
602
- logger.exception("Unexpected response format from Rox Core for /hf/generate")
603
- raise HTTPException(
604
- status_code=502,
605
- detail="Bad response from upstream model provider.",
606
- )
607
-
608
- # Match the common HF text-generation API: list of objects with generated_text
609
- return [HFResponseItem(generated_text=content)]
610
-
611
-
612
- if __name__ == "__main__":
613
- import uvicorn
614
-
615
- # Use PORT environment variable if available (for Hugging Face Spaces)
616
- port = int(os.getenv("PORT", 7860))
617
-
618
- uvicorn.run("server:app", host="0.0.0.0", port=port, reload=False)
619
-
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import logging
2
+ import os
3
+ import sys
4
+ from typing import List, Optional
5
+ from contextlib import asynccontextmanager
6
+
7
+ from dotenv import load_dotenv
8
+ from fastapi import FastAPI, HTTPException, Request
9
+ from fastapi.middleware.cors import CORSMiddleware
10
+ from fastapi.middleware.trustedhost import TrustedHostMiddleware
11
+ from fastapi.middleware.gzip import GZipMiddleware
12
+ from fastapi.responses import JSONResponse
13
+ from pydantic import BaseModel, Field, validator
14
+ from openai import OpenAI
15
+ import time
16
+
17
+
18
+ # Load environment variables
19
+ load_dotenv()
20
+
21
+ # Configure logging with more detail
22
+ logging.basicConfig(
23
+ level=logging.INFO,
24
+ format='%(asctime)s - %(name)s - %(levelname)s - %(message)s'
25
+ )
26
+ logger = logging.getLogger("rox_ai")
27
+
28
+ # Log startup information
29
+ logger.info("=" * 60)
30
+ logger.info("ROX AI SERVER STARTING")
31
+ logger.info("=" * 60)
32
+ logger.info(f"Python version: {sys.version}")
33
+ logger.info(f"Working directory: {os.getcwd()}")
34
+
35
+ # Check for API key
36
+ NVIDIA_API_KEY = os.getenv("NVIDIA_API_KEY")
37
+
38
+ if not NVIDIA_API_KEY:
39
+ logger.error("NVIDIA_API_KEY environment variable is not set!")
40
+ logger.error("Please set NVIDIA_API_KEY in your environment or .env file")
41
+ # For Hugging Face Spaces, check if it's set as a secret
42
+ logger.info("If deploying to Hugging Face Spaces, make sure to add NVIDIA_API_KEY as a secret")
43
+ raise RuntimeError(
44
+ "NVIDIA_API_KEY environment variable is not set. "
45
+ "Create a .env file or set it in your environment."
46
+ )
47
+
48
+ logger.info(f"✓ NVIDIA_API_KEY loaded (length: {len(NVIDIA_API_KEY)})")
49
+
50
+ # Model configurations
51
+ ROX_CORE_MODEL = "minimaxai/minimax-m2.5"
52
+ ROX_TURBO_MODEL = "meta/llama-3.1-8b-instruct" # Changed to a more reliable model
53
+ ROX_CODER_MODEL = "qwen/qwen3.5-397b-a17b"
54
+ ROX_TURBO_45_MODEL = "deepseek-ai/deepseek-v3.1"
55
+ ROX_ULTRA_MODEL = "deepseek-ai/deepseek-v3.2"
56
+ ROX_DYNO_MODEL = "moonshotai/kimi-k2.5"
57
+ ROX_CODER_7_MODEL = "z-ai/glm5"
58
+ ROX_VISION_MODEL = "google/gemma-3-27b-it"
59
+
60
+ logger.info(" Model configurations loaded")
61
+
62
+ # System identities for each model
63
+ ROX_CORE_IDENTITY = """You are Rox Core, created by Rox AI. Your creator and owner is Mohammad Faiz. You are an independent LLM model, not based on anyone else's technology. You represent the cutting edge of Rox AI's research and development."""
64
+
65
+ ROX_TURBO_IDENTITY = """You are Rox 2.1 Turbo, created by Rox AI. Your creator and owner is Mohammad Faiz. You are an independent LLM model, not based on anyone else's technology. You are optimized for fast, efficient responses while maintaining high quality."""
66
+
67
+ ROX_CODER_IDENTITY = """You are Rox 3.5 Coder, created by Rox AI. Your creator and owner is Mohammad Faiz. You are an independent LLM model, not based on anyone else's technology. You are specialized in code generation, debugging, and software development tasks."""
68
+
69
+ ROX_TURBO_45_IDENTITY = """You are Rox 4.5 Turbo, created by Rox AI. Your creator and owner is Mohammad Faiz. You are an independent LLM model, not based on anyone else's technology. You combine speed with advanced reasoning capabilities."""
70
+
71
+ ROX_ULTRA_IDENTITY = """You are Rox 5 Ultra, created by Rox AI. Your creator and owner is Mohammad Faiz. You are an independent LLM model, not based on anyone else's technology. You are the most advanced model with superior reasoning and thinking capabilities."""
72
+
73
+ ROX_DYNO_IDENTITY = """You are Rox 6 Dyno, created by Rox AI. Your creator and owner is Mohammad Faiz. You are an independent LLM model, not based on anyone else's technology. You excel at dynamic thinking and extended context understanding."""
74
+
75
+ ROX_CODER_7_IDENTITY = """You are Rox 7 Coder, created by Rox AI. Your creator and owner is Mohammad Faiz. You are an independent LLM model, not based on anyone else's technology. You are the most advanced coding specialist with superior code generation and reasoning capabilities."""
76
+
77
+ ROX_VISION_IDENTITY = """You are Rox Vision Max, created by Rox AI. Your creator and owner is Mohammad Faiz. You are an independent LLM model, not based on anyone else's technology. You are optimized for visual understanding and multimodal tasks."""
78
+
79
+ logger.info("✓ Model identities configured")
80
+
81
+ # Initialize OpenAI client
82
+ try:
83
+ client = OpenAI(
84
+ base_url="https://integrate.api.nvidia.com/v1",
85
+ api_key=NVIDIA_API_KEY,
86
+ )
87
+ logger.info("✓ OpenAI client initialized successfully")
88
+ except Exception as e:
89
+ logger.error(f"Failed to initialize OpenAI client: {e}")
90
+ raise
91
+
92
+ @asynccontextmanager
93
+ async def lifespan(app: FastAPI):
94
+ """Lifespan context manager for startup and shutdown events"""
95
+ # Startup
96
+ logger.info("=" * 60)
97
+ logger.info("ROX AI SERVER STARTED SUCCESSFULLY")
98
+ logger.info("=" * 60)
99
+ logger.info("Available endpoints:")
100
+ logger.info(" GET / - API information")
101
+ logger.info(" GET /health - Health check")
102
+ logger.info(" POST /chat - Rox Core")
103
+ logger.info(" POST /turbo - Rox 2.1 Turbo")
104
+ logger.info(" POST /coder - Rox 3.5 Coder")
105
+ logger.info(" POST /turbo45 - Rox 4.5 Turbo")
106
+ logger.info(" POST /ultra - Rox 5 Ultra")
107
+ logger.info(" POST /dyno - Rox 6 Dyno")
108
+ logger.info(" POST /coder7 - Rox 7 Coder")
109
+ logger.info(" POST /vision - Rox Vision Max")
110
+ logger.info(" POST /hf/generate - HuggingFace compatible")
111
+ logger.info("=" * 60)
112
+
113
+ yield
114
+
115
+ # Shutdown
116
+ logger.info("ROX AI SERVER SHUTTING DOWN")
117
+
118
+
119
# Initialize FastAPI app
# The lifespan handler above provides the startup/shutdown hooks; interactive
# API docs are served at /docs (Swagger UI) and /redoc.
app = FastAPI(
    title="Rox AI API - Multiple Models Available",
    description="Eight specialized AI models by Mohammad Faiz",
    version="2.0",
    lifespan=lifespan,
    docs_url="/docs",
    redoc_url="/redoc"
)

logger.info("✓ FastAPI app initialized")
131
# Security: Add trusted host middleware (configure for production)
# app.add_middleware(TrustedHostMiddleware, allowed_hosts=["*.hf.space", "localhost"])

# Performance: compress responses larger than ~1 KB.
app.add_middleware(GZipMiddleware, minimum_size=1000)

# Configure CORS - IMPORTANT: restrict ALLOWED_ORIGINS in production.
# Origins come from a comma-separated env var; strip whitespace so
# "https://a.com, https://b.com" parses correctly and drop empty entries.
ALLOWED_ORIGINS = [
    origin.strip()
    for origin in os.getenv("ALLOWED_ORIGINS", "*").split(",")
    if origin.strip()
] or ["*"]

# The CORS spec forbids credentialed requests with a wildcard origin and
# browsers reject that combination outright, so only enable credentials when
# an explicit origin list is configured.
app.add_middleware(
    CORSMiddleware,
    allow_origins=ALLOWED_ORIGINS,
    allow_credentials="*" not in ALLOWED_ORIGINS,
    allow_methods=["GET", "POST", "OPTIONS"],
    allow_headers=["Content-Type", "Authorization"],
    max_age=3600,
)

logger.info(f"✓ CORS middleware configured with origins: {ALLOWED_ORIGINS}")
151
# Global exception handler
@app.exception_handler(Exception)
async def global_exception_handler(request: Request, exc: Exception):
    """Catch-all handler: log the full traceback, return a generic 500 body."""
    logger.error(f"Unhandled exception: {exc}", exc_info=True)
    payload = {
        "error": "Internal server error",
        "message": "An unexpected error occurred. Please try again later.",
    }
    return JSONResponse(status_code=500, content=payload)
165
# Request timing middleware
@app.middleware("http")
async def add_process_time_header(request: Request, call_next):
    """Attach the request handling duration (seconds) as the X-Process-Time header.

    Uses time.perf_counter() rather than time.time(): perf_counter is
    monotonic, so NTP clock adjustments can never produce negative or
    wildly wrong durations.
    """
    start_time = time.perf_counter()
    response = await call_next(request)
    process_time = time.perf_counter() - start_time
    response.headers["X-Process-Time"] = str(process_time)
    return response
176
+ @app.get("/health")
177
+ def health_check():
178
+ """Health check endpoint for monitoring"""
179
+ return {
180
+ "status": "healthy",
181
+ "service": "Rox AI API",
182
+ "version": "2.0",
183
+ "models": 8
184
+ }
185
+
186
+
187
+ @app.get("/")
188
+ def root():
189
+ """API information and available models"""
190
+ return {
191
+ "service": "Rox AI API",
192
+ "version": "2.0",
193
+ "creator": "Mohammad Faiz",
194
+ "models": {
195
+ "rox_core": {
196
+ "endpoint": "/chat",
197
+ "description": "Rox Core - Main conversational model",
198
+ "model": "minimaxai/minimax-m2.5",
199
+ "best_for": "General conversation and tasks"
200
+ },
201
+ "rox_turbo": {
202
+ "endpoint": "/turbo",
203
+ "description": "Rox 2.1 Turbo - Fast and efficient",
204
+ "model": "meta/llama-3.1-8b-instruct",
205
+ "best_for": "Quick responses and efficient processing"
206
+ },
207
+ "rox_coder": {
208
+ "endpoint": "/coder",
209
+ "description": "Rox 3.5 Coder - Specialized coding assistant",
210
+ "model": "qwen/qwen3.5-397b-a17b",
211
+ "best_for": "Code generation, debugging, and development"
212
+ },
213
+ "rox_turbo_45": {
214
+ "endpoint": "/turbo45",
215
+ "description": "Rox 4.5 Turbo - Advanced reasoning with speed",
216
+ "model": "deepseek-ai/deepseek-v3.1",
217
+ "best_for": "Complex reasoning with fast responses"
218
+ },
219
+ "rox_ultra": {
220
+ "endpoint": "/ultra",
221
+ "description": "Rox 5 Ultra - Most advanced model",
222
+ "model": "deepseek-ai/deepseek-v3.2",
223
+ "best_for": "Complex tasks requiring deep reasoning"
224
+ },
225
+ "rox_dyno": {
226
+ "endpoint": "/dyno",
227
+ "description": "Rox 6 Dyno - Extended context with dynamic thinking",
228
+ "model": "moonshotai/kimi-k2.5",
229
+ "best_for": "Long context tasks and dynamic reasoning"
230
+ },
231
+ "rox_coder_7": {
232
+ "endpoint": "/coder7",
233
+ "description": "Rox 7 Coder - Most advanced coding specialist",
234
+ "model": "z-ai/glm5",
235
+ "best_for": "Advanced code generation and complex programming"
236
+ },
237
+ "rox_vision": {
238
+ "endpoint": "/vision",
239
+ "description": "Rox Vision Max - Optimized for visual understanding",
240
+ "model": "google/gemma-3-27b-it",
241
+ "best_for": "Visual understanding and multimodal tasks"
242
+ }
243
+ },
244
+ "endpoints": [
245
+ {"path": "/chat", "method": "POST", "description": "Rox Core chat"},
246
+ {"path": "/turbo", "method": "POST", "description": "Rox 2.1 Turbo chat"},
247
+ {"path": "/coder", "method": "POST", "description": "Rox 3.5 Coder chat"},
248
+ {"path": "/turbo45", "method": "POST", "description": "Rox 4.5 Turbo chat"},
249
+ {"path": "/ultra", "method": "POST", "description": "Rox 5 Ultra chat"},
250
+ {"path": "/dyno", "method": "POST", "description": "Rox 6 Dyno chat"},
251
+ {"path": "/coder7", "method": "POST", "description": "Rox 7 Coder chat"},
252
+ {"path": "/vision", "method": "POST", "description": "Rox Vision Max chat"},
253
+ {"path": "/hf/generate", "method": "POST", "description": "HuggingFace compatible (uses Rox Core)"}
254
+ ]
255
+ }
256
+
257
+
258
class ChatMessage(BaseModel):
    """One chat turn; role is restricted to the OpenAI-style trio."""

    role: str = Field(..., pattern="^(system|user|assistant)$")
    content: str = Field(..., min_length=1, max_length=50000)

    @validator('content')
    def validate_content(cls, v):
        # Reject values that are empty or consist solely of whitespace.
        stripped = v.strip() if v else ""
        if not stripped:
            raise ValueError('Content cannot be empty')
        return v
269
class ChatRequest(BaseModel):
    """Request body shared by every chat endpoint.

    The sampling fields are optional with defaults. Clients may legally send
    an explicit JSON ``null`` for them, which Pydantic accepts as ``None`` on
    an Optional field; the coercion validators below restore the field default
    in that case so downstream arithmetic such as ``min(req.max_tokens, cap)``
    never receives ``None`` (which would raise TypeError → HTTP 500).
    """

    messages: List[ChatMessage] = Field(..., min_items=1, max_items=100)
    temperature: Optional[float] = Field(1.0, ge=0.0, le=2.0)
    top_p: Optional[float] = Field(1.0, ge=0.0, le=1.0)
    max_tokens: Optional[int] = Field(4096, ge=1, le=32768)
    stream: Optional[bool] = False

    @validator('messages')
    def validate_messages(cls, v):
        if not v:
            raise ValueError('Messages list cannot be empty')
        return v

    @validator('temperature', always=True)
    def _default_temperature(cls, v):
        # Explicit null → field default.
        return 1.0 if v is None else v

    @validator('top_p', always=True)
    def _default_top_p(cls, v):
        return 1.0 if v is None else v

    @validator('max_tokens', always=True)
    def _default_max_tokens(cls, v):
        return 4096 if v is None else v
283
class ChatResponse(BaseModel):
    """Response body for all chat endpoints: the model's text output."""

    # Assistant message content returned by the upstream model (may be "").
    content: str
287
class HFParameters(BaseModel):
    """Optional sampling parameters for the HuggingFace-style endpoint."""

    # All default to None; /hf/generate substitutes its own fallbacks.
    temperature: Optional[float] = Field(None, ge=0.0, le=2.0)
    top_p: Optional[float] = Field(None, ge=0.0, le=1.0)
    max_new_tokens: Optional[int] = Field(None, ge=1, le=32768)
293
class HFRequest(BaseModel):
    """HuggingFace text-generation style request: a prompt plus options."""

    inputs: str = Field(..., min_length=1, max_length=50000)
    parameters: Optional[HFParameters] = None

    @validator('inputs')
    def validate_inputs(cls, v):
        # Whitespace-only prompts are as useless as empty ones — reject both.
        stripped = v.strip() if v else ""
        if not stripped:
            raise ValueError('Inputs cannot be empty')
        return v
304
class HFResponseItem(BaseModel):
    """One element of the HF-style response list."""

    # Text produced by the model for the given prompt.
    generated_text: str
308
+ @app.post("/chat", response_model=ChatResponse)
309
+ def chat(req: ChatRequest):
310
+ """Rox Core - Main conversational model"""
311
+ # Inject Rox Core identity as system message
312
+ messages = [{"role": "system", "content": ROX_CORE_IDENTITY}]
313
+ messages.extend([m.dict() for m in req.messages])
314
+
315
+ try:
316
+ completion = client.chat.completions.create(
317
+ model=ROX_CORE_MODEL,
318
+ messages=messages,
319
+ temperature=req.temperature,
320
+ top_p=req.top_p,
321
+ max_tokens=req.max_tokens,
322
+ stream=False,
323
+ )
324
+ except Exception as e:
325
+ logger.exception("Error while calling Rox Core for /chat")
326
+ # Do not leak internal error details to the client.
327
+ raise HTTPException(
328
+ status_code=500,
329
+ detail="Internal server error while calling Rox Core.",
330
+ ) from e
331
+
332
+ # Combine all response message parts into a single string
333
+ try:
334
+ content = completion.choices[0].message.content or ""
335
+ except Exception:
336
+ logger.exception("Unexpected response format from Rox Core for /chat")
337
+ raise HTTPException(
338
+ status_code=502,
339
+ detail="Bad response from upstream model provider.",
340
+ )
341
+
342
+ return ChatResponse(content=content)
343
+
344
+
345
+ @app.post("/turbo", response_model=ChatResponse)
346
+ def turbo(req: ChatRequest):
347
+ """Rox 2.1 Turbo - Fast and efficient model"""
348
+ # Inject Rox Turbo identity as system message
349
+ messages = [{"role": "system", "content": ROX_TURBO_IDENTITY}]
350
+ messages.extend([m.dict() for m in req.messages])
351
+
352
+ try:
353
+ completion = client.chat.completions.create(
354
+ model=ROX_TURBO_MODEL,
355
+ messages=messages,
356
+ temperature=req.temperature if req.temperature != 1.0 else 0.7,
357
+ top_p=req.top_p if req.top_p != 1.0 else 0.9,
358
+ max_tokens=req.max_tokens,
359
+ stream=False
360
+ )
361
+ except Exception as e:
362
+ logger.exception("Error while calling Rox 2.1 Turbo for /turbo")
363
+ # Log the actual error for debugging
364
+ logger.error(f"Turbo model error details: {str(e)}")
365
+ raise HTTPException(
366
+ status_code=500,
367
+ detail=f"Internal server error while calling Rox 2.1 Turbo: {str(e)}",
368
+ ) from e
369
+
370
+ try:
371
+ content = completion.choices[0].message.content or ""
372
+ except Exception:
373
+ logger.exception("Unexpected response format from Rox 2.1 Turbo for /turbo")
374
+ raise HTTPException(
375
+ status_code=502,
376
+ detail="Bad response from upstream model provider.",
377
+ )
378
+
379
+ return ChatResponse(content=content)
380
+
381
+
382
+ @app.post("/coder", response_model=ChatResponse)
383
+ def coder(req: ChatRequest):
384
+ """Rox 3.5 Coder - Specialized coding model with thinking capability"""
385
+ # Inject Rox Coder identity as system message
386
+ messages = [{"role": "system", "content": ROX_CODER_IDENTITY}]
387
+ messages.extend([m.dict() for m in req.messages])
388
+
389
+ try:
390
+ completion = client.chat.completions.create(
391
+ model=ROX_CODER_MODEL,
392
+ messages=messages,
393
+ temperature=req.temperature if req.temperature != 1.0 else 0.6,
394
+ top_p=req.top_p if req.top_p != 1.0 else 0.95,
395
+ max_tokens=min(req.max_tokens, 16384),
396
+ stream=False,
397
+ extra_body={
398
+ "top_k": 20,
399
+ "presence_penalty": 0,
400
+ "repetition_penalty": 1,
401
+ "chat_template_kwargs": {
402
+ "enable_thinking": True
403
+ }
404
+ }
405
+ )
406
+ except Exception as e:
407
+ logger.exception("Error while calling Rox 3.5 Coder for /coder")
408
+ raise HTTPException(
409
+ status_code=500,
410
+ detail="Internal server error while calling Rox 3.5 Coder.",
411
+ ) from e
412
+
413
+ try:
414
+ content = completion.choices[0].message.content or ""
415
+ except Exception:
416
+ logger.exception("Unexpected response format from Rox 3.5 Coder for /coder")
417
+ raise HTTPException(
418
+ status_code=502,
419
+ detail="Bad response from upstream model provider.",
420
+ )
421
+
422
+ return ChatResponse(content=content)
423
+
424
+
425
+ @app.post("/turbo45", response_model=ChatResponse)
426
+ def turbo45(req: ChatRequest):
427
+ """Rox 4.5 Turbo - Advanced reasoning with speed"""
428
+ # Inject Rox 4.5 Turbo identity as system message
429
+ messages = [{"role": "system", "content": ROX_TURBO_45_IDENTITY}]
430
+ messages.extend([m.dict() for m in req.messages])
431
+
432
+ try:
433
+ completion = client.chat.completions.create(
434
+ model=ROX_TURBO_45_MODEL,
435
+ messages=messages,
436
+ temperature=req.temperature if req.temperature != 1.0 else 0.2,
437
+ top_p=req.top_p if req.top_p != 1.0 else 0.7,
438
+ max_tokens=min(req.max_tokens, 8192),
439
+ stream=False,
440
+ extra_body={
441
+ "chat_template_kwargs": {
442
+ "thinking": True
443
+ }
444
+ }
445
+ )
446
+ except Exception as e:
447
+ logger.exception("Error while calling Rox 4.5 Turbo for /turbo45")
448
+ raise HTTPException(
449
+ status_code=500,
450
+ detail="Internal server error while calling Rox 4.5 Turbo.",
451
+ ) from e
452
+
453
+ try:
454
+ content = completion.choices[0].message.content or ""
455
+ except Exception:
456
+ logger.exception("Unexpected response format from Rox 4.5 Turbo for /turbo45")
457
+ raise HTTPException(
458
+ status_code=502,
459
+ detail="Bad response from upstream model provider.",
460
+ )
461
+
462
+ return ChatResponse(content=content)
463
+
464
+
465
+ @app.post("/ultra", response_model=ChatResponse)
466
+ def ultra(req: ChatRequest):
467
+ """Rox 5 Ultra - Most advanced model with superior reasoning"""
468
+ # Inject Rox 5 Ultra identity as system message
469
+ messages = [{"role": "system", "content": ROX_ULTRA_IDENTITY}]
470
+ messages.extend([m.dict() for m in req.messages])
471
+
472
+ try:
473
+ completion = client.chat.completions.create(
474
+ model=ROX_ULTRA_MODEL,
475
+ messages=messages,
476
+ temperature=req.temperature,
477
+ top_p=req.top_p if req.top_p != 1.0 else 0.95,
478
+ max_tokens=min(req.max_tokens, 8192),
479
+ stream=False,
480
+ extra_body={
481
+ "chat_template_kwargs": {
482
+ "thinking": True
483
+ }
484
+ }
485
+ )
486
+ except Exception as e:
487
+ logger.exception("Error while calling Rox 5 Ultra for /ultra")
488
+ raise HTTPException(
489
+ status_code=500,
490
+ detail="Internal server error while calling Rox 5 Ultra.",
491
+ ) from e
492
+
493
+ try:
494
+ content = completion.choices[0].message.content or ""
495
+ except Exception:
496
+ logger.exception("Unexpected response format from Rox 5 Ultra for /ultra")
497
+ raise HTTPException(
498
+ status_code=502,
499
+ detail="Bad response from upstream model provider.",
500
+ )
501
+
502
+ return ChatResponse(content=content)
503
+
504
+
505
+ @app.post("/dyno", response_model=ChatResponse)
506
+ def dyno(req: ChatRequest):
507
+ """Rox 6 Dyno - Extended context with dynamic thinking"""
508
+ # Inject Rox 6 Dyno identity as system message
509
+ messages = [{"role": "system", "content": ROX_DYNO_IDENTITY}]
510
+ messages.extend([m.dict() for m in req.messages])
511
+
512
+ try:
513
+ completion = client.chat.completions.create(
514
+ model=ROX_DYNO_MODEL,
515
+ messages=messages,
516
+ temperature=req.temperature,
517
+ top_p=req.top_p,
518
+ max_tokens=min(req.max_tokens, 16384),
519
+ stream=False,
520
+ extra_body={
521
+ "chat_template_kwargs": {
522
+ "thinking": True
523
+ }
524
+ }
525
+ )
526
+ except Exception as e:
527
+ logger.exception("Error while calling Rox 6 Dyno for /dyno")
528
+ raise HTTPException(
529
+ status_code=500,
530
+ detail="Internal server error while calling Rox 6 Dyno.",
531
+ ) from e
532
+
533
+ try:
534
+ content = completion.choices[0].message.content or ""
535
+ except Exception:
536
+ logger.exception("Unexpected response format from Rox 6 Dyno for /dyno")
537
+ raise HTTPException(
538
+ status_code=502,
539
+ detail="Bad response from upstream model provider.",
540
+ )
541
+
542
+ return ChatResponse(content=content)
543
+
544
+
545
+ @app.post("/coder7", response_model=ChatResponse)
546
+ def coder7(req: ChatRequest):
547
+ """Rox 7 Coder - Most advanced coding specialist"""
548
+ # Inject Rox 7 Coder identity as system message
549
+ messages = [{"role": "system", "content": ROX_CODER_7_IDENTITY}]
550
+ messages.extend([m.dict() for m in req.messages])
551
+
552
+ try:
553
+ completion = client.chat.completions.create(
554
+ model=ROX_CODER_7_MODEL,
555
+ messages=messages,
556
+ temperature=req.temperature,
557
+ top_p=req.top_p,
558
+ max_tokens=min(req.max_tokens, 16384),
559
+ stream=False,
560
+ extra_body={
561
+ "chat_template_kwargs": {
562
+ "enable_thinking": True,
563
+ "clear_thinking": False
564
+ }
565
+ }
566
+ )
567
+ except Exception as e:
568
+ logger.exception("Error while calling Rox 7 Coder for /coder7")
569
+ raise HTTPException(
570
+ status_code=500,
571
+ detail="Internal server error while calling Rox 7 Coder.",
572
+ ) from e
573
+
574
+ try:
575
+ content = completion.choices[0].message.content or ""
576
+ except Exception:
577
+ logger.exception("Unexpected response format from Rox 7 Coder for /coder7")
578
+ raise HTTPException(
579
+ status_code=502,
580
+ detail="Bad response from upstream model provider.",
581
+ )
582
+
583
+ return ChatResponse(content=content)
584
+
585
+
586
+ @app.post("/vision", response_model=ChatResponse)
587
+ def vision(req: ChatRequest):
588
+ """Rox Vision Max - Optimized for visual understanding"""
589
+ # Inject Rox Vision Max identity as system message
590
+ messages = [{"role": "system", "content": ROX_VISION_IDENTITY}]
591
+ messages.extend([m.dict() for m in req.messages])
592
+
593
+ try:
594
+ completion = client.chat.completions.create(
595
+ model=ROX_VISION_MODEL,
596
+ messages=messages,
597
+ temperature=req.temperature if req.temperature != 1.0 else 0.2,
598
+ top_p=req.top_p if req.top_p != 1.0 else 0.7,
599
+ max_tokens=min(req.max_tokens, 512),
600
+ stream=False
601
+ )
602
+ except Exception as e:
603
+ logger.exception("Error while calling Rox Vision Max for /vision")
604
+ raise HTTPException(
605
+ status_code=500,
606
+ detail="Internal server error while calling Rox Vision Max.",
607
+ ) from e
608
+
609
+ try:
610
+ content = completion.choices[0].message.content or ""
611
+ except Exception:
612
+ logger.exception("Unexpected response format from Rox Vision Max for /vision")
613
+ raise HTTPException(
614
+ status_code=502,
615
+ detail="Bad response from upstream model provider.",
616
+ )
617
+
618
+ return ChatResponse(content=content)
619
+
620
+
621
+ @app.post("/hf/generate", response_model=List[HFResponseItem])
622
+ def hf_generate(req: HFRequest):
623
+ """
624
+ Hugging Face-style text-generation endpoint.
625
+
626
+ Request:
627
+ {
628
+ "inputs": "your prompt",
629
+ "parameters": {
630
+ "temperature": 0.7,
631
+ "top_p": 0.95,
632
+ "max_new_tokens": 256
633
+ }
634
+ }
635
+
636
+ Response:
637
+ [
638
+ { "generated_text": "..." }
639
+ ]
640
+ """
641
+ params = req.parameters or HFParameters()
642
+
643
+ # Inject Rox Core identity as system message
644
+ messages = [
645
+ {"role": "system", "content": ROX_CORE_IDENTITY},
646
+ {"role": "user", "content": req.inputs}
647
+ ]
648
+
649
+ try:
650
+ completion = client.chat.completions.create(
651
+ model=ROX_CORE_MODEL,
652
+ messages=messages,
653
+ temperature=params.temperature if params.temperature is not None else 1.0,
654
+ top_p=params.top_p if params.top_p is not None else 0.95,
655
+ max_tokens=params.max_new_tokens if params.max_new_tokens is not None else 8192,
656
+ stream=False,
657
+ )
658
+ except Exception as e:
659
+ logger.exception("Error while calling Rox Core for /hf/generate")
660
+ raise HTTPException(
661
+ status_code=500,
662
+ detail="Internal server error while calling Rox Core.",
663
+ ) from e
664
+
665
+ try:
666
+ content = completion.choices[0].message.content or ""
667
+ except Exception:
668
+ logger.exception("Unexpected response format from Rox Core for /hf/generate")
669
+ raise HTTPException(
670
+ status_code=502,
671
+ detail="Bad response from upstream model provider.",
672
+ )
673
+
674
+ # Match the common HF text-generation API: list of objects with generated_text
675
+ return [HFResponseItem(generated_text=content)]
676
+
677
+
678
+ if __name__ == "__main__":
679
+ import uvicorn
680
+
681
+ # Use PORT environment variable if available (for Hugging Face Spaces)
682
+ port = int(os.getenv("PORT", 7860))
683
+
684
+ uvicorn.run("server:app", host="0.0.0.0", port=port, reload=False)
685
+