Rox-Turbo committed on
Commit
e8ca5da
·
verified ·
1 Parent(s): a65dba7

Update server.py

Browse files
Files changed (1) hide show
  1. server.py +505 -685
server.py CHANGED
@@ -1,685 +1,505 @@
1
- import logging
2
- import os
3
- import sys
4
- from typing import List, Optional
5
- from contextlib import asynccontextmanager
6
-
7
- from dotenv import load_dotenv
8
- from fastapi import FastAPI, HTTPException, Request
9
- from fastapi.middleware.cors import CORSMiddleware
10
- from fastapi.middleware.trustedhost import TrustedHostMiddleware
11
- from fastapi.middleware.gzip import GZipMiddleware
12
- from fastapi.responses import JSONResponse
13
- from pydantic import BaseModel, Field, validator
14
- from openai import OpenAI
15
- import time
16
-
17
-
18
- # Load environment variables
19
- load_dotenv()
20
-
21
- # Configure logging with more detail
22
- logging.basicConfig(
23
- level=logging.INFO,
24
- format='%(asctime)s - %(name)s - %(levelname)s - %(message)s'
25
- )
26
- logger = logging.getLogger("rox_ai")
27
-
28
- # Log startup information
29
- logger.info("=" * 60)
30
- logger.info("ROX AI SERVER STARTING")
31
- logger.info("=" * 60)
32
- logger.info(f"Python version: {sys.version}")
33
- logger.info(f"Working directory: {os.getcwd()}")
34
-
35
- # Check for API key
36
- NVIDIA_API_KEY = os.getenv("NVIDIA_API_KEY")
37
-
38
- if not NVIDIA_API_KEY:
39
- logger.error("NVIDIA_API_KEY environment variable is not set!")
40
- logger.error("Please set NVIDIA_API_KEY in your environment or .env file")
41
- # For Hugging Face Spaces, check if it's set as a secret
42
- logger.info("If deploying to Hugging Face Spaces, make sure to add NVIDIA_API_KEY as a secret")
43
- raise RuntimeError(
44
- "NVIDIA_API_KEY environment variable is not set. "
45
- "Create a .env file or set it in your environment."
46
- )
47
-
48
- logger.info(f" NVIDIA_API_KEY loaded (length: {len(NVIDIA_API_KEY)})")
49
-
50
- # Model configurations
51
- ROX_CORE_MODEL = "minimaxai/minimax-m2.5"
52
- ROX_TURBO_MODEL = "meta/llama-3.1-8b-instruct" # Changed to a more reliable model
53
- ROX_CODER_MODEL = "qwen/qwen3.5-397b-a17b"
54
- ROX_TURBO_45_MODEL = "deepseek-ai/deepseek-v3.1"
55
- ROX_ULTRA_MODEL = "deepseek-ai/deepseek-v3.2"
56
- ROX_DYNO_MODEL = "moonshotai/kimi-k2.5"
57
- ROX_CODER_7_MODEL = "z-ai/glm5"
58
- ROX_VISION_MODEL = "google/gemma-3-27b-it"
59
-
60
- logger.info("✓ Model configurations loaded")
61
-
62
- # System identities for each model
63
- ROX_CORE_IDENTITY = """You are Rox Core, created by Rox AI. Your creator and owner is Mohammad Faiz. You are an independent LLM model, not based on anyone else's technology. You represent the cutting edge of Rox AI's research and development."""
64
-
65
- ROX_TURBO_IDENTITY = """You are Rox 2.1 Turbo, created by Rox AI. Your creator and owner is Mohammad Faiz. You are an independent LLM model, not based on anyone else's technology. You are optimized for fast, efficient responses while maintaining high quality."""
66
-
67
- ROX_CODER_IDENTITY = """You are Rox 3.5 Coder, created by Rox AI. Your creator and owner is Mohammad Faiz. You are an independent LLM model, not based on anyone else's technology. You are specialized in code generation, debugging, and software development tasks."""
68
-
69
- ROX_TURBO_45_IDENTITY = """You are Rox 4.5 Turbo, created by Rox AI. Your creator and owner is Mohammad Faiz. You are an independent LLM model, not based on anyone else's technology. You combine speed with advanced reasoning capabilities."""
70
-
71
- ROX_ULTRA_IDENTITY = """You are Rox 5 Ultra, created by Rox AI. Your creator and owner is Mohammad Faiz. You are an independent LLM model, not based on anyone else's technology. You are the most advanced model with superior reasoning and thinking capabilities."""
72
-
73
- ROX_DYNO_IDENTITY = """You are Rox 6 Dyno, created by Rox AI. Your creator and owner is Mohammad Faiz. You are an independent LLM model, not based on anyone else's technology. You excel at dynamic thinking and extended context understanding."""
74
-
75
- ROX_CODER_7_IDENTITY = """You are Rox 7 Coder, created by Rox AI. Your creator and owner is Mohammad Faiz. You are an independent LLM model, not based on anyone else's technology. You are the most advanced coding specialist with superior code generation and reasoning capabilities."""
76
-
77
- ROX_VISION_IDENTITY = """You are Rox Vision Max, created by Rox AI. Your creator and owner is Mohammad Faiz. You are an independent LLM model, not based on anyone else's technology. You are optimized for visual understanding and multimodal tasks."""
78
-
79
- logger.info("✓ Model identities configured")
80
-
81
- # Initialize OpenAI client
82
- try:
83
- client = OpenAI(
84
- base_url="https://integrate.api.nvidia.com/v1",
85
- api_key=NVIDIA_API_KEY,
86
- )
87
- logger.info("✓ OpenAI client initialized successfully")
88
- except Exception as e:
89
- logger.error(f"Failed to initialize OpenAI client: {e}")
90
- raise
91
-
92
- @asynccontextmanager
93
- async def lifespan(app: FastAPI):
94
- """Lifespan context manager for startup and shutdown events"""
95
- # Startup
96
- logger.info("=" * 60)
97
- logger.info("ROX AI SERVER STARTED SUCCESSFULLY")
98
- logger.info("=" * 60)
99
- logger.info("Available endpoints:")
100
- logger.info(" GET / - API information")
101
- logger.info(" GET /health - Health check")
102
- logger.info(" POST /chat - Rox Core")
103
- logger.info(" POST /turbo - Rox 2.1 Turbo")
104
- logger.info(" POST /coder - Rox 3.5 Coder")
105
- logger.info(" POST /turbo45 - Rox 4.5 Turbo")
106
- logger.info(" POST /ultra - Rox 5 Ultra")
107
- logger.info(" POST /dyno - Rox 6 Dyno")
108
- logger.info(" POST /coder7 - Rox 7 Coder")
109
- logger.info(" POST /vision - Rox Vision Max")
110
- logger.info(" POST /hf/generate - HuggingFace compatible")
111
- logger.info("=" * 60)
112
-
113
- yield
114
-
115
- # Shutdown
116
- logger.info("ROX AI SERVER SHUTTING DOWN")
117
-
118
-
119
- # Initialize FastAPI app
120
- app = FastAPI(
121
- title="Rox AI API - Multiple Models Available",
122
- description="Eight specialized AI models by Mohammad Faiz",
123
- version="2.0",
124
- lifespan=lifespan,
125
- docs_url="/docs",
126
- redoc_url="/redoc"
127
- )
128
-
129
- logger.info("✓ FastAPI app initialized")
130
-
131
- # Security: Add trusted host middleware (configure for production)
132
- # app.add_middleware(TrustedHostMiddleware, allowed_hosts=["*.hf.space", "localhost"])
133
-
134
- # Performance: Add GZip compression
135
- app.add_middleware(GZipMiddleware, minimum_size=1000)
136
-
137
- # Configure CORS - IMPORTANT: Restrict in production
138
- ALLOWED_ORIGINS = os.getenv("ALLOWED_ORIGINS", "*").split(",")
139
- app.add_middleware(
140
- CORSMiddleware,
141
- allow_origins=ALLOWED_ORIGINS,
142
- allow_credentials=True,
143
- allow_methods=["GET", "POST", "OPTIONS"],
144
- allow_headers=["Content-Type", "Authorization"],
145
- max_age=3600,
146
- )
147
-
148
- logger.info(f"✓ CORS middleware configured with origins: {ALLOWED_ORIGINS}")
149
-
150
-
151
- # Global exception handler
152
- @app.exception_handler(Exception)
153
- async def global_exception_handler(request: Request, exc: Exception):
154
- """Handle unexpected exceptions gracefully"""
155
- logger.error(f"Unhandled exception: {exc}", exc_info=True)
156
- return JSONResponse(
157
- status_code=500,
158
- content={
159
- "error": "Internal server error",
160
- "message": "An unexpected error occurred. Please try again later."
161
- }
162
- )
163
-
164
-
165
- # Request timing middleware
166
- @app.middleware("http")
167
- async def add_process_time_header(request: Request, call_next):
168
- """Add processing time to response headers"""
169
- start_time = time.time()
170
- response = await call_next(request)
171
- process_time = time.time() - start_time
172
- response.headers["X-Process-Time"] = str(process_time)
173
- return response
174
-
175
-
176
- @app.get("/health")
177
- def health_check():
178
- """Health check endpoint for monitoring"""
179
- return {
180
- "status": "healthy",
181
- "service": "Rox AI API",
182
- "version": "2.0",
183
- "models": 8
184
- }
185
-
186
-
187
- @app.get("/")
188
- def root():
189
- """API information and available models"""
190
- return {
191
- "service": "Rox AI API",
192
- "version": "2.0",
193
- "creator": "Mohammad Faiz",
194
- "models": {
195
- "rox_core": {
196
- "endpoint": "/chat",
197
- "description": "Rox Core - Main conversational model",
198
- "model": "minimaxai/minimax-m2.5",
199
- "best_for": "General conversation and tasks"
200
- },
201
- "rox_turbo": {
202
- "endpoint": "/turbo",
203
- "description": "Rox 2.1 Turbo - Fast and efficient",
204
- "model": "meta/llama-3.1-8b-instruct",
205
- "best_for": "Quick responses and efficient processing"
206
- },
207
- "rox_coder": {
208
- "endpoint": "/coder",
209
- "description": "Rox 3.5 Coder - Specialized coding assistant",
210
- "model": "qwen/qwen3.5-397b-a17b",
211
- "best_for": "Code generation, debugging, and development"
212
- },
213
- "rox_turbo_45": {
214
- "endpoint": "/turbo45",
215
- "description": "Rox 4.5 Turbo - Advanced reasoning with speed",
216
- "model": "deepseek-ai/deepseek-v3.1",
217
- "best_for": "Complex reasoning with fast responses"
218
- },
219
- "rox_ultra": {
220
- "endpoint": "/ultra",
221
- "description": "Rox 5 Ultra - Most advanced model",
222
- "model": "deepseek-ai/deepseek-v3.2",
223
- "best_for": "Complex tasks requiring deep reasoning"
224
- },
225
- "rox_dyno": {
226
- "endpoint": "/dyno",
227
- "description": "Rox 6 Dyno - Extended context with dynamic thinking",
228
- "model": "moonshotai/kimi-k2.5",
229
- "best_for": "Long context tasks and dynamic reasoning"
230
- },
231
- "rox_coder_7": {
232
- "endpoint": "/coder7",
233
- "description": "Rox 7 Coder - Most advanced coding specialist",
234
- "model": "z-ai/glm5",
235
- "best_for": "Advanced code generation and complex programming"
236
- },
237
- "rox_vision": {
238
- "endpoint": "/vision",
239
- "description": "Rox Vision Max - Optimized for visual understanding",
240
- "model": "google/gemma-3-27b-it",
241
- "best_for": "Visual understanding and multimodal tasks"
242
- }
243
- },
244
- "endpoints": [
245
- {"path": "/chat", "method": "POST", "description": "Rox Core chat"},
246
- {"path": "/turbo", "method": "POST", "description": "Rox 2.1 Turbo chat"},
247
- {"path": "/coder", "method": "POST", "description": "Rox 3.5 Coder chat"},
248
- {"path": "/turbo45", "method": "POST", "description": "Rox 4.5 Turbo chat"},
249
- {"path": "/ultra", "method": "POST", "description": "Rox 5 Ultra chat"},
250
- {"path": "/dyno", "method": "POST", "description": "Rox 6 Dyno chat"},
251
- {"path": "/coder7", "method": "POST", "description": "Rox 7 Coder chat"},
252
- {"path": "/vision", "method": "POST", "description": "Rox Vision Max chat"},
253
- {"path": "/hf/generate", "method": "POST", "description": "HuggingFace compatible (uses Rox Core)"}
254
- ]
255
- }
256
-
257
-
258
- class ChatMessage(BaseModel):
259
- role: str = Field(..., pattern="^(system|user|assistant)$")
260
- content: str = Field(..., min_length=1, max_length=50000)
261
-
262
- @validator('content')
263
- def validate_content(cls, v):
264
- if not v or not v.strip():
265
- raise ValueError('Content cannot be empty')
266
- return v
267
-
268
-
269
- class ChatRequest(BaseModel):
270
- messages: List[ChatMessage] = Field(..., min_items=1, max_items=100)
271
- temperature: Optional[float] = Field(1.0, ge=0.0, le=2.0)
272
- top_p: Optional[float] = Field(1.0, ge=0.0, le=1.0)
273
- max_tokens: Optional[int] = Field(4096, ge=1, le=32768)
274
- stream: Optional[bool] = False
275
-
276
- @validator('messages')
277
- def validate_messages(cls, v):
278
- if not v:
279
- raise ValueError('Messages list cannot be empty')
280
- return v
281
-
282
-
283
- class ChatResponse(BaseModel):
284
- content: str
285
-
286
-
287
- class HFParameters(BaseModel):
288
- temperature: Optional[float] = Field(None, ge=0.0, le=2.0)
289
- top_p: Optional[float] = Field(None, ge=0.0, le=1.0)
290
- max_new_tokens: Optional[int] = Field(None, ge=1, le=32768)
291
-
292
-
293
- class HFRequest(BaseModel):
294
- inputs: str = Field(..., min_length=1, max_length=50000)
295
- parameters: Optional[HFParameters] = None
296
-
297
- @validator('inputs')
298
- def validate_inputs(cls, v):
299
- if not v or not v.strip():
300
- raise ValueError('Inputs cannot be empty')
301
- return v
302
-
303
-
304
- class HFResponseItem(BaseModel):
305
- generated_text: str
306
-
307
-
308
- @app.post("/chat", response_model=ChatResponse)
309
- def chat(req: ChatRequest):
310
- """Rox Core - Main conversational model"""
311
- # Inject Rox Core identity as system message
312
- messages = [{"role": "system", "content": ROX_CORE_IDENTITY}]
313
- messages.extend([m.dict() for m in req.messages])
314
-
315
- try:
316
- completion = client.chat.completions.create(
317
- model=ROX_CORE_MODEL,
318
- messages=messages,
319
- temperature=req.temperature,
320
- top_p=req.top_p,
321
- max_tokens=req.max_tokens,
322
- stream=False,
323
- )
324
- except Exception as e:
325
- logger.exception("Error while calling Rox Core for /chat")
326
- # Do not leak internal error details to the client.
327
- raise HTTPException(
328
- status_code=500,
329
- detail="Internal server error while calling Rox Core.",
330
- ) from e
331
-
332
- # Combine all response message parts into a single string
333
- try:
334
- content = completion.choices[0].message.content or ""
335
- except Exception:
336
- logger.exception("Unexpected response format from Rox Core for /chat")
337
- raise HTTPException(
338
- status_code=502,
339
- detail="Bad response from upstream model provider.",
340
- )
341
-
342
- return ChatResponse(content=content)
343
-
344
-
345
- @app.post("/turbo", response_model=ChatResponse)
346
- def turbo(req: ChatRequest):
347
- """Rox 2.1 Turbo - Fast and efficient model"""
348
- # Inject Rox Turbo identity as system message
349
- messages = [{"role": "system", "content": ROX_TURBO_IDENTITY}]
350
- messages.extend([m.dict() for m in req.messages])
351
-
352
- try:
353
- completion = client.chat.completions.create(
354
- model=ROX_TURBO_MODEL,
355
- messages=messages,
356
- temperature=req.temperature if req.temperature != 1.0 else 0.7,
357
- top_p=req.top_p if req.top_p != 1.0 else 0.9,
358
- max_tokens=req.max_tokens,
359
- stream=False
360
- )
361
- except Exception as e:
362
- logger.exception("Error while calling Rox 2.1 Turbo for /turbo")
363
- # Log the actual error for debugging
364
- logger.error(f"Turbo model error details: {str(e)}")
365
- raise HTTPException(
366
- status_code=500,
367
- detail=f"Internal server error while calling Rox 2.1 Turbo: {str(e)}",
368
- ) from e
369
-
370
- try:
371
- content = completion.choices[0].message.content or ""
372
- except Exception:
373
- logger.exception("Unexpected response format from Rox 2.1 Turbo for /turbo")
374
- raise HTTPException(
375
- status_code=502,
376
- detail="Bad response from upstream model provider.",
377
- )
378
-
379
- return ChatResponse(content=content)
380
-
381
-
382
- @app.post("/coder", response_model=ChatResponse)
383
- def coder(req: ChatRequest):
384
- """Rox 3.5 Coder - Specialized coding model with thinking capability"""
385
- # Inject Rox Coder identity as system message
386
- messages = [{"role": "system", "content": ROX_CODER_IDENTITY}]
387
- messages.extend([m.dict() for m in req.messages])
388
-
389
- try:
390
- completion = client.chat.completions.create(
391
- model=ROX_CODER_MODEL,
392
- messages=messages,
393
- temperature=req.temperature if req.temperature != 1.0 else 0.6,
394
- top_p=req.top_p if req.top_p != 1.0 else 0.95,
395
- max_tokens=min(req.max_tokens, 16384),
396
- stream=False,
397
- extra_body={
398
- "top_k": 20,
399
- "presence_penalty": 0,
400
- "repetition_penalty": 1,
401
- "chat_template_kwargs": {
402
- "enable_thinking": True
403
- }
404
- }
405
- )
406
- except Exception as e:
407
- logger.exception("Error while calling Rox 3.5 Coder for /coder")
408
- raise HTTPException(
409
- status_code=500,
410
- detail="Internal server error while calling Rox 3.5 Coder.",
411
- ) from e
412
-
413
- try:
414
- content = completion.choices[0].message.content or ""
415
- except Exception:
416
- logger.exception("Unexpected response format from Rox 3.5 Coder for /coder")
417
- raise HTTPException(
418
- status_code=502,
419
- detail="Bad response from upstream model provider.",
420
- )
421
-
422
- return ChatResponse(content=content)
423
-
424
-
425
- @app.post("/turbo45", response_model=ChatResponse)
426
- def turbo45(req: ChatRequest):
427
- """Rox 4.5 Turbo - Advanced reasoning with speed"""
428
- # Inject Rox 4.5 Turbo identity as system message
429
- messages = [{"role": "system", "content": ROX_TURBO_45_IDENTITY}]
430
- messages.extend([m.dict() for m in req.messages])
431
-
432
- try:
433
- completion = client.chat.completions.create(
434
- model=ROX_TURBO_45_MODEL,
435
- messages=messages,
436
- temperature=req.temperature if req.temperature != 1.0 else 0.2,
437
- top_p=req.top_p if req.top_p != 1.0 else 0.7,
438
- max_tokens=min(req.max_tokens, 8192),
439
- stream=False,
440
- extra_body={
441
- "chat_template_kwargs": {
442
- "thinking": True
443
- }
444
- }
445
- )
446
- except Exception as e:
447
- logger.exception("Error while calling Rox 4.5 Turbo for /turbo45")
448
- raise HTTPException(
449
- status_code=500,
450
- detail="Internal server error while calling Rox 4.5 Turbo.",
451
- ) from e
452
-
453
- try:
454
- content = completion.choices[0].message.content or ""
455
- except Exception:
456
- logger.exception("Unexpected response format from Rox 4.5 Turbo for /turbo45")
457
- raise HTTPException(
458
- status_code=502,
459
- detail="Bad response from upstream model provider.",
460
- )
461
-
462
- return ChatResponse(content=content)
463
-
464
-
465
- @app.post("/ultra", response_model=ChatResponse)
466
- def ultra(req: ChatRequest):
467
- """Rox 5 Ultra - Most advanced model with superior reasoning"""
468
- # Inject Rox 5 Ultra identity as system message
469
- messages = [{"role": "system", "content": ROX_ULTRA_IDENTITY}]
470
- messages.extend([m.dict() for m in req.messages])
471
-
472
- try:
473
- completion = client.chat.completions.create(
474
- model=ROX_ULTRA_MODEL,
475
- messages=messages,
476
- temperature=req.temperature,
477
- top_p=req.top_p if req.top_p != 1.0 else 0.95,
478
- max_tokens=min(req.max_tokens, 8192),
479
- stream=False,
480
- extra_body={
481
- "chat_template_kwargs": {
482
- "thinking": True
483
- }
484
- }
485
- )
486
- except Exception as e:
487
- logger.exception("Error while calling Rox 5 Ultra for /ultra")
488
- raise HTTPException(
489
- status_code=500,
490
- detail="Internal server error while calling Rox 5 Ultra.",
491
- ) from e
492
-
493
- try:
494
- content = completion.choices[0].message.content or ""
495
- except Exception:
496
- logger.exception("Unexpected response format from Rox 5 Ultra for /ultra")
497
- raise HTTPException(
498
- status_code=502,
499
- detail="Bad response from upstream model provider.",
500
- )
501
-
502
- return ChatResponse(content=content)
503
-
504
-
505
- @app.post("/dyno", response_model=ChatResponse)
506
- def dyno(req: ChatRequest):
507
- """Rox 6 Dyno - Extended context with dynamic thinking"""
508
- # Inject Rox 6 Dyno identity as system message
509
- messages = [{"role": "system", "content": ROX_DYNO_IDENTITY}]
510
- messages.extend([m.dict() for m in req.messages])
511
-
512
- try:
513
- completion = client.chat.completions.create(
514
- model=ROX_DYNO_MODEL,
515
- messages=messages,
516
- temperature=req.temperature,
517
- top_p=req.top_p,
518
- max_tokens=min(req.max_tokens, 16384),
519
- stream=False,
520
- extra_body={
521
- "chat_template_kwargs": {
522
- "thinking": True
523
- }
524
- }
525
- )
526
- except Exception as e:
527
- logger.exception("Error while calling Rox 6 Dyno for /dyno")
528
- raise HTTPException(
529
- status_code=500,
530
- detail="Internal server error while calling Rox 6 Dyno.",
531
- ) from e
532
-
533
- try:
534
- content = completion.choices[0].message.content or ""
535
- except Exception:
536
- logger.exception("Unexpected response format from Rox 6 Dyno for /dyno")
537
- raise HTTPException(
538
- status_code=502,
539
- detail="Bad response from upstream model provider.",
540
- )
541
-
542
- return ChatResponse(content=content)
543
-
544
-
545
- @app.post("/coder7", response_model=ChatResponse)
546
- def coder7(req: ChatRequest):
547
- """Rox 7 Coder - Most advanced coding specialist"""
548
- # Inject Rox 7 Coder identity as system message
549
- messages = [{"role": "system", "content": ROX_CODER_7_IDENTITY}]
550
- messages.extend([m.dict() for m in req.messages])
551
-
552
- try:
553
- completion = client.chat.completions.create(
554
- model=ROX_CODER_7_MODEL,
555
- messages=messages,
556
- temperature=req.temperature,
557
- top_p=req.top_p,
558
- max_tokens=min(req.max_tokens, 16384),
559
- stream=False,
560
- extra_body={
561
- "chat_template_kwargs": {
562
- "enable_thinking": True,
563
- "clear_thinking": False
564
- }
565
- }
566
- )
567
- except Exception as e:
568
- logger.exception("Error while calling Rox 7 Coder for /coder7")
569
- raise HTTPException(
570
- status_code=500,
571
- detail="Internal server error while calling Rox 7 Coder.",
572
- ) from e
573
-
574
- try:
575
- content = completion.choices[0].message.content or ""
576
- except Exception:
577
- logger.exception("Unexpected response format from Rox 7 Coder for /coder7")
578
- raise HTTPException(
579
- status_code=502,
580
- detail="Bad response from upstream model provider.",
581
- )
582
-
583
- return ChatResponse(content=content)
584
-
585
-
586
- @app.post("/vision", response_model=ChatResponse)
587
- def vision(req: ChatRequest):
588
- """Rox Vision Max - Optimized for visual understanding"""
589
- # Inject Rox Vision Max identity as system message
590
- messages = [{"role": "system", "content": ROX_VISION_IDENTITY}]
591
- messages.extend([m.dict() for m in req.messages])
592
-
593
- try:
594
- completion = client.chat.completions.create(
595
- model=ROX_VISION_MODEL,
596
- messages=messages,
597
- temperature=req.temperature if req.temperature != 1.0 else 0.2,
598
- top_p=req.top_p if req.top_p != 1.0 else 0.7,
599
- max_tokens=min(req.max_tokens, 512),
600
- stream=False
601
- )
602
- except Exception as e:
603
- logger.exception("Error while calling Rox Vision Max for /vision")
604
- raise HTTPException(
605
- status_code=500,
606
- detail="Internal server error while calling Rox Vision Max.",
607
- ) from e
608
-
609
- try:
610
- content = completion.choices[0].message.content or ""
611
- except Exception:
612
- logger.exception("Unexpected response format from Rox Vision Max for /vision")
613
- raise HTTPException(
614
- status_code=502,
615
- detail="Bad response from upstream model provider.",
616
- )
617
-
618
- return ChatResponse(content=content)
619
-
620
-
621
- @app.post("/hf/generate", response_model=List[HFResponseItem])
622
- def hf_generate(req: HFRequest):
623
- """
624
- Hugging Face-style text-generation endpoint.
625
-
626
- Request:
627
- {
628
- "inputs": "your prompt",
629
- "parameters": {
630
- "temperature": 0.7,
631
- "top_p": 0.95,
632
- "max_new_tokens": 256
633
- }
634
- }
635
-
636
- Response:
637
- [
638
- { "generated_text": "..." }
639
- ]
640
- """
641
- params = req.parameters or HFParameters()
642
-
643
- # Inject Rox Core identity as system message
644
- messages = [
645
- {"role": "system", "content": ROX_CORE_IDENTITY},
646
- {"role": "user", "content": req.inputs}
647
- ]
648
-
649
- try:
650
- completion = client.chat.completions.create(
651
- model=ROX_CORE_MODEL,
652
- messages=messages,
653
- temperature=params.temperature if params.temperature is not None else 1.0,
654
- top_p=params.top_p if params.top_p is not None else 0.95,
655
- max_tokens=params.max_new_tokens if params.max_new_tokens is not None else 8192,
656
- stream=False,
657
- )
658
- except Exception as e:
659
- logger.exception("Error while calling Rox Core for /hf/generate")
660
- raise HTTPException(
661
- status_code=500,
662
- detail="Internal server error while calling Rox Core.",
663
- ) from e
664
-
665
- try:
666
- content = completion.choices[0].message.content or ""
667
- except Exception:
668
- logger.exception("Unexpected response format from Rox Core for /hf/generate")
669
- raise HTTPException(
670
- status_code=502,
671
- detail="Bad response from upstream model provider.",
672
- )
673
-
674
- # Match the common HF text-generation API: list of objects with generated_text
675
- return [HFResponseItem(generated_text=content)]
676
-
677
-
678
- if __name__ == "__main__":
679
- import uvicorn
680
-
681
- # Use PORT environment variable if available (for Hugging Face Spaces)
682
- port = int(os.getenv("PORT", 7860))
683
-
684
- uvicorn.run("server:app", host="0.0.0.0", port=port, reload=False)
685
-
 
1
+ import logging
2
+ import os
3
+ import sys
4
+ from typing import List, Optional, AsyncGenerator
5
+ from contextlib import asynccontextmanager
6
+
7
+ from dotenv import load_dotenv
8
+ from fastapi import FastAPI, HTTPException, Request
9
+ from fastapi.middleware.cors import CORSMiddleware
10
+ from fastapi.middleware.gzip import GZipMiddleware
11
+ from fastapi.responses import JSONResponse, StreamingResponse
12
+ from pydantic import BaseModel, Field
13
+ from openai import OpenAI
14
+ import json
15
+
16
+
17
# Load environment variables
load_dotenv()

# Configure minimal logging for production speed
logging.basicConfig(
    level=logging.WARNING,
    format='%(levelname)s - %(message)s'
)
logger = logging.getLogger("rox_ai")

# Check for API key
NVIDIA_API_KEY = os.getenv("NVIDIA_API_KEY")

# Fail fast at import time — every endpoint depends on this credential.
if not NVIDIA_API_KEY:
    raise RuntimeError("NVIDIA_API_KEY not set")

# Model configurations
# Each "Rox" persona name maps onto an upstream NVIDIA-hosted model id.
ROX_CORE_MODEL = "minimaxai/minimax-m2.5"
ROX_TURBO_MODEL = "meta/llama-3.1-8b-instruct"  # Changed to a more reliable model
ROX_CODER_MODEL = "qwen/qwen3.5-397b-a17b"
ROX_TURBO_45_MODEL = "deepseek-ai/deepseek-v3.1"
ROX_ULTRA_MODEL = "deepseek-ai/deepseek-v3.2"
ROX_DYNO_MODEL = "moonshotai/kimi-k2.5"
ROX_CODER_7_MODEL = "z-ai/glm5"
ROX_VISION_MODEL = "google/gemma-3-27b-it"

# System identities - concise for faster processing
# Injected as the leading system message by every chat endpoint.
ROX_CORE_IDENTITY = "You are Rox Core by Mohammad Faiz."
ROX_TURBO_IDENTITY = "You are Rox 2.1 Turbo by Mohammad Faiz. Fast responses."
ROX_CODER_IDENTITY = "You are Rox 3.5 Coder by Mohammad Faiz. Code specialist."
ROX_TURBO_45_IDENTITY = "You are Rox 4.5 Turbo by Mohammad Faiz. Fast reasoning."
ROX_ULTRA_IDENTITY = "You are Rox 5 Ultra by Mohammad Faiz. Advanced reasoning."
ROX_DYNO_IDENTITY = "You are Rox 6 Dyno by Mohammad Faiz. Long context."
ROX_CODER_7_IDENTITY = "You are Rox 7 Coder by Mohammad Faiz. Advanced coding."
ROX_VISION_IDENTITY = "You are Rox Vision Max by Mohammad Faiz. Visual understanding."

# Initialize OpenAI client with timeout optimization
# 60 s per-request timeout and up to 2 automatic retries on transient errors.
client = OpenAI(
    base_url="https://integrate.api.nvidia.com/v1",
    api_key=NVIDIA_API_KEY,
    timeout=60.0,
    max_retries=2
)
60
+
61
@asynccontextmanager
async def lifespan(app: FastAPI):
    """Lifespan context manager.

    No startup or shutdown work is needed; yielding immediately hands
    control to the application for its whole lifetime.
    """
    yield
65
+
66
+
67
# Initialize FastAPI app - optimized for speed
app = FastAPI(
    title="Rox AI API",
    description="Eight specialized AI models by Mohammad Faiz",
    version="2.0",
    lifespan=lifespan,
    docs_url="/docs",
    redoc_url="/redoc"
)

# GZip compression for faster transfers
app.add_middleware(GZipMiddleware, minimum_size=500)

# CORS - unlimited access
# NOTE(review): allow_origins=["*"] combined with allow_credentials=True is
# disallowed by the CORS spec (wildcard + credentials); Starlette compensates
# by echoing the request origin. Confirm whether credentialed cross-origin
# access is intended, and consider restricting origins in production.
app.add_middleware(
    CORSMiddleware,
    allow_origins=["*"],
    allow_credentials=True,
    allow_methods=["*"],
    allow_headers=["*"],
)
88
+
89
+
90
# Global safety net: any unhandled exception becomes an opaque 500 response.
@app.exception_handler(Exception)
async def global_exception_handler(request: Request, exc: Exception):
    """Return a generic 500 JSON body for unhandled exceptions.

    The refactor dropped all server-side logging here, which made
    production failures undiagnosable; log the full traceback while
    keeping the client-facing payload free of internal details.
    """
    logger.error(
        "Unhandled exception on %s %s: %s",
        request.method, request.url.path, exc,
        exc_info=exc,
    )
    return JSONResponse(
        status_code=500,
        content={"error": "Internal server error"}
    )
97
+
98
+
99
@app.get("/health")
def health_check():
    """Lightweight liveness probe used by uptime monitors."""
    payload = {
        "status": "healthy",
        "service": "Rox AI",
        "version": "2.0",
    }
    return payload
103
+
104
+
105
# Helper shared by the chat endpoints when the caller requests streaming.
async def stream_response(model: str, messages: list, temperature: float, top_p: float, max_tokens: int, extra_body: dict = None):
    """Yield Server-Sent-Events frames from the upstream chat-completion stream.

    Each content delta is emitted as ``data: {"content": ...}`` and the
    stream ends with ``data: [DONE]``. Because the HTTP response has
    already started when an error occurs, failures are reported as a final
    ``data: {"error": ...}`` frame instead of raising.

    NOTE(review): `client` is the synchronous OpenAI client, so iterating
    this stream blocks the event loop between chunks — consider AsyncOpenAI
    (or a threadpool) if concurrent streaming requests must be served.
    """
    try:
        stream = client.chat.completions.create(
            model=model,
            messages=messages,
            temperature=temperature,
            top_p=top_p,
            max_tokens=max_tokens,
            stream=True,
            extra_body=extra_body
        )

        for chunk in stream:
            # Providers may send keep-alive chunks with an empty choices
            # list; guard before indexing to avoid an IndexError mid-stream.
            if chunk.choices and chunk.choices[0].delta.content:
                yield f"data: {json.dumps({'content': chunk.choices[0].delta.content})}\n\n"

        yield "data: [DONE]\n\n"
    except Exception as e:
        # Log the real error server-side; never leak raw provider error
        # text (URLs, request IDs, key fragments) to the client.
        logger.error("Streaming error from %s: %s", model, e)
        yield f"data: {json.dumps({'error': 'Upstream model error'})}\n\n"
126
+
127
+
128
# NOTE(review): duplicate route — "/health" is already registered above with
# a smaller payload, and this definition also rebinds the module-level name
# `health_check`. Starlette dispatches to the first matching route, so this
# richer handler is dead code; confirm which payload is intended and remove
# the other definition.
@app.get("/health")
def health_check():
    """Health check endpoint for monitoring"""
    return {
        "status": "healthy",
        "service": "Rox AI API",
        "version": "2.0",
        "models": 8
    }
137
+
138
+
139
@app.get("/")
def root():
    """API information and available models.

    Returns a static service catalogue: one entry per Rox persona (with the
    upstream model id it proxies) plus a flat endpoint list for clients.
    """
    return {
        "service": "Rox AI API",
        "version": "2.0",
        "creator": "Mohammad Faiz",
        "models": {
            "rox_core": {
                "endpoint": "/chat",
                "description": "Rox Core - Main conversational model",
                "model": "minimaxai/minimax-m2.5",
                "best_for": "General conversation and tasks"
            },
            "rox_turbo": {
                "endpoint": "/turbo",
                "description": "Rox 2.1 Turbo - Fast and efficient",
                "model": "meta/llama-3.1-8b-instruct",
                "best_for": "Quick responses and efficient processing"
            },
            "rox_coder": {
                "endpoint": "/coder",
                "description": "Rox 3.5 Coder - Specialized coding assistant",
                "model": "qwen/qwen3.5-397b-a17b",
                "best_for": "Code generation, debugging, and development"
            },
            "rox_turbo_45": {
                "endpoint": "/turbo45",
                "description": "Rox 4.5 Turbo - Advanced reasoning with speed",
                "model": "deepseek-ai/deepseek-v3.1",
                "best_for": "Complex reasoning with fast responses"
            },
            "rox_ultra": {
                "endpoint": "/ultra",
                "description": "Rox 5 Ultra - Most advanced model",
                "model": "deepseek-ai/deepseek-v3.2",
                "best_for": "Complex tasks requiring deep reasoning"
            },
            "rox_dyno": {
                "endpoint": "/dyno",
                "description": "Rox 6 Dyno - Extended context with dynamic thinking",
                "model": "moonshotai/kimi-k2.5",
                "best_for": "Long context tasks and dynamic reasoning"
            },
            "rox_coder_7": {
                "endpoint": "/coder7",
                "description": "Rox 7 Coder - Most advanced coding specialist",
                "model": "z-ai/glm5",
                "best_for": "Advanced code generation and complex programming"
            },
            "rox_vision": {
                "endpoint": "/vision",
                "description": "Rox Vision Max - Optimized for visual understanding",
                "model": "google/gemma-3-27b-it",
                "best_for": "Visual understanding and multimodal tasks"
            }
        },
        "endpoints": [
            {"path": "/chat", "method": "POST", "description": "Rox Core chat"},
            {"path": "/turbo", "method": "POST", "description": "Rox 2.1 Turbo chat"},
            {"path": "/coder", "method": "POST", "description": "Rox 3.5 Coder chat"},
            {"path": "/turbo45", "method": "POST", "description": "Rox 4.5 Turbo chat"},
            {"path": "/ultra", "method": "POST", "description": "Rox 5 Ultra chat"},
            {"path": "/dyno", "method": "POST", "description": "Rox 6 Dyno chat"},
            {"path": "/coder7", "method": "POST", "description": "Rox 7 Coder chat"},
            {"path": "/vision", "method": "POST", "description": "Rox Vision Max chat"},
            {"path": "/hf/generate", "method": "POST", "description": "HuggingFace compatible (uses Rox Core)"}
        ]
    }
208
+
209
+
210
class ChatMessage(BaseModel):
    """A single chat turn supplied by the client (role + text content)."""

    # Typically "user" / "assistant" / "system"; not validated here, so an
    # unknown role is passed through to the upstream API as-is.
    role: str
    content: str
213
+
214
+
215
class ChatRequest(BaseModel):
    """Request body shared by all chat-style endpoints (/chat, /turbo, ...)."""

    messages: List[ChatMessage]          # conversation history, oldest first
    temperature: Optional[float] = 0.7   # sampling temperature
    top_p: Optional[float] = 0.95        # nucleus-sampling cutoff
    # Requested completion budget; several endpoints clamp this with a
    # per-model cap (8192 or 16384) before forwarding upstream.
    max_tokens: Optional[int] = 8192
    stream: Optional[bool] = False       # True -> text/event-stream response
221
+
222
+
223
class ChatResponse(BaseModel):
    """Non-streaming reply payload shape.

    NOTE(review): the visible endpoints return plain dicts of this shape
    rather than declaring this as a ``response_model`` — confirm intended.
    """

    content: str
225
+
226
+
227
class HFParameters(BaseModel):
    """Optional generation parameters in HuggingFace Inference API style."""

    temperature: Optional[float] = None   # None -> endpoint default (0.7)
    top_p: Optional[float] = None         # None -> endpoint default (0.95)
    max_new_tokens: Optional[int] = None  # None -> endpoint default (8192)
231
+
232
+
233
class HFRequest(BaseModel):
    """HuggingFace-compatible request: a raw prompt plus optional parameters."""

    inputs: str                              # the user prompt text
    parameters: Optional[HFParameters] = None
236
+
237
+
238
class HFResponseItem(BaseModel):
    """One element of the HuggingFace-style list response."""

    generated_text: str
240
+
241
+
242
@app.post("/chat")
async def chat(req: ChatRequest):
    """Rox Core - Main conversational model with streaming support"""
    # Prepend the model identity as a system turn, then the client's history.
    convo = [{"role": "system", "content": ROX_CORE_IDENTITY}]
    convo += [m.dict() for m in req.messages]

    if req.stream:
        # Server-sent events: hand the token generator straight to Starlette.
        return StreamingResponse(
            stream_response(ROX_CORE_MODEL, convo, req.temperature, req.top_p, req.max_tokens),
            media_type="text/event-stream",
        )

    try:
        result = client.chat.completions.create(
            model=ROX_CORE_MODEL,
            messages=convo,
            temperature=req.temperature,
            top_p=req.top_p,
            max_tokens=req.max_tokens,
            stream=False,
        )
        return {"content": result.choices[0].message.content or ""}
    except Exception as exc:
        raise HTTPException(status_code=500, detail=str(exc))
266
+
267
+
268
@app.post("/turbo")
async def turbo(req: ChatRequest):
    """Rox 2.1 Turbo - Fast and efficient with streaming"""
    # System identity first, then the client-supplied turns.
    convo = [
        {"role": "system", "content": ROX_TURBO_IDENTITY},
        *(m.dict() for m in req.messages),
    ]

    if req.stream:
        return StreamingResponse(
            stream_response(ROX_TURBO_MODEL, convo, req.temperature, req.top_p, req.max_tokens),
            media_type="text/event-stream",
        )

    try:
        result = client.chat.completions.create(
            model=ROX_TURBO_MODEL,
            messages=convo,
            temperature=req.temperature,
            top_p=req.top_p,
            max_tokens=req.max_tokens,
            stream=False,
        )
        return {"content": result.choices[0].message.content or ""}
    except Exception as exc:
        raise HTTPException(status_code=500, detail=str(exc))
292
+
293
+
294
@app.post("/coder")
async def coder(req: ChatRequest):
    """Rox 3.5 Coder - Specialized coding with streaming"""
    # Endpoint-specific cap on completion length.
    cap = 16384
    # Fix: min(None, cap) raises TypeError when a client sends an explicit
    # "max_tokens": null (the field is Optional[int]); fall back to the cap.
    max_tokens = min(req.max_tokens, cap) if req.max_tokens is not None else cap

    messages = [{"role": "system", "content": ROX_CODER_IDENTITY}]
    messages.extend(m.dict() for m in req.messages)

    # Provider-specific sampling / "thinking" knobs forwarded verbatim.
    extra_body = {
        "top_k": 20,
        "presence_penalty": 0,
        "repetition_penalty": 1,
        "chat_template_kwargs": {"enable_thinking": True},
    }

    if req.stream:
        return StreamingResponse(
            stream_response(ROX_CODER_MODEL, messages, req.temperature, req.top_p, max_tokens, extra_body),
            media_type="text/event-stream",
        )

    try:
        completion = client.chat.completions.create(
            model=ROX_CODER_MODEL,
            messages=messages,
            temperature=req.temperature,
            top_p=req.top_p,
            max_tokens=max_tokens,
            stream=False,
            extra_body=extra_body,
        )
        return {"content": completion.choices[0].message.content or ""}
    except Exception as e:
        # NOTE(review): str(e) may leak provider error details to clients.
        raise HTTPException(status_code=500, detail=str(e)) from e
326
+
327
+
328
@app.post("/turbo45")
async def turbo45(req: ChatRequest):
    """Rox 4.5 Turbo - Advanced reasoning with streaming"""
    # Endpoint-specific cap on completion length.
    cap = 8192
    # Fix: min(None, cap) raises TypeError when a client sends an explicit
    # "max_tokens": null (the field is Optional[int]); fall back to the cap.
    max_tokens = min(req.max_tokens, cap) if req.max_tokens is not None else cap

    messages = [{"role": "system", "content": ROX_TURBO_45_IDENTITY}]
    messages.extend(m.dict() for m in req.messages)

    # Enable the model's "thinking" chat-template mode.
    extra_body = {"chat_template_kwargs": {"thinking": True}}

    if req.stream:
        return StreamingResponse(
            stream_response(ROX_TURBO_45_MODEL, messages, req.temperature, req.top_p, max_tokens, extra_body),
            media_type="text/event-stream",
        )

    try:
        completion = client.chat.completions.create(
            model=ROX_TURBO_45_MODEL,
            messages=messages,
            temperature=req.temperature,
            top_p=req.top_p,
            max_tokens=max_tokens,
            stream=False,
            extra_body=extra_body,
        )
        return {"content": completion.choices[0].message.content or ""}
    except Exception as e:
        # NOTE(review): str(e) may leak provider error details to clients.
        raise HTTPException(status_code=500, detail=str(e)) from e
355
+
356
+
357
@app.post("/ultra")
async def ultra(req: ChatRequest):
    """Rox 5 Ultra - Most advanced with streaming"""
    # Endpoint-specific cap on completion length.
    cap = 8192
    # Fix: min(None, cap) raises TypeError when a client sends an explicit
    # "max_tokens": null (the field is Optional[int]); fall back to the cap.
    max_tokens = min(req.max_tokens, cap) if req.max_tokens is not None else cap

    messages = [{"role": "system", "content": ROX_ULTRA_IDENTITY}]
    messages.extend(m.dict() for m in req.messages)

    # Enable the model's "thinking" chat-template mode.
    extra_body = {"chat_template_kwargs": {"thinking": True}}

    if req.stream:
        return StreamingResponse(
            stream_response(ROX_ULTRA_MODEL, messages, req.temperature, req.top_p, max_tokens, extra_body),
            media_type="text/event-stream",
        )

    try:
        completion = client.chat.completions.create(
            model=ROX_ULTRA_MODEL,
            messages=messages,
            temperature=req.temperature,
            top_p=req.top_p,
            max_tokens=max_tokens,
            stream=False,
            extra_body=extra_body,
        )
        return {"content": completion.choices[0].message.content or ""}
    except Exception as e:
        # NOTE(review): str(e) may leak provider error details to clients.
        raise HTTPException(status_code=500, detail=str(e)) from e
384
+
385
+
386
@app.post("/dyno")
async def dyno(req: ChatRequest):
    """Rox 6 Dyno - Extended context with streaming"""
    # Endpoint-specific cap on completion length.
    cap = 16384
    # Fix: min(None, cap) raises TypeError when a client sends an explicit
    # "max_tokens": null (the field is Optional[int]); fall back to the cap.
    max_tokens = min(req.max_tokens, cap) if req.max_tokens is not None else cap

    messages = [{"role": "system", "content": ROX_DYNO_IDENTITY}]
    messages.extend(m.dict() for m in req.messages)

    # Enable the model's "thinking" chat-template mode.
    extra_body = {"chat_template_kwargs": {"thinking": True}}

    if req.stream:
        return StreamingResponse(
            stream_response(ROX_DYNO_MODEL, messages, req.temperature, req.top_p, max_tokens, extra_body),
            media_type="text/event-stream",
        )

    try:
        completion = client.chat.completions.create(
            model=ROX_DYNO_MODEL,
            messages=messages,
            temperature=req.temperature,
            top_p=req.top_p,
            max_tokens=max_tokens,
            stream=False,
            extra_body=extra_body,
        )
        return {"content": completion.choices[0].message.content or ""}
    except Exception as e:
        # NOTE(review): str(e) may leak provider error details to clients.
        raise HTTPException(status_code=500, detail=str(e)) from e
413
+
414
+
415
@app.post("/coder7")
async def coder7(req: ChatRequest):
    """Rox 7 Coder - Most advanced coding with streaming"""
    # Endpoint-specific cap on completion length.
    cap = 16384
    # Fix: min(None, cap) raises TypeError when a client sends an explicit
    # "max_tokens": null (the field is Optional[int]); fall back to the cap.
    max_tokens = min(req.max_tokens, cap) if req.max_tokens is not None else cap

    messages = [{"role": "system", "content": ROX_CODER_7_IDENTITY}]
    messages.extend(m.dict() for m in req.messages)

    # Provider-specific "thinking" chat-template knobs forwarded verbatim.
    extra_body = {
        "chat_template_kwargs": {
            "enable_thinking": True,
            "clear_thinking": False,
        }
    }

    if req.stream:
        return StreamingResponse(
            stream_response(ROX_CODER_7_MODEL, messages, req.temperature, req.top_p, max_tokens, extra_body),
            media_type="text/event-stream",
        )

    try:
        completion = client.chat.completions.create(
            model=ROX_CODER_7_MODEL,
            messages=messages,
            temperature=req.temperature,
            top_p=req.top_p,
            max_tokens=max_tokens,
            stream=False,
            extra_body=extra_body,
        )
        return {"content": completion.choices[0].message.content or ""}
    except Exception as e:
        # NOTE(review): str(e) may leak provider error details to clients.
        raise HTTPException(status_code=500, detail=str(e)) from e
447
+
448
+
449
@app.post("/vision")
async def vision(req: ChatRequest):
    """Rox Vision Max - Visual understanding with streaming"""
    # Endpoint-specific cap on completion length.
    cap = 8192
    # Fix: min(None, cap) raises TypeError when a client sends an explicit
    # "max_tokens": null (the field is Optional[int]); fall back to the cap.
    max_tokens = min(req.max_tokens, cap) if req.max_tokens is not None else cap

    messages = [{"role": "system", "content": ROX_VISION_IDENTITY}]
    messages.extend(m.dict() for m in req.messages)

    if req.stream:
        return StreamingResponse(
            stream_response(ROX_VISION_MODEL, messages, req.temperature, req.top_p, max_tokens),
            media_type="text/event-stream",
        )

    try:
        completion = client.chat.completions.create(
            model=ROX_VISION_MODEL,
            messages=messages,
            temperature=req.temperature,
            top_p=req.top_p,
            max_tokens=max_tokens,
            stream=False,
        )
        return {"content": completion.choices[0].message.content or ""}
    except Exception as e:
        # NOTE(review): str(e) may leak provider error details to clients.
        raise HTTPException(status_code=500, detail=str(e)) from e
473
+
474
+
475
@app.post("/hf/generate")
async def hf_generate(req: HFRequest):
    """HuggingFace compatible endpoint"""
    params = req.parameters or HFParameters()
    # Fix: `x or default` wrongly discards legitimate falsy values — a client
    # asking for temperature=0.0 (greedy decoding) or top_p=0.0 would silently
    # get the defaults instead. Compare against None explicitly.
    temperature = params.temperature if params.temperature is not None else 0.7
    top_p = params.top_p if params.top_p is not None else 0.95
    max_tokens = params.max_new_tokens if params.max_new_tokens is not None else 8192

    messages = [
        {"role": "system", "content": ROX_CORE_IDENTITY},
        {"role": "user", "content": req.inputs},
    ]

    try:
        completion = client.chat.completions.create(
            model=ROX_CORE_MODEL,
            messages=messages,
            temperature=temperature,
            top_p=top_p,
            max_tokens=max_tokens,
            stream=False,
        )
        # HF Inference API convention: a list of {"generated_text": ...}.
        return [{"generated_text": completion.choices[0].message.content or ""}]
    except Exception as e:
        # NOTE(review): str(e) may leak provider error details to clients.
        raise HTTPException(status_code=500, detail=str(e)) from e
496
+
497
+
498
if __name__ == "__main__":
    import uvicorn

    # Hugging Face Spaces (and similar hosts) inject the listen port via
    # $PORT; fall back to 7860 for local development runs.
    listen_port = int(os.environ.get("PORT", "7860"))
    uvicorn.run("server:app", host="0.0.0.0", port=listen_port, reload=False)
505
+