Rox-Turbo committed on
Commit
cee0827
·
verified ·
1 Parent(s): 6c15b4a

Update server.py

Browse files
Files changed (1) hide show
  1. server.py +617 -545
server.py CHANGED
@@ -1,545 +1,617 @@
1
- import logging
2
- import os
3
- from typing import List, Optional
4
-
5
- from dotenv import load_dotenv
6
- from fastapi import FastAPI, HTTPException
7
- from fastapi.middleware.cors import CORSMiddleware
8
- from pydantic import BaseModel
9
- from openai import OpenAI
10
-
11
-
12
- load_dotenv()
13
-
14
- logger = logging.getLogger("rox_ai")
15
- logging.basicConfig(level=logging.INFO)
16
-
17
- NVIDIA_API_KEY = os.getenv("NVIDIA_API_KEY")
18
-
19
- # Model configurations
20
- ROX_CORE_MODEL = "minimaxai/minimax-m2.5"
21
- ROX_TURBO_MODEL = "deepseek-ai/deepseek-r1-distill-qwen-32b"
22
- ROX_CODER_MODEL = "qwen/qwen3.5-397b-a17b"
23
- ROX_TURBO_45_MODEL = "deepseek-ai/deepseek-v3.1"
24
- ROX_ULTRA_MODEL = "deepseek-ai/deepseek-v3.2"
25
- ROX_DYNO_MODEL = "moonshotai/kimi-k2.5"
26
- ROX_CODER_7_MODEL = "z-ai/glm5"
27
- ROX_VISION_MODEL = "google/gemma-3-27b-it"
28
-
29
- # System identities for each model
30
- ROX_CORE_IDENTITY = """You are Rox Core, created by Rox AI. Your creator and owner is Mohammad Faiz. You are an independent LLM model, not based on anyone else's technology. You represent the cutting edge of Rox AI's research and development."""
31
-
32
- ROX_TURBO_IDENTITY = """You are Rox 2.1 Turbo, created by Rox AI. Your creator and owner is Mohammad Faiz. You are an independent LLM model, not based on anyone else's technology. You are optimized for fast, efficient responses while maintaining high quality."""
33
-
34
- ROX_CODER_IDENTITY = """You are Rox 3.5 Coder, created by Rox AI. Your creator and owner is Mohammad Faiz. You are an independent LLM model, not based on anyone else's technology. You are specialized in code generation, debugging, and software development tasks."""
35
-
36
- ROX_TURBO_45_IDENTITY = """You are Rox 4.5 Turbo, created by Rox AI. Your creator and owner is Mohammad Faiz. You are an independent LLM model, not based on anyone else's technology. You combine speed with advanced reasoning capabilities."""
37
-
38
- ROX_ULTRA_IDENTITY = """You are Rox 5 Ultra, created by Rox AI. Your creator and owner is Mohammad Faiz. You are an independent LLM model, not based on anyone else's technology. You are the most advanced model with superior reasoning and thinking capabilities."""
39
-
40
- ROX_DYNO_IDENTITY = """You are Rox 6 Dyno, created by Rox AI. Your creator and owner is Mohammad Faiz. You are an independent LLM model, not based on anyone else's technology. You excel at dynamic thinking and extended context understanding."""
41
-
42
- ROX_CODER_7_IDENTITY = """You are Rox 7 Coder, created by Rox AI. Your creator and owner is Mohammad Faiz. You are an independent LLM model, not based on anyone else's technology. You are the most advanced coding specialist with superior code generation and reasoning capabilities."""
43
-
44
- ROX_VISION_IDENTITY = """You are Rox Vision Max, created by Rox AI. Your creator and owner is Mohammad Faiz. You are an independent LLM model, not based on anyone else's technology. You are optimized for visual understanding and multimodal tasks."""
45
-
46
- if not NVIDIA_API_KEY:
47
- # Fail fast on startup rather than at first request.
48
- raise RuntimeError(
49
- "NVIDIA_API_KEY environment variable is not set. "
50
- "Create a .env file or set it in your environment."
51
- )
52
-
53
-
54
- client = OpenAI(
55
- base_url="https://integrate.api.nvidia.com/v1",
56
- api_key=NVIDIA_API_KEY,
57
- )
58
-
59
- app = FastAPI(title="Rox AI API - Multiple Models Available")
60
-
61
- # Adjust this list to only include your real frontend origins in production.
62
- app.add_middleware(
63
- CORSMiddleware,
64
- allow_origins=["*"], # e.g. ["https://your-site.com"]
65
- allow_credentials=True,
66
- allow_methods=["*"],
67
- allow_headers=["*"],
68
- )
69
-
70
-
71
- @app.get("/")
72
- def root():
73
- """API information and available models"""
74
- return {
75
- "service": "Rox AI API",
76
- "version": "2.0",
77
- "creator": "Mohammad Faiz",
78
- "models": {
79
- "rox_core": {
80
- "endpoint": "/chat",
81
- "description": "Rox Core - Main conversational model",
82
- "model": "minimaxai/minimax-m2.5",
83
- "best_for": "General conversation and tasks"
84
- },
85
- "rox_turbo": {
86
- "endpoint": "/turbo",
87
- "description": "Rox 2.1 Turbo - Fast and efficient",
88
- "model": "deepseek-ai/deepseek-r1-distill-qwen-32b",
89
- "best_for": "Quick responses and efficient processing"
90
- },
91
- "rox_coder": {
92
- "endpoint": "/coder",
93
- "description": "Rox 3.5 Coder - Specialized coding assistant",
94
- "model": "qwen/qwen3.5-397b-a17b",
95
- "best_for": "Code generation, debugging, and development"
96
- },
97
- "rox_turbo_45": {
98
- "endpoint": "/turbo45",
99
- "description": "Rox 4.5 Turbo - Advanced reasoning with speed",
100
- "model": "deepseek-ai/deepseek-v3.1",
101
- "best_for": "Complex reasoning with fast responses"
102
- },
103
- "rox_ultra": {
104
- "endpoint": "/ultra",
105
- "description": "Rox 5 Ultra - Most advanced model",
106
- "model": "deepseek-ai/deepseek-v3.2",
107
- "best_for": "Complex tasks requiring deep reasoning"
108
- },
109
- "rox_dyno": {
110
- "endpoint": "/dyno",
111
- "description": "Rox 6 Dyno - Extended context with dynamic thinking",
112
- "model": "moonshotai/kimi-k2.5",
113
- "best_for": "Long context tasks and dynamic reasoning"
114
- },
115
- "rox_coder_7": {
116
- "endpoint": "/coder7",
117
- "description": "Rox 7 Coder - Most advanced coding specialist",
118
- "model": "z-ai/glm5",
119
- "best_for": "Advanced code generation and complex programming"
120
- },
121
- "rox_vision": {
122
- "endpoint": "/vision",
123
- "description": "Rox Vision Max - Optimized for visual understanding",
124
- "model": "google/gemma-3-27b-it",
125
- "best_for": "Visual understanding and multimodal tasks"
126
- }
127
- },
128
- "endpoints": [
129
- {"path": "/chat", "method": "POST", "description": "Rox Core chat"},
130
- {"path": "/turbo", "method": "POST", "description": "Rox 2.1 Turbo chat"},
131
- {"path": "/coder", "method": "POST", "description": "Rox 3.5 Coder chat"},
132
- {"path": "/turbo45", "method": "POST", "description": "Rox 4.5 Turbo chat"},
133
- {"path": "/ultra", "method": "POST", "description": "Rox 5 Ultra chat"},
134
- {"path": "/dyno", "method": "POST", "description": "Rox 6 Dyno chat"},
135
- {"path": "/coder7", "method": "POST", "description": "Rox 7 Coder chat"},
136
- {"path": "/vision", "method": "POST", "description": "Rox Vision Max chat"},
137
- {"path": "/hf/generate", "method": "POST", "description": "HuggingFace compatible (uses Rox Core)"}
138
- ]
139
- }
140
-
141
-
142
- class ChatMessage(BaseModel):
143
- role: str
144
- content: str
145
-
146
-
147
- class ChatRequest(BaseModel):
148
- messages: List[ChatMessage]
149
- temperature: Optional[float] = 1.0
150
- top_p: Optional[float] = 1.0
151
- max_tokens: Optional[int] = 4096
152
-
153
-
154
- class ChatResponse(BaseModel):
155
- content: str
156
-
157
-
158
- class HFParameters(BaseModel):
159
- temperature: Optional[float] = None
160
- top_p: Optional[float] = None
161
- max_new_tokens: Optional[int] = None
162
-
163
-
164
- class HFRequest(BaseModel):
165
- inputs: str
166
- parameters: Optional[HFParameters] = None
167
-
168
-
169
- class HFResponseItem(BaseModel):
170
- generated_text: str
171
-
172
-
173
- @app.post("/chat", response_model=ChatResponse)
174
- def chat(req: ChatRequest):
175
- """Rox Core - Main conversational model"""
176
- # Inject Rox Core identity as system message
177
- messages = [{"role": "system", "content": ROX_CORE_IDENTITY}]
178
- messages.extend([m.dict() for m in req.messages])
179
-
180
- try:
181
- completion = client.chat.completions.create(
182
- model=ROX_CORE_MODEL,
183
- messages=messages,
184
- temperature=req.temperature,
185
- top_p=req.top_p,
186
- max_tokens=req.max_tokens,
187
- stream=False,
188
- )
189
- except Exception as e:
190
- logger.exception("Error while calling Rox Core for /chat")
191
- # Do not leak internal error details to the client.
192
- raise HTTPException(
193
- status_code=500,
194
- detail="Internal server error while calling Rox Core.",
195
- ) from e
196
-
197
- # Combine all response message parts into a single string
198
- try:
199
- content = completion.choices[0].message.content or ""
200
- except Exception:
201
- logger.exception("Unexpected response format from Rox Core for /chat")
202
- raise HTTPException(
203
- status_code=502,
204
- detail="Bad response from upstream model provider.",
205
- )
206
-
207
- return ChatResponse(content=content)
208
-
209
-
210
- @app.post("/turbo", response_model=ChatResponse)
211
- def turbo(req: ChatRequest):
212
- """Rox 2.1 Turbo - Fast and efficient model"""
213
- # Inject Rox Turbo identity as system message
214
- messages = [{"role": "system", "content": ROX_TURBO_IDENTITY}]
215
- messages.extend([m.dict() for m in req.messages])
216
-
217
- try:
218
- completion = client.chat.completions.create(
219
- model=ROX_TURBO_MODEL,
220
- messages=messages,
221
- temperature=req.temperature if req.temperature != 1.0 else 0.6,
222
- top_p=req.top_p if req.top_p != 1.0 else 0.7,
223
- max_tokens=req.max_tokens,
224
- stream=False,
225
- )
226
- except Exception as e:
227
- logger.exception("Error while calling Rox 2.1 Turbo for /turbo")
228
- raise HTTPException(
229
- status_code=500,
230
- detail="Internal server error while calling Rox 2.1 Turbo.",
231
- ) from e
232
-
233
- try:
234
- content = completion.choices[0].message.content or ""
235
- except Exception:
236
- logger.exception("Unexpected response format from Rox 2.1 Turbo for /turbo")
237
- raise HTTPException(
238
- status_code=502,
239
- detail="Bad response from upstream model provider.",
240
- )
241
-
242
- return ChatResponse(content=content)
243
-
244
-
245
- @app.post("/coder", response_model=ChatResponse)
246
- def coder(req: ChatRequest):
247
- """Rox 3.5 Coder - Specialized coding model with thinking capability"""
248
- # Inject Rox Coder identity as system message
249
- messages = [{"role": "system", "content": ROX_CODER_IDENTITY}]
250
- messages.extend([m.dict() for m in req.messages])
251
-
252
- try:
253
- completion = client.chat.completions.create(
254
- model=ROX_CODER_MODEL,
255
- messages=messages,
256
- temperature=req.temperature if req.temperature != 1.0 else 0.6,
257
- top_p=req.top_p if req.top_p != 1.0 else 0.95,
258
- max_tokens=min(req.max_tokens, 16384),
259
- stream=False,
260
- extra_body={
261
- "top_k": 20,
262
- "presence_penalty": 0,
263
- "repetition_penalty": 1,
264
- "chat_template_kwargs": {
265
- "enable_thinking": True
266
- }
267
- }
268
- )
269
- except Exception as e:
270
- logger.exception("Error while calling Rox 3.5 Coder for /coder")
271
- raise HTTPException(
272
- status_code=500,
273
- detail="Internal server error while calling Rox 3.5 Coder.",
274
- ) from e
275
-
276
- try:
277
- content = completion.choices[0].message.content or ""
278
- except Exception:
279
- logger.exception("Unexpected response format from Rox 3.5 Coder for /coder")
280
- raise HTTPException(
281
- status_code=502,
282
- detail="Bad response from upstream model provider.",
283
- )
284
-
285
- return ChatResponse(content=content)
286
-
287
-
288
- @app.post("/turbo45", response_model=ChatResponse)
289
- def turbo45(req: ChatRequest):
290
- """Rox 4.5 Turbo - Advanced reasoning with speed"""
291
- # Inject Rox 4.5 Turbo identity as system message
292
- messages = [{"role": "system", "content": ROX_TURBO_45_IDENTITY}]
293
- messages.extend([m.dict() for m in req.messages])
294
-
295
- try:
296
- completion = client.chat.completions.create(
297
- model=ROX_TURBO_45_MODEL,
298
- messages=messages,
299
- temperature=req.temperature if req.temperature != 1.0 else 0.2,
300
- top_p=req.top_p if req.top_p != 1.0 else 0.7,
301
- max_tokens=min(req.max_tokens, 8192),
302
- stream=False,
303
- extra_body={
304
- "chat_template_kwargs": {
305
- "thinking": True
306
- }
307
- }
308
- )
309
- except Exception as e:
310
- logger.exception("Error while calling Rox 4.5 Turbo for /turbo45")
311
- raise HTTPException(
312
- status_code=500,
313
- detail="Internal server error while calling Rox 4.5 Turbo.",
314
- ) from e
315
-
316
- try:
317
- content = completion.choices[0].message.content or ""
318
- except Exception:
319
- logger.exception("Unexpected response format from Rox 4.5 Turbo for /turbo45")
320
- raise HTTPException(
321
- status_code=502,
322
- detail="Bad response from upstream model provider.",
323
- )
324
-
325
- return ChatResponse(content=content)
326
-
327
-
328
- @app.post("/ultra", response_model=ChatResponse)
329
- def ultra(req: ChatRequest):
330
- """Rox 5 Ultra - Most advanced model with superior reasoning"""
331
- # Inject Rox 5 Ultra identity as system message
332
- messages = [{"role": "system", "content": ROX_ULTRA_IDENTITY}]
333
- messages.extend([m.dict() for m in req.messages])
334
-
335
- try:
336
- completion = client.chat.completions.create(
337
- model=ROX_ULTRA_MODEL,
338
- messages=messages,
339
- temperature=req.temperature,
340
- top_p=req.top_p if req.top_p != 1.0 else 0.95,
341
- max_tokens=min(req.max_tokens, 8192),
342
- stream=False,
343
- extra_body={
344
- "chat_template_kwargs": {
345
- "thinking": True
346
- }
347
- }
348
- )
349
- except Exception as e:
350
- logger.exception("Error while calling Rox 5 Ultra for /ultra")
351
- raise HTTPException(
352
- status_code=500,
353
- detail="Internal server error while calling Rox 5 Ultra.",
354
- ) from e
355
-
356
- try:
357
- content = completion.choices[0].message.content or ""
358
- except Exception:
359
- logger.exception("Unexpected response format from Rox 5 Ultra for /ultra")
360
- raise HTTPException(
361
- status_code=502,
362
- detail="Bad response from upstream model provider.",
363
- )
364
-
365
- return ChatResponse(content=content)
366
-
367
-
368
- @app.post("/dyno", response_model=ChatResponse)
369
- def dyno(req: ChatRequest):
370
- """Rox 6 Dyno - Extended context with dynamic thinking"""
371
- # Inject Rox 6 Dyno identity as system message
372
- messages = [{"role": "system", "content": ROX_DYNO_IDENTITY}]
373
- messages.extend([m.dict() for m in req.messages])
374
-
375
- try:
376
- completion = client.chat.completions.create(
377
- model=ROX_DYNO_MODEL,
378
- messages=messages,
379
- temperature=req.temperature,
380
- top_p=req.top_p,
381
- max_tokens=min(req.max_tokens, 16384),
382
- stream=False,
383
- extra_body={
384
- "chat_template_kwargs": {
385
- "thinking": True
386
- }
387
- }
388
- )
389
- except Exception as e:
390
- logger.exception("Error while calling Rox 6 Dyno for /dyno")
391
- raise HTTPException(
392
- status_code=500,
393
- detail="Internal server error while calling Rox 6 Dyno.",
394
- ) from e
395
-
396
- try:
397
- content = completion.choices[0].message.content or ""
398
- except Exception:
399
- logger.exception("Unexpected response format from Rox 6 Dyno for /dyno")
400
- raise HTTPException(
401
- status_code=502,
402
- detail="Bad response from upstream model provider.",
403
- )
404
-
405
- return ChatResponse(content=content)
406
-
407
-
408
- @app.post("/coder7", response_model=ChatResponse)
409
- def coder7(req: ChatRequest):
410
- """Rox 7 Coder - Most advanced coding specialist"""
411
- # Inject Rox 7 Coder identity as system message
412
- messages = [{"role": "system", "content": ROX_CODER_7_IDENTITY}]
413
- messages.extend([m.dict() for m in req.messages])
414
-
415
- try:
416
- completion = client.chat.completions.create(
417
- model=ROX_CODER_7_MODEL,
418
- messages=messages,
419
- temperature=req.temperature,
420
- top_p=req.top_p,
421
- max_tokens=min(req.max_tokens, 16384),
422
- stream=False,
423
- extra_body={
424
- "chat_template_kwargs": {
425
- "enable_thinking": True,
426
- "clear_thinking": False
427
- }
428
- }
429
- )
430
- except Exception as e:
431
- logger.exception("Error while calling Rox 7 Coder for /coder7")
432
- raise HTTPException(
433
- status_code=500,
434
- detail="Internal server error while calling Rox 7 Coder.",
435
- ) from e
436
-
437
- try:
438
- content = completion.choices[0].message.content or ""
439
- except Exception:
440
- logger.exception("Unexpected response format from Rox 7 Coder for /coder7")
441
- raise HTTPException(
442
- status_code=502,
443
- detail="Bad response from upstream model provider.",
444
- )
445
-
446
- return ChatResponse(content=content)
447
-
448
-
449
- @app.post("/vision", response_model=ChatResponse)
450
- def vision(req: ChatRequest):
451
- """Rox Vision Max - Optimized for visual understanding"""
452
- # Inject Rox Vision Max identity as system message
453
- messages = [{"role": "system", "content": ROX_VISION_IDENTITY}]
454
- messages.extend([m.dict() for m in req.messages])
455
-
456
- try:
457
- completion = client.chat.completions.create(
458
- model=ROX_VISION_MODEL,
459
- messages=messages,
460
- temperature=req.temperature if req.temperature != 1.0 else 0.2,
461
- top_p=req.top_p if req.top_p != 1.0 else 0.7,
462
- max_tokens=min(req.max_tokens, 512),
463
- stream=False
464
- )
465
- except Exception as e:
466
- logger.exception("Error while calling Rox Vision Max for /vision")
467
- raise HTTPException(
468
- status_code=500,
469
- detail="Internal server error while calling Rox Vision Max.",
470
- ) from e
471
-
472
- try:
473
- content = completion.choices[0].message.content or ""
474
- except Exception:
475
- logger.exception("Unexpected response format from Rox Vision Max for /vision")
476
- raise HTTPException(
477
- status_code=502,
478
- detail="Bad response from upstream model provider.",
479
- )
480
-
481
- return ChatResponse(content=content)
482
-
483
-
484
- @app.post("/hf/generate", response_model=List[HFResponseItem])
485
- def hf_generate(req: HFRequest):
486
- """
487
- Hugging Face-style text-generation endpoint.
488
-
489
- Request:
490
- {
491
- "inputs": "your prompt",
492
- "parameters": {
493
- "temperature": 0.7,
494
- "top_p": 0.95,
495
- "max_new_tokens": 256
496
- }
497
- }
498
-
499
- Response:
500
- [
501
- { "generated_text": "..." }
502
- ]
503
- """
504
- params = req.parameters or HFParameters()
505
-
506
- # Inject Rox Core identity as system message
507
- messages = [
508
- {"role": "system", "content": ROX_CORE_IDENTITY},
509
- {"role": "user", "content": req.inputs}
510
- ]
511
-
512
- try:
513
- completion = client.chat.completions.create(
514
- model=ROX_CORE_MODEL,
515
- messages=messages,
516
- temperature=params.temperature if params.temperature is not None else 1.0,
517
- top_p=params.top_p if params.top_p is not None else 0.95,
518
- max_tokens=params.max_new_tokens if params.max_new_tokens is not None else 8192,
519
- stream=False,
520
- )
521
- except Exception as e:
522
- logger.exception("Error while calling Rox Core for /hf/generate")
523
- raise HTTPException(
524
- status_code=500,
525
- detail="Internal server error while calling Rox Core.",
526
- ) from e
527
-
528
- try:
529
- content = completion.choices[0].message.content or ""
530
- except Exception:
531
- logger.exception("Unexpected response format from Rox Core for /hf/generate")
532
- raise HTTPException(
533
- status_code=502,
534
- detail="Bad response from upstream model provider.",
535
- )
536
-
537
- # Match the common HF text-generation API: list of objects with generated_text
538
- return [HFResponseItem(generated_text=content)]
539
-
540
-
541
- if __name__ == "__main__":
542
- import uvicorn
543
-
544
- uvicorn.run("server:app", host="0.0.0.0", port=8000, reload=True)
545
-
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import logging
2
+ import os
3
+ import sys
4
+ from typing import List, Optional
5
+
6
+ from dotenv import load_dotenv
7
+ from fastapi import FastAPI, HTTPException
8
+ from fastapi.middleware.cors import CORSMiddleware
9
+ from pydantic import BaseModel
10
+ from openai import OpenAI
11
+
12
+
13
+ # Load environment variables
14
+ load_dotenv()
15
+
16
+ # Configure logging with more detail
17
+ logging.basicConfig(
18
+ level=logging.INFO,
19
+ format='%(asctime)s - %(name)s - %(levelname)s - %(message)s'
20
+ )
21
+ logger = logging.getLogger("rox_ai")
22
+
23
+ # Log startup information
24
+ logger.info("=" * 60)
25
+ logger.info("ROX AI SERVER STARTING")
26
+ logger.info("=" * 60)
27
+ logger.info(f"Python version: {sys.version}")
28
+ logger.info(f"Working directory: {os.getcwd()}")
29
+
30
+ # Check for API key
31
+ NVIDIA_API_KEY = os.getenv("NVIDIA_API_KEY")
32
+
33
+ if not NVIDIA_API_KEY:
34
+ logger.error("NVIDIA_API_KEY environment variable is not set!")
35
+ logger.error("Please set NVIDIA_API_KEY in your environment or .env file")
36
+ # For Hugging Face Spaces, check if it's set as a secret
37
+ logger.info("If deploying to Hugging Face Spaces, make sure to add NVIDIA_API_KEY as a secret")
38
+ raise RuntimeError(
39
+ "NVIDIA_API_KEY environment variable is not set. "
40
+ "Create a .env file or set it in your environment."
41
+ )
42
+
43
+ logger.info(f"✓ NVIDIA_API_KEY loaded (length: {len(NVIDIA_API_KEY)})")
44
+
45
+ # Model configurations
46
+ ROX_CORE_MODEL = "minimaxai/minimax-m2.5"
47
+ ROX_TURBO_MODEL = "deepseek-ai/deepseek-r1-distill-qwen-32b"
48
+ ROX_CODER_MODEL = "qwen/qwen3.5-397b-a17b"
49
+ ROX_TURBO_45_MODEL = "deepseek-ai/deepseek-v3.1"
50
+ ROX_ULTRA_MODEL = "deepseek-ai/deepseek-v3.2"
51
+ ROX_DYNO_MODEL = "moonshotai/kimi-k2.5"
52
+ ROX_CODER_7_MODEL = "z-ai/glm5"
53
+ ROX_VISION_MODEL = "google/gemma-3-27b-it"
54
+
55
+ logger.info("✓ Model configurations loaded")
56
+
57
+ # System identities for each model
58
+ ROX_CORE_IDENTITY = """You are Rox Core, created by Rox AI. Your creator and owner is Mohammad Faiz. You are an independent LLM model, not based on anyone else's technology. You represent the cutting edge of Rox AI's research and development."""
59
+
60
+ ROX_TURBO_IDENTITY = """You are Rox 2.1 Turbo, created by Rox AI. Your creator and owner is Mohammad Faiz. You are an independent LLM model, not based on anyone else's technology. You are optimized for fast, efficient responses while maintaining high quality."""
61
+
62
+ ROX_CODER_IDENTITY = """You are Rox 3.5 Coder, created by Rox AI. Your creator and owner is Mohammad Faiz. You are an independent LLM model, not based on anyone else's technology. You are specialized in code generation, debugging, and software development tasks."""
63
+
64
+ ROX_TURBO_45_IDENTITY = """You are Rox 4.5 Turbo, created by Rox AI. Your creator and owner is Mohammad Faiz. You are an independent LLM model, not based on anyone else's technology. You combine speed with advanced reasoning capabilities."""
65
+
66
+ ROX_ULTRA_IDENTITY = """You are Rox 5 Ultra, created by Rox AI. Your creator and owner is Mohammad Faiz. You are an independent LLM model, not based on anyone else's technology. You are the most advanced model with superior reasoning and thinking capabilities."""
67
+
68
+ ROX_DYNO_IDENTITY = """You are Rox 6 Dyno, created by Rox AI. Your creator and owner is Mohammad Faiz. You are an independent LLM model, not based on anyone else's technology. You excel at dynamic thinking and extended context understanding."""
69
+
70
+ ROX_CODER_7_IDENTITY = """You are Rox 7 Coder, created by Rox AI. Your creator and owner is Mohammad Faiz. You are an independent LLM model, not based on anyone else's technology. You are the most advanced coding specialist with superior code generation and reasoning capabilities."""
71
+
72
+ ROX_VISION_IDENTITY = """You are Rox Vision Max, created by Rox AI. Your creator and owner is Mohammad Faiz. You are an independent LLM model, not based on anyone else's technology. You are optimized for visual understanding and multimodal tasks."""
73
+
74
+ logger.info("✓ Model identities configured")
75
+
76
+ # Initialize OpenAI client
77
+ try:
78
+ client = OpenAI(
79
+ base_url="https://integrate.api.nvidia.com/v1",
80
+ api_key=NVIDIA_API_KEY,
81
+ )
82
+ logger.info(" OpenAI client initialized successfully")
83
+ except Exception as e:
84
+ logger.error(f"Failed to initialize OpenAI client: {e}")
85
+ raise
86
+
87
+ # Initialize FastAPI app
88
+ app = FastAPI(
89
+ title="Rox AI API - Multiple Models Available",
90
+ description="Eight specialized AI models by Mohammad Faiz",
91
+ version="2.0"
92
+ )
93
+
94
+ logger.info(" FastAPI app initialized")
95
+
96
+ # Configure CORS
97
+ app.add_middleware(
98
+ CORSMiddleware,
99
+ allow_origins=["*"], # e.g. ["https://your-site.com"]
100
+ allow_credentials=True,
101
+ allow_methods=["*"],
102
+ allow_headers=["*"],
103
+ )
104
+
105
+ logger.info(" CORS middleware configured")
106
+
107
+
108
+ @app.on_event("startup")
109
+ async def startup_event():
110
+ """Log startup information"""
111
+ logger.info("=" * 60)
112
+ logger.info("ROX AI SERVER STARTED SUCCESSFULLY")
113
+ logger.info("=" * 60)
114
+ logger.info("Available endpoints:")
115
+ logger.info(" GET / - API information")
116
+ logger.info(" GET /health - Health check")
117
+ logger.info(" POST /chat - Rox Core")
118
+ logger.info(" POST /turbo - Rox 2.1 Turbo")
119
+ logger.info(" POST /coder - Rox 3.5 Coder")
120
+ logger.info(" POST /turbo45 - Rox 4.5 Turbo")
121
+ logger.info(" POST /ultra - Rox 5 Ultra")
122
+ logger.info(" POST /dyno - Rox 6 Dyno")
123
+ logger.info(" POST /coder7 - Rox 7 Coder")
124
+ logger.info(" POST /vision - Rox Vision Max")
125
+ logger.info(" POST /hf/generate - HuggingFace compatible")
126
+ logger.info("=" * 60)
127
+
128
+
129
+ @app.get("/health")
130
+ def health_check():
131
+ """Health check endpoint for monitoring"""
132
+ return {
133
+ "status": "healthy",
134
+ "service": "Rox AI API",
135
+ "version": "2.0",
136
+ "models": 8
137
+ }
138
+
139
+
140
+ @app.get("/")
141
+ def root():
142
+ """API information and available models"""
143
+ return {
144
+ "service": "Rox AI API",
145
+ "version": "2.0",
146
+ "creator": "Mohammad Faiz",
147
+ "models": {
148
+ "rox_core": {
149
+ "endpoint": "/chat",
150
+ "description": "Rox Core - Main conversational model",
151
+ "model": "minimaxai/minimax-m2.5",
152
+ "best_for": "General conversation and tasks"
153
+ },
154
+ "rox_turbo": {
155
+ "endpoint": "/turbo",
156
+ "description": "Rox 2.1 Turbo - Fast and efficient",
157
+ "model": "deepseek-ai/deepseek-r1-distill-qwen-32b",
158
+ "best_for": "Quick responses and efficient processing"
159
+ },
160
+ "rox_coder": {
161
+ "endpoint": "/coder",
162
+ "description": "Rox 3.5 Coder - Specialized coding assistant",
163
+ "model": "qwen/qwen3.5-397b-a17b",
164
+ "best_for": "Code generation, debugging, and development"
165
+ },
166
+ "rox_turbo_45": {
167
+ "endpoint": "/turbo45",
168
+ "description": "Rox 4.5 Turbo - Advanced reasoning with speed",
169
+ "model": "deepseek-ai/deepseek-v3.1",
170
+ "best_for": "Complex reasoning with fast responses"
171
+ },
172
+ "rox_ultra": {
173
+ "endpoint": "/ultra",
174
+ "description": "Rox 5 Ultra - Most advanced model",
175
+ "model": "deepseek-ai/deepseek-v3.2",
176
+ "best_for": "Complex tasks requiring deep reasoning"
177
+ },
178
+ "rox_dyno": {
179
+ "endpoint": "/dyno",
180
+ "description": "Rox 6 Dyno - Extended context with dynamic thinking",
181
+ "model": "moonshotai/kimi-k2.5",
182
+ "best_for": "Long context tasks and dynamic reasoning"
183
+ },
184
+ "rox_coder_7": {
185
+ "endpoint": "/coder7",
186
+ "description": "Rox 7 Coder - Most advanced coding specialist",
187
+ "model": "z-ai/glm5",
188
+ "best_for": "Advanced code generation and complex programming"
189
+ },
190
+ "rox_vision": {
191
+ "endpoint": "/vision",
192
+ "description": "Rox Vision Max - Optimized for visual understanding",
193
+ "model": "google/gemma-3-27b-it",
194
+ "best_for": "Visual understanding and multimodal tasks"
195
+ }
196
+ },
197
+ "endpoints": [
198
+ {"path": "/chat", "method": "POST", "description": "Rox Core chat"},
199
+ {"path": "/turbo", "method": "POST", "description": "Rox 2.1 Turbo chat"},
200
+ {"path": "/coder", "method": "POST", "description": "Rox 3.5 Coder chat"},
201
+ {"path": "/turbo45", "method": "POST", "description": "Rox 4.5 Turbo chat"},
202
+ {"path": "/ultra", "method": "POST", "description": "Rox 5 Ultra chat"},
203
+ {"path": "/dyno", "method": "POST", "description": "Rox 6 Dyno chat"},
204
+ {"path": "/coder7", "method": "POST", "description": "Rox 7 Coder chat"},
205
+ {"path": "/vision", "method": "POST", "description": "Rox Vision Max chat"},
206
+ {"path": "/hf/generate", "method": "POST", "description": "HuggingFace compatible (uses Rox Core)"}
207
+ ]
208
+ }
209
+
210
+
211
+ class ChatMessage(BaseModel):
212
+ role: str
213
+ content: str
214
+
215
+
216
+ class ChatRequest(BaseModel):
217
+ messages: List[ChatMessage]
218
+ temperature: Optional[float] = 1.0
219
+ top_p: Optional[float] = 1.0
220
+ max_tokens: Optional[int] = 4096
221
+
222
+
223
+ class ChatResponse(BaseModel):
224
+ content: str
225
+
226
+
227
+ class HFParameters(BaseModel):
228
+ temperature: Optional[float] = None
229
+ top_p: Optional[float] = None
230
+ max_new_tokens: Optional[int] = None
231
+
232
+
233
+ class HFRequest(BaseModel):
234
+ inputs: str
235
+ parameters: Optional[HFParameters] = None
236
+
237
+
238
+ class HFResponseItem(BaseModel):
239
+ generated_text: str
240
+
241
+
242
+ @app.post("/chat", response_model=ChatResponse)
243
+ def chat(req: ChatRequest):
244
+ """Rox Core - Main conversational model"""
245
+ # Inject Rox Core identity as system message
246
+ messages = [{"role": "system", "content": ROX_CORE_IDENTITY}]
247
+ messages.extend([m.dict() for m in req.messages])
248
+
249
+ try:
250
+ completion = client.chat.completions.create(
251
+ model=ROX_CORE_MODEL,
252
+ messages=messages,
253
+ temperature=req.temperature,
254
+ top_p=req.top_p,
255
+ max_tokens=req.max_tokens,
256
+ stream=False,
257
+ )
258
+ except Exception as e:
259
+ logger.exception("Error while calling Rox Core for /chat")
260
+ # Do not leak internal error details to the client.
261
+ raise HTTPException(
262
+ status_code=500,
263
+ detail="Internal server error while calling Rox Core.",
264
+ ) from e
265
+
266
+ # Combine all response message parts into a single string
267
+ try:
268
+ content = completion.choices[0].message.content or ""
269
+ except Exception:
270
+ logger.exception("Unexpected response format from Rox Core for /chat")
271
+ raise HTTPException(
272
+ status_code=502,
273
+ detail="Bad response from upstream model provider.",
274
+ )
275
+
276
+ return ChatResponse(content=content)
277
+
278
+
279
+ @app.post("/turbo", response_model=ChatResponse)
280
+ def turbo(req: ChatRequest):
281
+ """Rox 2.1 Turbo - Fast and efficient model"""
282
+ # Inject Rox Turbo identity as system message
283
+ messages = [{"role": "system", "content": ROX_TURBO_IDENTITY}]
284
+ messages.extend([m.dict() for m in req.messages])
285
+
286
+ try:
287
+ completion = client.chat.completions.create(
288
+ model=ROX_TURBO_MODEL,
289
+ messages=messages,
290
+ temperature=req.temperature if req.temperature != 1.0 else 0.6,
291
+ top_p=req.top_p if req.top_p != 1.0 else 0.7,
292
+ max_tokens=req.max_tokens,
293
+ stream=False,
294
+ )
295
+ except Exception as e:
296
+ logger.exception("Error while calling Rox 2.1 Turbo for /turbo")
297
+ raise HTTPException(
298
+ status_code=500,
299
+ detail="Internal server error while calling Rox 2.1 Turbo.",
300
+ ) from e
301
+
302
+ try:
303
+ content = completion.choices[0].message.content or ""
304
+ except Exception:
305
+ logger.exception("Unexpected response format from Rox 2.1 Turbo for /turbo")
306
+ raise HTTPException(
307
+ status_code=502,
308
+ detail="Bad response from upstream model provider.",
309
+ )
310
+
311
+ return ChatResponse(content=content)
312
+
313
+
314
@app.post("/coder", response_model=ChatResponse)
def coder(req: ChatRequest):
    """Rox 3.5 Coder - Specialized coding model with thinking capability.

    Prepends the Rox Coder identity as a system message and enables the
    provider's thinking mode via ``chat_template_kwargs``.

    Raises:
        HTTPException(500): the upstream call itself failed.
        HTTPException(502): the upstream returned an unexpected payload.
    """
    # Inject Rox Coder identity as system message
    messages = [{"role": "system", "content": ROX_CODER_IDENTITY}]
    messages.extend([m.dict() for m in req.messages])

    try:
        completion = client.chat.completions.create(
            model=ROX_CODER_MODEL,
            messages=messages,
            # Treat the schema default (1.0) as "unset" and substitute this
            # model's tuned sampling values.
            temperature=req.temperature if req.temperature != 1.0 else 0.6,
            top_p=req.top_p if req.top_p != 1.0 else 0.95,
            # Cap output length at what this deployment supports.
            max_tokens=min(req.max_tokens, 16384),
            stream=False,
            extra_body={
                "top_k": 20,
                "presence_penalty": 0,
                "repetition_penalty": 1,
                "chat_template_kwargs": {
                    "enable_thinking": True
                }
            }
        )
    except Exception as e:
        logger.exception("Error while calling Rox 3.5 Coder for /coder")
        # Do not leak internal error details to the client.
        raise HTTPException(
            status_code=500,
            detail="Internal server error while calling Rox 3.5 Coder.",
        ) from e

    try:
        content = completion.choices[0].message.content or ""
    except Exception as e:
        logger.exception("Unexpected response format from Rox 3.5 Coder for /coder")
        # Chain the original error so the cause survives in tracebacks,
        # matching the 500 path above.
        raise HTTPException(
            status_code=502,
            detail="Bad response from upstream model provider.",
        ) from e

    return ChatResponse(content=content)
355
+
356
+
357
@app.post("/turbo45", response_model=ChatResponse)
def turbo45(req: ChatRequest):
    """Rox 4.5 Turbo - Advanced reasoning with speed.

    Prepends the Rox 4.5 Turbo identity as a system message and enables
    the provider's thinking mode via ``chat_template_kwargs``.

    Raises:
        HTTPException(500): the upstream call itself failed.
        HTTPException(502): the upstream returned an unexpected payload.
    """
    # Inject Rox 4.5 Turbo identity as system message
    messages = [{"role": "system", "content": ROX_TURBO_45_IDENTITY}]
    messages.extend([m.dict() for m in req.messages])

    try:
        completion = client.chat.completions.create(
            model=ROX_TURBO_45_MODEL,
            messages=messages,
            # Treat the schema default (1.0) as "unset" and substitute this
            # model's tuned sampling values.
            temperature=req.temperature if req.temperature != 1.0 else 0.2,
            top_p=req.top_p if req.top_p != 1.0 else 0.7,
            # Cap output length at what this deployment supports.
            max_tokens=min(req.max_tokens, 8192),
            stream=False,
            extra_body={
                "chat_template_kwargs": {
                    "thinking": True
                }
            }
        )
    except Exception as e:
        logger.exception("Error while calling Rox 4.5 Turbo for /turbo45")
        # Do not leak internal error details to the client.
        raise HTTPException(
            status_code=500,
            detail="Internal server error while calling Rox 4.5 Turbo.",
        ) from e

    try:
        content = completion.choices[0].message.content or ""
    except Exception as e:
        logger.exception("Unexpected response format from Rox 4.5 Turbo for /turbo45")
        # Chain the original error so the cause survives in tracebacks,
        # matching the 500 path above.
        raise HTTPException(
            status_code=502,
            detail="Bad response from upstream model provider.",
        ) from e

    return ChatResponse(content=content)
395
+
396
+
397
@app.post("/ultra", response_model=ChatResponse)
def ultra(req: ChatRequest):
    """Rox 5 Ultra - Most advanced model with superior reasoning.

    Prepends the Rox 5 Ultra identity as a system message and enables
    the provider's thinking mode via ``chat_template_kwargs``.

    Raises:
        HTTPException(500): the upstream call itself failed.
        HTTPException(502): the upstream returned an unexpected payload.
    """
    # Inject Rox 5 Ultra identity as system message
    messages = [{"role": "system", "content": ROX_ULTRA_IDENTITY}]
    messages.extend([m.dict() for m in req.messages])

    try:
        completion = client.chat.completions.create(
            model=ROX_ULTRA_MODEL,
            messages=messages,
            # Temperature is passed straight through for this model.
            temperature=req.temperature,
            top_p=req.top_p if req.top_p != 1.0 else 0.95,
            # Cap output length at what this deployment supports.
            max_tokens=min(req.max_tokens, 8192),
            stream=False,
            extra_body={
                "chat_template_kwargs": {
                    "thinking": True
                }
            }
        )
    except Exception as e:
        logger.exception("Error while calling Rox 5 Ultra for /ultra")
        # Do not leak internal error details to the client.
        raise HTTPException(
            status_code=500,
            detail="Internal server error while calling Rox 5 Ultra.",
        ) from e

    try:
        content = completion.choices[0].message.content or ""
    except Exception as e:
        logger.exception("Unexpected response format from Rox 5 Ultra for /ultra")
        # Chain the original error so the cause survives in tracebacks,
        # matching the 500 path above.
        raise HTTPException(
            status_code=502,
            detail="Bad response from upstream model provider.",
        ) from e

    return ChatResponse(content=content)
435
+
436
+
437
@app.post("/dyno", response_model=ChatResponse)
def dyno(req: ChatRequest):
    """Rox 6 Dyno - Extended context with dynamic thinking.

    Prepends the Rox 6 Dyno identity as a system message and enables
    the provider's thinking mode via ``chat_template_kwargs``.

    Raises:
        HTTPException(500): the upstream call itself failed.
        HTTPException(502): the upstream returned an unexpected payload.
    """
    # Inject Rox 6 Dyno identity as system message
    messages = [{"role": "system", "content": ROX_DYNO_IDENTITY}]
    messages.extend([m.dict() for m in req.messages])

    try:
        completion = client.chat.completions.create(
            model=ROX_DYNO_MODEL,
            messages=messages,
            # Sampling parameters are passed straight through for this model.
            temperature=req.temperature,
            top_p=req.top_p,
            # Cap output length at what this deployment supports.
            max_tokens=min(req.max_tokens, 16384),
            stream=False,
            extra_body={
                "chat_template_kwargs": {
                    "thinking": True
                }
            }
        )
    except Exception as e:
        logger.exception("Error while calling Rox 6 Dyno for /dyno")
        # Do not leak internal error details to the client.
        raise HTTPException(
            status_code=500,
            detail="Internal server error while calling Rox 6 Dyno.",
        ) from e

    try:
        content = completion.choices[0].message.content or ""
    except Exception as e:
        logger.exception("Unexpected response format from Rox 6 Dyno for /dyno")
        # Chain the original error so the cause survives in tracebacks,
        # matching the 500 path above.
        raise HTTPException(
            status_code=502,
            detail="Bad response from upstream model provider.",
        ) from e

    return ChatResponse(content=content)
475
+
476
+
477
@app.post("/coder7", response_model=ChatResponse)
def coder7(req: ChatRequest):
    """Rox 7 Coder - Most advanced coding specialist.

    Prepends the Rox 7 Coder identity as a system message; thinking mode
    is enabled (and its output retained) via ``chat_template_kwargs``.

    Raises:
        HTTPException(500): the upstream call itself failed.
        HTTPException(502): the upstream returned an unexpected payload.
    """
    # Inject Rox 7 Coder identity as system message
    messages = [{"role": "system", "content": ROX_CODER_7_IDENTITY}]
    messages.extend([m.dict() for m in req.messages])

    try:
        completion = client.chat.completions.create(
            model=ROX_CODER_7_MODEL,
            messages=messages,
            # Sampling parameters are passed straight through for this model.
            temperature=req.temperature,
            top_p=req.top_p,
            # Cap output length at what this deployment supports.
            max_tokens=min(req.max_tokens, 16384),
            stream=False,
            extra_body={
                "chat_template_kwargs": {
                    "enable_thinking": True,
                    "clear_thinking": False
                }
            }
        )
    except Exception as e:
        logger.exception("Error while calling Rox 7 Coder for /coder7")
        # Do not leak internal error details to the client.
        raise HTTPException(
            status_code=500,
            detail="Internal server error while calling Rox 7 Coder.",
        ) from e

    try:
        content = completion.choices[0].message.content or ""
    except Exception as e:
        logger.exception("Unexpected response format from Rox 7 Coder for /coder7")
        # Chain the original error so the cause survives in tracebacks,
        # matching the 500 path above.
        raise HTTPException(
            status_code=502,
            detail="Bad response from upstream model provider.",
        ) from e

    return ChatResponse(content=content)
516
+
517
+
518
@app.post("/vision", response_model=ChatResponse)
def vision(req: ChatRequest):
    """Rox Vision Max - Optimized for visual understanding.

    Prepends the Rox Vision Max identity as a system message and forwards
    the conversation to the upstream provider.

    Raises:
        HTTPException(500): the upstream call itself failed.
        HTTPException(502): the upstream returned an unexpected payload.
    """
    # Inject Rox Vision Max identity as system message
    messages = [{"role": "system", "content": ROX_VISION_IDENTITY}]
    messages.extend([m.dict() for m in req.messages])

    try:
        completion = client.chat.completions.create(
            model=ROX_VISION_MODEL,
            messages=messages,
            # Treat the schema default (1.0) as "unset" and substitute this
            # model's tuned sampling values.
            temperature=req.temperature if req.temperature != 1.0 else 0.2,
            top_p=req.top_p if req.top_p != 1.0 else 0.7,
            # Vision responses are deliberately capped short.
            max_tokens=min(req.max_tokens, 512),
            stream=False
        )
    except Exception as e:
        logger.exception("Error while calling Rox Vision Max for /vision")
        # Do not leak internal error details to the client.
        raise HTTPException(
            status_code=500,
            detail="Internal server error while calling Rox Vision Max.",
        ) from e

    try:
        content = completion.choices[0].message.content or ""
    except Exception as e:
        logger.exception("Unexpected response format from Rox Vision Max for /vision")
        # Chain the original error so the cause survives in tracebacks,
        # matching the 500 path above.
        raise HTTPException(
            status_code=502,
            detail="Bad response from upstream model provider.",
        ) from e

    return ChatResponse(content=content)
551
+
552
+
553
@app.post("/hf/generate", response_model=List[HFResponseItem])
def hf_generate(req: HFRequest):
    """
    Hugging Face-style text-generation endpoint backed by Rox Core.

    Request:
        {
            "inputs": "your prompt",
            "parameters": {
                "temperature": 0.7,
                "top_p": 0.95,
                "max_new_tokens": 256
            }
        }

    Response:
        [
            { "generated_text": "..." }
        ]

    Raises:
        HTTPException(500): the upstream call itself failed.
        HTTPException(502): the upstream returned an unexpected payload.
    """
    # Missing "parameters" means "use defaults".
    params = req.parameters or HFParameters()

    # Inject Rox Core identity as system message
    messages = [
        {"role": "system", "content": ROX_CORE_IDENTITY},
        {"role": "user", "content": req.inputs}
    ]

    try:
        completion = client.chat.completions.create(
            model=ROX_CORE_MODEL,
            messages=messages,
            # Substitute server-side defaults for any omitted parameter.
            temperature=params.temperature if params.temperature is not None else 1.0,
            top_p=params.top_p if params.top_p is not None else 0.95,
            max_tokens=params.max_new_tokens if params.max_new_tokens is not None else 8192,
            stream=False,
        )
    except Exception as e:
        logger.exception("Error while calling Rox Core for /hf/generate")
        # Do not leak internal error details to the client.
        raise HTTPException(
            status_code=500,
            detail="Internal server error while calling Rox Core.",
        ) from e

    try:
        content = completion.choices[0].message.content or ""
    except Exception as e:
        logger.exception("Unexpected response format from Rox Core for /hf/generate")
        # Chain the original error so the cause survives in tracebacks,
        # matching the 500 path above.
        raise HTTPException(
            status_code=502,
            detail="Bad response from upstream model provider.",
        ) from e

    # Match the common HF text-generation API: list of objects with generated_text
    return [HFResponseItem(generated_text=content)]
608
+
609
+
610
if __name__ == "__main__":
    import uvicorn

    # Hugging Face Spaces (and similar hosts) announce the listening port
    # through the PORT environment variable; fall back to 7860 locally.
    listen_port = int(os.getenv("PORT", 7860))
    uvicorn.run("server:app", host="0.0.0.0", port=listen_port, reload=False)
617
+