File size: 4,508 Bytes
7867d98
 
4105399
a743fdf
e5f75ff
 
 
 
 
 
900e81e
 
 
3bd322d
e5f75ff
a6e484d
 
 
 
 
 
 
3bd322d
7867d98
 
e5f75ff
900e81e
a6e484d
e5f75ff
 
 
 
 
 
 
7867d98
e5f75ff
7867d98
 
900e81e
 
a6e484d
900e81e
a6e484d
900e81e
7867d98
e5f75ff
 
9e646b9
 
 
e5f75ff
ba3310a
e5f75ff
40a7838
e5f75ff
8a406cf
e5f75ff
a6e484d
4105399
a6e484d
 
 
 
 
 
40a7838
e5f75ff
a743fdf
7867d98
e5f75ff
7867d98
 
9e855f0
e5f75ff
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
900e81e
e5f75ff
 
 
a6e484d
e5f75ff
 
c5e4874
4105399
a6e484d
e5f75ff
4105399
e5f75ff
a743fdf
 
 
 
e5f75ff
a743fdf
 
e5f75ff
 
 
 
 
 
 
 
 
4105399
e5f75ff
 
 
a6e484d
 
 
 
 
a743fdf
4105399
 
e5f75ff
 
 
a6e484d
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
# Standard library
import os
import secrets
import uuid
from datetime import datetime
from typing import Optional

# Third-party
import requests
from dotenv import load_dotenv
from fastapi import FastAPI, HTTPException, Depends, Security
from fastapi.middleware.cors import CORSMiddleware
from fastapi.responses import JSONResponse
from fastapi.security import APIKeyHeader
from pydantic import BaseModel

# Load environment variables
load_dotenv()

# Initialize FastAPI app
app = FastAPI(
    title="MultiChatAI to OpenAI API Wrapper",
    description="API wrapper for MultiChatAI with OpenAI-compatible endpoints",
    version="1.0.0",
    docs_url="/docs",
    redoc_url=None  # ReDoc disabled; Swagger UI remains available at /docs
)

# Configuration
API_KEY_NAME = "X-API-KEY"
# Comma-separated list from .env. Strip surrounding whitespace and drop empty
# entries: the previous `.split(",")` alone produced [""] when API_KEYS was
# unset, and kept stray spaces around keys ("a, b" -> " b").
API_KEYS = [key.strip() for key in os.getenv("API_KEYS", "").split(",") if key.strip()]

# Configure CORS
# NOTE(review): browsers reject allow_origins=["*"] combined with
# allow_credentials=True per the CORS spec; if credentialed requests are
# needed, list explicit origins instead — confirm intended clients.
app.add_middleware(
    CORSMiddleware,
    allow_origins=["*"],
    allow_credentials=True,
    allow_methods=["*"],
    allow_headers=["*"],
)

# Security setup: read the API key from the X-API-KEY request header.
# auto_error=False lets get_api_key() return a tailored 401 instead of
# FastAPI's default 403.
api_key_header = APIKeyHeader(name=API_KEY_NAME, auto_error=False)

async def get_api_key(api_key: str = Security(api_key_header)):
    """Validate the X-API-KEY header against the configured key list.

    Args:
        api_key: Value of the X-API-KEY header (None/"" when absent).

    Returns:
        The validated API key string.

    Raises:
        HTTPException: 401 when the header is missing or matches no key.
    """
    if not api_key:
        raise HTTPException(status_code=401, detail="API key is missing")
    # Compare against every configured key in constant time so response
    # timing does not leak how much of a guessed key matched.
    if not any(secrets.compare_digest(api_key, valid) for valid in API_KEYS):
        raise HTTPException(status_code=401, detail="Invalid API key")
    return api_key

# Request models
class ChatMessage(BaseModel):
    """A single chat turn in OpenAI chat format."""

    # Typically "system", "user", or "assistant"; not constrained here.
    role: str
    # The message text.
    content: str

class ChatCompletionRequest(BaseModel):
    """Body of POST /v1/chat/completions (OpenAI-compatible subset)."""

    model: str = "deepseek-ai/DeepSeek-R1"
    messages: list[ChatMessage]
    temperature: float = 0.7
    # Was annotated `int = None`, which contradicts the default value;
    # Optional[int] makes the field genuinely nullable under validation.
    # NOTE(review): this field is currently not forwarded upstream.
    max_tokens: Optional[int] = None

# Health check endpoint
@app.get("/", include_in_schema=False)
async def health_check():
    """Liveness probe at the root path; hidden from the OpenAPI schema."""
    payload = {
        "status": "OK",
        "service": "MultiChatAI Proxy",
        "timestamp": datetime.now().isoformat(),
        "environment": os.getenv("ENVIRONMENT", "development"),
    }
    return payload

# Main API endpoint
def _build_multichat_payload(request: ChatCompletionRequest) -> dict:
    """Translate an OpenAI-style request into the MultiChatAI body format.

    A fixed system message carrying today's date is prepended to the
    caller-supplied messages. NOTE(review): request.max_tokens is not
    forwarded — MultiChatAI's chatSettings has no obvious field for it here.
    """
    return {
        "chatSettings": {
            "model": request.model,
            "prompt": "You are a helpful AI assistant.",
            "temperature": request.temperature,
            "contextLength": 32000,
            "includeProfileContext": True,
            "includeWorkspaceInstructions": True,
            "embeddingsProvider": "openai"
        },
        "messages": [
            {"role": "system", "content": f"Today is {datetime.now().strftime('%m/%d/%Y')}.\nYou are a helpful AI assistant."},
            *[{"role": msg.role, "content": msg.content} for msg in request.messages]
        ],
        "customModelId": ""
    }


@app.post("/v1/chat/completions")
def chat_completion(
    request: ChatCompletionRequest,
    api_key: str = Depends(get_api_key)
):
    """Proxy an OpenAI-style chat completion to MultiChatAI.

    Declared as a plain `def` (not `async def`) because requests.post is a
    blocking call; FastAPI runs sync endpoints in its threadpool, so the
    event loop is no longer stalled for up to the 30 s upstream timeout.

    Args:
        request: Parsed OpenAI-compatible request body.
        api_key: Validated key injected by get_api_key (unused beyond auth).

    Returns:
        JSONResponse shaped like an OpenAI chat.completion object.

    Raises:
        HTTPException: 504 on upstream timeout, 502 on other upstream
            errors (including non-2xx status), 500 on anything unexpected.
    """
    try:
        upstream_body = _build_multichat_payload(request)

        # Call MultiChatAI API
        response = requests.post(
            "https://www.multichatai.com/api/chat/deepinfra",
            headers={"Content-Type": "application/json"},
            json=upstream_body,
            timeout=30
        )
        response.raise_for_status()

        # Wrap the raw upstream text in an OpenAI-compatible envelope.
        # Token counts are not reported by the upstream, hence the zeros.
        return JSONResponse({
            "id": f"chatcmpl-{uuid.uuid4()}",
            "object": "chat.completion",
            "created": int(datetime.now().timestamp()),
            "model": request.model,
            "choices": [{
                "index": 0,
                "message": {
                    "role": "assistant",
                    "content": response.text.strip()
                },
                "finish_reason": "stop"
            }],
            "usage": {
                "prompt_tokens": 0,
                "completion_tokens": 0,
                "total_tokens": 0
            }
        })

    except requests.Timeout:
        raise HTTPException(status_code=504, detail="Upstream service timeout")
    except requests.RequestException as e:
        raise HTTPException(
            status_code=502,
            detail=f"Upstream service error: {str(e)}"
        )
    except Exception as e:
        raise HTTPException(status_code=500, detail=str(e))

# Add this if you need to support OPTIONS requests
@app.options("/v1/chat/completions")
async def options_handler():
    """Answer preflight OPTIONS requests with an empty 200 response."""
    empty_body: dict = {}
    return JSONResponse(status_code=200, content=empty_body)

# For production deployment
def get_application():
    """Application-factory hook for ASGI servers (e.g. gunicorn/uvicorn).

    Returns the module-level FastAPI instance rather than building a new one.
    """
    return app

# For running locally
if __name__ == "__main__":
    import uvicorn

    port = int(os.getenv("PORT", 7860))
    hot_reload = os.getenv("RELOAD", "false").lower() == "true"
    # "app:app" assumes this module is saved as app.py — presumably true,
    # but verify against the deployed filename.
    uvicorn.run("app:app", host="0.0.0.0", port=port, reload=hot_reload)