Samfy001 commited on
Commit
f6de122
·
verified ·
1 Parent(s): ee3a441

Upload 6 files

Browse files
Files changed (6) hide show
  1. .env +8 -0
  2. Dockerfile +20 -0
  3. docker-compose.yml +22 -0
  4. example.py +84 -0
  5. main.py +355 -0
  6. requirements.txt +7 -0
.env ADDED
@@ -0,0 +1,8 @@
 
 
 
 
 
 
 
 
 
1
# v0.dev API Configuration
# SECURITY: never commit a real API key — set it locally or via a secret manager.
V0_API_KEY=your-v0-api-key-here
V0_API_BASE_URL=https://api.v0.dev/v1

# Server Configuration
HOST=0.0.0.0
PORT=8000
DEBUG=true
Dockerfile ADDED
@@ -0,0 +1,20 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
# 3.12 is required: main.py uses multi-line expressions inside f-string
# replacement fields (PEP 701), a SyntaxError on Python 3.9.
FROM python:3.12-slim

WORKDIR /app

# Install dependencies first so Docker layer caching skips reinstalls
# when only application code changes.
COPY requirements.txt .
RUN pip install --no-cache-dir -r requirements.txt

# Copy application code
COPY . .

# Expose port
EXPOSE 8000

# Health check — uses only the standard library; "requests" is not listed
# in requirements.txt, so the original probe would always fail.
HEALTHCHECK --interval=30s --timeout=10s --start-period=5s --retries=3 \
    CMD python -c "import urllib.request; urllib.request.urlopen('http://localhost:8000/health')"

# Run the application
CMD ["python", "main.py"]
docker-compose.yml ADDED
@@ -0,0 +1,22 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
version: '3.8'

services:
  v0-openai-api:
    build: .
    ports:
      - "8000:8000"
    environment:
      - V0_API_KEY=${V0_API_KEY}
      - V0_API_BASE_URL=${V0_API_BASE_URL:-https://api.v0.dev/v1}
      - HOST=0.0.0.0
      - PORT=8000
      - DEBUG=${DEBUG:-true}
    env_file:
      - .env
    restart: unless-stopped
    healthcheck:
      # Use the stdlib: "requests" is not in requirements.txt, so the
      # original probe would always fail inside the container.
      test: ["CMD", "python", "-c", "import urllib.request; urllib.request.urlopen('http://localhost:8000/health')"]
      interval: 30s
      timeout: 10s
      retries: 3
      start_period: 10s
example.py ADDED
@@ -0,0 +1,84 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
#!/usr/bin/env python3
"""
Example usage of the v0.dev OpenAI Compatible API
"""

# NOTE(review): neither "openai" nor "requests" is listed in
# requirements.txt — install them before running this example.
import openai
import requests

# Configuration — the wrapper listens on localhost:8000 (see main.py).
BASE_URL = "http://localhost:8000/v1"
API_KEY = "dummy"  # Not used but required by OpenAI client

# Initialize client — points the official OpenAI SDK at the local wrapper.
client = openai.OpenAI(
    base_url=BASE_URL,
    api_key=API_KEY
)
def basic_chat():
    """Run one non-streaming chat completion and print the reply."""
    print("=== Basic Chat Example ===")

    completion = client.chat.completions.create(
        model="v0-gpt-5",
        messages=[
            {"role": "system", "content": "You are an expert React developer"},
            {"role": "user", "content": "Create a simple todo list component in React"},
        ],
        max_tokens=1000,
        temperature=0.7,
    )

    print("Response:")
    print(completion.choices[0].message.content)
    print()
def streaming_chat():
    """Run a chat completion in streaming mode, printing tokens as they arrive."""
    print("=== Streaming Chat Example ===")

    response_stream = client.chat.completions.create(
        model="v0-gpt-5",
        messages=[{"role": "user", "content": "Explain React hooks in simple terms"}],
        stream=True,
    )

    print("Response (streaming):")
    for part in response_stream:
        delta = part.choices[0].delta.content
        if delta is not None:
            print(delta, end="")
    print()
def list_models():
    """Fetch and print the models exposed by the wrapper."""
    print("=== Available Models ===")

    listing = client.models.list()
    for entry in listing.data:
        print(f"- {entry.id} (owned by {entry.owned_by})")
    print()
def main():
    """Check the server is up, then run every example in sequence."""
    try:
        # Probe the health endpoint before doing any real work.
        health = requests.get("http://localhost:8000/health")
        if health.status_code != 200:
            print("Server is not running. Please start the server first:")
            print("python main.py")
            return

        # Run examples in order.
        for example in (list_models, basic_chat, streaming_chat):
            example()

    except Exception as e:
        print(f"Error: {e}")
        print("Make sure the server is running: python main.py")

if __name__ == "__main__":
    main()
main.py ADDED
@@ -0,0 +1,355 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
#!/usr/bin/env python3
"""
OpenAI-compatible API wrapper for v0.dev
Provides a drop-in replacement for OpenAI's API using v0.dev as the backend
"""

import os
import json
import asyncio
from typing import List, Dict, Any, Optional, AsyncGenerator
from datetime import datetime
import uuid

import httpx
from fastapi import FastAPI, HTTPException, Depends, Request
from fastapi.responses import StreamingResponse
from fastapi.middleware.cors import CORSMiddleware
from pydantic import BaseModel, Field
from dotenv import load_dotenv

load_dotenv()

# Configuration — read from the environment (.env supported via python-dotenv).
# SECURITY FIX: the original shipped a real API key as the fallback default.
# Secrets must never be hard-coded; an empty default fails fast instead.
V0_API_KEY = os.getenv("V0_API_KEY", "")
V0_API_BASE_URL = os.getenv("V0_API_BASE_URL", "https://api.v0.dev/v1")
HOST = os.getenv("HOST", "0.0.0.0")
PORT = int(os.getenv("PORT", 8000))

# FastAPI app
app = FastAPI(
    title="v0.dev OpenAI Compatible API",
    description="Drop-in replacement for OpenAI API using v0.dev as backend",
    version="1.0.0"
)

# CORS middleware — wide open; tighten allow_origins for production use.
app.add_middleware(
    CORSMiddleware,
    allow_origins=["*"],
    allow_credentials=True,
    allow_methods=["*"],
    allow_headers=["*"],
)
# Pydantic models — OpenAI-compatible request/response schemas.
class Message(BaseModel):
    role: str = Field(..., description="Role of the message sender (user, assistant, system)")
    content: str = Field(..., description="Content of the message")

class ChatCompletionRequest(BaseModel):
    # Mirrors OpenAI's /v1/chat/completions request body, plus v0.dev's project_id.
    model: str = Field(..., description="Model to use for completion")
    messages: List[Message] = Field(..., description="List of messages")
    max_tokens: Optional[int] = Field(None, description="Maximum tokens to generate")
    temperature: Optional[float] = Field(0.7, description="Sampling temperature")
    stream: Optional[bool] = Field(False, description="Whether to stream the response")
    project_id: Optional[str] = Field(None, description="v0.dev project ID")

class Choice(BaseModel):
    # One completion choice in an OpenAI-style response.
    index: int
    message: Message
    finish_reason: str

class Usage(BaseModel):
    # Token accounting; values are estimated by this wrapper, not exact.
    prompt_tokens: int
    completion_tokens: int
    total_tokens: int

class ChatCompletionResponse(BaseModel):
    # Non-streaming response envelope, shaped like OpenAI's "chat.completion".
    id: str
    object: str = "chat.completion"
    created: int
    model: str
    choices: List[Choice]
    usage: Usage

class ChatCompletionStreamResponse(BaseModel):
    # Streaming chunk envelope ("chat.completion.chunk"). NOTE(review): unused —
    # create_streaming_response builds chunks as plain dicts instead.
    id: str
    object: str = "chat.completion.chunk"
    created: int
    model: str
    choices: List[Dict[str, Any]]
# v0.dev API client
class V0APIClient:
    """Thin async client for the v0.dev /chats endpoint."""

    def __init__(self, api_key: str, base_url: str):
        self.api_key = api_key
        self.base_url = base_url
        self.headers = {
            "Authorization": f"Bearer {api_key}",
            "Content-Type": "application/json"
        }

    async def create_chat(self, messages: List["Message"], model_config: Dict[str, Any], project_id: Optional[str] = None) -> Dict[str, Any]:
        """Create a new chat with v0.dev.

        v0.dev's /chats endpoint takes a single prompt plus an optional system
        message, so only the system message and the LAST non-system message are
        sent; earlier conversation turns are dropped.

        Raises:
            HTTPException(400): if no non-system message is present.
            httpx.HTTPStatusError: if v0.dev returns an error status.
        """
        url = f"{self.base_url}/chats"

        # Split out the system message; everything else is candidate user input.
        # (Fix: removed an unused `formatted_messages` list the original built
        # and never read.)
        system_message = ""
        user_messages = []
        for msg in messages:
            if msg.role == "system":
                system_message = msg.content
            else:
                user_messages.append(msg)

        if not user_messages:
            raise HTTPException(status_code=400, detail="No user message found")

        last_user_message = user_messages[-1].content

        payload = {
            "system": system_message,
            "message": last_user_message,
            "modelConfiguration": model_config,
            "projectId": project_id,
        }
        # Drop None values so optional fields are omitted, not sent as null.
        payload = {k: v for k, v in payload.items() if v is not None}

        async with httpx.AsyncClient() as client:
            response = await client.post(url, headers=self.headers, json=payload)
            response.raise_for_status()
            return response.json()
# Initialize v0 client — module-level singleton shared by all request handlers.
v0_client = V0APIClient(V0_API_KEY, V0_API_BASE_URL)
# Helper functions
def format_prompt(messages: List[Dict[str, Any]], add_special_tokens: bool = False,
                  do_continue: bool = False, include_system: bool = True) -> str:
    """
    Format a series of messages into a single string, optionally adding special tokens.

    Args:
        messages: A list of message dictionaries, each containing 'role' and 'content'.
        add_special_tokens: Whether to add special formatting tokens.
        do_continue: If True, don't add the final "Assistant:" prompt.
        include_system: Whether to include system messages in the formatted output.

    Returns:
        A formatted string containing all messages; "" for an empty list.
    """
    # Helper: flatten str / {"text": ...} dict / nested list content to a string.
    def to_string(value) -> str:
        if isinstance(value, str):
            return value
        elif isinstance(value, dict):
            if "text" in value:
                return value.get("text", "")
            return ""
        elif isinstance(value, list):
            return "".join([to_string(v) for v in value])
        return str(value)

    # Fix: guard the empty list — the single-message shortcut below indexes
    # messages[0] and raised IndexError on [] in the original.
    if not messages:
        return ""

    # If there's only one message and no special tokens needed, just return its content
    if not add_special_tokens and len(messages) <= 1:
        return to_string(messages[0]["content"])

    # Filter and process messages
    processed_messages = [
        (message["role"], to_string(message["content"]))
        for message in messages
        if include_system or message.get("role") != "system"
    ]

    # Format each message as "Role: Content"
    formatted = "\n".join([
        f'{role.capitalize()}: {content}'
        for role, content in processed_messages
        if content.strip()
    ])

    # Add final prompt for assistant if needed
    if do_continue:
        return formatted

    return f"{formatted}\nAssistant:"
def create_openai_response(v0_response: Dict[str, Any], model: str) -> ChatCompletionResponse:
    """Convert a v0.dev chat response into an OpenAI chat.completion payload."""
    # Locate the first assistant message in the v0.dev transcript.
    assistant_message = next(
        (m for m in v0_response.get("messages", []) if m.get("role") == "assistant"),
        None,
    )
    if assistant_message is None:
        raise HTTPException(status_code=500, detail="No assistant message found in v0 response")

    content = assistant_message.get("content", "")

    # Usage is only estimated: character counts stand in for token counts.
    prompt_size = len(str(v0_response))
    usage = Usage(
        prompt_tokens=prompt_size,
        completion_tokens=len(content),
        total_tokens=prompt_size + len(content),
    )

    return ChatCompletionResponse(
        id=f"chatcmpl-{uuid.uuid4().hex}",
        created=int(datetime.now().timestamp()),
        model=model,
        choices=[
            Choice(
                index=0,
                message=Message(role="assistant", content=content),
                finish_reason="stop",
            )
        ],
        usage=usage,
    )
async def create_streaming_response(v0_response: Dict[str, Any], model: str) -> AsyncGenerator[str, None]:
    """Create a simulated SSE stream in OpenAI chat.completion.chunk format.

    v0.dev returns the full reply at once, so streaming is emulated by
    re-emitting the assistant message one word at a time.

    Fix: the original embedded multi-line dict literals inside f-string
    replacement fields — a SyntaxError before Python 3.12 (PEP 701), so the
    whole module failed to import on the Dockerfile's python:3.9 image.
    Chunks are now built as plain dicts and serialized with json.dumps.
    (Also removed an unused `current_text` accumulator.)
    """
    response_id = f"chatcmpl-{uuid.uuid4().hex}"
    created = int(datetime.now().timestamp())

    # Pull the assistant reply out of the v0.dev transcript.
    assistant_message = ""
    for msg in v0_response.get("messages", []):
        if msg.get("role") == "assistant":
            assistant_message = msg.get("content", "")
            break

    def sse_chunk(delta: Dict[str, Any], finish_reason: Optional[str]) -> str:
        """Serialize one chunk as a Server-Sent-Events data line."""
        chunk = {
            "id": response_id,
            "object": "chat.completion.chunk",
            "created": created,
            "model": model,
            "choices": [{
                "index": 0,
                "delta": delta,
                "finish_reason": finish_reason,
            }],
        }
        return f"data: {json.dumps(chunk)}\n\n"

    # Initial chunk announces the assistant role.
    yield sse_chunk({"role": "assistant"}, None)

    # Stream the content word by word, with a small delay for streaming effect.
    for word in assistant_message.split():
        yield sse_chunk({"content": word + " "}, None)
        await asyncio.sleep(0.01)

    # Final chunk carries the finish_reason, then the SSE terminator.
    yield sse_chunk({}, "stop")
    yield "data: [DONE]\n\n"
# API endpoints
@app.get("/")
async def root():
    """Service banner for the API root."""
    return {"message": "v0.dev OpenAI Compatible API", "version": "1.0.0"}
@app.get("/v1/models")
async def list_models():
    """List available models (mock OpenAI format)"""
    now = int(datetime.now().timestamp())
    return {
        "object": "list",
        "data": [
            {
                "id": model_id,
                "object": "model",
                "created": now,
                "owned_by": "v0.dev",
            }
            for model_id in ("v0-gpt-5", "v0-gpt-4")
        ],
    }
@app.post("/v1/chat/completions")
async def create_chat_completion(request: ChatCompletionRequest):
    """Create chat completion (OpenAI compatible).

    Proxies the request to v0.dev and returns either a full response or a
    simulated SSE stream, depending on request.stream.
    """
    try:
        # Map OpenAI model to v0.dev model configuration.
        model_config = {
            "modelId": request.model,
            "imageGenerations": True,
            "thinking": True
        }

        # Create chat with v0.dev
        v0_response = await v0_client.create_chat(
            messages=request.messages,
            model_config=model_config,
            project_id=request.project_id
        )

        if request.stream:
            # Fix: SSE must be served as text/event-stream, not text/plain,
            # or OpenAI clients will not parse the stream incrementally.
            return StreamingResponse(
                create_streaming_response(v0_response, request.model),
                media_type="text/event-stream"
            )
        else:
            return create_openai_response(v0_response, request.model)

    except HTTPException:
        # Fix: re-raise deliberate HTTP errors (e.g. the 400 from create_chat)
        # instead of letting the generic handler rewrap them as 500s.
        raise
    except httpx.HTTPStatusError as e:
        raise HTTPException(status_code=e.response.status_code, detail=str(e))
    except Exception as e:
        raise HTTPException(status_code=500, detail=str(e))
@app.get("/health")
async def health_check():
    """Liveness probe used by the Docker healthchecks."""
    return {"status": "healthy", "timestamp": datetime.now().isoformat()}
# Script entry point — serve the app with uvicorn on HOST:PORT.
if __name__ == "__main__":
    import uvicorn
    uvicorn.run(app, host=HOST, port=PORT)
requirements.txt ADDED
@@ -0,0 +1,7 @@
 
 
 
 
 
 
 
 
fastapi==0.104.1
uvicorn==0.24.0
httpx==0.25.2
pydantic==2.5.0
python-dotenv==1.0.0
python-multipart==0.0.6
aiofiles==23.2.1
# Used by example.py and by the container healthchecks as originally written
requests==2.31.0
# Used by example.py (official OpenAI SDK client)
openai>=1.3.0