domohasturdesu committed on
Commit
515e926
·
verified ·
1 Parent(s): bb7697f

Upload 4 files

Browse files
Files changed (4) hide show
  1. Dockerfile +28 -0
  2. main.py +373 -0
  3. models.py +100 -0
  4. requirements.txt +5 -0
Dockerfile ADDED
@@ -0,0 +1,28 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
# Use an official Python runtime as a parent image
FROM python:3.10-slim

# Set the working directory in the container
WORKDIR /app

# Copy the requirements file into the container at /app
COPY requirements.txt .

# Install any needed packages specified in requirements.txt
# Use --no-cache-dir to reduce image size
# Use --upgrade to ensure latest versions are installed
RUN pip install --no-cache-dir --upgrade -r requirements.txt

# Copy the application modules into the container at /app
COPY main.py .
COPY models.py .

# Make port 7860 available to the world outside this container
# (must match the --port argument in CMD below)
EXPOSE 7860

# Define environment variables (placeholders, will be set at runtime)
ENV NOTION_COOKIE=""
ENV NOTION_SPACE_ID=""

# Run uvicorn when the container launches
# Use 0.0.0.0 to make it accessible externally
CMD ["uvicorn", "main:app", "--host", "0.0.0.0", "--port", "7860"]
main.py ADDED
@@ -0,0 +1,373 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
import os
import uuid
import json
import time
import random
import httpx
from fastapi import FastAPI, Request, HTTPException, Depends, status
from fastapi.security import HTTPBearer, HTTPAuthorizationCredentials
from fastapi.responses import StreamingResponse
from dotenv import load_dotenv
import secrets  # For constant-time token comparison
from datetime import datetime, timedelta, timezone  # Explicit datetime imports
from zoneinfo import ZoneInfo  # For timezone handling (requires Python 3.9+)
from models import (
    ChatMessage, ChatCompletionRequest, NotionTranscriptConfigValue,
    NotionTranscriptContextValue, NotionTranscriptItem, NotionDebugOverrides,
    NotionRequestBody, ChoiceDelta, Choice, ChatCompletionChunk, Model, ModelList
)

# Load environment variables from .env file
load_dotenv()

# --- Configuration ---
NOTION_API_URL = "https://www.notion.so/api/v3/runInferenceTranscript"
# IMPORTANT: Load the Notion cookie securely from environment variables.
# It is forwarded verbatim as the `cookie` header on every upstream request.
NOTION_COOKIE = os.getenv("NOTION_COOKIE")

NOTION_SPACE_ID = os.getenv("NOTION_SPACE_ID")
if not NOTION_COOKIE:
    # Startup proceeds; the /v1/chat/completions endpoint rejects requests
    # with a 500 when the cookie is missing.
    print("Error: NOTION_COOKIE environment variable not set.")
    # Consider raising HTTPException or exiting in a real app
if not NOTION_SPACE_ID:
    print("Warning: NOTION_SPACE_ID environment variable not set. Using a default UUID.")
    # NOTE(review): a random space id is unlikely to be valid on Notion's
    # side — consider raising instead: raise ValueError("NOTION_SPACE_ID not set")
    NOTION_SPACE_ID = str(uuid.uuid4())  # Default or raise error

# --- Authentication ---
# Token clients must present as "Authorization: Bearer <token>".
EXPECTED_TOKEN = os.getenv("PROXY_AUTH_TOKEN", "default_token")  # Default token
security = HTTPBearer()
41
+
42
def authenticate(credentials: HTTPAuthorizationCredentials = Depends(security)):
    """Validate the client's Bearer token against EXPECTED_TOKEN.

    Uses ``secrets.compare_digest`` so the comparison runs in constant
    time and does not leak token contents via timing differences.

    Raises:
        HTTPException: 401 when the presented token does not match.

    Returns:
        bool: True on successful authentication (used as a dependency flag).
    """
    # Compare as bytes: compare_digest raises TypeError for str arguments
    # containing non-ASCII characters, which would surface as a 500 instead
    # of a clean 401. Encoding both sides avoids that edge case.
    correct_token = secrets.compare_digest(
        credentials.credentials.encode("utf-8"),
        EXPECTED_TOKEN.encode("utf-8"),
    )
    if not correct_token:
        raise HTTPException(
            status_code=status.HTTP_401_UNAUTHORIZED,
            detail="Invalid authentication credentials",
            # WWW-Authenticate header deliberately omitted for Bearer
        )
    return True  # Indicate successful authentication
52
+
53
# --- FastAPI App ---
# Single application instance; served by uvicorn (see __main__ / Dockerfile).
app = FastAPI()

# --- Helper Functions ---
57
+
58
def _format_notion_timestamp(dt: datetime) -> str:
    """Format *dt* exactly as Notion expects: YYYY-MM-DDTHH:MM:SS.fff-HH:MM.

    Python's %z yields +HHMM / -HHMM, so a colon is inserted manually, and
    microseconds are truncated to a zero-padded 3-digit millisecond field.
    """
    base = dt.strftime("%Y-%m-%dT%H:%M:%S")
    millis = f"{dt.microsecond // 1000:03d}"  # Ensure 3 digits for milliseconds
    tz_raw = dt.strftime("%z")  # +HHMM or -HHMM
    return f"{base}.{millis}{tz_raw[:-2]}:{tz_raw[-2:]}"  # insert colon in offset


def build_notion_request(request_data: ChatCompletionRequest) -> NotionRequestBody:
    """Transform OpenAI-style messages into a Notion transcript request body.

    Non-assistant messages receive synthetic ``createdAt`` timestamps: the
    most recent one is stamped "now" (Pacific time), earlier ones are spaced
    3-20 random minutes apart going backwards. Assistant messages become
    "markdown-chat" transcript items with a fresh traceId.

    Args:
        request_data: The parsed OpenAI-style chat completion request.

    Returns:
        NotionRequestBody: Fully populated request for runInferenceTranscript.
    """
    # --- Timestamp and User ID Logic ---
    user_id = os.getenv("NOTION_ACTIVE_USER_HEADER")
    # Compute "now" once; previously this was derived twice, producing
    # slightly different instants for message stamps vs. the context item.
    pacific_tz = ZoneInfo("America/Los_Angeles")
    now_pacific = datetime.now(timezone.utc).astimezone(pacific_tz)

    # Assign timestamps to all non-assistant messages, keyed by message id.
    non_assistant_messages = [msg for msg in request_data.messages if msg.role != "assistant"]
    message_timestamps = {}
    if non_assistant_messages:
        # Last non-assistant message is "now"; walk backwards through the
        # rest, subtracting a random 3-20 minute interval each step.
        message_timestamps[non_assistant_messages[-1].id] = now_pacific
        current_timestamp = now_pacific
        for msg in reversed(non_assistant_messages[:-1]):
            current_timestamp -= timedelta(minutes=random.randint(3, 20))
            message_timestamps[msg.id] = current_timestamp

    # --- Build Transcript ---
    current_datetime_iso = _format_notion_timestamp(now_pacific)

    # Generate random display names for userName and spaceName
    random_words = ["Project", "Workspace", "Team", "Studio", "Lab", "Hub", "Zone", "Space"]
    user_name = f"User{random.randint(100, 999)}"
    space_name = f"{random.choice(random_words)} {random.randint(1, 99)}"

    transcript = [
        NotionTranscriptItem(
            type="config",
            value=NotionTranscriptConfigValue(model=request_data.notion_model)
        ),
        NotionTranscriptItem(
            type="context",
            value=NotionTranscriptContextValue(
                userId=user_id or "",  # user_id from env or empty string
                spaceId=NOTION_SPACE_ID,
                surface="home_module",
                timezone="America/Los_Angeles",
                userName=user_name,
                spaceName=space_name,
                spaceViewId=str(uuid.uuid4()),  # Random UUID for spaceViewId
                currentDatetime=current_datetime_iso
            )
        ),
        NotionTranscriptItem(
            type="agent-integration"
            # No value field needed for agent-integration
        )
    ]

    for message in request_data.messages:
        if message.role == "assistant":
            # Assistant messages get type="markdown-chat" and a traceId
            transcript.append(NotionTranscriptItem(
                type="markdown-chat",
                value=message.content,
                traceId=str(uuid.uuid4())  # Unique traceId per assistant message
            ))
        else:  # Treat all other roles (user, system, etc.) as "user" type
            created_at_dt = message_timestamps.get(message.id)
            created_at_iso = _format_notion_timestamp(created_at_dt) if created_at_dt else None

            content = message.content
            # Ensure content is a plain string for user/system messages.
            if isinstance(content, list):
                # OpenAI list-style content: concatenate the text parts,
                # silently dropping non-text parts (images etc.).
                text_content = ""
                for part in content:
                    if isinstance(part, dict) and part.get("type") == "text":
                        text_part = part.get("text")
                        if isinstance(text_part, str):
                            text_content += text_part
                content = text_content if text_content else ""
            elif not isinstance(content, str):
                content = ""  # Default to empty string if not list or string

            # Format value as expected by Notion for user type: [[content_string]]
            notion_value = [[content]] if content else [[""]]

            transcript.append(NotionTranscriptItem(
                type="user",  # Set type to "user" for non-assistant roles
                value=notion_value,
                userId=user_id,  # Assign userId
                createdAt=created_at_iso  # Assign synthetic timestamp
                # No traceId for user/system messages
            ))

    # Use globally configured spaceId, always create a new thread.
    return NotionRequestBody(
        spaceId=NOTION_SPACE_ID,  # From environment variable
        transcript=transcript,
        createThread=True,  # Always create a new thread
        traceId=str(uuid.uuid4()),  # New traceId for each request
        # Explicitly set debugOverrides, generateTitle, and saveAllThreadOperations
        debugOverrides=NotionDebugOverrides(
            cachedInferences={},
            annotationInferences={},
            emitInferences=False
        ),
        generateTitle=False,
        saveAllThreadOperations=False
    )
183
+
184
async def stream_notion_response(notion_request_body: NotionRequestBody):
    """Streams the request to Notion and yields OpenAI-compatible SSE chunks.

    Posts *notion_request_body* to NOTION_API_URL with browser-like headers,
    reads Notion's ndjson reply line by line, and re-emits each
    "markdown-chat" text fragment as an OpenAI ``chat.completion.chunk``
    SSE line, finishing with a "stop" chunk and ``data: [DONE]``.

    Raises:
        HTTPException: on a non-200 upstream status, connection errors, or
            unexpected failures. NOTE(review): raising from inside an async
            generator after the response has started streaming cannot change
            the HTTP status already sent to the client — confirm this is the
            intended failure mode.
    """
    # Headers mimic the Notion web client so the request is accepted.
    headers = {
        'accept': 'application/x-ndjson',
        'accept-language': 'en-US,en;q=0.9',
        'content-type': 'application/json',
        'notion-audit-log-platform': 'web',
        'notion-client-version': '23.13.0.3668',  # Consider making this configurable
        'origin': 'https://www.notion.so',
        'priority': 'u=1, i',
        # Referer might be optional or need adjustment. Removing threadId part.
        'referer': 'https://www.notion.so/chat',
        'sec-ch-ua': '"Chromium";v="136", "Google Chrome";v="136", "Not.A/Brand";v="99"',
        'sec-ch-ua-mobile': '?0',
        'sec-ch-ua-platform': '"Windows"',
        'sec-fetch-dest': 'empty',
        'sec-fetch-mode': 'cors',
        'sec-fetch-site': 'same-origin',
        'user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/136.0.0.0 Safari/537.36',
        'cookie': NOTION_COOKIE,  # Loaded from .env
        'x-notion-space-id': NOTION_SPACE_ID  # Added space ID header
    }

    # Conditionally add the active user header
    notion_active_user = os.getenv("NOTION_ACTIVE_USER_HEADER")
    if notion_active_user:  # Checks for None and empty string implicitly
        headers['x-notion-active-user-header'] = notion_active_user

    # One id/created pair is shared by every chunk of this completion.
    chunk_id = f"chatcmpl-{uuid.uuid4()}"
    created_time = int(time.time())

    try:
        async with httpx.AsyncClient(timeout=None) as client:  # No timeout for streaming
            # Explicitly serialize using .json() to respect Pydantic Config (like json_encoders for UUID)
            request_body_json = notion_request_body.json()
            async with client.stream("POST", NOTION_API_URL, content=request_body_json, headers=headers) as response:
                if response.status_code != 200:
                    # Drain the body so the error detail can be reported.
                    error_content = await response.aread()
                    print(f"Error from Notion API: {response.status_code}")
                    print(f"Response: {error_content.decode()}")
                    # Yield an error message in SSE format? Or just raise exception?
                    # For now, raise internal server error in the endpoint
                    raise HTTPException(status_code=response.status_code, detail=f"Notion API Error: {error_content.decode()}")

                # Upstream replies with ndjson: one JSON object per line.
                async for line in response.aiter_lines():
                    if not line.strip():
                        continue
                    try:
                        data = json.loads(line)
                        # Check if it's the type of message containing text chunks
                        if data.get("type") == "markdown-chat" and isinstance(data.get("value"), str):
                            content_chunk = data["value"]
                            if content_chunk:  # Only send if there's content
                                chunk = ChatCompletionChunk(
                                    id=chunk_id,
                                    created=created_time,
                                    choices=[Choice(delta=ChoiceDelta(content=content_chunk))]
                                )
                                yield f"data: {chunk.json()}\n\n"
                        # Add logic here to detect the end of the stream if Notion has a specific marker
                        # For now, we assume markdown-chat stops when the main content is done.
                        # If we see a recordMap, it's definitely past the text stream.
                        elif "recordMap" in data:
                            print("Detected recordMap, stopping stream.")
                            break  # Stop processing after recordMap

                    except json.JSONDecodeError:
                        print(f"Warning: Could not decode JSON line: {line}")
                    except Exception as e:
                        # Best-effort: log and keep consuming the stream.
                        print(f"Error processing line: {line} - {e}")
                        # Decide if we should continue or stop

            # Send the final chunk indicating stop
            final_chunk = ChatCompletionChunk(
                id=chunk_id,
                created=created_time,
                choices=[Choice(delta=ChoiceDelta(), finish_reason="stop")]
            )
            yield f"data: {final_chunk.json()}\n\n"
            yield "data: [DONE]\n\n"

    except httpx.RequestError as e:
        print(f"HTTPX Request Error: {e}")
        # Yield an error message or handle in the endpoint
        # For now, let the endpoint handle it
        raise HTTPException(status_code=500, detail=f"Error connecting to Notion API: {e}")
    except Exception as e:
        print(f"Unexpected error during streaming: {e}")
        # Yield an error message or handle in the endpoint
        raise HTTPException(status_code=500, detail=f"Internal server error during streaming: {e}")
274
+
275
+
276
+ # --- API Endpoint ---
277
+
278
+ @app.get("/v1/models", response_model=ModelList)
279
+ async def list_models(authenticated: bool = Depends(authenticate)):
280
+ """
281
+ Endpoint to list available Notion models, mimicking OpenAI's /v1/models.
282
+ """
283
+ available_models = [
284
+ "openai-gpt-4.1",
285
+ "anthropic-opus-4",
286
+ "anthropic-sonnet-4"
287
+ ]
288
+ model_list = [
289
+ Model(id=model_id, owned_by="notion") # created uses default_factory
290
+ for model_id in available_models
291
+ ]
292
+ return ModelList(data=model_list)
293
+ @app.post("/v1/chat/completions")
294
+ async def chat_completions(request_data: ChatCompletionRequest, request: Request, authenticated: bool = Depends(authenticate)):
295
+ """
296
+ Endpoint to mimic OpenAI's chat completions, proxying to Notion.
297
+ """
298
+ if not NOTION_COOKIE:
299
+ raise HTTPException(status_code=500, detail="Server configuration error: Notion cookie not set.")
300
+
301
+ notion_request_body = build_notion_request(request_data)
302
+
303
+ if request_data.stream:
304
+ return StreamingResponse(
305
+ stream_notion_response(notion_request_body),
306
+ media_type="text/event-stream"
307
+ )
308
+ else:
309
+ # --- Non-Streaming Logic (Optional - Collects stream internally) ---
310
+ # Note: The primary goal is streaming, but a non-streaming version
311
+ # might be useful for testing or simpler clients.
312
+ # This requires collecting all chunks from the async generator.
313
+ full_response_content = ""
314
+ final_finish_reason = None
315
+ chunk_id = f"chatcmpl-{uuid.uuid4()}" # Generate ID for the non-streamed response
316
+ created_time = int(time.time())
317
+
318
+ try:
319
+ async for line in stream_notion_response(notion_request_body):
320
+ if line.startswith("data: ") and "[DONE]" not in line:
321
+ try:
322
+ data_json = line[len("data: "):].strip()
323
+ if data_json:
324
+ chunk_data = json.loads(data_json)
325
+ if chunk_data.get("choices"):
326
+ delta = chunk_data["choices"][0].get("delta", {})
327
+ content = delta.get("content")
328
+ if content:
329
+ full_response_content += content
330
+ finish_reason = chunk_data["choices"][0].get("finish_reason")
331
+ if finish_reason:
332
+ final_finish_reason = finish_reason
333
+ except json.JSONDecodeError:
334
+ print(f"Warning: Could not decode JSON line in non-streaming mode: {line}")
335
+
336
+ # Construct the final OpenAI-compatible non-streaming response
337
+ return {
338
+ "id": chunk_id,
339
+ "object": "chat.completion",
340
+ "created": created_time,
341
+ "model": request_data.model, # Return the model requested by the client
342
+ "choices": [
343
+ {
344
+ "index": 0,
345
+ "message": {
346
+ "role": "assistant",
347
+ "content": full_response_content,
348
+ },
349
+ "finish_reason": final_finish_reason or "stop", # Default to stop if not explicitly set
350
+ }
351
+ ],
352
+ "usage": { # Note: Token usage is not available from Notion
353
+ "prompt_tokens": None,
354
+ "completion_tokens": None,
355
+ "total_tokens": None,
356
+ },
357
+ }
358
+ except HTTPException as e:
359
+ # Re-raise HTTP exceptions from the streaming function
360
+ raise e
361
+ except Exception as e:
362
+ print(f"Error during non-streaming processing: {e}")
363
+ raise HTTPException(status_code=500, detail="Internal server error processing Notion response")
364
+
365
+
366
# --- Uvicorn Runner ---
# Allows running with `python main.py` for simple testing,
# but `uvicorn main:app --reload` is recommended for development.
# Port 7860 matches the Dockerfile's EXPOSE/CMD.
if __name__ == "__main__":
    import uvicorn
    print("Starting server. Access at http://127.0.0.1:7860")
    print("Ensure NOTION_COOKIE is set in your .env file or environment.")
    uvicorn.run(app, host="127.0.0.1", port=7860)
models.py ADDED
@@ -0,0 +1,100 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import time
2
+ import uuid
3
+ from pydantic import BaseModel, Field
4
+ from typing import List, Optional, Dict, Any, Literal, Union
5
+
6
+ # --- Models Moved from main.py ---
7
+
8
+ # Input Models (OpenAI-like)
9
class ChatMessage(BaseModel):
    """One OpenAI-style chat message.

    ``id`` is auto-generated and used by the proxy to key synthetic
    timestamps; the Notion-specific fields default to None so plain
    OpenAI clients can omit them.
    """
    id: uuid.UUID = Field(default_factory=uuid.uuid4)
    role: Literal["system", "user", "assistant"]
    content: Union[str, List[Dict[str, Any]]]
    userId: Optional[str] = None  # Added for user messages
    createdAt: Optional[str] = None  # Added for timestamping
    traceId: Optional[str] = None  # Added for assistant messages
16
+
17
class ChatCompletionRequest(BaseModel):
    """OpenAI-style chat completion request accepted by the proxy."""
    messages: List[ChatMessage]
    model: str = "notion-proxy"  # Model name can be passed, but we map to Notion's model
    stream: bool = False
    # Add other potential OpenAI params if needed, though they might not map directly
    # max_tokens: Optional[int] = None
    # temperature: Optional[float] = None
    # space_id and thread_id are now handled globally via environment variables
    notion_model: str = "anthropic-opus-4"  # Default Notion model, can be overridden
26
+
27
+
28
+ # Notion Models
29
class NotionTranscriptConfigValue(BaseModel):
    """Value payload for a transcript item of type "config"."""
    type: str = "markdown-chat"
    model: str  # e.g., "anthropic-opus-4"
32
+
33
class NotionTranscriptContextValue(BaseModel):
    """Value payload for a transcript item of type "context".

    Carries the caller's identity/workspace metadata; currentDatetime is a
    pre-formatted string (YYYY-MM-DDTHH:MM:SS.fff-HH:MM), not a datetime.
    """
    userId: str
    spaceId: str
    surface: str = "home_module"
    timezone: str = "America/Los_Angeles"
    userName: str
    spaceName: str
    spaceViewId: str
    currentDatetime: str
42
+
43
class NotionTranscriptItem(BaseModel):
    """A single entry in the Notion inference transcript.

    ``value``'s shape depends on ``type``: nested string lists for "user",
    a plain string for "markdown-chat", config/context models otherwise,
    and None for "agent-integration".
    """
    id: uuid.UUID = Field(default_factory=uuid.uuid4)
    type: Literal["config", "user", "markdown-chat", "agent-integration", "context"]
    value: Optional[Union[List[List[str]], str, NotionTranscriptConfigValue, NotionTranscriptContextValue]] = None
    userId: Optional[str] = None  # Set for user messages in Notion transcript
    createdAt: Optional[str] = None  # Set for timestamping in Notion transcript
    traceId: Optional[str] = None  # Set for assistant messages in Notion transcript
50
+
51
class NotionDebugOverrides(BaseModel):
    """Debug override flags sent with each Notion inference request."""
    cachedInferences: Dict = Field(default_factory=dict)
    annotationInferences: Dict = Field(default_factory=dict)
    emitInferences: bool = False
55
+
56
class NotionRequestBody(BaseModel):
    """Top-level body POSTed to Notion's runInferenceTranscript endpoint."""
    traceId: str = Field(default_factory=lambda: str(uuid.uuid4()))
    spaceId: str
    transcript: List[NotionTranscriptItem]
    # threadId is removed, createThread will be set to true
    createThread: bool = True
    debugOverrides: NotionDebugOverrides = Field(default_factory=NotionDebugOverrides)
    generateTitle: bool = False
    # NOTE(review): default is True here but the proxy explicitly passes
    # False when building requests — confirm which is intended.
    saveAllThreadOperations: bool = True

    class Config:
        # Ensure UUIDs are serialized as strings in the final JSON request
        json_encoders = {
            uuid.UUID: str
        }
71
+
72
+
73
+ # Output Models (OpenAI SSE)
74
class ChoiceDelta(BaseModel):
    """Incremental content fragment inside a streamed choice."""
    content: Optional[str] = None
76
+
77
class Choice(BaseModel):
    """One choice within a streamed chat.completion.chunk."""
    index: int = 0
    delta: ChoiceDelta
    finish_reason: Optional[Literal["stop", "length"]] = None
81
+
82
class ChatCompletionChunk(BaseModel):
    """OpenAI-compatible SSE chunk emitted by the streaming proxy."""
    id: str = Field(default_factory=lambda: f"chatcmpl-{uuid.uuid4()}")
    object: str = "chat.completion.chunk"
    created: int = Field(default_factory=lambda: int(time.time()))
    model: str = "notion-proxy"  # Or could reflect the underlying Notion model
    choices: List[Choice]
88
+
89
+
90
+ # --- Models for /v1/models Endpoint ---
91
+
92
class Model(BaseModel):
    """One entry in the /v1/models listing, mirroring OpenAI's shape."""
    id: str
    object: str = "model"
    created: int = Field(default_factory=lambda: int(time.time()))
    owned_by: str = "notion"  # Or specify based on actual model origin if needed
97
+
98
class ModelList(BaseModel):
    """Envelope for /v1/models responses, mirroring OpenAI's list shape."""
    object: str = "list"
    data: List[Model]
requirements.txt ADDED
@@ -0,0 +1,5 @@
 
 
 
 
 
 
1
# Web framework and ASGI server
fastapi
uvicorn[standard]
# Async HTTP client used to stream from the Notion API
httpx
# Request/response models (see models.py)
pydantic
# Loads NOTION_COOKIE etc. from a .env file
python-dotenv