import os
import uuid
import json
import time
import httpx
from fastapi import FastAPI, Request, HTTPException, Depends, status
from fastapi.security import HTTPBearer, HTTPAuthorizationCredentials
from fastapi.responses import StreamingResponse
from dotenv import load_dotenv
import secrets  # Added for secure comparison
from models import (
    ChatMessage, ChatCompletionRequest, NotionTranscriptConfigValue,
    NotionTranscriptItem, NotionDebugOverrides, NotionRequestBody,
    ChoiceDelta, Choice, ChatCompletionChunk, Model, ModelList
)
# Load environment variables from .env file
load_dotenv()
# --- Configuration ---
NOTION_API_URL = "https://www.notion.so/api/v3/runInferenceTranscript"
# IMPORTANT: Load the Notion cookie securely from environment variables
NOTION_COOKIE = os.getenv("NOTION_COOKIE")
NOTION_SPACE_ID = os.getenv("NOTION_SPACE_ID")
if not NOTION_COOKIE:
    print("Error: NOTION_COOKIE environment variable not set.")
    # Consider raising HTTPException or exiting in a real app
if not NOTION_SPACE_ID:
    print("Warning: NOTION_SPACE_ID environment variable not set. Using a default UUID.")
    # Using a default might not be ideal, depends on Notion's behavior
    # Consider raising an error instead: raise ValueError("NOTION_SPACE_ID not set")
    NOTION_SPACE_ID = str(uuid.uuid4())  # Default or raise error
# --- Authentication ---
EXPECTED_TOKEN = os.getenv("PROXY_AUTH_TOKEN", "default_token") # Default token
security = HTTPBearer()
def authenticate(credentials: HTTPAuthorizationCredentials = Depends(security)):
    """Compares the provided token with the expected token."""
    correct_token = secrets.compare_digest(credentials.credentials, EXPECTED_TOKEN)
    if not correct_token:
        raise HTTPException(
            status_code=status.HTTP_401_UNAUTHORIZED,
            detail="Invalid authentication credentials",
            # WWW-Authenticate header removed for Bearer
        )
    return True  # Indicate successful authentication
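
# Example .env layout for this proxy (an illustrative sketch; the variable names
# match the os.getenv() calls in this module, the values are placeholders):
#
#   NOTION_COOKIE=<full Cookie header copied from a logged-in notion.so session>
#   NOTION_SPACE_ID=<UUID of the Notion workspace to use>
#   PROXY_AUTH_TOKEN=<bearer token clients must send to this proxy>
#   NOTION_ACTIVE_USER_HEADER=<optional; forwarded as x-notion-active-user-header>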
# --- FastAPI App ---
app = FastAPI()
# --- Helper Functions ---
def build_notion_request(request_data: ChatCompletionRequest) -> NotionRequestBody:
    """Transforms OpenAI-style messages to Notion transcript format."""
    transcript = [
        NotionTranscriptItem(
            type="config",
            value=NotionTranscriptConfigValue(model=request_data.notion_model)
        )
    ]
    for message in request_data.messages:
        # Map 'assistant' role to 'markdown-chat', all others to 'user'
        if message.role == "assistant":
            # Notion uses "markdown-chat" for assistant replies in the transcript history
            transcript.append(NotionTranscriptItem(type="markdown-chat", value=message.content))
        else:  # Handles 'user', 'system', etc.
            content = message.content
            if isinstance(content, str):
                # Handle string content: append one item, using default [[""]] for empty strings
                notion_value = [[content]] if content else [[""]]
                transcript.append(NotionTranscriptItem(type="user", value=notion_value))
            elif isinstance(content, list):
                # Handle list content: append a SEPARATE item for each valid text part
                found_text_part = False
                for part in content:
                    # Check if part is a dict with type="text" and non-empty text
                    if isinstance(part, dict) and part.get("type") == "text":
                        text_content = part.get("text")
                        if isinstance(text_content, str) and text_content:
                            # Create and append a SEPARATE item for this text part
                            transcript.append(NotionTranscriptItem(type="user", value=[[text_content]]))
                            found_text_part = True
                # If the list was empty or had no valid text parts, append a default empty item to maintain behavior
                if not found_text_part:
                    print(f'Error: no valid input found: {message}')
                    transcript.append(NotionTranscriptItem(type="user", value=[[""]]))
            else:
                # Handle unexpected content types (e.g., None, int) by appending a default empty item
                transcript.append(NotionTranscriptItem(type="user", value=[[""]]))
                print(f'Error: no valid input found: {message}')
    # Use the globally configured spaceId, set createThread=True
    return NotionRequestBody(
        spaceId=NOTION_SPACE_ID,  # From environment variable
        transcript=transcript,
        createThread=True,  # Always create a new thread
        # Generate a new traceId for each request
        traceId=str(uuid.uuid4()),
        # Explicitly set debugOverrides, generateTitle, and saveAllThreadOperations
        debugOverrides=NotionDebugOverrides(
            cachedInferences={},
            annotationInferences={},
            emitInferences=False
        ),
        generateTitle=False,
        saveAllThreadOperations=False
    )
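
# For reference, a short two-turn conversation maps to a transcript roughly like
# the following (an illustrative sketch based on the branches above, not a
# verbatim capture of Notion's format):
#
#   [{"type": "config", "value": {"model": "anthropic-sonnet-4"}},
#    {"type": "user", "value": [["Hello"]]},
#    {"type": "markdown-chat", "value": "Hi! How can I help?"},
#    {"type": "user", "value": [["Summarize our chat"]]}]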
async def stream_notion_response(notion_request_body: NotionRequestBody):
    """Streams the request to Notion and yields OpenAI-compatible SSE chunks."""
    headers = {
        'accept': 'application/x-ndjson',
        'accept-language': 'en-US,en;q=0.9,zh-CN;q=0.8,zh;q=0.7,zh-TW;q=0.6,ja;q=0.5',
        'content-type': 'application/json',
        'notion-audit-log-platform': 'web',
        'notion-client-version': '23.13.0.3604',  # Consider making this configurable
        'origin': 'https://www.notion.so',
        'priority': 'u=1, i',
        # Referer might be optional or need adjustment; the threadId part has been removed.
        'referer': 'https://www.notion.so/chat',
        'sec-ch-ua': '"Chromium";v="136", "Google Chrome";v="136", "Not.A/Brand";v="99"',
        'sec-ch-ua-mobile': '?0',
        'sec-ch-ua-platform': '"Windows"',
        'sec-fetch-dest': 'empty',
        'sec-fetch-mode': 'cors',
        'sec-fetch-site': 'same-origin',
        'user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/136.0.0.0 Safari/537.36',
        'cookie': NOTION_COOKIE,  # Loaded from .env
        'x-notion-space-id': NOTION_SPACE_ID  # Added space ID header
    }
    # Conditionally add the active user header
    notion_active_user = os.getenv("NOTION_ACTIVE_USER_HEADER")
    if notion_active_user:  # Checks for None and empty string implicitly
        headers['x-notion-active-user-header'] = notion_active_user
    chunk_id = f"chatcmpl-{uuid.uuid4()}"
    created_time = int(time.time())
    try:
        async with httpx.AsyncClient(timeout=None) as client:  # No timeout for streaming
            async with client.stream("POST", NOTION_API_URL, json=notion_request_body.dict(), headers=headers) as response:
                if response.status_code != 200:
                    error_content = await response.aread()
                    print(f"Error from Notion API: {response.status_code}")
                    print(f"Response: {error_content.decode()}")
                    # Yield an error message in SSE format? Or just raise an exception?
                    # For now, raise an internal server error in the endpoint
                    raise HTTPException(status_code=response.status_code, detail=f"Notion API Error: {error_content.decode()}")
                async for line in response.aiter_lines():
                    if not line.strip():
                        continue
                    try:
                        data = json.loads(line)
                        # Check if it's the type of message containing text chunks
                        if data.get("type") == "markdown-chat" and isinstance(data.get("value"), str):
                            content_chunk = data["value"]
                            if content_chunk:  # Only send if there's content
                                chunk = ChatCompletionChunk(
                                    id=chunk_id,
                                    created=created_time,
                                    choices=[Choice(delta=ChoiceDelta(content=content_chunk))]
                                )
                                yield f"data: {chunk.json()}\n\n"
                        # Add logic here to detect the end of the stream if Notion has a specific marker.
                        # For now, we assume markdown-chat stops when the main content is done.
                        # If we see a recordMap, it's definitely past the text stream.
                        elif "recordMap" in data:
                            print("Detected recordMap, stopping stream.")
                            break  # Stop processing after recordMap
                    except json.JSONDecodeError:
                        print(f"Warning: Could not decode JSON line: {line}")
                    except Exception as e:
                        print(f"Error processing line: {line} - {e}")
                        # Decide if we should continue or stop
        # Send the final chunk indicating stop
        final_chunk = ChatCompletionChunk(
            id=chunk_id,
            created=created_time,
            choices=[Choice(delta=ChoiceDelta(), finish_reason="stop")]
        )
        yield f"data: {final_chunk.json()}\n\n"
        yield "data: [DONE]\n\n"
    except httpx.RequestError as e:
        print(f"HTTPX Request Error: {e}")
        # Yield an error message or handle in the endpoint
        # For now, let the endpoint handle it
        raise HTTPException(status_code=500, detail=f"Error connecting to Notion API: {e}")
    except Exception as e:
        print(f"Unexpected error during streaming: {e}")
        # Yield an error message or handle in the endpoint
        raise HTTPException(status_code=500, detail=f"Internal server error during streaming: {e}")
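
# For reference, the ndjson stream parsed above is expected to contain lines like
# the following (an illustrative sketch inferred from the checks in
# stream_notion_response, not a verbatim capture of Notion's wire format):
#
#   {"type": "markdown-chat", "value": "Partial assistant text..."}
#   {"recordMap": {...}}   # treated as the end of the text stream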
# --- API Endpoints ---
@app.get("/v1/models", response_model=ModelList)
async def list_models(authenticated: bool = Depends(authenticate)):
"""
Endpoint to list available Notion models, mimicking OpenAI's /v1/models.
"""
available_models = [
"openai-gpt-4.1",
"anthropic-opus-4",
"anthropic-sonnet-4"
]
model_list = [
Model(id=model_id, owned_by="notion") # created uses default_factory
for model_id in available_models
]
return ModelList(data=model_list)
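
# For reference, this endpoint can be exercised with a plain HTTP call, e.g.
# (illustrative; assumes the server runs on the host/port configured at the
# bottom of this file):
#
#   curl -H "Authorization: Bearer <PROXY_AUTH_TOKEN>" http://127.0.0.1:7860/v1/models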
@app.post("/v1/chat/completions")
async def chat_completions(request_data: ChatCompletionRequest, request: Request, authenticated: bool = Depends(authenticate)):
"""
Endpoint to mimic OpenAI's chat completions, proxying to Notion.
"""
if not NOTION_COOKIE:
raise HTTPException(status_code=500, detail="Server configuration error: Notion cookie not set.")
notion_request_body = build_notion_request(request_data)
if request_data.stream:
return StreamingResponse(
stream_notion_response(notion_request_body),
media_type="text/event-stream"
)
else:
# --- Non-Streaming Logic (Optional - Collects stream internally) ---
# Note: The primary goal is streaming, but a non-streaming version
# might be useful for testing or simpler clients.
# This requires collecting all chunks from the async generator.
full_response_content = ""
final_finish_reason = None
chunk_id = f"chatcmpl-{uuid.uuid4()}" # Generate ID for the non-streamed response
created_time = int(time.time())
try:
async for line in stream_notion_response(notion_request_body):
if line.startswith("data: ") and "[DONE]" not in line:
try:
data_json = line[len("data: "):].strip()
if data_json:
chunk_data = json.loads(data_json)
if chunk_data.get("choices"):
delta = chunk_data["choices"][0].get("delta", {})
content = delta.get("content")
if content:
full_response_content += content
finish_reason = chunk_data["choices"][0].get("finish_reason")
if finish_reason:
final_finish_reason = finish_reason
except json.JSONDecodeError:
print(f"Warning: Could not decode JSON line in non-streaming mode: {line}")
# Construct the final OpenAI-compatible non-streaming response
return {
"id": chunk_id,
"object": "chat.completion",
"created": created_time,
"model": request_data.model, # Return the model requested by the client
"choices": [
{
"index": 0,
"message": {
"role": "assistant",
"content": full_response_content,
},
"finish_reason": final_finish_reason or "stop", # Default to stop if not explicitly set
}
],
"usage": { # Note: Token usage is not available from Notion
"prompt_tokens": None,
"completion_tokens": None,
"total_tokens": None,
},
}
except HTTPException as e:
# Re-raise HTTP exceptions from the streaming function
raise e
except Exception as e:
print(f"Error during non-streaming processing: {e}")
raise HTTPException(status_code=500, detail="Internal server error processing Notion response")
# --- Uvicorn Runner ---
# Allows running with `python main.py` for simple testing,
# but `uvicorn main:app --reload` is recommended for development.
if __name__ == "__main__":
import uvicorn
print("Starting server. Access at http://127.0.0.1:7860")
print("Ensure NOTION_COOKIE is set in your .env file or environment.")
uvicorn.run(app, host="127.0.0.1", port=7860) |
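
# Example client usage against this proxy (an illustrative sketch; it assumes the
# server is running locally as configured above and that the `openai` Python
# package (>= 1.0) is installed, which this module itself does not require):
#
#   from openai import OpenAI
#
#   client = OpenAI(
#       base_url="http://127.0.0.1:7860/v1",
#       api_key="<value of PROXY_AUTH_TOKEN>",  # sent as the Bearer token
#   )
#   stream = client.chat.completions.create(
#       model="anthropic-sonnet-4",  # any id returned by /v1/models
#       messages=[{"role": "user", "content": "Hello from the proxy"}],
#       stream=True,
#   )
#   for chunk in stream:
#       print(chunk.choices[0].delta.content or "", end="")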