import os
import uuid
import json
import time
import secrets  # For secure token comparison
import httpx
from fastapi import FastAPI, Request, HTTPException, Depends, status
from fastapi.security import HTTPBearer, HTTPAuthorizationCredentials
from fastapi.responses import StreamingResponse
from dotenv import load_dotenv
from models import (
    ChatMessage, ChatCompletionRequest, NotionTranscriptConfigValue,
    NotionTranscriptItem, NotionDebugOverrides, NotionRequestBody,
    ChoiceDelta, Choice, ChatCompletionChunk, Model, ModelList
)

# Load environment variables from .env file
load_dotenv()

# --- Configuration ---
NOTION_API_URL = "https://www.notion.so/api/v3/runInferenceTranscript"
# IMPORTANT: Load the Notion cookie securely from environment variables
NOTION_COOKIE = os.getenv("NOTION_COOKIE")
NOTION_SPACE_ID = os.getenv("NOTION_SPACE_ID")

if not NOTION_COOKIE:
    print("Error: NOTION_COOKIE environment variable not set.")
    # Consider raising an exception or exiting in a real deployment

if not NOTION_SPACE_ID:
    print("Warning: NOTION_SPACE_ID environment variable not set. Using a default UUID.")
    # Using a default might not be ideal; it depends on Notion's behavior.
    # Consider raising an error instead: raise ValueError("NOTION_SPACE_ID not set")
    NOTION_SPACE_ID = str(uuid.uuid4())  # Default or raise error
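
# A minimal sketch of the .env file this module expects. The variable names
# are the ones actually read via os.getenv() in this file; the values shown
# are placeholders, not real credentials (NOTION_ACTIVE_USER_HEADER is
# optional and only sent when set):
#
#   NOTION_COOKIE='<cookie header copied from a logged-in Notion session>'
#   NOTION_SPACE_ID='00000000-0000-0000-0000-000000000000'
#   PROXY_AUTH_TOKEN='change-me'   # bearer token clients must present
#   NOTION_ACTIVE_USER_HEADER='...'  # value for x-notion-active-user-header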

# --- Authentication ---
EXPECTED_TOKEN = os.getenv("PROXY_AUTH_TOKEN", "default_token")  # Default token
security = HTTPBearer()


def authenticate(credentials: HTTPAuthorizationCredentials = Depends(security)):
    """Compares the provided token with the expected token."""
    correct_token = secrets.compare_digest(credentials.credentials, EXPECTED_TOKEN)
    if not correct_token:
        raise HTTPException(
            status_code=status.HTTP_401_UNAUTHORIZED,
            detail="Invalid authentication credentials",
            # WWW-Authenticate header removed for Bearer
        )
    return True  # Indicate successful authentication


# --- FastAPI App ---
app = FastAPI()


# --- Helper Functions ---
def build_notion_request(request_data: ChatCompletionRequest) -> NotionRequestBody:
    """Transforms OpenAI-style messages into Notion's transcript format."""
    transcript = [
        NotionTranscriptItem(
            type="config",
            value=NotionTranscriptConfigValue(model=request_data.notion_model)
        )
    ]
    for message in request_data.messages:
        # Map the 'assistant' role to 'markdown-chat'; all other roles become 'user'
        if message.role == "assistant":
            # Notion uses "markdown-chat" for assistant replies in the transcript history
            transcript.append(NotionTranscriptItem(type="markdown-chat", value=message.content))
        else:  # Handles 'user', 'system', etc.
            content = message.content
            if isinstance(content, str):
                # String content: append one item, using [[""]] for empty strings
                notion_value = [[content]] if content else [[""]]
                transcript.append(NotionTranscriptItem(type="user", value=notion_value))
            elif isinstance(content, list):
                # List content: append a SEPARATE item for each valid text part
                found_text_part = False
                for part in content:
                    # Accept only dict parts with type="text" and non-empty text
                    if isinstance(part, dict) and part.get("type") == "text":
                        text_content = part.get("text")
                        if isinstance(text_content, str) and text_content:
                            transcript.append(NotionTranscriptItem(type="user", value=[[text_content]]))
                            found_text_part = True
                # If the list was empty or had no valid text parts, append a default empty item
                if not found_text_part:
                    print(f"Error: no valid input found: {message}")
                    transcript.append(NotionTranscriptItem(type="user", value=[[""]]))
            else:
                # Unexpected content types (e.g., None, int): append a default empty item
                transcript.append(NotionTranscriptItem(type="user", value=[[""]]))
                print(f"Error: no valid input found: {message}")

    # Use the globally configured spaceId and always create a new thread
    return NotionRequestBody(
        spaceId=NOTION_SPACE_ID,  # From environment variable
        transcript=transcript,
        createThread=True,
        traceId=str(uuid.uuid4()),  # New traceId for each request
        # Explicitly set debugOverrides, generateTitle, and saveAllThreadOperations
        debugOverrides=NotionDebugOverrides(
            cachedInferences={},
            annotationInferences={},
            emitInferences=False
        ),
        generateTitle=False,
        saveAllThreadOperations=False
    )
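
# For reference, the body built above for a short conversation looks roughly
# like the following (field names taken from the code above; the exact wire
# format is whatever the Pydantic models in models.py serialize to):
#
#   {
#     "spaceId": "<NOTION_SPACE_ID>",
#     "transcript": [
#       {"type": "config", "value": {"model": "<notion_model>"}},
#       {"type": "user", "value": [["Hello!"]]},
#       {"type": "markdown-chat", "value": "Hi, how can I help?"}
#     ],
#     "createThread": true,
#     "traceId": "<uuid4>",
#     "debugOverrides": {"cachedInferences": {}, "annotationInferences": {}, "emitInferences": false},
#     "generateTitle": false,
#     "saveAllThreadOperations": false
#   }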

async def stream_notion_response(notion_request_body: NotionRequestBody):
    """Streams the request to Notion and yields OpenAI-compatible SSE chunks."""
    headers = {
        'accept': 'application/x-ndjson',
        'accept-language': 'en-US,en;q=0.9,zh-CN;q=0.8,zh;q=0.7,zh-TW;q=0.6,ja;q=0.5',
        'content-type': 'application/json',
        'notion-audit-log-platform': 'web',
        'notion-client-version': '23.13.0.3604',  # Consider making this configurable
        'origin': 'https://www.notion.so',
        'priority': 'u=1, i',
        # Referer might be optional or need adjustment; the threadId part was removed
        'referer': 'https://www.notion.so/chat',
        'sec-ch-ua': '"Chromium";v="136", "Google Chrome";v="136", "Not.A/Brand";v="99"',
        'sec-ch-ua-mobile': '?0',
        'sec-ch-ua-platform': '"Windows"',
        'sec-fetch-dest': 'empty',
        'sec-fetch-mode': 'cors',
        'sec-fetch-site': 'same-origin',
        'user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/136.0.0.0 Safari/537.36',
        'cookie': NOTION_COOKIE,  # Loaded from .env
        'x-notion-space-id': NOTION_SPACE_ID,  # Added space ID header
    }
    # Conditionally add the active user header
    notion_active_user = os.getenv("NOTION_ACTIVE_USER_HEADER")
    if notion_active_user:  # Checks for None and empty string implicitly
        headers['x-notion-active-user-header'] = notion_active_user

    chunk_id = f"chatcmpl-{uuid.uuid4()}"
    created_time = int(time.time())

    try:
        async with httpx.AsyncClient(timeout=None) as client:  # No timeout for streaming
            async with client.stream("POST", NOTION_API_URL, json=notion_request_body.dict(), headers=headers) as response:
                if response.status_code != 200:
                    error_content = await response.aread()
                    print(f"Error from Notion API: {response.status_code}")
                    print(f"Response: {error_content.decode()}")
                    # Yield an error message in SSE format? For now, raise an
                    # HTTPException that mirrors Notion's status code.
                    raise HTTPException(status_code=response.status_code, detail=f"Notion API Error: {error_content.decode()}")

                async for line in response.aiter_lines():
                    if not line.strip():
                        continue
                    try:
                        data = json.loads(line)
                        # "markdown-chat" messages with a string value carry the text chunks
                        if data.get("type") == "markdown-chat" and isinstance(data.get("value"), str):
                            content_chunk = data["value"]
                            if content_chunk:  # Only send if there's content
                                chunk = ChatCompletionChunk(
                                    id=chunk_id,
                                    created=created_time,
                                    choices=[Choice(delta=ChoiceDelta(content=content_chunk))]
                                )
                                yield f"data: {chunk.json()}\n\n"
                        # No explicit end-of-stream marker is known; we assume markdown-chat
                        # stops when the main content is done. A recordMap message is
                        # definitely past the text stream, so stop there.
                        elif "recordMap" in data:
                            print("Detected recordMap, stopping stream.")
                            break  # Stop processing after recordMap
                    except json.JSONDecodeError:
                        print(f"Warning: Could not decode JSON line: {line}")
                    except Exception as e:
                        print(f"Error processing line: {line} - {e}")
                        # Decide whether to continue or stop; currently we continue

        # Send the final chunk indicating stop
        final_chunk = ChatCompletionChunk(
            id=chunk_id,
            created=created_time,
            choices=[Choice(delta=ChoiceDelta(), finish_reason="stop")]
        )
        yield f"data: {final_chunk.json()}\n\n"
        yield "data: [DONE]\n\n"

    except HTTPException:
        # Propagate HTTP errors (e.g., a non-200 from Notion) unchanged rather
        # than letting the generic handler below re-wrap them as 500s
        raise
    except httpx.RequestError as e:
        print(f"HTTPX Request Error: {e}")
        # Let the endpoint handle the error
        raise HTTPException(status_code=500, detail=f"Error connecting to Notion API: {e}")
    except Exception as e:
        print(f"Unexpected error during streaming: {e}")
        raise HTTPException(status_code=500, detail=f"Internal server error during streaming: {e}")
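
# To illustrate the translation above: a Notion NDJSON line such as
#
#   {"type": "markdown-chat", "value": "Hello"}
#
# is re-emitted as an OpenAI-style SSE chunk (shape inferred from the
# ChatCompletionChunk/Choice/ChoiceDelta models; exact fields depend on models.py):
#
#   data: {"id": "chatcmpl-<uuid>", "created": 1700000000, "choices": [{"delta": {"content": "Hello"}}]}
#
# followed at end-of-stream by a finish_reason="stop" chunk and "data: [DONE]".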

# --- API Endpoints ---
@app.get("/v1/models", response_model=ModelList)
async def list_models(authenticated: bool = Depends(authenticate)):
    """Lists available Notion models, mimicking OpenAI's /v1/models."""
    available_models = [
        "openai-gpt-4.1",
        "anthropic-opus-4",
        "anthropic-sonnet-4",
    ]
    model_list = [
        Model(id=model_id, owned_by="notion")  # 'created' uses its default_factory
        for model_id in available_models
    ]
    return ModelList(data=model_list)


@app.post("/v1/chat/completions")
async def chat_completions(request_data: ChatCompletionRequest, request: Request, authenticated: bool = Depends(authenticate)):
    """Mimics OpenAI's chat completions endpoint, proxying to Notion."""
    if not NOTION_COOKIE:
        raise HTTPException(status_code=500, detail="Server configuration error: Notion cookie not set.")

    notion_request_body = build_notion_request(request_data)

    if request_data.stream:
        return StreamingResponse(
            stream_notion_response(notion_request_body),
            media_type="text/event-stream"
        )

    # --- Non-Streaming Logic ---
    # The primary goal is streaming, but a non-streaming version is useful for
    # testing and simpler clients. It collects all chunks from the async generator.
    full_response_content = ""
    final_finish_reason = None
    chunk_id = f"chatcmpl-{uuid.uuid4()}"  # ID for the non-streamed response
    created_time = int(time.time())
    try:
        async for line in stream_notion_response(notion_request_body):
            if line.startswith("data: ") and "[DONE]" not in line:
                try:
                    data_json = line[len("data: "):].strip()
                    if data_json:
                        chunk_data = json.loads(data_json)
                        if chunk_data.get("choices"):
                            delta = chunk_data["choices"][0].get("delta", {})
                            content = delta.get("content")
                            if content:
                                full_response_content += content
                            finish_reason = chunk_data["choices"][0].get("finish_reason")
                            if finish_reason:
                                final_finish_reason = finish_reason
                except json.JSONDecodeError:
                    print(f"Warning: Could not decode JSON line in non-streaming mode: {line}")

        # Construct the final OpenAI-compatible non-streaming response
        return {
            "id": chunk_id,
            "object": "chat.completion",
            "created": created_time,
            "model": request_data.model,  # Echo the model requested by the client
            "choices": [
                {
                    "index": 0,
                    "message": {
                        "role": "assistant",
                        "content": full_response_content,
                    },
                    "finish_reason": final_finish_reason or "stop",  # Default to "stop" if not set
                }
            ],
            "usage": {
                # Token usage is not available from Notion
                "prompt_tokens": None,
                "completion_tokens": None,
                "total_tokens": None,
            },
        }
    except HTTPException:
        # Re-raise HTTP exceptions from the streaming function
        raise
    except Exception as e:
        print(f"Error during non-streaming processing: {e}")
        raise HTTPException(status_code=500, detail="Internal server error processing Notion response")


# --- Uvicorn Runner ---
# Allows running with `python main.py` for simple testing;
# `uvicorn main:app --reload` is recommended for development.
if __name__ == "__main__":
    import uvicorn

    print("Starting server. Access at http://127.0.0.1:7860")
    print("Ensure NOTION_COOKIE is set in your .env file or environment.")
    uvicorn.run(app, host="127.0.0.1", port=7860)
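
# Example client call against this proxy (assuming the defaults above: the
# server on 127.0.0.1:7860 and the placeholder PROXY_AUTH_TOKEN from the .env
# sketch; the model name should be one returned by /v1/models):
#
#   curl -N http://127.0.0.1:7860/v1/chat/completions \
#     -H "Authorization: Bearer change-me" \
#     -H "Content-Type: application/json" \
#     -d '{"model": "anthropic-sonnet-4", "stream": true,
#          "messages": [{"role": "user", "content": "Hello!"}]}'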