|
|
from fastapi import FastAPI, HTTPException, Request, UploadFile, File, Depends, Header |
|
|
from fastapi.responses import StreamingResponse, HTMLResponse, Response |
|
|
from fastapi.security import HTTPBearer, HTTPAuthorizationCredentials |
|
|
from pydantic import BaseModel, Field |
|
|
from typing import Optional, List, Dict, Any |
|
|
import httpx |
|
|
import os |
|
|
import json |
|
|
import logging |
|
|
from datetime import datetime |
|
|
|
|
|
|
|
|
# Module-wide logging: INFO level, module-scoped logger per stdlib convention.
logging.basicConfig(level=logging.INFO)

logger = logging.getLogger(__name__)
|
|
|
|
|
# FastAPI application instance. The custom `servers` entry pins the public
# production URL; `defaultModelsExpandDepth: -1` hides the Schemas section
# in the Swagger UI.
app = FastAPI(
    title="OMNIAPI Services",
    description="Professional AI Services - Chat Completions, Image Generation, Web Search, Speech-To-Text, and Text-to-Speech",
    version="1.0.0",
    servers=[
        {
            "url": "https://at41rv-a77.hf.space",
            "description": "Production server"
        }
    ],
    swagger_ui_parameters={"defaultModelsExpandDepth": -1}
)
|
|
|
|
|
|
|
|
# Backend configuration: HF_TOKEN authenticates this proxy to the backend
# service; ACCESS_TOKEN (optional) is a locally-accepted bearer token that
# short-circuits backend validation in verify_token.
HF_TOKEN = os.getenv("HF_TOKEN")

BACKEND_URL = "https://at41rv-a77backend.hf.space"

ACCESS_TOKEN = os.getenv("ACCESS_TOKEN", "")

if not HF_TOKEN:
    # Backend requests will be sent without a usable bearer token.
    logger.warning("HF_TOKEN not found in environment variables")

# Bearer-token security scheme consumed by verify_token via Depends().
security = HTTPBearer()

# In-memory API-key registries.
# NOTE(review): neither dict is referenced elsewhere in this module — key
# storage appears to live in the backend service; confirm before removing.
generated_api_keys = {}

user_api_keys = {}
|
|
|
async def verify_token(credentials: HTTPAuthorizationCredentials = Depends(security)):
    """
    Verify the access token from the Authorization header.

    Accepts the locally-configured ACCESS_TOKEN directly; any other token is
    forwarded to the backend /validate-token endpoint. If that endpoint does
    not exist (backend returns 404), validation is skipped and the token is
    accepted as-is.

    Returns:
        The validated bearer token string.

    Raises:
        HTTPException: 401 when the backend rejects the token or when the
            validation request itself fails.
    """
    token = credentials.credentials

    # Fast path: token matches the locally-configured access token.
    if ACCESS_TOKEN and token == ACCESS_TOKEN:
        return token

    try:
        response = await make_backend_request("/validate-token", "POST", {"token": token})
        if response.status_code == 200:
            data = response.json()
            if data.get("valid", False):
                return token
            else:
                logger.error(f"Token validation failed: Backend response indicates token is invalid. Response: {data}")
                raise HTTPException(status_code=401, detail="Invalid access token. Backend validation failed.")
        elif response.status_code == 404:
            # Backend has no /validate-token route; pass the token through.
            logger.warning(f"Token validation endpoint not found (404). Skipping validation and using token directly. Response: {response.text}")
            return token
        else:
            logger.error(f"Token validation failed: Backend returned status code {response.status_code}. Response: {response.text}")
            raise HTTPException(status_code=401, detail=f"Invalid access token. Backend returned status {response.status_code}.")
    except HTTPException:
        # Bug fix: the generic handler below previously caught the 401s raised
        # above and re-raised them with a mangled "Validation error: 401: ..."
        # detail message. Let deliberate HTTPExceptions propagate untouched.
        raise
    except Exception as e:
        logger.error(f"Token validation error: {str(e)}")
        raise HTTPException(status_code=401, detail=f"Invalid access token. Validation error: {str(e)}")
|
|
|
|
|
|
|
|
# Default timeout (seconds) for proxied backend requests.
REQUEST_TIMEOUT = 60.0
|
|
|
|
|
|
|
|
# A single chat message in the OpenAI-compatible format.
class Message(BaseModel):
    # Sender role (e.g. "user", "assistant", "system").
    role: str = Field(..., description="Role of the message sender", example="user")
    # Text content of the message.
    content: str = Field(..., description="Content of the message", example="Hello, how are you?")
|
|
|
|
|
class SimpleChatRequest(BaseModel):
    """Request body for /chat/completions.

    Accepts either an OpenAI-style ``messages`` list or a legacy ``prompt``
    string; the endpoint rejects requests that provide neither.
    """
    model: str = Field(..., description="AI model to use for completion", example="gpt-4o")
    # Bug fix: these two fields default to None, so their annotations must be
    # Optional[...] — a bare `str`/`List[Message]` annotation with a None
    # default is rejected by pydantic v2 validation.
    prompt: Optional[str] = Field(default=None, nullable=True, description="Text prompt for the AI to respond to (legacy)", example="Explain quantum computing in simple terms")
    messages: Optional[List[Message]] = Field(default=None, nullable=True, description="List of messages for OpenAI-compatible chat")
    system_prompt: str = Field(
        default="You are a helpful AI assistant.",
        nullable=True,
        description="System prompt to set AI behavior and personality",
        example="You are a helpful physics teacher who explains complex topics simply."
    )
    max_tokens: int = Field(default=2048, nullable=True, description="Maximum tokens to generate", example=2048)
    temperature: float = Field(default=0.7, nullable=True, description="Temperature for response randomness", example=0.7)
    stream: bool = Field(default=False, nullable=True, description="Whether to stream the response", example=False)
|
|
|
|
|
# Request body for /image/generate.
class GenerationRequest(BaseModel):
    # Natural-language description of the desired image.
    prompt: str = Field(..., description="Text description of the image to generate", example="A beautiful sunset over mountains")
    # Backend image model identifier.
    model: str = Field(..., description="Image generation model to use", example="flux.1-dev")
    # Output resolution in "WIDTHxHEIGHT" form.
    size: str = Field(default="1024x1024", nullable=True, description="Image size", example="1024x1024")
|
|
|
|
|
# Shared request body for /web/search, /image/search, and /videos/search.
class SearchRequest(BaseModel):
    query: str = Field(..., description="Search query", example="artificial intelligence")
    max_results: int = Field(default=10, nullable=True, description="Maximum number of results", example=10)
    region: str = Field(default="us", nullable=True, description="Search region", example="us")
    safesearch: str = Field(default="moderate", nullable=True, description="Safe search level", example="moderate")
    # Only meaningful for web search result scraping, per the description.
    max_chars: int = Field(default=2000, nullable=True, description="Maximum characters to scrape from URL", example=2000)
|
|
|
|
|
# Request body for /tts/generate.
class TTSRequest(BaseModel):
    text: str = Field(..., description="Text to convert to speech", example="Hello, this is a test message")
    # Provider name; available voices per provider come from /tts/{provider}/voices.
    provider: str = Field(..., description="TTS provider to use", example="GesseritTTS")
    voice: str = Field(..., description="Voice to use for TTS", example="Emma")
|
|
|
|
|
# Request body for /generate-api-key.
class APIKeyCreateRequest(BaseModel):
    # Human-readable label for the key; length enforced by pydantic.
    name: str = Field(..., min_length=1, max_length=50, description="Name for the API key", example="My API Key")
|
|
|
|
|
# Response model for /generate-api-key (mirrors the backend's payload).
class APIKeyResponse(BaseModel):
    success: bool = Field(..., description="Whether the operation was successful", example=True)
    # The full, unmasked key — only returned once, at creation time.
    api_key: str = Field(..., description="Generated API key", example="oa1234567890abcdef...")
    name: str = Field(default="", nullable=True, description="Name of the API key", example="My API Key")
    message: str = Field(..., description="Response message", example="API key generated successfully")
    created_at: str = Field(..., description="Creation timestamp", example="2024-01-20 10:30:00 UTC")
    expires: str = Field(default="Never", description="Expiration date", example="Never")
    usage_note: str = Field(..., description="Usage instructions", example="Include this API key in the Authorization header")
|
|
|
|
|
# A single API-key entry in the /api-keys/list response.
class APIKeyInfo(BaseModel):
    key: str = Field(..., description="API key (full or masked)", example="oa1234567890abcdef...")
    name: str = Field(..., description="Name of the API key", example="My API Key")
    created_at: str = Field(..., description="Creation timestamp", example="2024-01-20 10:30:00 UTC")
    # None when the key has never been used.
    last_used: Optional[str] = Field(None, description="Last used timestamp", example="2024-01-20 15:45:00 UTC")
    usage_count: int = Field(..., description="Number of times used", example=42)
    key_preview: str = Field(..., description="Masked preview of the key", example="oa1234...********")
|
|
|
|
|
# Response model for /api-keys/list.
class UserAPIKeysResponse(BaseModel):
    success: bool = Field(..., description="Whether the operation was successful", example=True)
    keys: List[APIKeyInfo] = Field(..., description="List of user's API keys")
    total_keys: int = Field(..., description="Total number of keys", example=2)
    max_keys: int = Field(default=3, description="Maximum allowed keys", example=3)
    remaining_slots: int = Field(..., description="Remaining key slots", example=1)
|
|
|
|
|
# Response model for DELETE /api-keys/{api_key}.
class APIKeyDeleteResponse(BaseModel):
    success: bool = Field(..., description="Whether the operation was successful", example=True)
    message: str = Field(..., description="Response message", example="API key deleted successfully")
|
|
|
|
|
async def make_backend_request(
    endpoint: str,
    method: str = "GET",
    data: dict = None,
    params: dict = None,
    files: dict = None,
    timeout: float = REQUEST_TIMEOUT
) -> httpx.Response:
    """Make an authenticated request to the backend API.

    Args:
        endpoint: Backend path (e.g. "/health"), appended to BACKEND_URL.
        method: HTTP verb — GET, POST, or DELETE (case-insensitive).
        data: JSON body for non-multipart POST requests.
        params: Query-string parameters.
        files: Multipart file payload for POST requests.
        timeout: Per-request timeout in seconds.

    Returns:
        The raw httpx.Response; status handling is left to the caller.

    Raises:
        HTTPException: 405 for unsupported methods, 504 on timeout,
            502 on connection errors, 500 on anything unexpected.
    """
    headers = {
        "Authorization": f"Bearer {HF_TOKEN}",
        "User-Agent": "OMNIAPI-Proxy/1.0"
    }

    # Only set an explicit JSON content type for non-multipart requests;
    # httpx must generate its own multipart boundary for file uploads.
    if not files:
        headers["Content-Type"] = "application/json"

    try:
        async with httpx.AsyncClient(timeout=timeout) as client:
            if method.upper() == "GET":
                response = await client.get(
                    f"{BACKEND_URL}{endpoint}",
                    headers=headers,
                    params=params
                )
            elif method.upper() == "POST":
                if files:
                    # Defensive: drop any JSON content type before a multipart post.
                    headers.pop("Content-Type", None)
                    response = await client.post(
                        f"{BACKEND_URL}{endpoint}",
                        headers=headers,
                        files=files,
                        params=params
                    )
                else:
                    response = await client.post(
                        f"{BACKEND_URL}{endpoint}",
                        headers=headers,
                        json=data,
                        params=params
                    )
            elif method.upper() == "DELETE":
                response = await client.delete(
                    f"{BACKEND_URL}{endpoint}",
                    headers=headers,
                    params=params
                )
            else:
                raise HTTPException(status_code=405, detail="Method not allowed")

        return response

    except HTTPException:
        # Bug fix: the generic handler below previously swallowed the 405
        # raised for unsupported methods and re-raised it as a 500.
        raise
    except httpx.TimeoutException:
        # Must precede RequestError: TimeoutException is a RequestError subclass.
        logger.error(f"Request timeout to {endpoint}")
        raise HTTPException(status_code=504, detail="Backend request timeout")
    except httpx.RequestError as e:
        logger.error(f"Request error to {endpoint}: {str(e)}")
        raise HTTPException(status_code=502, detail="Backend connection error")
    except Exception as e:
        logger.error(f"Unexpected error: {str(e)}")
        raise HTTPException(status_code=500, detail="Internal server error")
|
|
|
|
|
@app.get("/")
async def root():
    """Root endpoint returning OpenAPI specification.

    Returns a hand-written OpenAPI 3.0 document describing every route this
    proxy exposes.

    NOTE(review): this spec duplicates FastAPI's auto-generated
    /openapi.json and must be kept in sync with the route definitions below
    by hand — confirm whether serving the generated spec would suffice.
    """
    return {
        "openapi": "3.0.0",
        "info": {
            "title": "OMNIAPI Services",
            "description": "Professional AI Services - Chat Completions, Image Generation, Web Search, Speech-To-Text, and Text-to-Speech",
            "version": "1.0.0"
        },
        "servers": [
            {
                "url": "https://at41rv-a77.hf.space",
                "description": "Production server"
            }
        ],
        "paths": {
            # --- Service metadata ---
            "/": {
                "get": {
                    "summary": "Root endpoint returning OpenAPI specification",
                    "responses": {
                        "200": {
                            "description": "OpenAPI specification",
                            "content": {
                                "application/json": {
                                    "schema": {
                                        "type": "object"
                                    }
                                }
                            }
                        }
                    }
                }
            },
            "/health": {
                "get": {
                    "summary": "Health check endpoint",
                    "responses": {
                        "200": {
                            "description": "Service health status"
                        }
                    }
                }
            },
            # --- Chat ---
            "/models": {
                "get": {
                    "summary": "Get available chat models",
                    "security": [{"bearerAuth": []}],
                    "responses": {
                        "200": {
                            "description": "List of available models"
                        }
                    }
                }
            },
            "/chat/completions": {
                "post": {
                    "summary": "Chat completions (OpenAI Compatible)",
                    "security": [{"bearerAuth": []}],
                    "requestBody": {
                        "required": True,
                        "content": {
                            "application/json": {
                                "schema": {
                                    "type": "object",
                                    "properties": {
                                        "model": {"type": "string"},
                                        "messages": {
                                            "type": "array",
                                            "items": {
                                                "type": "object",
                                                "properties": {
                                                    "role": {"type": "string"},
                                                    "content": {"type": "string"}
                                                }
                                            }
                                        },
                                        "max_tokens": {"type": "integer"},
                                        "temperature": {"type": "number"},
                                        "stream": {"type": "boolean"}
                                    }
                                }
                            }
                        }
                    },
                    "responses": {
                        "200": {
                            "description": "Chat completion response"
                        }
                    }
                }
            },
            # --- Image generation ---
            "/image/generate": {
                "post": {
                    "summary": "Generate images",
                    "security": [{"bearerAuth": []}],
                    "requestBody": {
                        "required": True,
                        "content": {
                            "application/json": {
                                "schema": {
                                    "type": "object",
                                    "properties": {
                                        "prompt": {"type": "string"},
                                        "model": {"type": "string"},
                                        "size": {"type": "string"}
                                    }
                                }
                            }
                        }
                    },
                    "responses": {
                        "200": {
                            "description": "Generated image response"
                        }
                    }
                }
            },
            # --- Search (web / image / video share the same request shape) ---
            "/web/search": {
                "post": {
                    "summary": "Web search",
                    "security": [{"bearerAuth": []}],
                    "requestBody": {
                        "required": True,
                        "content": {
                            "application/json": {
                                "schema": {
                                    "type": "object",
                                    "properties": {
                                        "query": {"type": "string"},
                                        "max_results": {"type": "integer"},
                                        "region": {"type": "string"},
                                        "safesearch": {"type": "string"}
                                    }
                                }
                            }
                        }
                    },
                    "responses": {
                        "200": {
                            "description": "Search results"
                        }
                    }
                }
            },
            "/image/search": {
                "post": {
                    "summary": "Image search",
                    "security": [{"bearerAuth": []}],
                    "requestBody": {
                        "required": True,
                        "content": {
                            "application/json": {
                                "schema": {
                                    "type": "object",
                                    "properties": {
                                        "query": {"type": "string"},
                                        "max_results": {"type": "integer"},
                                        "region": {"type": "string"},
                                        "safesearch": {"type": "string"}
                                    }
                                }
                            }
                        }
                    },
                    "responses": {
                        "200": {
                            "description": "Image search results"
                        }
                    }
                }
            },
            "/videos/search": {
                "post": {
                    "summary": "Video search",
                    "security": [{"bearerAuth": []}],
                    "requestBody": {
                        "required": True,
                        "content": {
                            "application/json": {
                                "schema": {
                                    "type": "object",
                                    "properties": {
                                        "query": {"type": "string"},
                                        "max_results": {"type": "integer"},
                                        "region": {"type": "string"},
                                        "safesearch": {"type": "string"}
                                    }
                                }
                            }
                        }
                    },
                    "responses": {
                        "200": {
                            "description": "Video search results"
                        }
                    }
                }
            },
            # --- Text-to-speech ---
            "/tts/{provider}/voices": {
                "get": {
                    "summary": "Get TTS voices for provider",
                    "security": [{"bearerAuth": []}],
                    "parameters": [
                        {
                            "name": "provider",
                            "in": "path",
                            "required": True,
                            "schema": {"type": "string"}
                        }
                    ],
                    "responses": {
                        "200": {
                            "description": "Available voices"
                        }
                    }
                }
            },
            "/tts/generate": {
                "post": {
                    "summary": "Generate TTS audio",
                    "security": [{"bearerAuth": []}],
                    "requestBody": {
                        "required": True,
                        "content": {
                            "application/json": {
                                "schema": {
                                    "type": "object",
                                    "properties": {
                                        "text": {"type": "string"},
                                        "provider": {"type": "string"},
                                        "voice": {"type": "string"}
                                    }
                                }
                            }
                        }
                    },
                    "responses": {
                        "200": {
                            "description": "Generated audio response"
                        }
                    }
                }
            },
            # --- Speech-to-text ---
            "/transcribe": {
                "post": {
                    "summary": "Audio transcription",
                    "security": [{"bearerAuth": []}],
                    "requestBody": {
                        "required": True,
                        "content": {
                            "multipart/form-data": {
                                "schema": {
                                    "type": "object",
                                    "properties": {
                                        "audio_file": {
                                            "type": "string",
                                            "format": "binary"
                                        }
                                    }
                                }
                            }
                        }
                    },
                    "responses": {
                        "200": {
                            "description": "Transcription result"
                        }
                    }
                }
            },
            # --- Generated-media file serving ---
            "/audio/{audio_id}": {
                "get": {
                    "summary": "Serve audio file",
                    "security": [{"bearerAuth": []}],
                    "parameters": [
                        {
                            "name": "audio_id",
                            "in": "path",
                            "required": True,
                            "schema": {"type": "string"}
                        }
                    ],
                    "responses": {
                        "200": {
                            "description": "Audio file",
                            "content": {
                                "audio/mpeg": {
                                    "schema": {
                                        "type": "string",
                                        "format": "binary"
                                    }
                                }
                            }
                        }
                    }
                }
            },
            "/image/{image_id}": {
                "get": {
                    "summary": "Serve image file",
                    "security": [{"bearerAuth": []}],
                    "parameters": [
                        {
                            "name": "image_id",
                            "in": "path",
                            "required": True,
                            "schema": {"type": "string"}
                        }
                    ],
                    "responses": {
                        "200": {
                            "description": "Image file",
                            "content": {
                                "image/jpeg": {
                                    "schema": {
                                        "type": "string",
                                        "format": "binary"
                                    }
                                }
                            }
                        }
                    }
                }
            },
            # --- API-key management ---
            "/generate-api-key": {
                "post": {
                    "summary": "Generate a new API key",
                    "requestBody": {
                        "required": True,
                        "content": {
                            "application/json": {
                                "schema": {
                                    "type": "object",
                                    "properties": {
                                        "name": {"type": "string"}
                                    }
                                }
                            }
                        }
                    },
                    "responses": {
                        "200": {
                            "description": "API key generated successfully"
                        }
                    }
                }
            },
            "/api-keys/list": {
                "get": {
                    "summary": "List user API keys",
                    "responses": {
                        "200": {
                            "description": "List of user API keys"
                        }
                    }
                }
            },
            "/api-keys/{api_key}": {
                "delete": {
                    "summary": "Delete an API key",
                    "parameters": [
                        {
                            "name": "api_key",
                            "in": "path",
                            "required": True,
                            "schema": {"type": "string"}
                        }
                    ],
                    "responses": {
                        "200": {
                            "description": "API key deleted successfully"
                        }
                    }
                }
            }
        },
        "components": {
            "securitySchemes": {
                "bearerAuth": {
                    "type": "http",
                    "scheme": "bearer",
                    "bearerFormat": "JWT"
                }
            }
        }
    }
|
|
|
|
|
@app.get("/health")
async def health_check():
    """Report the proxy's own health plus the backend's reachability."""
    try:
        backend_resp = await make_backend_request("/health")
        backend_status = "unhealthy" if backend_resp.status_code != 200 else "healthy"
        return {
            "status": "healthy",
            "backend_status": backend_status,
            "backend_url": BACKEND_URL,
            "hf_token_configured": bool(HF_TOKEN),
            "timestamp": datetime.now().isoformat()
        }
    except Exception as exc:
        # The proxy still answers, but reports the failure it hit.
        return {
            "status": "unhealthy",
            "error": str(exc),
            "backend_url": BACKEND_URL,
            "hf_token_configured": bool(HF_TOKEN),
            "timestamp": datetime.now().isoformat()
        }
|
|
|
|
|
|
|
|
|
|
|
|
|
|
@app.get("/models")
async def get_chat_models(token: str = Depends(verify_token)):
    """Return the list of chat models advertised by the backend."""
    backend_resp = await make_backend_request("/models")
    # Guard clause: surface backend failures as-is.
    if backend_resp.status_code != 200:
        raise HTTPException(status_code=backend_resp.status_code, detail="Failed to fetch models")
    return backend_resp.json()
|
|
|
|
|
@app.post("/chat/completions")
async def chat_completions(request: SimpleChatRequest, token: str = Depends(verify_token)):
    """Proxy an OpenAI-compatible chat completion request to the backend."""
    # Require at least one form of input: messages list or legacy prompt.
    has_input = bool(request.messages) or bool(request.prompt)
    if not has_input:
        raise HTTPException(status_code=400, detail="Either 'messages' or 'prompt' must be provided")

    payload = request.dict(exclude_none=True)

    backend_resp = await make_backend_request("/chat/completions", "POST", payload)
    if backend_resp.status_code != 200:
        raise HTTPException(status_code=backend_resp.status_code, detail=backend_resp.text)
    return backend_resp.json()
|
|
|
|
|
|
|
|
|
|
|
@app.post("/image/generate")
async def generate_image(request: GenerationRequest, token: str = Depends(verify_token)):
    """Proxy an image-generation request to the backend."""
    backend_resp = await make_backend_request("/image/generate", "POST", request.dict())
    if backend_resp.status_code != 200:
        raise HTTPException(status_code=backend_resp.status_code, detail=backend_resp.text)
    return backend_resp.json()
|
|
|
|
|
|
|
|
|
|
|
@app.post("/web/search")
async def web_search(request: SearchRequest, token: str = Depends(verify_token)):
    """Proxy a web-search request to the backend."""
    backend_resp = await make_backend_request("/web/search", "POST", request.dict())
    if backend_resp.status_code != 200:
        raise HTTPException(status_code=backend_resp.status_code, detail=backend_resp.text)
    return backend_resp.json()
|
|
|
|
|
@app.post("/image/search")
async def image_search(request: SearchRequest, token: str = Depends(verify_token)):
    """Proxy an image-search request to the backend."""
    backend_resp = await make_backend_request("/image/search", "POST", request.dict())
    if backend_resp.status_code != 200:
        raise HTTPException(status_code=backend_resp.status_code, detail=backend_resp.text)
    return backend_resp.json()
|
|
|
|
|
@app.post("/videos/search")
async def video_search(request: SearchRequest, token: str = Depends(verify_token)):
    """Proxy a video-search request to the backend."""
    backend_resp = await make_backend_request("/videos/search", "POST", request.dict())
    if backend_resp.status_code != 200:
        raise HTTPException(status_code=backend_resp.status_code, detail=backend_resp.text)
    return backend_resp.json()
|
|
|
|
|
|
|
|
|
|
|
@app.get("/tts/{provider}/voices")
async def get_tts_voices(provider: str, token: str = Depends(verify_token)):
    """Return the voices the backend offers for a given TTS provider."""
    backend_resp = await make_backend_request(f"/tts/{provider}/voices")
    if backend_resp.status_code != 200:
        raise HTTPException(status_code=backend_resp.status_code, detail=backend_resp.text)
    return backend_resp.json()
|
|
|
|
|
@app.post("/tts/generate")
async def generate_tts(request: TTSRequest, token: str = Depends(verify_token)):
    """Proxy a text-to-speech generation request to the backend."""
    backend_resp = await make_backend_request("/tts/generate", "POST", request.dict())
    if backend_resp.status_code != 200:
        raise HTTPException(status_code=backend_resp.status_code, detail=backend_resp.text)
    return backend_resp.json()
|
|
|
|
|
@app.get("/audio/{audio_id}")
async def serve_audio(audio_id: str, token: str = Depends(verify_token)):
    """
    Proxy audio file serving

    To access audio files, use: https://at41rv-a77.hf.space/v1/audio/{audio_id}
    Example: https://at41rv-a77.hf.space/v1/audio/GesseritTTS_Emma_1750428123_abc12345

    Raises:
        HTTPException: the backend's status code when the file is missing,
            or 404 for unexpected failures.
    """
    try:
        response = await make_backend_request(f"/audio/{audio_id}")
        if response.status_code == 200:
            # Forward the bytes with an inline disposition so browsers play
            # the audio rather than download it.
            return StreamingResponse(
                iter([response.content]),
                media_type="audio/mpeg",
                headers={"Content-Disposition": f"inline; filename={audio_id}.mp3"}
            )
        else:
            raise HTTPException(status_code=response.status_code, detail="Audio file not found")
    except HTTPException:
        # Bug fix: the generic handler below used to catch the HTTPException
        # raised above and collapse every backend status into a 404.
        raise
    except Exception as e:
        logger.error(f"Audio serving error: {str(e)}")
        raise HTTPException(status_code=404, detail="Audio file not found")
|
|
|
|
|
@app.get("/image/{image_id}")
async def serve_image(image_id: str, token: str = Depends(verify_token)):
    """
    Proxy image file serving

    To access image files, use: https://at41rv-a77.hf.space/v1/image/{image_id}
    Example: https://at41rv-a77.hf.space/v1/image/img_1750428123_abc12345

    Raises:
        HTTPException: the backend's status code when the file is missing,
            or 404 for unexpected failures.
    """
    try:
        response = await make_backend_request(f"/image/{image_id}")
        if response.status_code == 200:
            # Forward the bytes with an inline disposition so browsers render
            # the image rather than download it.
            return StreamingResponse(
                iter([response.content]),
                media_type="image/jpeg",
                headers={"Content-Disposition": f"inline; filename={image_id}.jpg"}
            )
        else:
            raise HTTPException(status_code=response.status_code, detail="Image file not found")
    except HTTPException:
        # Bug fix: the generic handler below used to catch the HTTPException
        # raised above and collapse every backend status into a 404.
        raise
    except Exception as e:
        logger.error(f"Image serving error: {str(e)}")
        raise HTTPException(status_code=404, detail="Image file not found")
|
|
|
|
|
@app.post("/transcribe")
async def transcribe_audio(audio_file: UploadFile = File(...), token: str = Depends(verify_token)):
    """Audio transcription proxy.

    Validates the uploaded file (non-empty, at most 50MB) and forwards it as
    multipart data to the backend /transcribe endpoint. Failures are reported
    as structured JSON ({"success": False, ...}) rather than HTTP errors.
    """
    try:
        logger.info(f"Proxy: Received transcription request for file: {audio_file.filename}")
        logger.info(f"Proxy: Content type: {audio_file.content_type}")

        file_content = await audio_file.read()
        logger.info(f"Proxy: File size: {len(file_content)} bytes")

        if len(file_content) == 0:
            return {
                "success": False,
                "message": "Empty file uploaded",
                "transcription": None,
                "filename": audio_file.filename,
                "file_size": 0
            }

        # Reject oversized uploads before spending backend bandwidth.
        max_size = 50 * 1024 * 1024
        if len(file_content) > max_size:
            return {
                "success": False,
                "message": "File too large. Maximum size is 50MB",
                "transcription": None,
                "filename": audio_file.filename,
                "file_size": len(file_content)
            }

        # Bug fix: reuse the bytes already read above instead of seeking and
        # reading the upload a second time — the duplicate read doubled the
        # memory traffic for no benefit.
        files = {
            "audio_file": (audio_file.filename or "audio.mp3", file_content, audio_file.content_type or "audio/mpeg")
        }

        logger.info("Proxy: Forwarding request to backend...")

        # Transcription can be slow; allow a longer timeout than the default.
        response = await make_backend_request("/transcribe", "POST", files=files, timeout=120.0)

        logger.info(f"Proxy: Backend responded with status: {response.status_code}")

        if response.status_code == 200:
            result = response.json()
            logger.info("Proxy: Successfully received transcription from backend")
            return result
        else:
            logger.error(f"Proxy: Backend error: {response.text}")
            return {
                "success": False,
                "message": f"Backend error: {response.status_code}",
                "transcription": None,
                "filename": audio_file.filename,
                "file_size": len(file_content)
            }

    except Exception as e:
        logger.error(f"Proxy: Transcription error: {str(e)}")
        return {
            "success": False,
            "message": f"Proxy error: {str(e)}",
            "transcription": None,
            "filename": audio_file.filename if audio_file else None,
            "file_size": None
        }
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
def get_user_id_from_request(request) -> str:
    """Derive a stable per-client identifier from the request's client IP.

    Falls back to "unknown" when the request carries no client address.
    """
    host = getattr(request.client, 'host', 'unknown')
    return "user_{}".format(host)
|
|
|
|
|
@app.post("/generate-api-key", response_model=APIKeyResponse)
async def create_api_key(request: APIKeyCreateRequest, http_request: Request):
    """
    Generate a new named API key for accessing the OMNIAPI services.

    Creates a permanent, named API key (prefixed 'oa') usable against every
    endpoint. The requester is identified by client IP, and key creation is
    delegated to the backend service.

    - **name**: Custom name for your API key (1-50 characters)

    Returns the backend's response: the new key, its name, timestamps, and
    usage instructions. Include the key in the Authorization header as
    `Bearer your_api_key_here`.
    """
    try:
        requester_id = get_user_id_from_request(http_request)
        payload = {"name": request.name, "user_id": requester_id}

        backend_resp = await make_backend_request("/generate-api-key", "POST", payload)
        if backend_resp.status_code != 200:
            raise HTTPException(status_code=backend_resp.status_code, detail=backend_resp.text)
        return backend_resp.json()

    except HTTPException:
        raise
    except Exception as e:
        raise HTTPException(
            status_code=500,
            detail=f"Failed to generate API key: {str(e)}"
        )
|
|
|
|
|
@app.get("/api-keys/list", response_model=UserAPIKeysResponse)
async def list_user_api_keys(http_request: Request, token: str = Depends(verify_token)):
    """
    List all API keys for the current authenticated user

    This endpoint shows all API keys created by the current user, including:
    - Key name and masked API key (for security)
    - Creation date
    - Last used date
    - Usage count

    Requires authentication - users can only view their own keys
    """
    try:
        logger.info(f"Proxy: Authenticated API key list request")

        # Forward the caller's own bearer token (not HF_TOKEN) so the backend
        # scopes the listing to this user.
        headers = {
            "Authorization": f"Bearer {token}",
            "User-Agent": "OMNIAPI-Proxy/1.0",
            "Content-Type": "application/json"
        }

        async with httpx.AsyncClient(timeout=REQUEST_TIMEOUT) as client:
            response = await client.get(
                f"{BACKEND_URL}/api-keys/list",
                headers=headers
            )

        logger.info(f"Proxy: Backend responded with status {response.status_code}")

        if response.status_code == 200:
            result = response.json()
            logger.info(f"Proxy: Found {result.get('total_keys', 0)} API keys for authenticated user")
            return result
        else:
            logger.error(f"Proxy: Backend error: {response.text}")
            # Prefer the backend's structured detail; fall back to raw text.
            try:
                error_data = response.json()
                detail = error_data.get("detail", response.text)
            except Exception:
                # Bug fix: was a bare `except:`, which also swallows
                # SystemExit/KeyboardInterrupt.
                detail = response.text
            raise HTTPException(status_code=response.status_code, detail=detail)

    except HTTPException:
        raise
    except Exception as e:
        logger.error(f"Proxy: Failed to list API keys: {str(e)}")
        raise HTTPException(
            status_code=500,
            detail=f"Failed to list API keys: {str(e)}"
        )
|
|
|
|
|
@app.delete("/api-keys/{api_key}", response_model=APIKeyDeleteResponse)
async def delete_user_api_key(api_key: str, http_request: Request, token: str = Depends(verify_token)):
    """
    Delete a specific API key

    This endpoint allows users to delete their own API keys.
    Only the user who created the key can delete it.

    - **api_key**: The full API key to delete
    """
    try:
        user_id = get_user_id_from_request(http_request)
        # Log only a prefix of the key to avoid leaking it in full.
        logger.info(f"Proxy: Delete request for API key {api_key[:10]}... from user {user_id}")

        # Forward the caller's own bearer token so the backend enforces ownership.
        headers = {
            "Authorization": f"Bearer {token}",
            "User-Agent": "OMNIAPI-Proxy/1.0",
            "Content-Type": "application/json"
        }

        async with httpx.AsyncClient(timeout=REQUEST_TIMEOUT) as client:
            response = await client.delete(
                f"{BACKEND_URL}/api-keys/{api_key}",
                headers=headers
            )

        logger.info(f"Proxy: Backend responded with status {response.status_code}")

        if response.status_code == 200:
            result = response.json()
            logger.info("Proxy: API key deleted successfully")
            return result
        else:
            logger.error(f"Proxy: Backend error: {response.text}")
            # Prefer the backend's structured detail; fall back to raw text.
            try:
                error_data = response.json()
                detail = error_data.get("detail", response.text)
            except Exception:
                # Bug fix: was a bare `except:`, which also swallows
                # SystemExit/KeyboardInterrupt.
                detail = response.text
            raise HTTPException(status_code=response.status_code, detail=detail)

    except HTTPException:
        raise
    except Exception as e:
        logger.error(f"Proxy: Failed to delete API key: {str(e)}")
        raise HTTPException(
            status_code=500,
            detail=f"Failed to delete API key: {str(e)}"
        )
|
|
|
|
|
if __name__ == "__main__":
    # Run the proxy directly with uvicorn when executed as a script
    # (in production the app object is typically served by an ASGI host).
    import uvicorn
    uvicorn.run(app, host="0.0.0.0", port=7862)
|
|
|