"""
FastAPI 依赖项模块
"""
import logging
from asyncio import Queue, Lock, Event
from typing import Dict, Any, List, Set
from fastapi import Request
def get_logger() -> logging.Logger:
    """Return the application-wide logger owned by the ``server`` module.

    Imported lazily inside the function so the value reflects the server
    module's state at call time.
    """
    import server
    return server.logger
def get_log_ws_manager():
    """Return the log WebSocket manager owned by the ``server`` module."""
    import server
    return server.log_ws_manager
def get_request_queue() -> Queue:
    """Return the shared asyncio request queue from the ``server`` module."""
    import server
    return server.request_queue
def get_processing_lock() -> Lock:
    """Return the shared asyncio processing lock from the ``server`` module."""
    import server
    return server.processing_lock
def get_worker_task():
    """Return the background worker task object owned by the ``server`` module."""
    import server
    return server.worker_task
def get_server_state() -> Dict[str, Any]:
    """Snapshot the server's readiness flags as a plain dict.

    Keys: ``is_initializing``, ``is_playwright_ready``,
    ``is_browser_connected``, ``is_page_ready`` — each read lazily from
    the ``server`` module at call time.
    """
    import server
    flag_names = (
        "is_initializing",
        "is_playwright_ready",
        "is_browser_connected",
        "is_page_ready",
    )
    return {name: getattr(server, name) for name in flag_names}
def get_page_instance():
    """Return the browser page instance owned by the ``server`` module."""
    import server
    return server.page_instance
def get_model_list_fetch_event() -> Event:
    """Return the asyncio event signalling model-list fetch completion."""
    import server
    return server.model_list_fetch_event
def get_parsed_model_list() -> List[Dict[str, Any]]:
    """Return the parsed model list held by the ``server`` module."""
    import server
    return server.parsed_model_list
def get_excluded_model_ids() -> Set[str]:
    """Return the set of model ids excluded from use, per the ``server`` module."""
    import server
    return server.excluded_model_ids
def get_current_ai_studio_model_id() -> str:
    """Return the currently selected AI Studio model id.

    Imported lazily from ``server`` so the value reflects the module's
    state at call time.

    NOTE(review): the ``-> str`` annotation assumes the id is always set;
    confirm ``server.current_ai_studio_model_id`` cannot be ``None``.
    """
    # Fix: removed a stray trailing " |" extraction artifact that made
    # the return statement a SyntaxError.
    from server import current_ai_studio_model_id
    return current_ai_studio_model_id