Spaces:
Sleeping
Sleeping
| """ | |
| Example Enhanced Plugin Implementation | |
| Shows how to create a backend plugin that works with: | |
| - Local deployment (running in project) | |
| - Network deployment (running on LAN) | |
| - Cloud deployment (commercial API) | |
| This is an example for Gemini, but pattern applies to any backend. | |
| """ | |
| import sys | |
| from pathlib import Path | |
| from typing import Any | |
| from .enhanced_base_plugin import EnhancedBackendPlugin | |
| from .backend_config import BackendConnectionConfig | |
class EnhancedGeminiPlugin(EnhancedBackendPlugin):
    """
    Gemini plugin that supports local/network/cloud deployment.

    Usage examples:

        # 1. Cloud (commercial API):
        config = BackendConnectionConfig(
            name='gemini_cloud',
            backend_type='gemini',
            location=BackendLocation.CLOUD,
            protocol=BackendProtocol.HTTP,
            endpoint='https://generativelanguage.googleapis.com/v1',
            api_key='YOUR_API_KEY'
        )

        # 2. Network (self-hosted on LAN):
        config = BackendConnectionConfig(
            name='gemini_lan',
            backend_type='gemini',
            location=BackendLocation.NETWORK,
            protocol=BackendProtocol.HTTP,
            endpoint='http://192.168.1.100:8000'
        )

        # 3. Local (running in project):
        config = BackendConnectionConfig(
            name='gemini_local',
            backend_type='gemini',
            location=BackendLocation.LOCAL,
            protocol=BackendProtocol.PYTHON
        )
    """

    def _initialize_local(self) -> None:
        """Initialize the local Gemini backend.

        This is the ONLY place backend code is imported.

        Raises:
            RuntimeError: chained from ImportError when the in-project
                Gemini client cannot be imported.
            ValueError: when settings hold no API key.
        """
        try:
            # Make the sibling project importable. Guard the insert so
            # repeated initializations don't keep prepending duplicates.
            parent = Path(__file__).parent.parent.parent
            backend_root = str(parent / 'character_forge_image')
            if backend_root not in sys.path:
                sys.path.insert(0, backend_root)

            from core.gemini_client import GeminiClient
            from config.settings import Settings

            # Get API key from settings
            settings = Settings()
            api_key = settings.get_api_key()
            if not api_key:
                raise ValueError("Gemini API key not found")

            # Initialize client
            self._client = GeminiClient(api_key)
        except ImportError as e:
            # Chain the cause so the original import failure stays visible
            # in the traceback instead of being flattened into a string.
            raise RuntimeError(f"Failed to import local Gemini client: {e}") from e

    def _initialize_network(self) -> None:
        """Initialize the network Gemini backend.

        Network generation goes over plain HTTP, so no backend imports
        are needed; just record where requests should be sent.
        """
        self._client = {
            'type': 'network',
            'endpoint': self.config.endpoint
        }

    def _initialize_cloud(self) -> None:
        """Initialize the cloud Gemini backend.

        Prefers the official google-genai SDK when installed; otherwise
        falls back to a plain HTTP client descriptor.
        """
        try:
            from google import genai
            self._client = genai.Client(api_key=self.config.api_key)
        except ImportError:
            # SDK not installed -- fall back to HTTP client
            self._client = {
                'type': 'cloud',
                'endpoint': self.config.endpoint,
                'api_key': self.config.api_key
            }

    def _generate_local(self, backend_request: dict) -> Any:
        """Generate using the local Gemini client.

        Rebuilds a GenerationRequest from the plain dict and forwards it
        to the imported client. Assumes _initialize_local() ran first so
        the project package is on sys.path -- TODO confirm callers
        guarantee that ordering.
        """
        from models.generation_request import GenerationRequest

        # Convert dict back to GenerationRequest
        request = GenerationRequest(
            prompt=backend_request['prompt'],
            aspect_ratio=backend_request['aspect_ratio'],
            number_of_images=backend_request['number_of_images'],
            safety_filter_level=backend_request['safety_filter_level'],
            person_generation=backend_request['person_generation']
        )
        return self._client.generate(request)

    def _generate_network(self, backend_request: dict) -> Any:
        """Generate by POSTing the request dict to the network endpoint."""
        return self._send_http_request(
            endpoint='/generate',
            data=backend_request,
            method='POST'
        )

    def _generate_cloud(self, backend_request: dict) -> Any:
        """Generate using the cloud Gemini API (SDK when present, else HTTP)."""
        if hasattr(self._client, 'models'):
            # Official SDK path.
            return self._client.models.generate_images(**backend_request)
        # HTTP fallback path.
        return self._send_http_request(
            endpoint='/models/gemini-2.5-flash-image:generate',
            data=backend_request,
            method='POST'
        )
# Same pattern for other backends:
class EnhancedOmniGen2Plugin(EnhancedBackendPlugin):
    """OmniGen2 plugin supporting local/network/cloud.

    Every hook below is a declared-but-unimplemented placeholder: each
    deployment mode is stubbed out, so all methods are no-ops that
    return None.
    """

    def _initialize_local(self) -> None:
        """Local: would import the OmniGen2 client from the project (not yet implemented)."""

    def _initialize_network(self) -> None:
        """Network: would set up an HTTP client for an OmniGen2 server on the LAN (not yet implemented)."""

    def _initialize_cloud(self) -> None:
        """Cloud: would configure a client for a hosted OmniGen2 API (not yet implemented)."""

    def _generate_local(self, backend_request: dict) -> Any:
        """Placeholder for generation via the in-project OmniGen2 client."""

    def _generate_network(self, backend_request: dict) -> Any:
        """Placeholder for generation via an OmniGen2 server on the LAN."""

    def _generate_cloud(self, backend_request: dict) -> Any:
        """Placeholder for generation via a hosted OmniGen2 API."""
class EnhancedComfyUIPlugin(EnhancedBackendPlugin):
    """ComfyUI plugin supporting local/network/cloud.

    ComfyUI is always driven over HTTP, even when it runs on the same
    machine, so local mode reuses the network implementation with a
    default loopback endpoint.
    """

    def _initialize_local(self) -> None:
        """Local: connect to a ComfyUI instance on this machine.

        Fix: only fall back to the default loopback endpoint when no
        endpoint was configured. The previous version unconditionally
        overwrote a user-supplied local endpoint (e.g. a non-default
        port).
        """
        if not self.config.endpoint:
            self.config.endpoint = 'http://127.0.0.1:8188'
        self._initialize_network()

    def _initialize_network(self) -> None:
        """Network: record the LAN endpoint for HTTP access."""
        self._client = {
            'type': 'network',
            'endpoint': self.config.endpoint
        }

    def _initialize_cloud(self) -> None:
        """Cloud: record the endpoint plus API key for the hosted service."""
        self._client = {
            'type': 'cloud',
            'endpoint': self.config.endpoint,
            'api_key': self.config.api_key
        }

    def _generate_local(self, backend_request: dict) -> Any:
        """Generate with local ComfyUI (same HTTP path as network)."""
        return self._generate_network(backend_request)

    def _generate_network(self, backend_request: dict) -> Any:
        """Queue the workflow on ComfyUI's /prompt endpoint and return the response."""
        return self._send_http_request(
            endpoint='/prompt',
            data={'prompt': backend_request},
            method='POST'
        )

    def _generate_cloud(self, backend_request: dict) -> Any:
        """Generate with hosted ComfyUI (same HTTP path as network)."""
        return self._generate_network(backend_request)