"""
Example Enhanced Plugin Implementation
Shows how to create a backend plugin that works with:
- Local deployment (running in project)
- Network deployment (running on LAN)
- Cloud deployment (commercial API)
This is an example for Gemini, but pattern applies to any backend.
"""
import sys
from pathlib import Path
from typing import Any
from .enhanced_base_plugin import EnhancedBackendPlugin
from .backend_config import BackendConnectionConfig
class EnhancedGeminiPlugin(EnhancedBackendPlugin):
    """
    Gemini plugin that supports local/network/cloud deployment.

    Usage examples:

    # 1. Cloud (commercial API):
    config = BackendConnectionConfig(
        name='gemini_cloud',
        backend_type='gemini',
        location=BackendLocation.CLOUD,
        protocol=BackendProtocol.HTTP,
        endpoint='https://generativelanguage.googleapis.com/v1',
        api_key='YOUR_API_KEY'
    )

    # 2. Network (self-hosted on LAN):
    config = BackendConnectionConfig(
        name='gemini_lan',
        backend_type='gemini',
        location=BackendLocation.NETWORK,
        protocol=BackendProtocol.HTTP,
        endpoint='http://192.168.1.100:8000'
    )

    # 3. Local (running in project):
    config = BackendConnectionConfig(
        name='gemini_local',
        backend_type='gemini',
        location=BackendLocation.LOCAL,
        protocol=BackendProtocol.PYTHON
    )
    """

    def _initialize_local(self) -> None:
        """Initialize the local Gemini backend.

        Imports the in-project client — this is the ONLY place backend
        code is imported — and stores it on ``self._client``.

        Raises:
            RuntimeError: If the local Gemini client cannot be imported.
            ValueError: If no Gemini API key is found in Settings.
        """
        try:
            # Make the sibling project importable. Guard against duplicate
            # sys.path entries when the plugin is initialized more than once.
            parent = Path(__file__).parent.parent.parent
            local_pkg = str(parent / 'character_forge_image')
            if local_pkg not in sys.path:
                sys.path.insert(0, local_pkg)
            from core.gemini_client import GeminiClient
            from config.settings import Settings

            # Get API key from settings
            settings = Settings()
            api_key = settings.get_api_key()
            if not api_key:
                raise ValueError("Gemini API key not found")
            # Initialize client
            self._client = GeminiClient(api_key)
        except ImportError as e:
            # Chain the original ImportError so the root cause survives.
            raise RuntimeError(f"Failed to import local Gemini client: {e}") from e

    def _initialize_network(self) -> None:
        """Initialize the network Gemini backend.

        No backend imports needed — requests go over HTTP, so the client is
        just a descriptor holding the configured endpoint.
        """
        self._client = {
            'type': 'network',
            'endpoint': self.config.endpoint
        }

    def _initialize_cloud(self) -> None:
        """Initialize the cloud Gemini backend.

        Prefers the official SDK when installed; otherwise falls back to a
        plain HTTP descriptor (same shape as the network client).
        """
        try:
            # Option 1: Use official SDK (if available)
            from google import genai
            self._client = genai.Client(api_key=self.config.api_key)
        except ImportError:
            # Option 2: Fall back to HTTP client
            self._client = {
                'type': 'cloud',
                'endpoint': self.config.endpoint,
                'api_key': self.config.api_key
            }

    def _generate_local(self, backend_request: dict) -> Any:
        """Generate using the local Gemini client.

        Args:
            backend_request: Dict form of a generation request; must contain
                'prompt', 'aspect_ratio', 'number_of_images',
                'safety_filter_level' and 'person_generation'.

        Returns:
            Whatever the local client's ``generate`` returns.
        """
        from models.generation_request import GenerationRequest

        # Rehydrate the dict into the project's request type.
        request = GenerationRequest(
            prompt=backend_request['prompt'],
            aspect_ratio=backend_request['aspect_ratio'],
            number_of_images=backend_request['number_of_images'],
            safety_filter_level=backend_request['safety_filter_level'],
            person_generation=backend_request['person_generation']
        )
        return self._client.generate(request)

    def _generate_network(self, backend_request: dict) -> Any:
        """Generate by POSTing the request to the LAN Gemini endpoint."""
        response = self._send_http_request(
            endpoint='/generate',
            data=backend_request,
            method='POST'
        )
        return response

    def _generate_cloud(self, backend_request: dict) -> Any:
        """Generate using the cloud Gemini API (SDK if present, else HTTP)."""
        # The SDK client exposes `.models`; the HTTP fallback is a plain dict.
        if hasattr(self._client, 'models'):
            return self._client.models.generate_images(**backend_request)
        else:
            response = self._send_http_request(
                endpoint='/models/gemini-2.5-flash-image:generate',
                data=backend_request,
                method='POST'
            )
            return response
# Same pattern for other backends:
class EnhancedOmniGen2Plugin(EnhancedBackendPlugin):
    """OmniGen2 plugin supporting local/network/cloud.

    Skeleton implementation: every hook is currently a no-op placeholder
    following the same deployment pattern as the Gemini plugin.
    """

    def _initialize_local(self) -> None:
        """Local: import OmniGen2 from the project."""
        # TODO: import and construct the in-project OmniGen2 client here.
        pass

    def _initialize_network(self) -> None:
        """Network: connect to an OmniGen2 server on the LAN."""
        # TODO: set up the HTTP client for a LAN-hosted OmniGen2.
        pass

    def _initialize_cloud(self) -> None:
        """Cloud: use a hosted OmniGen2 API."""
        # TODO: set up the cloud API client.
        pass

    def _generate_local(self, backend_request: dict) -> Any:
        """Generate with the local OmniGen2 backend."""
        # TODO: delegate to the local client.
        pass

    def _generate_network(self, backend_request: dict) -> Any:
        """Generate with the network OmniGen2 backend."""
        # TODO: POST the request to the LAN endpoint.
        pass

    def _generate_cloud(self, backend_request: dict) -> Any:
        """Generate with the cloud OmniGen2 backend."""
        # TODO: call the hosted API.
        pass
class EnhancedComfyUIPlugin(EnhancedBackendPlugin):
    """ComfyUI plugin supporting local/network/cloud.

    ComfyUI speaks HTTP in every deployment mode, so the local and cloud
    paths are thin wrappers around the network machinery.
    """

    def _initialize_local(self) -> None:
        """Local: connect to a ComfyUI instance on this machine.

        Even a local ComfyUI is reached over HTTP, so we point the network
        initializer at the loopback endpoint and reuse it.
        """
        self.config.endpoint = 'http://127.0.0.1:8188'
        self._initialize_network()

    def _initialize_network(self) -> None:
        """Network: connect to ComfyUI on the LAN."""
        descriptor = dict(type='network', endpoint=self.config.endpoint)
        self._client = descriptor

    def _initialize_cloud(self) -> None:
        """Cloud: use a hosted ComfyUI service."""
        descriptor = dict(
            type='cloud',
            endpoint=self.config.endpoint,
            api_key=self.config.api_key,
        )
        self._client = descriptor

    def _generate_local(self, backend_request: dict) -> Any:
        """Generate with the local ComfyUI instance (HTTP under the hood)."""
        return self._generate_network(backend_request)

    def _generate_network(self, backend_request: dict) -> Any:
        """Generate with a networked ComfyUI by queueing a workflow."""
        payload = {'prompt': backend_request}
        return self._send_http_request(
            endpoint='/prompt',
            data=payload,
            method='POST',
        )

    def _generate_cloud(self, backend_request: dict) -> Any:
        """Generate with a hosted ComfyUI service (same HTTP path)."""
        return self._generate_network(backend_request)