GitHub Copilot committed on
Commit
8cb4c46
·
1 Parent(s): aeb7110

Arch: Add Protocol 4 (Autonomous Resource Integration) - connectors.py, AGENTS.md, .env.template

Browse files
Files changed (2) hide show
  1. .env.template +15 -0
  2. logos/connectors.py +272 -0
.env.template ADDED
@@ -0,0 +1,15 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Environment Configuration Template
2
+ # Copy to .env and fill in values
3
+
4
+ # Hugging Face API Token (optional, for private models)
5
+ # Get from: https://huggingface.co/settings/tokens
6
+ HF_TOKEN=
7
+
8
+ # Hugging Face Space ID (for deployment)
9
+ HF_SPACE_ID=ANXLOG/LOGOS-SPCW-Matroska
10
+
11
+ # LOGOS Debug Mode (set to 1 for verbose logging)
12
+ LOGOS_DEBUG=0
13
+
14
+ # Number of parallel workers for DSP operations
15
+ NUM_WORKERS=16
logos/connectors.py ADDED
@@ -0,0 +1,272 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """
2
+ connectors.py - External API/Service Adapters
3
+ Protocol 4: Autonomous Resource Integration
4
+
5
+ This module isolates all external dependencies so the core engine remains pure.
6
+ Each connector wraps an external API/library with a standardized interface.
7
+ """
8
+
9
+ import os
10
+ from typing import Optional, Dict, Any, List
11
+ from dataclasses import dataclass
12
+
13
+
14
+ # ==========================================
15
+ # CONFIGURATION
16
+ # ==========================================
17
+
18
@dataclass
class ConnectorConfig:
    """Configuration shared by the external connectors.

    Attributes are optional; when absent, connectors fall back to
    unauthenticated / default behavior.
    """
    hf_token: Optional[str] = None
    hf_space_id: Optional[str] = None

    @classmethod
    def from_env(cls) -> 'ConnectorConfig':
        """Build a config from the HF_TOKEN / HF_SPACE_ID environment variables."""
        env = os.environ
        return cls(
            hf_token=env.get('HF_TOKEN'),
            hf_space_id=env.get('HF_SPACE_ID'),
        )
31
+
32
+
33
+ # ==========================================
34
+ # HUGGING FACE CONNECTOR
35
+ # ==========================================
36
+
37
class HuggingFaceConnector:
    """
    Adapter for Hugging Face Hub and Inference API.

    Wraps huggingface_hub's InferenceClient so the core engine never imports
    the library directly.  The client is created lazily, so constructing this
    object is cheap and does not require huggingface_hub to be installed
    until an inference call is made.
    """

    def __init__(self, config: Optional[ConnectorConfig] = None):
        """
        Args:
            config: Connector configuration.  When omitted, values are read
                from the environment (HF_TOKEN / HF_SPACE_ID).
        """
        self.config = config or ConnectorConfig.from_env()
        self._client = None  # lazily-created InferenceClient

    def _ensure_client(self):
        """
        Lazy initialization of the HF client.

        Returns:
            The cached huggingface_hub.InferenceClient instance.

        Raises:
            ImportError: If huggingface_hub is not installed.
        """
        if self._client is None:
            # Keep the try body minimal: only the import should raise the
            # ImportError we translate; client construction errors must
            # propagate as-is instead of being masked.
            try:
                from huggingface_hub import InferenceClient
            except ImportError as exc:
                raise ImportError(
                    "huggingface_hub not installed. Run: pip install huggingface_hub"
                ) from exc
            self._client = InferenceClient(token=self.config.hf_token)
        return self._client

    def image_to_text(self, image_path: str, model: str = "Salesforce/blip-image-captioning-base") -> str:
        """
        Generate text description from image using HF Inference API.

        Args:
            image_path: Path to image file
            model: HF model ID for image captioning

        Returns:
            Generated text description

        Raises:
            ImportError: If huggingface_hub is not installed.
            OSError: If image_path cannot be opened or read.
        """
        client = self._ensure_client()
        with open(image_path, 'rb') as f:
            result = client.image_to_text(f.read(), model=model)
        return result

    def text_generation(self, prompt: str, model: str = "gpt2", max_length: int = 100) -> str:
        """
        Generate text from prompt using HF Inference API.

        Args:
            prompt: Input text prompt
            model: HF model ID for text generation
            max_length: Maximum number of newly generated tokens
                (forwarded to the API as max_new_tokens).

        Returns:
            Generated text

        Raises:
            ImportError: If huggingface_hub is not installed.
        """
        client = self._ensure_client()
        return client.text_generation(prompt, model=model, max_new_tokens=max_length)
88
+
89
+
90
+ # ==========================================
91
+ # OCR CONNECTOR
92
+ # ==========================================
93
+
94
class OCRConnector:
    """
    Adapter for EasyOCR.

    Provides text extraction from images.  The underlying easyocr.Reader is
    created lazily so that constructing this connector never requires
    easyocr to be installed.
    """

    def __init__(self, languages: Optional[List[str]] = None, gpu: bool = False):
        """
        Args:
            languages: Language codes for OCR (defaults to ['en']).
            gpu: Whether EasyOCR should use a GPU.
        """
        self.languages = languages or ['en']
        self.gpu = gpu
        self._reader = None  # lazily-created easyocr.Reader

    def _ensure_reader(self):
        """
        Lazy initialization of the EasyOCR reader.

        Returns:
            The cached easyocr.Reader instance.

        Raises:
            ImportError: If easyocr is not installed.
        """
        if self._reader is None:
            # Keep the try body minimal: only the import should raise the
            # ImportError we translate; Reader construction errors propagate.
            try:
                import easyocr
            except ImportError as exc:
                raise ImportError("easyocr not installed. Run: pip install easyocr") from exc
            self._reader = easyocr.Reader(self.languages, gpu=self.gpu)
        return self._reader

    def extract_text(self, image_path: str) -> Dict[str, Any]:
        """
        Extract text from image.

        Args:
            image_path: Path to image file

        Returns:
            Dict with:
                text_blocks: list of {text, confidence, bbox} dicts,
                    one per detected region.
                full_text: all detected text joined with single spaces.
                word_count: number of whitespace-separated words in full_text.

        Raises:
            ImportError: If easyocr is not installed.
        """
        reader = self._ensure_reader()
        results = reader.readtext(image_path)

        text_blocks = [
            {"text": text, "confidence": conf, "bbox": bbox}
            for bbox, text, conf in results
        ]
        # Reuse the already-unpacked blocks instead of re-indexing raw tuples.
        full_text = " ".join(block["text"] for block in text_blocks)

        return {
            "text_blocks": text_blocks,
            "full_text": full_text,
            "word_count": len(full_text.split())
        }
139
+
140
+
141
+ # ==========================================
142
+ # VISION CONNECTOR (Future: Multi-modal)
143
+ # ==========================================
144
+
145
class VisionConnector:
    """
    Adapter for computer vision operations.

    Wraps OpenCV and scikit-image.  All third-party imports happen inside
    the methods so this module stays importable without them.
    """

    @staticmethod
    def calculate_ssim(image1_path: str, image2_path: str) -> float:
        """
        Calculate Structural Similarity Index between two images.
        Uses scikit-image for accurate SSIM calculation.

        Args:
            image1_path: Path to first image
            image2_path: Path to second image

        Returns:
            SSIM score (0-1, higher is better)

        Raises:
            ImportError: If OpenCV or scikit-image is not installed.
            FileNotFoundError: If either image cannot be read.
        """
        try:
            import cv2
            from skimage.metrics import structural_similarity as ssim
        except ImportError as e:
            raise ImportError(f"Required library not installed: {e}") from e

        img1 = cv2.imread(image1_path)
        img2 = cv2.imread(image2_path)
        # cv2.imread returns None (no exception) for a missing/unreadable
        # file; fail loudly rather than with a confusing AttributeError below.
        if img1 is None or img2 is None:
            raise FileNotFoundError(
                f"Could not read image(s): {image1_path}, {image2_path}"
            )

        # Resize second image so shapes match (SSIM requires equal shapes).
        if img1.shape != img2.shape:
            img2 = cv2.resize(img2, (img1.shape[1], img1.shape[0]))

        # Convert to grayscale for SSIM
        gray1 = cv2.cvtColor(img1, cv2.COLOR_BGR2GRAY)
        gray2 = cv2.cvtColor(img2, cv2.COLOR_BGR2GRAY)

        # skimage returns a numpy scalar; the annotation promises a plain float.
        return float(ssim(gray1, gray2))

    @staticmethod
    def analyze_entropy(image_path: str) -> Dict[str, float]:
        """
        Analyze image entropy (information density).

        Args:
            image_path: Path to image file

        Returns:
            Dict with entropy metrics: shannon_entropy, histogram_entropy,
            mean_intensity, std_intensity (all plain floats).

        Raises:
            ImportError: If OpenCV, NumPy, or scikit-image is not installed.
            FileNotFoundError: If the image cannot be read.
        """
        try:
            import cv2
            import numpy as np
            from skimage.measure import shannon_entropy
        except ImportError as e:
            raise ImportError(f"Required library not installed: {e}") from e

        img = cv2.imread(image_path)
        # See calculate_ssim: imread signals failure by returning None.
        if img is None:
            raise FileNotFoundError(f"Could not read image: {image_path}")
        gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)

        # Shannon entropy of the grayscale pixel values.
        entropy = shannon_entropy(gray)

        # Entropy of the normalized 256-bin intensity histogram.
        hist = cv2.calcHist([gray], [0], None, [256], [0, 256])
        hist = hist.flatten() / hist.sum()
        hist_entropy = -np.sum(hist[hist > 0] * np.log2(hist[hist > 0]))

        # Cast numpy scalars so the returned dict holds plain Python floats.
        return {
            "shannon_entropy": float(entropy),
            "histogram_entropy": float(hist_entropy),
            "mean_intensity": float(np.mean(gray)),
            "std_intensity": float(np.std(gray))
        }
220
+
221
+
222
+ # ==========================================
223
+ # FACTORY
224
+ # ==========================================
225
+
226
def get_connector(connector_type: str, **kwargs) -> Any:
    """
    Factory function for connectors.

    Args:
        connector_type: One of 'hf', 'ocr', 'vision'
        **kwargs: Connector-specific arguments

    Returns:
        Initialized connector instance

    Raises:
        ValueError: If connector_type is not a recognized key.
    """
    registry = {
        'hf': HuggingFaceConnector,
        'ocr': OCRConnector,
        'vision': VisionConnector,
    }

    # Guard clause: reject unknown types with the full list of options.
    if connector_type not in registry:
        raise ValueError(f"Unknown connector type: {connector_type}. Available: {list(registry.keys())}")

    factory = registry[connector_type]
    return factory(**kwargs)
247
+
248
+
249
+ # ==========================================
250
+ # REGISTRY (For Protocol 4 Discovery)
251
+ # ==========================================
252
+
253
# Static registry describing every connector this module exposes, so
# Protocol 4 discovery can advertise capabilities without importing any
# optional third-party dependency.  Keys match the `connector_type` values
# accepted by get_connector().  Per-entry fields:
#   name         - human-readable connector name
#   capabilities - public method names the connector provides
#   requires     - pip package(s) that must be installed before use
#   env_vars     - environment variables the connector reads (may be empty)
AVAILABLE_CONNECTORS = {
    'hf': {
        'name': 'Hugging Face',
        'capabilities': ['image_to_text', 'text_generation'],
        'requires': ['huggingface_hub'],
        'env_vars': ['HF_TOKEN']
    },
    'ocr': {
        'name': 'EasyOCR',
        'capabilities': ['extract_text'],
        'requires': ['easyocr'],
        'env_vars': []
    },
    'vision': {
        'name': 'Vision (OpenCV/scikit-image)',
        'capabilities': ['calculate_ssim', 'analyze_entropy'],
        'requires': ['opencv-python-headless', 'scikit-image'],
        'env_vars': []
    }
}