rairo committed on
Commit
124ae0b
·
verified ·
1 Parent(s): 23638cc

Update main.py

Browse files
Files changed (1) hide show
  1. main.py +61 -110
main.py CHANGED
@@ -60,7 +60,7 @@ except Exception as e:
60
  bucket = storage.bucket()
61
  db_ref = db.reference()
62
 
63
- # --- Google GenAI Client Initialization (Gemini 3.0 Ecosystem) ---
64
  try:
65
  logger.info("Initializing Google GenAI Client...")
66
  api_key = os.environ.get("Gemini")
@@ -79,10 +79,9 @@ ATHENA_PRO = "gemini-3-pro-image-preview"
79
 
80
  # Grounding / External API
81
  WOLFRAM_APP_ID = os.environ.get("WOLFRAM_APP_ID")
82
- OPENALEX_MAILTO = os.environ.get("OPENALEX_MAILTO", "rairo@sozofix.tech")
83
 
84
  # -----------------------------------------------------------------------------
85
- # 2. HELPER FUNCTIONS & AUTH
86
  # -----------------------------------------------------------------------------
87
 
88
  def verify_token(auth_header):
@@ -111,7 +110,7 @@ def upload_to_storage(data_bytes, destination_blob_name, content_type):
111
  blob.make_public()
112
  return blob.public_url
113
  except Exception as e:
114
- logger.error(f"Storage Upload Error: {e}")
115
  return None
116
 
117
  def query_wolfram_alpha(query):
@@ -123,55 +122,61 @@ def query_wolfram_alpha(query):
123
  except: return "Grounding in progress."
124
 
125
  # -----------------------------------------------------------------------------
126
- # 3. TITANESS MEDIA ENGINE (PARALLEL BLUEPRINTS & NARRATION)
127
  # -----------------------------------------------------------------------------
128
 
129
  def generate_narration_task(text, uid, epiphany_id, layer_name):
130
- """Async task for Deepgram Athena Voice generation."""
131
  if not text: return layer_name, None
132
  try:
133
- logger.info(f"Thread started: Narration for {layer_name}")
134
  api_key = os.environ.get("DEEPGRAM_API_KEY")
135
  if not api_key: return layer_name, None
136
-
137
  DEEPGRAM_URL = "https://api.deepgram.com/v1/speak?model=aura-luna-en"
138
  headers = {"Authorization": f"Token {api_key}", "Content-Type": "text/plain"}
139
  response = requests.post(DEEPGRAM_URL, headers=headers, data=text.encode('utf-8'))
140
  response.raise_for_status()
141
-
142
  path = f"users/{uid}/epiphanies/{epiphany_id}/audio/{layer_name}.mp3"
143
- url = upload_to_storage(response.content, path, 'audio/mpeg')
144
- return layer_name, url
145
  except Exception as e:
146
- logger.error(f"Narration Task Error [{layer_name}]: {e}")
147
  return layer_name, None
148
 
149
- def generate_blueprint_task(subject, text, uid, epiphany_id, layer_name):
150
- """Async task for Nano Banana Pro technical blueprint generation."""
151
- if not text: return layer_name, None
152
  try:
153
- logger.info(f"Thread started: Nano Banana Blueprint for {layer_name}")
 
 
 
 
 
 
 
 
154
  prompt = (
155
- f"Generate a technical 4K blueprint infographic of {subject} - {layer_name}. "
156
- f"Context: {text}. Style: White-line schematic on midnight navy background. 4K quality."
 
 
 
157
  )
 
158
  response = client.models.generate_content(
159
  model=ATHENA_PRO,
160
  contents=prompt,
161
  config=types.GenerateContentConfig(
 
162
  image_config=types.ImageConfig(aspect_ratio="16:9", image_size="4K")
163
  )
164
  )
165
  image_parts = [part for part in response.parts if part.inline_data]
166
  if image_parts:
167
  image_bytes = image_parts[0].inline_data.data
168
- path = f"users/{uid}/epiphanies/{epiphany_id}/blueprints/{layer_name}.png"
169
- url = upload_to_storage(image_bytes, path, 'image/png')
170
- return layer_name, url
171
- return layer_name, None
172
  except Exception as e:
173
- logger.error(f"Nano Banana Pro Task Error [{layer_name}]: {e}")
174
- return layer_name, None
175
 
176
  # -----------------------------------------------------------------------------
177
  # 4. CORE ENDPOINTS (EPIPHANY, THEIA, PROXY)
@@ -188,49 +193,33 @@ def image_proxy():
188
 
189
  @app.route('/api/epiphany/generate', methods=['POST'])
190
  def generate_epiphany():
191
- logger.info(">>> START generate_epiphany request")
192
  uid = verify_token(request.headers.get('Authorization'))
193
  if not uid: return jsonify({'error': 'Unauthorized'}), 401
194
 
195
  user_ref = db_ref.child(f'users/{uid}')
196
  user_data = user_ref.get()
197
-
198
- # 4 Sparks for Synthesis + Research + Blueprints
199
  if not user_data or user_data.get('credits', 0) < 4:
200
- return jsonify({'error': 'Need 4 Sparks for Titaness Synthesis.'}), 402
201
 
202
  image_file = request.files['image']
203
  image_bytes = image_file.read()
204
  pil_image = Image.open(io.BytesIO(image_bytes)).convert('RGB')
205
 
206
  try:
207
- # Step 1: ID
208
  id_prompt = "Identify this precisely. Reply with ONLY the name."
209
  subject = client.models.generate_content(model=ATHENA_FLASH, contents=[id_prompt, pil_image]).text.strip()
210
- logger.info(f"Subject Identified: {subject}")
211
 
212
- # Step 2: Synthesis + Feynman Scholar Search
213
  physics_fact = query_wolfram_alpha(f"physics laws of {subject}")
214
  synthesis_prompt = f"""
215
  Act as Athena. Analyze '{subject}' grounded in: {physics_fact}.
216
- Style: Richard Feynman. Simple analogies, profound scientific truths.
217
-
218
- Tasks:
219
- 1. Search for 3 diverse sources (ArXiv, Patents, Journals) about {subject} using Google Search.
220
- 2. Create 4 Discovery Layers.
221
- 3. For each research source, provide Title, URL, and a 2-sentence Feynman Summary.
222
-
223
- MANDATORY JSON SCHEMA (NO NESTING):
224
- {{
225
- "title": "string",
226
- "genesis": "string",
227
- "scientific_core": "string",
228
- "engineering_edge": "string",
229
- "cross_pollination": "string",
230
- "scholar": [
231
- {{"title": "string", "url": "string", "feynman_summary": "string"}}
232
- ]
233
- }}
234
  """
235
 
236
  res = client.models.generate_content(
@@ -242,58 +231,38 @@ def generate_epiphany():
242
  )
243
  )
244
 
245
- # SCHEMA SENTINEL: Robust JSON Parsing
246
  raw_json = res.text.strip()
247
- if "```json" in raw_json:
248
- raw_json = re.search(r'```json\n(.*?)\n```', raw_json, re.DOTALL).group(1)
249
-
250
  data = json.loads(raw_json)
251
  if isinstance(data, list): data = data[0]
252
 
253
- # Handle nesting variations
254
- if "epiphany" in data: data = data["epiphany"]
255
- elif "discovery_layers" in data: data = data["discovery_layers"]
256
-
257
- layers = ['genesis', 'scientific_core', 'engineering_edge', 'cross_pollination']
258
- # Verification to avoid KeyError
259
- for l in layers:
260
- if l not in data:
261
- logger.warning(f"AI omitted key '{l}'. Adding placeholder.")
262
- data[l] = f"The details of the {l.replace('_', ' ')} are unfolding."
263
-
264
  epiphany_id = str(uuid.uuid4())
 
265
 
266
- # Step 3: PARALLEL MEDIA ENGINE (The Async Core)
267
- logger.info(f"Step 3: Launching 8 media threads for {epiphany_id}...")
268
- audios, blueprints = {}, {}
269
- with ThreadPoolExecutor(max_workers=8) as executor:
270
- # Submit 4 Audio Tasks
271
  aud_futures = [executor.submit(generate_narration_task, data.get(l), uid, epiphany_id, l) for l in layers]
272
- # Submit 4 Blueprint Tasks
273
- blu_futures = [executor.submit(generate_blueprint_task, subject, data.get(l), uid, epiphany_id, l) for l in layers]
274
 
275
- # Reassemble Results
276
- for f in aud_futures:
277
  k, v = f.result()
278
  audios[k] = v
279
- for f in blu_futures:
280
- k, v = f.result()
281
- blueprints[k] = v
282
 
283
- # Step 4: Storage & Finalization
284
- image_url = upload_to_storage(image_bytes, f"users/{uid}/epiphanies/{epiphany_id}/vision.jpg", 'image/jpeg')
285
  epiphany_record = {
286
  "epiphanyId": epiphany_id,
287
  "uid": uid,
288
  "title": data.get('title', 'System Epiphany'),
289
  "subject": subject,
290
- "imageURL": image_url,
 
291
  "layers": {
292
- l: {
293
- "text": data.get(l, ""),
294
- "audio": audios.get(l),
295
- "blueprint": blueprints.get(l)
296
- } for l in layers
297
  },
298
  "scholar": data.get('scholar', []),
299
  "createdAt": datetime.utcnow().isoformat()
@@ -301,8 +270,6 @@ def generate_epiphany():
301
 
302
  db_ref.child(f'epiphanies/{epiphany_id}').set(epiphany_record)
303
  user_ref.update({'credits': user_data.get('credits', 0) - 4})
304
- logger.info(f"TITANESS SUCCESS: {epiphany_id}")
305
-
306
  return jsonify(epiphany_record), 201
307
 
308
  except Exception as e:
@@ -311,7 +278,6 @@ def generate_epiphany():
311
 
312
  @app.route('/api/epiphany/theia', methods=['POST'])
313
  def theia_sweep():
314
- """Independent Feature: Theia Mode Annotated deconstruction."""
315
  uid = verify_token(request.headers.get('Authorization'))
316
  if not uid: return jsonify({'error': 'Unauthorized'}), 401
317
 
@@ -321,19 +287,10 @@ def theia_sweep():
321
 
322
  epiphany_id = request.form.get('epiphanyId')
323
  image_file = request.files['image']
324
-
325
  context = db_ref.child(f'epiphanies/{epiphany_id}').get() or {}
326
  subject = context.get('subject', 'Complex System')
327
 
328
- sweep_prompt = f"""
329
- Theia Mode Activation: {subject}.
330
- Task: Use Python Code Execution for spatial deconstruction.
331
- Identify components and for each return:
332
- 1. Label.
333
- 2. Bounding Box [ymin, xmin, ymax, xmax] (0-1000).
334
- 3. 20-word Feynman-style Micro-Epiphany.
335
- Return JSON list ONLY.
336
- """
337
 
338
  try:
339
  pil_image = Image.open(io.BytesIO(image_file.read())).convert('RGB')
@@ -345,11 +302,8 @@ def theia_sweep():
345
  response_mime_type='application/json'
346
  )
347
  )
348
-
349
  raw_json = res.text.strip()
350
- if "```json" in raw_json:
351
- raw_json = re.search(r'```json\n(.*?)\n```', raw_json, re.DOTALL).group(1)
352
-
353
  annotations = json.loads(raw_json)
354
  user_ref.update({'credits': user_ref.get().get('credits', 0) - 4})
355
  return jsonify({"annotations": annotations}), 200
@@ -364,10 +318,7 @@ def deep_dive():
364
  image_file = request.files['image']
365
  try:
366
  pil_image = Image.open(io.BytesIO(image_file.read())).convert('RGB')
367
- res = client.models.generate_content(
368
- model=ATHENA_FLASH,
369
- contents=["Feynman style: Explain this zoom detail in 50 words.", pil_image]
370
- )
371
  user_ref = db_ref.child(f'users/{uid}')
372
  user_ref.update({'credits': max(0, (user_ref.get().get('credits', 0) or 0) - 1)})
373
  return jsonify({"analysis": res.text.strip()}), 200
@@ -386,8 +337,8 @@ def get_chiron_briefing():
386
  ctx = "New seeker."
387
  if last:
388
  e = list(last.values())[0]
389
- ctx = f"Subject: {e['subject']}. Scholar: {e.get('scholar', [])[:1]}"
390
- prompt = f"Prep Chiron (Mentor). Context: {ctx}. 4-sentence brief."
391
  res = client.models.generate_content(model=ATHENA_FLASH, contents=[prompt])
392
  return jsonify({"memory_summary": res.text.strip()}), 200
393
  except Exception as e: return jsonify({'error': str(e)}), 500
@@ -431,7 +382,7 @@ def request_sparks():
431
  return jsonify({"success": True})
432
 
433
  # -----------------------------------------------------------------------------
434
- # 6. AUTH & USER PROFILE
435
  # -----------------------------------------------------------------------------
436
 
437
  @app.route('/api/auth/signup', methods=['POST'])
@@ -455,7 +406,7 @@ def social_signin():
455
  return jsonify({'uid': uid, **user_ref.get()}), 200
456
 
457
  @app.route('/api/user/profile', methods=['GET'])
458
- def get_profile():
459
  uid = verify_token(request.headers.get('Authorization'))
460
  if not uid: return jsonify({'error': 'Unauthorized'}), 401
461
  return jsonify(db_ref.child(f'users/{uid}').get())
@@ -468,5 +419,5 @@ def list_epiphanies():
468
  return jsonify(list(res.values()))
469
 
470
  if __name__ == '__main__':
471
- logger.info("Titaness Final Backbone Active on 7860...")
472
  app.run(debug=False, host="0.0.0.0", port=7860)
 
60
  bucket = storage.bucket()
61
  db_ref = db.reference()
62
 
63
+ # --- Google GenAI Client Initialization (Gemini 3.0) ---
64
  try:
65
  logger.info("Initializing Google GenAI Client...")
66
  api_key = os.environ.get("Gemini")
 
79
 
80
  # Grounding / External API
81
  WOLFRAM_APP_ID = os.environ.get("WOLFRAM_APP_ID")
 
82
 
83
  # -----------------------------------------------------------------------------
84
+ # 2. HELPER FUNCTIONS & GROUNDING
85
  # -----------------------------------------------------------------------------
86
 
87
  def verify_token(auth_header):
 
110
  blob.make_public()
111
  return blob.public_url
112
  except Exception as e:
113
+ logger.error(f"Failed to upload to Firebase Storage: {e}")
114
  return None
115
 
116
  def query_wolfram_alpha(query):
 
122
  except: return "Grounding in progress."
123
 
124
  # -----------------------------------------------------------------------------
125
+ # 3. TITANESS MEDIA ENGINE (CONSOLIDATED MASTER BLUEPRINT)
126
  # -----------------------------------------------------------------------------
127
 
128
def generate_narration_task(text, uid, epiphany_id, layer_name):
    """Thread task: synthesize narration for one layer via Deepgram Aura TTS.

    Args:
        text: Layer text to narrate; falsy text short-circuits to no audio.
        uid: Firebase user id (used only for the storage path).
        epiphany_id: Epiphany record id (used only for the storage path).
        layer_name: Layer key, echoed back so the caller can reassemble
            results from the thread pool regardless of completion order.

    Returns:
        (layer_name, public_url) on success, (layer_name, None) on any
        failure — errors are logged, never propagated to the executor.
    """
    if not text: return layer_name, None
    try:
        api_key = os.environ.get("DEEPGRAM_API_KEY")
        if not api_key: return layer_name, None
        DEEPGRAM_URL = "https://api.deepgram.com/v1/speak?model=aura-luna-en"
        headers = {"Authorization": f"Token {api_key}", "Content-Type": "text/plain"}
        # FIX: add a timeout — without one a stalled Deepgram request would
        # hang this worker thread (and the whole epiphany request) forever.
        response = requests.post(DEEPGRAM_URL, headers=headers,
                                 data=text.encode('utf-8'), timeout=60)
        response.raise_for_status()
        path = f"users/{uid}/epiphanies/{epiphany_id}/audio/{layer_name}.mp3"
        return layer_name, upload_to_storage(response.content, path, 'audio/mpeg')
    except Exception as e:
        logger.error(f"TTS Task Error [{layer_name}]: {e}")
        return layer_name, None
142
 
143
def generate_master_blueprint_task(subject, full_data, uid, epiphany_id):
    """Generate ONE consolidated 4K technical blueprint covering all 4 layers.

    Args:
        subject: Identified subject of the epiphany.
        full_data: Dict of layer texts. Keys 'genesis', 'scientific_core',
            'engineering_edge', 'cross_pollination' are expected but may be
            absent if the synthesis model omitted them.
        uid: Firebase user id (used for the storage path).
        epiphany_id: Epiphany record id (used for the storage path).

    Returns:
        Public URL of the uploaded PNG, or None on any failure (errors are
        logged and swallowed so the thread pool never sees an exception).
    """
    try:
        logger.info(f"Generating Master Blueprint for: {subject}")
        # FIX: use .get() with defaults — the upstream synthesis step no
        # longer backfills missing layer keys, so direct indexing could raise
        # KeyError here and silently kill the blueprint via the except below.
        summary = (
            f"Genesis: {full_data.get('genesis', '')[:100]}... "
            f"Core: {full_data.get('scientific_core', '')[:100]}... "
            f"Edge: {full_data.get('engineering_edge', '')[:100]}... "
            f"Future: {full_data.get('cross_pollination', '')[:100]}..."
        )

        prompt = (
            f"Create a single 4K Master Technical Blueprint for '{subject}'. "
            f"The image MUST be divided into 4 clear quadrants or a visual sequence: "
            f"1. The Origin (Genesis), 2. The Physics (Core), 3. The Engineering (Edge), 4. The Future (Cross-Pollination). "
            f"Context: {summary}. Style: Leonardo Da Vinci sketch meets modern CAD schematic. "
            f"Midnight navy background, white-line technical art. Professional engineering labels."
        )

        response = client.models.generate_content(
            model=ATHENA_PRO,
            contents=prompt,
            config=types.GenerateContentConfig(
                tools=[{"google_search": {}}],
                image_config=types.ImageConfig(aspect_ratio="16:9", image_size="4K")
            )
        )
        # Only inline image parts carry the rendered blueprint bytes.
        image_parts = [part for part in response.parts if part.inline_data]
        if image_parts:
            image_bytes = image_parts[0].inline_data.data
            path = f"users/{uid}/epiphanies/{epiphany_id}/master_blueprint.png"
            return upload_to_storage(image_bytes, path, 'image/png')
        return None
    except Exception as e:
        logger.error(f"Master Blueprint Error: {e}")
        return None
180
 
181
  # -----------------------------------------------------------------------------
182
  # 4. CORE ENDPOINTS (EPIPHANY, THEIA, PROXY)
 
193
 
194
  @app.route('/api/epiphany/generate', methods=['POST'])
195
  def generate_epiphany():
196
+ logger.info(">>> START Titaness generate_epiphany")
197
  uid = verify_token(request.headers.get('Authorization'))
198
  if not uid: return jsonify({'error': 'Unauthorized'}), 401
199
 
200
  user_ref = db_ref.child(f'users/{uid}')
201
  user_data = user_ref.get()
 
 
202
  if not user_data or user_data.get('credits', 0) < 4:
203
+ return jsonify({'error': 'Need 4 Sparks for Synthesis.'}), 402
204
 
205
  image_file = request.files['image']
206
  image_bytes = image_file.read()
207
  pil_image = Image.open(io.BytesIO(image_bytes)).convert('RGB')
208
 
209
  try:
210
+ # Step 1: Rapid ID
211
  id_prompt = "Identify this precisely. Reply with ONLY the name."
212
  subject = client.models.generate_content(model=ATHENA_FLASH, contents=[id_prompt, pil_image]).text.strip()
 
213
 
214
+ # Step 2: Synthesis + Universal Search
215
  physics_fact = query_wolfram_alpha(f"physics laws of {subject}")
216
  synthesis_prompt = f"""
217
  Act as Athena. Analyze '{subject}' grounded in: {physics_fact}.
218
+ Style: Richard Feynman.
219
+ Tasks:
220
+ 1. Search web for 3 sources (URL, Title, 2-sentence Feynman Summary).
221
+ 2. Create 4 Discovery Layers (genesis, scientific_core, engineering_edge, cross_pollination).
222
+ Return JSON Object ONLY.
 
 
 
 
 
 
 
 
 
 
 
 
 
223
  """
224
 
225
  res = client.models.generate_content(
 
231
  )
232
  )
233
 
234
+ # Schema Sentinel
235
  raw_json = res.text.strip()
236
+ if "```json" in raw_json: raw_json = re.search(r'```json\n(.*?)\n```', raw_json, re.DOTALL).group(1)
 
 
237
  data = json.loads(raw_json)
238
  if isinstance(data, list): data = data[0]
239
 
 
 
 
 
 
 
 
 
 
 
 
240
  epiphany_id = str(uuid.uuid4())
241
+ layers = ['genesis', 'scientific_core', 'engineering_edge', 'cross_pollination']
242
 
243
+ # Step 3: Parallel Media (4 Audios + 1 Master Blueprint)
244
+ audios = {}
245
+ master_blueprint_url = None
246
+ with ThreadPoolExecutor(max_workers=5) as executor:
 
247
  aud_futures = [executor.submit(generate_narration_task, data.get(l), uid, epiphany_id, l) for l in layers]
248
+ blu_future = executor.submit(generate_master_blueprint_task, subject, data, uid, epiphany_id)
 
249
 
250
+ for f in aud_futures:
 
251
  k, v = f.result()
252
  audios[k] = v
253
+ master_blueprint_url = blu_future.result()
 
 
254
 
255
+ # Step 4: Storage
256
+ orig_url = upload_to_storage(image_bytes, f"users/{uid}/epiphanies/{epiphany_id}/vision.jpg", 'image/jpeg')
257
  epiphany_record = {
258
  "epiphanyId": epiphany_id,
259
  "uid": uid,
260
  "title": data.get('title', 'System Epiphany'),
261
  "subject": subject,
262
+ "imageURL": orig_url,
263
+ "masterBlueprint": master_blueprint_url,
264
  "layers": {
265
+ l: {"text": data.get(l, ""), "audio": audios.get(l)} for l in layers
 
 
 
 
266
  },
267
  "scholar": data.get('scholar', []),
268
  "createdAt": datetime.utcnow().isoformat()
 
270
 
271
  db_ref.child(f'epiphanies/{epiphany_id}').set(epiphany_record)
272
  user_ref.update({'credits': user_data.get('credits', 0) - 4})
 
 
273
  return jsonify(epiphany_record), 201
274
 
275
  except Exception as e:
 
278
 
279
  @app.route('/api/epiphany/theia', methods=['POST'])
280
  def theia_sweep():
 
281
  uid = verify_token(request.headers.get('Authorization'))
282
  if not uid: return jsonify({'error': 'Unauthorized'}), 401
283
 
 
287
 
288
  epiphany_id = request.form.get('epiphanyId')
289
  image_file = request.files['image']
 
290
  context = db_ref.child(f'epiphanies/{epiphany_id}').get() or {}
291
  subject = context.get('subject', 'Complex System')
292
 
293
+ sweep_prompt = f"Theia Mode Activation: {subject}. Identify components via Python Code Execution. Return JSON list of annotations: label, coordinates [ymin, xmin, ymax, xmax], and 20-word Feynman Micro-Epiphany."
 
 
 
 
 
 
 
 
294
 
295
  try:
296
  pil_image = Image.open(io.BytesIO(image_file.read())).convert('RGB')
 
302
  response_mime_type='application/json'
303
  )
304
  )
 
305
  raw_json = res.text.strip()
306
+ if "```json" in raw_json: raw_json = re.search(r'```json\n(.*?)\n```', raw_json, re.DOTALL).group(1)
 
 
307
  annotations = json.loads(raw_json)
308
  user_ref.update({'credits': user_ref.get().get('credits', 0) - 4})
309
  return jsonify({"annotations": annotations}), 200
 
318
  image_file = request.files['image']
319
  try:
320
  pil_image = Image.open(io.BytesIO(image_file.read())).convert('RGB')
321
+ res = client.models.generate_content(model=ATHENA_FLASH, contents=["In 50 words Feynman style, explain this detail.", pil_image])
 
 
 
322
  user_ref = db_ref.child(f'users/{uid}')
323
  user_ref.update({'credits': max(0, (user_ref.get().get('credits', 0) or 0) - 1)})
324
  return jsonify({"analysis": res.text.strip()}), 200
 
337
  ctx = "New seeker."
338
  if last:
339
  e = list(last.values())[0]
340
+ ctx = f"Subject: {e['subject']}. Recent Research: {e.get('scholar', [])[:1]}"
341
+ prompt = f"Prep Chiron (Socratic Mentor). Context: {ctx}. 4-sentence brief."
342
  res = client.models.generate_content(model=ATHENA_FLASH, contents=[prompt])
343
  return jsonify({"memory_summary": res.text.strip()}), 200
344
  except Exception as e: return jsonify({'error': str(e)}), 500
 
382
  return jsonify({"success": True})
383
 
384
  # -----------------------------------------------------------------------------
385
+ # 6. AUTH & PROFILE
386
  # -----------------------------------------------------------------------------
387
 
388
  @app.route('/api/auth/signup', methods=['POST'])
 
406
  return jsonify({'uid': uid, **user_ref.get()}), 200
407
 
408
@app.route('/api/user/profile', methods=['GET'])
def profile():
    """Return the authenticated user's profile record from the Realtime DB.

    Requires a Firebase ID token in the Authorization header.
    Responses: 200 with the profile JSON, 401 if the token is invalid,
    404 if no profile record exists for this uid.
    """
    uid = verify_token(request.headers.get('Authorization'))
    if not uid: return jsonify({'error': 'Unauthorized'}), 401
    profile_data = db_ref.child(f'users/{uid}').get()
    # FIX: a missing record previously serialized as a bare `null` body with
    # HTTP 200, which clients can't distinguish from success; return 404.
    if profile_data is None:
        return jsonify({'error': 'Profile not found'}), 404
    return jsonify(profile_data)
 
419
  return jsonify(list(res.values()))
420
 
421
if __name__ == '__main__':
    # Entry point: run the Flask dev server on all interfaces, port 7860
    # (the port Hugging Face Spaces exposes for web apps).
    startup_banner = "Titaness Master Schematic Server Active on 7860..."
    logger.info(startup_banner)
    app.run(debug=False, host="0.0.0.0", port=7860)