rairo committed on
Commit
23638cc
·
verified ·
1 Parent(s): 773d4f7

Update main.py

Browse files
Files changed (1) hide show
  1. main.py +81 -44
main.py CHANGED
@@ -36,6 +36,7 @@ logger = logging.getLogger("SOZO_ATHENA")
36
  app = Flask(__name__)
37
  CORS(app)
38
 
 
39
  try:
40
  logger.info("Initializing Firebase Admin SDK...")
41
  credentials_json_string = os.environ.get("FIREBASE")
@@ -59,8 +60,9 @@ except Exception as e:
59
  bucket = storage.bucket()
60
  db_ref = db.reference()
61
 
 
62
  try:
63
- logger.info("Initializing Google GenAI Client (Gemini 3.0)...")
64
  api_key = os.environ.get("Gemini")
65
  if not api_key:
66
  raise ValueError("The 'Gemini' API key is not set.")
@@ -71,11 +73,11 @@ except Exception as e:
71
  logger.error(f"FATAL: Error initializing GenAI Client: {e}")
72
  exit(1)
73
 
74
- # Corrected Model Constants
75
  ATHENA_FLASH = "gemini-3-flash-preview"
76
  ATHENA_PRO = "gemini-3-pro-image-preview"
77
 
78
- # Grounding API Keys
79
  WOLFRAM_APP_ID = os.environ.get("WOLFRAM_APP_ID")
80
  OPENALEX_MAILTO = os.environ.get("OPENALEX_MAILTO", "rairo@sozofix.tech")
81
 
@@ -117,33 +119,41 @@ def query_wolfram_alpha(query):
117
  try:
118
  url = f"http://api.wolframalpha.com/v1/result?appid={WOLFRAM_APP_ID}&i={query}"
119
  response = requests.get(url, timeout=5)
120
- return response.text if response.status_code == 200 else "Constants verifying..."
121
  except: return "Grounding in progress."
122
 
123
  # -----------------------------------------------------------------------------
124
- # 3. TITANESS MEDIA ENGINE (AUDIO + INFOGRAPHICS)
125
  # -----------------------------------------------------------------------------
126
 
127
  def generate_narration_task(text, uid, epiphany_id, layer_name):
 
 
128
  try:
 
129
  api_key = os.environ.get("DEEPGRAM_API_KEY")
130
  if not api_key: return layer_name, None
 
131
  DEEPGRAM_URL = "https://api.deepgram.com/v1/speak?model=aura-luna-en"
132
  headers = {"Authorization": f"Token {api_key}", "Content-Type": "text/plain"}
133
  response = requests.post(DEEPGRAM_URL, headers=headers, data=text.encode('utf-8'))
134
  response.raise_for_status()
 
135
  path = f"users/{uid}/epiphanies/{epiphany_id}/audio/{layer_name}.mp3"
136
- return layer_name, upload_to_storage(response.content, path, 'audio/mpeg')
 
137
  except Exception as e:
138
- logger.error(f"Narration Task Error: {e}")
139
  return layer_name, None
140
 
141
- def generate_blueprint_task(subject, layer_text, uid, epiphany_id, layer_name):
142
- """Nano Banana Pro: Generates 4K Technical Blueprints."""
 
143
  try:
 
144
  prompt = (
145
  f"Generate a technical 4K blueprint infographic of {subject} - {layer_name}. "
146
- f"Context: {layer_text}. Style: White-line schematic on midnight navy background. 4K quality."
147
  )
148
  response = client.models.generate_content(
149
  model=ATHENA_PRO,
@@ -156,10 +166,11 @@ def generate_blueprint_task(subject, layer_text, uid, epiphany_id, layer_name):
156
  if image_parts:
157
  image_bytes = image_parts[0].inline_data.data
158
  path = f"users/{uid}/epiphanies/{epiphany_id}/blueprints/{layer_name}.png"
159
- return layer_name, upload_to_storage(image_bytes, path, 'image/png')
 
160
  return layer_name, None
161
  except Exception as e:
162
- logger.error(f"Nano Banana Pro Error [{layer_name}]: {e}")
163
  return layer_name, None
164
 
165
  # -----------------------------------------------------------------------------
@@ -177,37 +188,49 @@ def image_proxy():
177
 
178
  @app.route('/api/epiphany/generate', methods=['POST'])
179
  def generate_epiphany():
180
- logger.info(">>> START generate_epiphany")
181
  uid = verify_token(request.headers.get('Authorization'))
182
  if not uid: return jsonify({'error': 'Unauthorized'}), 401
183
 
184
  user_ref = db_ref.child(f'users/{uid}')
185
  user_data = user_ref.get()
 
 
186
  if not user_data or user_data.get('credits', 0) < 4:
187
- return jsonify({'error': 'Need 4 Sparks for Synthesis.'}), 402
188
 
189
  image_file = request.files['image']
190
  image_bytes = image_file.read()
191
  pil_image = Image.open(io.BytesIO(image_bytes)).convert('RGB')
192
 
193
  try:
194
- # Step 1: Rapid ID
195
  id_prompt = "Identify this precisely. Reply with ONLY the name."
196
  subject = client.models.generate_content(model=ATHENA_FLASH, contents=[id_prompt, pil_image]).text.strip()
197
- logger.info(f"Subject identified: {subject}")
198
 
199
- # Step 2: Synthesis + Feynman Scholar (Unified Search)
200
  physics_fact = query_wolfram_alpha(f"physics laws of {subject}")
201
  synthesis_prompt = f"""
202
  Act as Athena. Analyze '{subject}' grounded in: {physics_fact}.
203
  Style: Richard Feynman. Simple analogies, profound scientific truths.
204
 
205
  Tasks:
206
- 1. Search the web (ArXiv, Patents, Journals) for 3 diverse sources about {subject} using Google Search.
207
- 2. Create 4 Discovery Layers (JSON keys: title, genesis, scientific_core, engineering_edge, cross_pollination).
208
- 3. For each research source, provide Title, URL, and a 2-sentence Feynman Summary. (JSON key: scholar).
209
 
210
- Return a JSON OBJECT only.
 
 
 
 
 
 
 
 
 
 
211
  """
212
 
213
  res = client.models.generate_content(
@@ -219,25 +242,37 @@ def generate_epiphany():
219
  )
220
  )
221
 
222
- # TYPE-SAFE JSON PARSING
223
  raw_json = res.text.strip()
224
- if raw_json.startswith("```json"):
225
  raw_json = re.search(r'```json\n(.*?)\n```', raw_json, re.DOTALL).group(1)
226
 
227
  data = json.loads(raw_json)
228
- # Fix for "list indices" error: If Gemini returns a list, take the first object
229
- if isinstance(data, list):
230
- data = data[0]
231
- logger.info("Handled JSON List-to-Object conversion.")
 
232
 
233
- epiphany_id = str(uuid.uuid4())
234
  layers = ['genesis', 'scientific_core', 'engineering_edge', 'cross_pollination']
 
 
 
 
 
235
 
236
- # Step 3: Parallel Media (Titaness Execution)
 
 
 
237
  audios, blueprints = {}, {}
238
  with ThreadPoolExecutor(max_workers=8) as executor:
239
- aud_futures = [executor.submit(generate_narration_task, data[l], uid, epiphany_id, l) for l in layers]
240
- blu_futures = [executor.submit(generate_blueprint_task, subject, data[l], uid, epiphany_id, l) for l in layers]
 
 
 
 
241
  for f in aud_futures:
242
  k, v = f.result()
243
  audios[k] = v
@@ -245,7 +280,7 @@ def generate_epiphany():
245
  k, v = f.result()
246
  blueprints[k] = v
247
 
248
- # Step 4: Storage
249
  image_url = upload_to_storage(image_bytes, f"users/{uid}/epiphanies/{epiphany_id}/vision.jpg", 'image/jpeg')
250
  epiphany_record = {
251
  "epiphanyId": epiphany_id,
@@ -255,7 +290,7 @@ def generate_epiphany():
255
  "imageURL": image_url,
256
  "layers": {
257
  l: {
258
- "text": data[l],
259
  "audio": audios.get(l),
260
  "blueprint": blueprints.get(l)
261
  } for l in layers
@@ -266,15 +301,17 @@ def generate_epiphany():
266
 
267
  db_ref.child(f'epiphanies/{epiphany_id}').set(epiphany_record)
268
  user_ref.update({'credits': user_data.get('credits', 0) - 4})
 
 
269
  return jsonify(epiphany_record), 201
270
 
271
  except Exception as e:
272
- logger.error(f"Epiphany Gen Error: {e}\n{traceback.format_exc()}")
273
  return jsonify({'error': str(e)}), 500
274
 
275
  @app.route('/api/epiphany/theia', methods=['POST'])
276
  def theia_sweep():
277
- """Independent Feature: Annotated deconstruction using Code Execution."""
278
  uid = verify_token(request.headers.get('Authorization'))
279
  if not uid: return jsonify({'error': 'Unauthorized'}), 401
280
 
@@ -285,7 +322,6 @@ def theia_sweep():
285
  epiphany_id = request.form.get('epiphanyId')
286
  image_file = request.files['image']
287
 
288
- # Context fetch
289
  context = db_ref.child(f'epiphanies/{epiphany_id}').get() or {}
290
  subject = context.get('subject', 'Complex System')
291
 
@@ -296,7 +332,7 @@ def theia_sweep():
296
  1. Label.
297
  2. Bounding Box [ymin, xmin, ymax, xmax] (0-1000).
298
  3. 20-word Feynman-style Micro-Epiphany.
299
- Return JSON list.
300
  """
301
 
302
  try:
@@ -311,7 +347,7 @@ def theia_sweep():
311
  )
312
 
313
  raw_json = res.text.strip()
314
- if raw_json.startswith("```json"):
315
  raw_json = re.search(r'```json\n(.*?)\n```', raw_json, re.DOTALL).group(1)
316
 
317
  annotations = json.loads(raw_json)
@@ -330,10 +366,10 @@ def deep_dive():
330
  pil_image = Image.open(io.BytesIO(image_file.read())).convert('RGB')
331
  res = client.models.generate_content(
332
  model=ATHENA_FLASH,
333
- contents=["In 50 words Feynman style, explain this zoom detail.", pil_image]
334
  )
335
  user_ref = db_ref.child(f'users/{uid}')
336
- user_ref.update({'credits': max(0, user_ref.get().get('credits', 0) - 1)})
337
  return jsonify({"analysis": res.text.strip()}), 200
338
  except Exception as e: return jsonify({'error': str(e)}), 500
339
 
@@ -350,7 +386,7 @@ def get_chiron_briefing():
350
  ctx = "New seeker."
351
  if last:
352
  e = list(last.values())[0]
353
- ctx = f"Subject: {e['subject']}. Research: {e.get('scholar', [])}"
354
  prompt = f"Prep Chiron (Mentor). Context: {ctx}. 4-sentence brief."
355
  res = client.models.generate_content(model=ATHENA_FLASH, contents=[prompt])
356
  return jsonify({"memory_summary": res.text.strip()}), 200
@@ -363,10 +399,11 @@ def log_usage():
363
  data = request.json
364
  cost = math.ceil(data.get("durationSeconds", 0) / 60) * 3
365
  user_ref = db_ref.child(f'users/{uid}')
366
- user_ref.update({'credits': max(0, (user_ref.get().get('credits', 0)) - cost)})
 
367
  if data.get("transcript"):
368
  db_ref.child(f'transcripts/{uid}').push({"text": data["transcript"], "at": datetime.utcnow().isoformat()})
369
- return jsonify({"success": True})
370
 
371
  @app.route('/api/admin/dashboard', methods=['GET'])
372
  def admin_dashboard():
@@ -431,5 +468,5 @@ def list_epiphanies():
431
  return jsonify(list(res.values()))
432
 
433
  if __name__ == '__main__':
434
- logger.info("Titaness Paradigm Server Active...")
435
  app.run(debug=False, host="0.0.0.0", port=7860)
 
36
  app = Flask(__name__)
37
  CORS(app)
38
 
39
+ # --- Firebase Initialization ---
40
  try:
41
  logger.info("Initializing Firebase Admin SDK...")
42
  credentials_json_string = os.environ.get("FIREBASE")
 
60
  bucket = storage.bucket()
61
  db_ref = db.reference()
62
 
63
+ # --- Google GenAI Client Initialization (Gemini 3.0 Ecosystem) ---
64
  try:
65
+ logger.info("Initializing Google GenAI Client...")
66
  api_key = os.environ.get("Gemini")
67
  if not api_key:
68
  raise ValueError("The 'Gemini' API key is not set.")
 
73
  logger.error(f"FATAL: Error initializing GenAI Client: {e}")
74
  exit(1)
75
 
76
+ # Model Constants
77
  ATHENA_FLASH = "gemini-3-flash-preview"
78
  ATHENA_PRO = "gemini-3-pro-image-preview"
79
 
80
+ # Grounding / External API
81
  WOLFRAM_APP_ID = os.environ.get("WOLFRAM_APP_ID")
82
  OPENALEX_MAILTO = os.environ.get("OPENALEX_MAILTO", "rairo@sozofix.tech")
83
 
 
119
  try:
120
  url = f"http://api.wolframalpha.com/v1/result?appid={WOLFRAM_APP_ID}&i={query}"
121
  response = requests.get(url, timeout=5)
122
+ return response.text if response.status_code == 200 else "Fact-check pending..."
123
  except: return "Grounding in progress."
124
 
125
  # -----------------------------------------------------------------------------
126
+ # 3. TITANESS MEDIA ENGINE (PARALLEL BLUEPRINTS & NARRATION)
127
  # -----------------------------------------------------------------------------
128
 
129
def generate_narration_task(text, uid, epiphany_id, layer_name):
    """Generate Deepgram TTS narration for one epiphany layer.

    Runs as a ThreadPoolExecutor worker. Always returns a
    ``(layer_name, url_or_None)`` tuple so the caller can reassemble
    results without caring which individual tasks failed.

    Args:
        text: Layer text to narrate; falsy text short-circuits to None.
        uid: Owning user's id (used in the storage path).
        epiphany_id: Parent epiphany id (used in the storage path).
        layer_name: Layer key; echoed back as the first tuple element.

    Returns:
        ``(layer_name, public_url)`` on success, ``(layer_name, None)``
        on any failure. Errors are logged, never raised, so one bad
        layer cannot sink the whole parallel media batch.
    """
    if not text:
        return layer_name, None
    try:
        logger.info("Thread started: Narration for %s", layer_name)
        api_key = os.environ.get("DEEPGRAM_API_KEY")
        if not api_key:
            return layer_name, None

        deepgram_url = "https://api.deepgram.com/v1/speak?model=aura-luna-en"
        headers = {"Authorization": f"Token {api_key}", "Content-Type": "text/plain"}
        # Timeout added: an unbounded POST can hang this worker thread and
        # stall the entire /generate request (the file uses timeouts on
        # its other outbound HTTP calls).
        response = requests.post(
            deepgram_url,
            headers=headers,
            data=text.encode('utf-8'),
            timeout=60,
        )
        response.raise_for_status()

        path = f"users/{uid}/epiphanies/{epiphany_id}/audio/{layer_name}.mp3"
        return layer_name, upload_to_storage(response.content, path, 'audio/mpeg')
    except Exception as e:
        logger.error(f"Narration Task Error [{layer_name}]: {e}")
        return layer_name, None
148
 
149
+ def generate_blueprint_task(subject, text, uid, epiphany_id, layer_name):
150
+ """Async task for Nano Banana Pro technical blueprint generation."""
151
+ if not text: return layer_name, None
152
  try:
153
+ logger.info(f"Thread started: Nano Banana Blueprint for {layer_name}")
154
  prompt = (
155
  f"Generate a technical 4K blueprint infographic of {subject} - {layer_name}. "
156
+ f"Context: {text}. Style: White-line schematic on midnight navy background. 4K quality."
157
  )
158
  response = client.models.generate_content(
159
  model=ATHENA_PRO,
 
166
  if image_parts:
167
  image_bytes = image_parts[0].inline_data.data
168
  path = f"users/{uid}/epiphanies/{epiphany_id}/blueprints/{layer_name}.png"
169
+ url = upload_to_storage(image_bytes, path, 'image/png')
170
+ return layer_name, url
171
  return layer_name, None
172
  except Exception as e:
173
+ logger.error(f"Nano Banana Pro Task Error [{layer_name}]: {e}")
174
  return layer_name, None
175
 
176
  # -----------------------------------------------------------------------------
 
188
 
189
  @app.route('/api/epiphany/generate', methods=['POST'])
190
  def generate_epiphany():
191
+ logger.info(">>> START generate_epiphany request")
192
  uid = verify_token(request.headers.get('Authorization'))
193
  if not uid: return jsonify({'error': 'Unauthorized'}), 401
194
 
195
  user_ref = db_ref.child(f'users/{uid}')
196
  user_data = user_ref.get()
197
+
198
+ # 4 Sparks for Synthesis + Research + Blueprints
199
  if not user_data or user_data.get('credits', 0) < 4:
200
+ return jsonify({'error': 'Need 4 Sparks for Titaness Synthesis.'}), 402
201
 
202
  image_file = request.files['image']
203
  image_bytes = image_file.read()
204
  pil_image = Image.open(io.BytesIO(image_bytes)).convert('RGB')
205
 
206
  try:
207
+ # Step 1: ID
208
  id_prompt = "Identify this precisely. Reply with ONLY the name."
209
  subject = client.models.generate_content(model=ATHENA_FLASH, contents=[id_prompt, pil_image]).text.strip()
210
+ logger.info(f"Subject Identified: {subject}")
211
 
212
+ # Step 2: Synthesis + Feynman Scholar Search
213
  physics_fact = query_wolfram_alpha(f"physics laws of {subject}")
214
  synthesis_prompt = f"""
215
  Act as Athena. Analyze '{subject}' grounded in: {physics_fact}.
216
  Style: Richard Feynman. Simple analogies, profound scientific truths.
217
 
218
  Tasks:
219
+ 1. Search for 3 diverse sources (ArXiv, Patents, Journals) about {subject} using Google Search.
220
+ 2. Create 4 Discovery Layers.
221
+ 3. For each research source, provide Title, URL, and a 2-sentence Feynman Summary.
222
 
223
+ MANDATORY JSON SCHEMA (NO NESTING):
224
+ {{
225
+ "title": "string",
226
+ "genesis": "string",
227
+ "scientific_core": "string",
228
+ "engineering_edge": "string",
229
+ "cross_pollination": "string",
230
+ "scholar": [
231
+ {{"title": "string", "url": "string", "feynman_summary": "string"}}
232
+ ]
233
+ }}
234
  """
235
 
236
  res = client.models.generate_content(
 
242
  )
243
  )
244
 
245
+ # SCHEMA SENTINEL: Robust JSON Parsing
246
  raw_json = res.text.strip()
247
+ if "```json" in raw_json:
248
  raw_json = re.search(r'```json\n(.*?)\n```', raw_json, re.DOTALL).group(1)
249
 
250
  data = json.loads(raw_json)
251
+ if isinstance(data, list): data = data[0]
252
+
253
+ # Handle nesting variations
254
+ if "epiphany" in data: data = data["epiphany"]
255
+ elif "discovery_layers" in data: data = data["discovery_layers"]
256
 
 
257
  layers = ['genesis', 'scientific_core', 'engineering_edge', 'cross_pollination']
258
+ # Verification to avoid KeyError
259
+ for l in layers:
260
+ if l not in data:
261
+ logger.warning(f"AI omitted key '{l}'. Adding placeholder.")
262
+ data[l] = f"The details of the {l.replace('_', ' ')} are unfolding."
263
 
264
+ epiphany_id = str(uuid.uuid4())
265
+
266
+ # Step 3: PARALLEL MEDIA ENGINE (The Async Core)
267
+ logger.info(f"Step 3: Launching 8 media threads for {epiphany_id}...")
268
  audios, blueprints = {}, {}
269
  with ThreadPoolExecutor(max_workers=8) as executor:
270
+ # Submit 4 Audio Tasks
271
+ aud_futures = [executor.submit(generate_narration_task, data.get(l), uid, epiphany_id, l) for l in layers]
272
+ # Submit 4 Blueprint Tasks
273
+ blu_futures = [executor.submit(generate_blueprint_task, subject, data.get(l), uid, epiphany_id, l) for l in layers]
274
+
275
+ # Reassemble Results
276
  for f in aud_futures:
277
  k, v = f.result()
278
  audios[k] = v
 
280
  k, v = f.result()
281
  blueprints[k] = v
282
 
283
+ # Step 4: Storage & Finalization
284
  image_url = upload_to_storage(image_bytes, f"users/{uid}/epiphanies/{epiphany_id}/vision.jpg", 'image/jpeg')
285
  epiphany_record = {
286
  "epiphanyId": epiphany_id,
 
290
  "imageURL": image_url,
291
  "layers": {
292
  l: {
293
+ "text": data.get(l, ""),
294
  "audio": audios.get(l),
295
  "blueprint": blueprints.get(l)
296
  } for l in layers
 
301
 
302
  db_ref.child(f'epiphanies/{epiphany_id}').set(epiphany_record)
303
  user_ref.update({'credits': user_data.get('credits', 0) - 4})
304
+ logger.info(f"TITANESS SUCCESS: {epiphany_id}")
305
+
306
  return jsonify(epiphany_record), 201
307
 
308
  except Exception as e:
309
+ logger.error(f"Global Epiphany Error: {e}\n{traceback.format_exc()}")
310
  return jsonify({'error': str(e)}), 500
311
 
312
  @app.route('/api/epiphany/theia', methods=['POST'])
313
  def theia_sweep():
314
+ """Independent Feature: Theia Mode Annotated deconstruction."""
315
  uid = verify_token(request.headers.get('Authorization'))
316
  if not uid: return jsonify({'error': 'Unauthorized'}), 401
317
 
 
322
  epiphany_id = request.form.get('epiphanyId')
323
  image_file = request.files['image']
324
 
 
325
  context = db_ref.child(f'epiphanies/{epiphany_id}').get() or {}
326
  subject = context.get('subject', 'Complex System')
327
 
 
332
  1. Label.
333
  2. Bounding Box [ymin, xmin, ymax, xmax] (0-1000).
334
  3. 20-word Feynman-style Micro-Epiphany.
335
+ Return JSON list ONLY.
336
  """
337
 
338
  try:
 
347
  )
348
 
349
  raw_json = res.text.strip()
350
+ if "```json" in raw_json:
351
  raw_json = re.search(r'```json\n(.*?)\n```', raw_json, re.DOTALL).group(1)
352
 
353
  annotations = json.loads(raw_json)
 
366
  pil_image = Image.open(io.BytesIO(image_file.read())).convert('RGB')
367
  res = client.models.generate_content(
368
  model=ATHENA_FLASH,
369
+ contents=["Feynman style: Explain this zoom detail in 50 words.", pil_image]
370
  )
371
  user_ref = db_ref.child(f'users/{uid}')
372
+ user_ref.update({'credits': max(0, (user_ref.get().get('credits', 0) or 0) - 1)})
373
  return jsonify({"analysis": res.text.strip()}), 200
374
  except Exception as e: return jsonify({'error': str(e)}), 500
375
 
 
386
  ctx = "New seeker."
387
  if last:
388
  e = list(last.values())[0]
389
+ ctx = f"Subject: {e['subject']}. Scholar: {e.get('scholar', [])[:1]}"
390
  prompt = f"Prep Chiron (Mentor). Context: {ctx}. 4-sentence brief."
391
  res = client.models.generate_content(model=ATHENA_FLASH, contents=[prompt])
392
  return jsonify({"memory_summary": res.text.strip()}), 200
 
399
  data = request.json
400
  cost = math.ceil(data.get("durationSeconds", 0) / 60) * 3
401
  user_ref = db_ref.child(f'users/{uid}')
402
+ new_bal = max(0, (user_ref.get().get('credits', 0) or 0) - cost)
403
+ user_ref.update({'credits': new_bal})
404
  if data.get("transcript"):
405
  db_ref.child(f'transcripts/{uid}').push({"text": data["transcript"], "at": datetime.utcnow().isoformat()})
406
+ return jsonify({"success": True, "remainingCredits": new_bal})
407
 
408
  @app.route('/api/admin/dashboard', methods=['GET'])
409
  def admin_dashboard():
 
468
  return jsonify(list(res.values()))
469
 
470
  if __name__ == '__main__':
471
+ logger.info("Titaness Final Backbone Active on 7860...")
472
  app.run(debug=False, host="0.0.0.0", port=7860)