Raiff1982 committed on
Commit
1d0a0fa
·
verified ·
1 Parent(s): 3594c3e

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +27 -85
app.py CHANGED
@@ -4,9 +4,7 @@ import os
4
  import traceback
5
  import gradio as gr
6
  import logging
7
- import asyncio
8
  import torch
9
- from concurrent.futures import ThreadPoolExecutor
10
  from datetime import datetime
11
  from transformers import AutoModelForCausalLM, AutoTokenizer
12
 
@@ -16,15 +14,15 @@ sys.path.insert(0, os.path.join(os.path.dirname(__file__), '..'))
16
  sys.path.insert(0, os.path.join(os.path.dirname(__file__), '../..'))
17
 
18
  try:
19
- from src.components.ai_core import AICore
20
- from src.aegis_integration import AegisBridge
21
- from src.aegis_integration.config import AEGIS_CONFIG
22
  from components.search_engine import SearchEngine
23
  except ImportError:
24
  # Fallback for container environment
25
  from src.components.ai_core import AICore
26
- from src.aegis_integration import AegisBridge
27
- from src.aegis_integration.config import AEGIS_CONFIG
28
  from src.components.search_engine import SearchEngine
29
 
30
  # Configure logging
@@ -118,7 +116,7 @@ except Exception as e:
118
  logger.error(f"Error initializing model: {e}")
119
  sys.exit(1)
120
 
121
- async def process_message(message: str, history: list) -> tuple:
122
  """Process chat messages with improved context management"""
123
  try:
124
  # Clean input
@@ -127,18 +125,8 @@ async def process_message(message: str, history: list) -> tuple:
127
  return "", history
128
 
129
  try:
130
- # Get response from AI core asynchronously
131
- if hasattr(ai_core, 'generate_text_async'):
132
- response = await ai_core.generate_text_async(message)
133
- else:
134
- # Fallback to sync version in ThreadPoolExecutor
135
- loop = asyncio.get_event_loop()
136
- with ThreadPoolExecutor() as pool:
137
- response = await loop.run_in_executor(
138
- pool,
139
- ai_core.generate_text,
140
- message
141
- )
142
 
143
  # Clean and validate response
144
  if response is None:
@@ -267,81 +255,35 @@ with gr.Blocks(title="Codette") as iface:
267
  # Set up search event handlers
268
  search_btn.click(sync_search, search_input, search_output)
269
  search_input.submit(sync_search, search_input, search_output)
270
- # Run the Gradio interface with proper async handling
271
- async def shutdown():
272
- """Cleanup function for graceful shutdown"""
273
- try:
274
- # Save final quantum state if available
275
- if hasattr(ai_core, 'cocoon_manager') and ai_core.cocoon_manager:
276
- try:
277
- ai_core.cocoon_manager.save_cocoon({
278
- "type": "shutdown",
279
- "quantum_state": ai_core.quantum_state
280
- })
281
- logger.info("Final quantum state saved")
282
- except Exception as e:
283
- logger.error(f"Error saving final quantum state: {e}")
284
-
285
- # Shutdown AI core
286
- try:
287
- await ai_core.shutdown()
288
- logger.info("AI Core shutdown complete")
289
- except Exception as e:
290
- logger.error(f"Error shutting down AI Core: {e}")
291
-
292
- # Clear CUDA cache if GPU was used
293
- if torch.cuda.is_available():
294
- try:
295
- torch.cuda.empty_cache()
296
- logger.info("CUDA cache cleared")
297
- except Exception as e:
298
- logger.error(f"Error clearing CUDA cache: {e}")
299
-
300
- except Exception as e:
301
- logger.error(f"Error during shutdown: {e}")
302
- raise
303
 
 
304
  if __name__ == "__main__":
305
  try:
306
- # Set up exception handling
307
- def handle_exception(loop, context):
308
- msg = context.get("exception", context["message"])
309
- logger.error(f"Caught exception: {msg}")
310
-
311
- # Set up asyncio event loop with proper error handling
312
- loop = asyncio.new_event_loop()
313
- loop.set_exception_handler(handle_exception)
314
- asyncio.set_event_loop(loop)
315
-
316
- # Launch Gradio interface
317
  iface.queue().launch(
318
- prevent_thread_lock=True,
319
  share=False,
320
- server_name="127.0.0.1",
 
321
  show_error=True,
322
  theme=gr.themes.Soft()
323
  )
324
-
325
- try:
326
- # Keep the main loop running
327
- loop.run_forever()
328
- except Exception as e:
329
- logger.error(f"Error in main loop: {e}")
330
- traceback.print_exc()
331
  except KeyboardInterrupt:
332
  logger.info("Shutting down gracefully...")
333
  try:
334
- loop.run_until_complete(shutdown())
 
 
 
 
 
 
 
 
 
335
  except Exception as e:
336
  logger.error(f"Error during shutdown: {e}")
337
- finally:
338
- try:
339
- tasks = asyncio.all_tasks(loop)
340
- for task in tasks:
341
- task.cancel()
342
- loop.run_until_complete(asyncio.gather(*tasks, return_exceptions=True))
343
- loop.close()
344
- except Exception as e:
345
- logger.error(f"Error closing loop: {e}")
346
- sys.exit(1)
347
- sys.exit(0)
 
4
  import traceback
5
  import gradio as gr
6
  import logging
 
7
  import torch
 
8
  from datetime import datetime
9
  from transformers import AutoModelForCausalLM, AutoTokenizer
10
 
 
14
  sys.path.insert(0, os.path.join(os.path.dirname(__file__), '../..'))
15
 
16
  try:
17
+ from components.ai_core import AICore
18
+ from components.aegis_integration import AegisBridge
19
+ from components.aegis_integration.config import AEGIS_CONFIG
20
  from components.search_engine import SearchEngine
21
  except ImportError:
22
  # Fallback for container environment
23
  from src.components.ai_core import AICore
24
+ from src.components.aegis_integration import AegisBridge
25
+ from src.components.aegis_integration.config import AEGIS_CONFIG
26
  from src.components.search_engine import SearchEngine
27
 
28
  # Configure logging
 
116
  logger.error(f"Error initializing model: {e}")
117
  sys.exit(1)
118
 
119
+ def process_message(message: str, history: list) -> tuple:
120
  """Process chat messages with improved context management"""
121
  try:
122
  # Clean input
 
125
  return "", history
126
 
127
  try:
128
+ # Get response from AI core
129
+ response = ai_core.generate_text(message)
 
 
 
 
 
 
 
 
 
 
130
 
131
  # Clean and validate response
132
  if response is None:
 
255
  # Set up search event handlers
256
  search_btn.click(sync_search, search_input, search_output)
257
  search_input.submit(sync_search, search_input, search_output)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
258
 
259
+ # Run the Gradio interface
260
  if __name__ == "__main__":
261
  try:
262
+ # Launch Gradio interface - let Gradio handle event loop
 
 
 
 
 
 
 
 
 
 
263
  iface.queue().launch(
 
264
  share=False,
265
+ server_name="0.0.0.0",
266
+ server_port=7860,
267
  show_error=True,
268
  theme=gr.themes.Soft()
269
  )
 
 
 
 
 
 
 
270
  except KeyboardInterrupt:
271
  logger.info("Shutting down gracefully...")
272
  try:
273
+ # Save final quantum state if available
274
+ if hasattr(ai_core, 'cocoon_manager') and ai_core.cocoon_manager:
275
+ try:
276
+ ai_core.cocoon_manager.save_cocoon({
277
+ "type": "shutdown",
278
+ "quantum_state": ai_core.quantum_state
279
+ })
280
+ logger.info("Final quantum state saved")
281
+ except Exception as e:
282
+ logger.error(f"Error saving final quantum state: {e}")
283
  except Exception as e:
284
  logger.error(f"Error during shutdown: {e}")
285
+ sys.exit(0)
286
+ except Exception as e:
287
+ logger.error(f"Error launching Gradio interface: {e}")
288
+ traceback.print_exc()
289
+ sys.exit(1)