Nhughes09 committed on
Commit
6b575f3
·
1 Parent(s): 7691144

V8.5: Fix UndefinedError and Ollama Model Listing

Browse files
Files changed (2) hide show
  1. Dockerfile +1 -1
  2. app.py +21 -3
Dockerfile CHANGED
@@ -15,4 +15,4 @@ EXPOSE 7860
15
  # Run
16
  CMD ["uvicorn", "app:app", "--host", "0.0.0.0", "--port", "7860"]
17
 
18
- # Force Rebuild: 2026-01-02-1705
 
15
  # Run
16
  CMD ["uvicorn", "app:app", "--host", "0.0.0.0", "--port", "7860"]
17
 
18
+ # Force Rebuild: 2026-01-02-1715
app.py CHANGED
@@ -144,7 +144,19 @@ def get_latest_data() -> Dict[str, Any]:
144
  {"month": "Jul 25", "count": 15000}, {"month": "Aug 25", "count": 16000},
145
  {"month": "Sep 25", "count": 18000}, {"month": "Oct 25", "count": 21000},
146
  {"month": "Nov 25", "count": 25000}, {"month": "Dec 25", "count": 32000}
147
- ]
 
 
 
 
 
 
 
 
 
 
 
 
148
  }
149
  }
150
 
@@ -615,8 +627,14 @@ def startup_event():
615
  try:
616
  # Quick ping to check if local server is up
617
  logger.info("Pinging local Ollama server...")
618
- models = ollama.list() # Simple list call to verify connection
619
- model_names = [m['name'] for m in models.get('models', [])]
 
 
 
 
 
 
620
  logger.info(f"Ollama is READY on local M1 GPU. Models: {model_names}")
621
  update_live_state("LOCAL ENGINE READY", f"\n--- NEURAL CORE: OLLAMA READY (M1 GPU) ---\nINSTALLED MODELS: {', '.join(model_names)}\n")
622
  except Exception as e:
 
144
  {"month": "Jul 25", "count": 15000}, {"month": "Aug 25", "count": 16000},
145
  {"month": "Sep 25", "count": 18000}, {"month": "Oct 25", "count": 21000},
146
  {"month": "Nov 25", "count": 25000}, {"month": "Dec 25", "count": 32000}
147
+ ],
148
+ "ai_live": {
149
+ "status": "IDLE",
150
+ "model": "Local/Cloud Hybrid",
151
+ "prompt_preview": "System initialized.",
152
+ "raw_response": "Neural Core online."
153
+ }
154
+ },
155
+ "ai_live": {
156
+ "status": "IDLE",
157
+ "model": "Local/Cloud Hybrid",
158
+ "prompt_preview": "System initialized.",
159
+ "raw_response": "Neural Core online."
160
  }
161
  }
162
 
 
627
  try:
628
  # Quick ping to check if local server is up
629
  logger.info("Pinging local Ollama server...")
630
+ resp = ollama.list()
631
+ # Handle different library versions (dict vs object)
632
+ models_list = resp.models if hasattr(resp, 'models') else resp.get('models', [])
633
+ model_names = []
634
+ for m in models_list:
635
+ name = getattr(m, 'model', getattr(m, 'name', None)) or str(m)
636
+ model_names.append(name)
637
+
638
  logger.info(f"Ollama is READY on local M1 GPU. Models: {model_names}")
639
  update_live_state("LOCAL ENGINE READY", f"\n--- NEURAL CORE: OLLAMA READY (M1 GPU) ---\nINSTALLED MODELS: {', '.join(model_names)}\n")
640
  except Exception as e: