satyakimitra committed on
Commit
3c028c3
·
1 Parent(s): f616fd0

Fix: Frontend URL handling changed

Browse files
Files changed (2) hide show
  1. app.py +4 -4
  2. frontend/index.html +1 -1
app.py CHANGED
@@ -743,7 +743,7 @@ class AppState:
743
  vector_store_status = "Ready"
744
 
745
  # Get model info
746
- current_model = settings.OLLAMA_MODEL
747
  embedding_model = settings.EMBEDDING_MODEL
748
 
749
  # Uptime
@@ -1196,7 +1196,7 @@ async def get_system_info():
1196
  llm_info = state.llm_client.get_provider_info()
1197
 
1198
  # Get current configuration
1199
- current_config = {"inference_model" : settings.OLLAMA_MODEL,
1200
  "embedding_model" : settings.EMBEDDING_MODEL,
1201
  "vector_weight" : settings.VECTOR_WEIGHT,
1202
  "bm25_weight" : settings.BM25_WEIGHT,
@@ -1610,7 +1610,7 @@ async def chat(request: ChatRequest):
1610
  # Check LLM health (required for both general and RAG queries)
1611
  if not state.llm_client.check_health():
1612
  raise HTTPException(status_code = 503,
1613
- detail = "LLM service unavailable. Please ensure Ollama is running.",
1614
  )
1615
 
1616
  try:
@@ -1812,7 +1812,7 @@ async def get_configuration():
1812
  # Get system health
1813
  health_status = state.get_system_health()
1814
 
1815
- return {"configuration" : {"inference_model" : settings.OLLAMA_MODEL,
1816
  "embedding_model" : settings.EMBEDDING_MODEL,
1817
  "chunking_strategy" : "adaptive",
1818
  "chunk_size" : settings.FIXED_CHUNK_SIZE,
 
743
  vector_store_status = "Ready"
744
 
745
  # Get model info
746
+ current_model = settings.OPENAI_MODEL
747
  embedding_model = settings.EMBEDDING_MODEL
748
 
749
  # Uptime
 
1196
  llm_info = state.llm_client.get_provider_info()
1197
 
1198
  # Get current configuration
1199
+ current_config = {"inference_model" : settings.OPENAI_MODEL,
1200
  "embedding_model" : settings.EMBEDDING_MODEL,
1201
  "vector_weight" : settings.VECTOR_WEIGHT,
1202
  "bm25_weight" : settings.BM25_WEIGHT,
 
1610
  # Check LLM health (required for both general and RAG queries)
1611
  if not state.llm_client.check_health():
1612
  raise HTTPException(status_code = 503,
1613
+ detail = "LLM service unavailable. Please ensure OpenAI API Key is available or Ollama is running.",
1614
  )
1615
 
1616
  try:
 
1812
  # Get system health
1813
  health_status = state.get_system_health()
1814
 
1815
+ return {"configuration" : {"inference_model" : settings.OPENAI_MODEL,
1816
  "embedding_model" : settings.EMBEDDING_MODEL,
1817
  "chunking_strategy" : "adaptive",
1818
  "chunk_size" : settings.FIXED_CHUNK_SIZE,
frontend/index.html CHANGED
@@ -307,7 +307,7 @@
307
  </div>
308
  <div>
309
  <div class="font-bold text-gray-900 text-lg">QuerySphere</div>
310
- <div class="text-xs text-gray-500 -mt-1">RAG platform for document Q&A local LLM or cloud deployment integration</div>
311
  </div>
312
  </div>
313
  </div>
 
307
  </div>
308
  <div>
309
  <div class="font-bold text-gray-900 text-lg">QuerySphere</div>
310
+ <div class="text-xs text-gray-500 -mt-1">RAG platform for document Q&A with local LLM or Cloud LLM API integration</div>
311
  </div>
312
  </div>
313
  </div>