Gregor Betz
committed on
add health check
Browse files
app.py
CHANGED
|
@@ -98,10 +98,11 @@ try:
|
|
| 98 |
except Exception as exc:
|
| 99 |
logging.error(f"Error processing config: {exc}")
|
| 100 |
gr.Error(f"Error processing config: {exc}")
|
| 101 |
-
|
| 102 |
logging.info(f"Reasoning guide expert model is {guide_kwargs['expert_model']}.")
|
| 103 |
|
| 104 |
|
|
|
|
| 105 |
def new_conversation_id():
|
| 106 |
conversation_id = str(uuid.uuid4())
|
| 107 |
print(f"New conversation with conversation ID: {conversation_id}")
|
|
@@ -135,29 +136,16 @@ def add_message(history, message, conversation_id):
|
|
| 135 |
|
| 136 |
async def bot(
|
| 137 |
history,
|
| 138 |
-
#client_kwargs,
|
| 139 |
-
#guide_kwargs,
|
| 140 |
conversation_id,
|
| 141 |
progress=gr.Progress(),
|
| 142 |
):
|
| 143 |
|
| 144 |
-
client_llm = setup_client_llm(**client_kwargs)
|
| 145 |
-
|
| 146 |
-
if not client_llm:
|
| 147 |
-
raise gr.Error(
|
| 148 |
-
"Failed to set up client LLM.",
|
| 149 |
-
duration=0
|
| 150 |
-
)
|
| 151 |
-
|
| 152 |
print(f"History (conversation: {conversation_id}): {history}")
|
| 153 |
history_langchain_format = history_to_langchain_format(history)
|
| 154 |
|
| 155 |
# use guide always and exclusively at first turn
|
| 156 |
if len(history_langchain_format) <= 1:
|
| 157 |
|
| 158 |
-
guide_config = RecursiveBalancingGuideConfig(**guide_kwargs)
|
| 159 |
-
guide = RecursiveBalancingGuide(tourist_llm=client_llm, config=guide_config)
|
| 160 |
-
|
| 161 |
message = history[-1][0]
|
| 162 |
|
| 163 |
try:
|
|
@@ -211,6 +199,19 @@ with gr.Blocks() as demo:
|
|
| 211 |
conversation_id = gr.State(str(uuid.uuid4()))
|
| 212 |
tos_approved = gr.State(False)
|
| 213 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 214 |
with gr.Tab(label="Chatbot", visible=False) as chatbot_tab:
|
| 215 |
|
| 216 |
# chatbot
|
|
|
|
| 98 |
except Exception as exc:
|
| 99 |
logging.error(f"Error processing config: {exc}")
|
| 100 |
gr.Error(f"Error processing config: {exc}")
|
| 101 |
+
|
| 102 |
logging.info(f"Reasoning guide expert model is {guide_kwargs['expert_model']}.")
|
| 103 |
|
| 104 |
|
| 105 |
+
|
| 106 |
def new_conversation_id():
|
| 107 |
conversation_id = str(uuid.uuid4())
|
| 108 |
print(f"New conversation with conversation ID: {conversation_id}")
|
|
|
|
| 136 |
|
| 137 |
async def bot(
|
| 138 |
history,
|
|
|
|
|
|
|
| 139 |
conversation_id,
|
| 140 |
progress=gr.Progress(),
|
| 141 |
):
|
| 142 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 143 |
print(f"History (conversation: {conversation_id}): {history}")
|
| 144 |
history_langchain_format = history_to_langchain_format(history)
|
| 145 |
|
| 146 |
# use guide always and exclusively at first turn
|
| 147 |
if len(history_langchain_format) <= 1:
|
| 148 |
|
|
|
|
|
|
|
|
|
|
| 149 |
message = history[-1][0]
|
| 150 |
|
| 151 |
try:
|
|
|
|
| 199 |
conversation_id = gr.State(str(uuid.uuid4()))
|
| 200 |
tos_approved = gr.State(False)
|
| 201 |
|
| 202 |
+
# set up client and guide
|
| 203 |
+
client_llm = setup_client_llm(**client_kwargs)
|
| 204 |
+
guide_config = RecursiveBalancingGuideConfig(**guide_kwargs)
|
| 205 |
+
guide = RecursiveBalancingGuide(tourist_llm=client_llm, config=guide_config)
|
| 206 |
+
|
| 207 |
+
# health check
|
| 208 |
+
health_check = await guide.health_check()
|
| 209 |
+
if health_check.get("status", None) != "ok":
|
| 210 |
+
health_msg = " | ".join([f"{k}: {v}" for k, v in health_check.items()])
|
| 211 |
+
logging.error(f"Guide health check failed: {health_msg}")
|
| 212 |
+
gr.Error(f"LLM availability / health check failed: {health_msg}")
|
| 213 |
+
logging.info(f"Health check: {health_check}")
|
| 214 |
+
|
| 215 |
with gr.Tab(label="Chatbot", visible=False) as chatbot_tab:
|
| 216 |
|
| 217 |
# chatbot
|