Gregor Betz
committed on
check kwargs
Browse files
app.py
CHANGED
|
@@ -213,20 +213,21 @@ with gr.Blocks() as demo:
|
|
| 213 |
|
| 214 |
# set up client and guide
|
| 215 |
if not client_kwargs["inference_server_url"]:
|
| 216 |
-
gr.Error(
|
| 217 |
"⚠️ Please set the client model inference endpoint in the config.yaml file.",
|
| 218 |
duration=0
|
| 219 |
)
|
| 220 |
if not guide_kwargs["inference_server_url"]:
|
| 221 |
-
gr.Error(
|
| 222 |
"⚠️ Please set the expert model inference endpoint in the config.yaml file.",
|
| 223 |
duration=0
|
| 224 |
)
|
| 225 |
if not guide_kwargs["classifier_kwargs"]["inference_server_url"]:
|
| 226 |
-
gr.Error(
|
| 227 |
"⚠️ Please set the classifier model inference endpoint in the config.yaml file.",
|
| 228 |
duration=0
|
| 229 |
)
|
|
|
|
| 230 |
client_llm = setup_client_llm(**client_kwargs)
|
| 231 |
guide_config = RecursiveBalancingGuideConfig(**guide_kwargs)
|
| 232 |
guide = RecursiveBalancingGuide(tourist_llm=client_llm, config=guide_config)
|
|
|
|
| 213 |
|
| 214 |
# set up client and guide
|
| 215 |
if not client_kwargs["inference_server_url"]:
|
| 216 |
+
raise gr.Error(
|
| 217 |
"⚠️ Please set the client model inference endpoint in the config.yaml file.",
|
| 218 |
duration=0
|
| 219 |
)
|
| 220 |
if not guide_kwargs["inference_server_url"]:
|
| 221 |
+
raise gr.Error(
|
| 222 |
"⚠️ Please set the expert model inference endpoint in the config.yaml file.",
|
| 223 |
duration=0
|
| 224 |
)
|
| 225 |
if not guide_kwargs["classifier_kwargs"]["inference_server_url"]:
|
| 226 |
+
raise gr.Error(
|
| 227 |
"⚠️ Please set the classifier model inference endpoint in the config.yaml file.",
|
| 228 |
duration=0
|
| 229 |
)
|
| 230 |
+
|
| 231 |
client_llm = setup_client_llm(**client_kwargs)
|
| 232 |
guide_config = RecursiveBalancingGuideConfig(**guide_kwargs)
|
| 233 |
guide = RecursiveBalancingGuide(tourist_llm=client_llm, config=guide_config)
|