Gregor Betz
committed on
add comments
Browse files: - app.py +5 -4
- config.yaml +1 -1
app.py
CHANGED
|
@@ -42,7 +42,8 @@ EXAMPLES = [
|
|
| 42 |
TITLE = """<div align=left>
|
| 43 |
<h1>🪂 Logikon <i>Guided Reasoning™️</i> Demo Chatbot</h1>
|
| 44 |
</div>
|
| 45 |
-
<p>This is a TEMPLATE
|
|
|
|
| 46 |
in the <code>config.yaml</code> file to get started!</p>
|
| 47 |
"""
|
| 48 |
|
|
@@ -215,15 +216,15 @@ with gr.Blocks() as demo:
|
|
| 215 |
|
| 216 |
if not client_kwargs["inference_server_url"]:
|
| 217 |
gr.Markdown(
|
| 218 |
-
"⚠️ **Error:** Please set the client model inference endpoint in the config.yaml file."
|
| 219 |
)
|
| 220 |
if not guide_kwargs["inference_server_url"]:
|
| 221 |
gr.Markdown(
|
| 222 |
-
"⚠️ **Error:** Please set the expert model inference endpoint in the config.yaml file."
|
| 223 |
)
|
| 224 |
if not guide_kwargs["classifier_kwargs"]["inference_server_url"]:
|
| 225 |
gr.Markdown(
|
| 226 |
-
"⚠️ **Error:** Please set the classifier model inference endpoint in the config.yaml file."
|
| 227 |
)
|
| 228 |
|
| 229 |
# set up client and guide
|
|
|
|
| 42 |
TITLE = """<div align=left>
|
| 43 |
<h1>🪂 Logikon <i>Guided Reasoning™️</i> Demo Chatbot</h1>
|
| 44 |
</div>
|
| 45 |
+
<p>This is a <b>TEMPLATE</b>:<br/>
|
| 46 |
+
➡️ Duplicate this space and configure your own inference endpoints
|
| 47 |
in the <code>config.yaml</code> file to get started!</p>
|
| 48 |
"""
|
| 49 |
|
|
|
|
| 216 |
|
| 217 |
if not client_kwargs["inference_server_url"]:
|
| 218 |
gr.Markdown(
|
| 219 |
+
"⚠️ **Error:** Please set the client model inference endpoint in the `config.yaml` file."
|
| 220 |
)
|
| 221 |
if not guide_kwargs["inference_server_url"]:
|
| 222 |
gr.Markdown(
|
| 223 |
+
"⚠️ **Error:** Please set the expert model inference endpoint in the `config.yaml` file."
|
| 224 |
)
|
| 225 |
if not guide_kwargs["classifier_kwargs"]["inference_server_url"]:
|
| 226 |
gr.Markdown(
|
| 227 |
+
"⚠️ **Error:** Please set the classifier model inference endpoint in the `config.yaml` file."
|
| 228 |
)
|
| 229 |
|
| 230 |
# set up client and guide
|
config.yaml
CHANGED
|
@@ -7,7 +7,7 @@ client_llm:
|
|
| 7 |
expert_llm:
|
| 8 |
url: "" # <-- start your own inference endpoint and provide url here (or use https://api-inference.huggingface.co/models/meta-llama/Meta-Llama-3-70B-Instruct)
|
| 9 |
model_id: "meta-llama/Meta-Llama-3-70B-Instruct"
|
| 10 |
-
backend: HFChat
|
| 11 |
classifier_llm:
|
| 12 |
model_id: "MoritzLaurer/DeBERTa-v3-base-mnli-fever-anli"
|
| 13 |
url: "" # <-- start your own inference endpoint of classifier model and provide url here
|
|
|
|
| 7 |
expert_llm:
|
| 8 |
url: "" # <-- start your own inference endpoint and provide url here (or use https://api-inference.huggingface.co/models/meta-llama/Meta-Llama-3-70B-Instruct)
|
| 9 |
model_id: "meta-llama/Meta-Llama-3-70B-Instruct"
|
| 10 |
+
backend: HFChat # <-- Currently supported: HFChat / VLLM / Fireworks
|
| 11 |
classifier_llm:
|
| 12 |
model_id: "MoritzLaurer/DeBERTa-v3-base-mnli-fever-anli"
|
| 13 |
url: "" # <-- start your own inference endpoint of classifier model and provide url here
|