Spaces:
Running
on
Zero
Running
on
Zero
Just a bit more UI improvements
Browse files
- Rephrase the article link sentence
- Add brain and bubble icons for the different steps
app.py
CHANGED
|
@@ -94,10 +94,7 @@ def bot(history: list, max_num_tokens: int, final_num_tokens: int):
|
|
| 94 |
gr.ChatMessage(
|
| 95 |
role="assistant",
|
| 96 |
content=str(""),
|
| 97 |
-
metadata={
|
| 98 |
-
"title": "Thinking",
|
| 99 |
-
"status": "pending",
|
| 100 |
-
},
|
| 101 |
)
|
| 102 |
)
|
| 103 |
|
|
@@ -124,7 +121,7 @@ def bot(history: list, max_num_tokens: int, final_num_tokens: int):
|
|
| 124 |
# rebuild the history with the new content
|
| 125 |
history[-1].content += prepend.format(question=question)
|
| 126 |
if ANSWER_MARKER in prepend:
|
| 127 |
-
history[-1].metadata
|
| 128 |
# stop thinking, this is the answer now (no metadata for intermediate steps)
|
| 129 |
history.append(gr.ChatMessage(role="assistant", content=""))
|
| 130 |
for token in streamer:
|
|
@@ -146,7 +143,7 @@ with gr.Blocks(fill_height=True, title="Making any model reasoning") as demo:
|
|
| 146 |
This interface uses *{model_name}* model which is **not** a reasoning model. The used method
|
| 147 |
is only to force some "reasoning" steps with prefixes to help the model to enhance the answer.
|
| 148 |
|
| 149 |
-
See related article here: [Make any model reasoning](https://huggingface.co/blog/Metal3d/making-any-model-reasoning)
|
| 150 |
""")
|
| 151 |
chatbot = gr.Chatbot(
|
| 152 |
scale=1,
|
|
|
|
| 94 |
gr.ChatMessage(
|
| 95 |
role="assistant",
|
| 96 |
content=str(""),
|
| 97 |
+
metadata={"title": "🧠 Thinking...", "status": "pending"},
|
|
|
|
|
|
|
|
|
|
| 98 |
)
|
| 99 |
)
|
| 100 |
|
|
|
|
| 121 |
# rebuild the history with the new content
|
| 122 |
history[-1].content += prepend.format(question=question)
|
| 123 |
if ANSWER_MARKER in prepend:
|
| 124 |
+
history[-1].metadata = {"title": "💭 Thoughts", "status": "done"}
|
| 125 |
# stop thinking, this is the answer now (no metadata for intermediate steps)
|
| 126 |
history.append(gr.ChatMessage(role="assistant", content=""))
|
| 127 |
for token in streamer:
|
|
|
|
| 143 |
This interface uses *{model_name}* model which is **not** a reasoning model. The used method
|
| 144 |
is only to force some "reasoning" steps with prefixes to help the model to enhance the answer.
|
| 145 |
|
| 146 |
+
See my related article here: [Make any model reasoning](https://huggingface.co/blog/Metal3d/making-any-model-reasoning)
|
| 147 |
""")
|
| 148 |
chatbot = gr.Chatbot(
|
| 149 |
scale=1,
|