Emmanuel Acheampong committed on
Commit
2b76ecb
·
1 Parent(s): 50215f5

changes to the model names

Browse files
templates/chatbot.py CHANGED
@@ -42,9 +42,10 @@ if prompt := st.chat_input(CHAT_PLACEHOLDER):
42
 
43
  def get_stream():
44
  for chunk in stream:
45
- content = chunk.choices[0].delta.content
46
- if content:
47
- yield content
 
48
 
49
  response = st.write_stream(get_stream())
50
  st.session_state.messages.append({{"role": "assistant", "content": response}})
 
42
 
43
  def get_stream():
44
  for chunk in stream:
45
+ delta = chunk.choices[0].delta
46
+ # Skip reasoning_content from thinking models β€” yield only final answer
47
+ if delta.content:
48
+ yield delta.content
49
 
50
  response = st.write_stream(get_stream())
51
  st.session_state.messages.append({{"role": "assistant", "content": response}})
templates/comparison.py CHANGED
@@ -1,7 +1,6 @@
1
  COMPARISON_TEMPLATE = '''import streamlit as st
2
  from openai import OpenAI
3
  import os
4
- import threading
5
 
6
  TITLE = "{title}"
7
  DESCRIPTION = "{description}"
@@ -21,51 +20,70 @@ st.title(TITLE)
21
  st.caption(DESCRIPTION)
22
  st.divider()
23
 
24
- prompt = st.text_area("Enter your prompt:", height=120, placeholder="Ask anything to compare both models...")
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
25
 
26
  if st.button("⚡ Compare Models", type="primary", disabled=not prompt):
27
  col1, col2 = st.columns(2)
28
 
29
  with col1:
30
  st.subheader(f"🤖 {MODEL_A_LABEL}")
31
- stream_a = client.chat.completions.create(
32
- model=MODEL_A,
33
- messages=[
34
- {{"role": "system", "content": SYSTEM_PROMPT}},
35
- {{"role": "user", "content": prompt}},
36
- ],
37
- stream=True,
38
- )
39
-
40
- def stream_a_content():
41
- for chunk in stream_a:
42
- content = chunk.choices[0].delta.content
43
- if content:
44
- yield content
45
-
46
- st.write_stream(stream_a_content())
47
 
48
  with col2:
49
  st.subheader(f"🤖 {MODEL_B_LABEL}")
50
- stream_b = client.chat.completions.create(
51
- model=MODEL_B,
52
- messages=[
53
- {{"role": "system", "content": SYSTEM_PROMPT}},
54
- {{"role": "user", "content": prompt}},
55
- ],
56
- stream=True,
57
- )
58
-
59
- def stream_b_content():
60
- for chunk in stream_b:
61
- content = chunk.choices[0].delta.content
62
- if content:
63
- yield content
64
-
65
- st.write_stream(stream_b_content())
66
-
67
- with st.sidebar:
68
- st.caption("Powered by [Crusoe](https://crusoe.ai)")
69
- st.markdown(f"**Model A:** `{MODEL_A}`")
70
- st.markdown(f"**Model B:** `{MODEL_B}`")
71
  '''
 
1
  COMPARISON_TEMPLATE = '''import streamlit as st
2
  from openai import OpenAI
3
  import os
 
4
 
5
  TITLE = "{title}"
6
  DESCRIPTION = "{description}"
 
20
  st.caption(DESCRIPTION)
21
  st.divider()
22
 
23
+ with st.sidebar:
24
+ st.caption("Powered by [Crusoe](https://crusoe.ai)")
25
+ st.markdown(f"**Model A:** `{{MODEL_A}}`")
26
+ st.markdown(f"**Model B:** `{{MODEL_B}}`")
27
+
28
+ prompt = st.text_area(
29
+ "Enter your prompt:",
30
+ height=120,
31
+ placeholder="Ask anything to compare both models...",
32
+ )
33
+
34
+
35
+ def _stream_response(model: str) -> str:
36
+ """Stream a response from a model, handling both content and reasoning_content."""
37
+ full_response = ""
38
+ thinking_placeholder = st.empty()
39
+ answer_placeholder = st.empty()
40
+ thinking_text = ""
41
+ answer_text = ""
42
+
43
+ stream = client.chat.completions.create(
44
+ model=model,
45
+ messages=[
46
+ {{"role": "system", "content": SYSTEM_PROMPT}},
47
+ {{"role": "user", "content": prompt}},
48
+ ],
49
+ stream=True,
50
+ )
51
+
52
+ for chunk in stream:
53
+ delta = chunk.choices[0].delta
54
+
55
+ # Handle reasoning/thinking content (DeepSeek R1, Kimi-K2-Thinking, etc.)
56
+ reasoning = getattr(delta, "reasoning_content", None)
57
+ if reasoning:
58
+ thinking_text += reasoning
59
+ thinking_placeholder.markdown(
60
+ f"<details><summary>💭 Thinking...</summary>\n\n{thinking_text}\n\n</details>",
61
+ unsafe_allow_html=True,
62
+ )
63
+
64
+ # Handle final answer content
65
+ if delta.content:
66
+ answer_text += delta.content
67
+ answer_placeholder.markdown(answer_text)
68
+ full_response += delta.content
69
+
70
+ return full_response
71
+
72
 
73
  if st.button("⚡ Compare Models", type="primary", disabled=not prompt):
74
  col1, col2 = st.columns(2)
75
 
76
  with col1:
77
  st.subheader(f"🤖 {MODEL_A_LABEL}")
78
+ try:
79
+ _stream_response(MODEL_A)
80
+ except Exception as e:
81
+ st.error(f"Error: {{e}}")
 
 
 
 
 
 
 
 
 
 
 
 
82
 
83
  with col2:
84
  st.subheader(f"🤖 {MODEL_B_LABEL}")
85
+ try:
86
+ _stream_response(MODEL_B)
87
+ except Exception as e:
88
+ st.error(f"Error: {{e}}")
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
89
  '''
templates/dashboard.py CHANGED
@@ -40,9 +40,10 @@ with col_output:
40
 
41
  def get_stream():
42
  for chunk in stream:
43
- content = chunk.choices[0].delta.content
44
- if content:
45
- yield content
 
46
 
47
  st.write_stream(get_stream())
48
  else:
 
40
 
41
  def get_stream():
42
  for chunk in stream:
43
+ delta = chunk.choices[0].delta
44
+ # Skip reasoning_content from thinking models β€” yield only final answer
45
+ if delta.content:
46
+ yield delta.content
47
 
48
  st.write_stream(get_stream())
49
  else: