resumesearch committed on
Commit
2fcf919
·
verified ·
1 Parent(s): e4b7b93

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +76 -31
app.py CHANGED
@@ -1,6 +1,6 @@
1
  """
2
- app.py – Advanced Chatbot with Model Selector, Context & Reply Token Sliders
3
- (OpenAI Python SDK β‰₯1.0.0)
4
  """
5
 
6
  import os
@@ -16,11 +16,11 @@ client = OpenAI(api_key=os.getenv("OPENAI_API_KEY", "").strip())
16
  env_models = os.getenv("OPENAI_MODEL_LIST", "gpt-4-32k,gpt-4,gpt-3.5-turbo")
17
  ALL_MODELS = [m.strip() for m in env_models.split(",") if m.strip()]
18
 
19
- # β€”β€”β€” Default limits β€”β€”β€”
20
  DEFAULT_MAX_CONTEXT = 32768 # tokens
21
- DEFAULT_REPLY_MAX = 2048 # tokens
22
- BUFFER_TOKENS = 500 # reserved for the model’s answer
23
- TEMPERATURE = 0.3
24
 
25
  def count_tokens(text: str, model: str) -> int:
26
  enc = tiktoken.encoding_for_model(model)
@@ -34,17 +34,17 @@ def trim_conversation(convo: list[dict], model: str, max_context: int) -> list[d
34
  tokens = [count_tokens(m["content"], model) for m in convo]
35
  total = sum(tokens)
36
  while total + BUFFER_TOKENS > max_context and len(convo) > 2:
37
- convo.pop(1) # oldest user
38
- convo.pop(1) # its assistant reply
39
  tokens = [count_tokens(m["content"], model) for m in convo]
40
  total = sum(tokens)
41
  return convo
42
 
43
  def safe_chat(convo: list[dict], max_context: int, max_reply: int, model_list: list[str]):
44
  """
45
- Try each model in model_list:
46
- - on model-not-found: skip to next
47
- - on context-length: trim history & retry same model once
48
  """
49
  last_exc = None
50
  for model in model_list:
@@ -57,11 +57,11 @@ def safe_chat(convo: list[dict], max_context: int, max_reply: int, model_list: l
57
  )
58
  except Exception as e:
59
  msg = str(e).lower()
60
- # Model unavailable β†’ fallback
61
  if "does not exist" in msg or "model_not_found" in msg or "404" in msg:
62
  last_exc = e
63
  continue
64
- # Context-length error β†’ slide window & retry
65
  if "context length" in msg or "maximum context length" in msg:
66
  trimmed = trim_conversation(convo.copy(), model, max_context)
67
  try:
@@ -74,9 +74,9 @@ def safe_chat(convo: list[dict], max_context: int, max_reply: int, model_list: l
74
  except Exception as e2:
75
  last_exc = e2
76
  continue
77
- # Other errors β†’ bubble up
78
  raise
79
- # All models failed
80
  raise last_exc or RuntimeError("All models failed in safe_chat()")
81
 
82
  def chat_handler(
@@ -93,7 +93,7 @@ def chat_handler(
93
  if not client.api_key:
94
  return history, "❌ OPENAI_API_KEY not set."
95
 
96
- # Build the conversation payload
97
  convo = [{"role":"system","content":system_prompt}]
98
  for u, b in history or []:
99
  convo.append({"role":"user", "content":u})
@@ -114,15 +114,26 @@ def chat_handler(
114
  history.append((user_message, reply))
115
  return history, ""
116
 
 
 
 
 
117
  # β€”β€”β€” Gradio UI β€”β€”β€”
118
- with gr.Blocks(title="πŸ€– Advanced Chatbot") as demo:
 
 
 
 
119
  gr.Markdown(
120
  """
121
- # Advanced Chatbot
122
- - **Model selector**: pick any supported OpenAI model
123
- - **Context slider**: adjust how many tokens of history to keep
124
- - **Reply slider**: adjust maximum tokens in the answer
125
- - **Sliding-window**: oldest history drops automatically when over limit
 
 
 
126
  """
127
  )
128
 
@@ -130,31 +141,61 @@ with gr.Blocks(title="πŸ€– Advanced Chatbot") as demo:
130
  model_dropdown = gr.Dropdown(
131
  choices=ALL_MODELS,
132
  value=ALL_MODELS[0],
133
- label="Choose Model"
134
  )
135
  context_slider = gr.Slider(
136
  minimum=1000, maximum=DEFAULT_MAX_CONTEXT,
137
  step=256, value=DEFAULT_MAX_CONTEXT,
138
- label="Max Context Tokens"
139
  )
140
  reply_slider = gr.Slider(
141
  minimum=100, maximum=8192,
142
  step=100, value=DEFAULT_REPLY_MAX,
143
- label="Max Reply Tokens"
 
 
 
 
 
 
 
 
 
 
 
 
 
144
  )
 
 
 
 
 
 
145
 
146
  system_txt = gr.Textbox(
147
  lines=3,
148
  value=(
149
- "You are an expert software engineer specializing in Python and C#. "
150
- "Provide detailed, production-grade answers and include code snippets when appropriate."
151
  ),
152
- label="System Prompt"
153
  )
154
 
155
- chatbot = gr.Chatbot(label="Conversation")
156
- user_input = gr.Textbox(placeholder="Type your message here...", label="You")
157
- send_btn = gr.Button("Send")
 
 
 
 
 
 
 
 
 
 
 
158
 
159
  send_btn.click(
160
  fn=chat_handler,
@@ -168,6 +209,10 @@ with gr.Blocks(title="πŸ€– Advanced Chatbot") as demo:
168
  ],
169
  outputs=[chatbot, user_input]
170
  )
 
 
 
 
171
 
172
  if __name__ == "__main__":
173
  demo.launch()
 
1
  """
2
+ app.py – Cool Coding Assistant with Model Selector, Context/Reply Sliders, Examples & Clear Chat
3
+ (OpenAI Python SDK β‰₯1.0.0 + Gradio 5.34.1 + tiktoken)
4
  """
5
 
6
  import os
 
16
  env_models = os.getenv("OPENAI_MODEL_LIST", "gpt-4-32k,gpt-4,gpt-3.5-turbo")
17
  ALL_MODELS = [m.strip() for m in env_models.split(",") if m.strip()]
18
 
19
+ # β€”β€”β€” Configuration β€”β€”β€”
20
  DEFAULT_MAX_CONTEXT = 32768 # tokens
21
+ BUFFER_TOKENS = 500 # reserved for the model’s reply
22
+ DEFAULT_REPLY_MAX = 2048 # tokens for the answer
23
+ TEMPERATURE = 0.3 # creativity vs determinism
24
 
25
  def count_tokens(text: str, model: str) -> int:
26
  enc = tiktoken.encoding_for_model(model)
 
34
  tokens = [count_tokens(m["content"], model) for m in convo]
35
  total = sum(tokens)
36
  while total + BUFFER_TOKENS > max_context and len(convo) > 2:
37
+ convo.pop(1) # remove oldest user
38
+ convo.pop(1) # remove corresponding assistant reply
39
  tokens = [count_tokens(m["content"], model) for m in convo]
40
  total = sum(tokens)
41
  return convo
42
 
43
  def safe_chat(convo: list[dict], max_context: int, max_reply: int, model_list: list[str]):
44
  """
45
+ 1) Try each model in model_list in order.
46
+ 2) On model-not-found: skip to next.
47
+ 3) On context-length error: trim history & retry same model once.
48
  """
49
  last_exc = None
50
  for model in model_list:
 
57
  )
58
  except Exception as e:
59
  msg = str(e).lower()
60
+ # model-not-found β†’ fallback
61
  if "does not exist" in msg or "model_not_found" in msg or "404" in msg:
62
  last_exc = e
63
  continue
64
+ # context-length β†’ trim & retry
65
  if "context length" in msg or "maximum context length" in msg:
66
  trimmed = trim_conversation(convo.copy(), model, max_context)
67
  try:
 
74
  except Exception as e2:
75
  last_exc = e2
76
  continue
77
+ # other errors β†’ bubble up
78
  raise
79
+ # none succeeded
80
  raise last_exc or RuntimeError("All models failed in safe_chat()")
81
 
82
  def chat_handler(
 
93
  if not client.api_key:
94
  return history, "❌ OPENAI_API_KEY not set."
95
 
96
+ # Build conversation payload
97
  convo = [{"role":"system","content":system_prompt}]
98
  for u, b in history or []:
99
  convo.append({"role":"user", "content":u})
 
114
  history.append((user_message, reply))
115
  return history, ""
116
 
117
+ def clear_chat_handler() -> list:
118
+ """Clear all previous chat messages."""
119
+ return []
120
+
121
  # β€”β€”β€” Gradio UI β€”β€”β€”
122
+ with gr.Blocks(
123
+ title="πŸ€– CodeBot: Your Cool Coding Assistant",
124
+ theme=gr.themes.SoftDark()
125
+ ) as demo:
126
+
127
  gr.Markdown(
128
  """
129
+ ## CodeBot
130
+ A stylish coding assistant with:
131
+ - **Model selector**: pick your LLM
132
+ - **Context slider**: control how much history to keep
133
+ - **Reply slider**: set max response length
134
+ - **Examples**: load sample coding questions
135
+ - **Clear Chat**: reset conversation anytime
136
+ - **Sliding-window**: auto-drop oldest history when over limit
137
  """
138
  )
139
 
 
141
  model_dropdown = gr.Dropdown(
142
  choices=ALL_MODELS,
143
  value=ALL_MODELS[0],
144
+ label="πŸ” Choose Model"
145
  )
146
  context_slider = gr.Slider(
147
  minimum=1000, maximum=DEFAULT_MAX_CONTEXT,
148
  step=256, value=DEFAULT_MAX_CONTEXT,
149
+ label="πŸ—‚οΈ Max Context Tokens"
150
  )
151
  reply_slider = gr.Slider(
152
  minimum=100, maximum=8192,
153
  step=100, value=DEFAULT_REPLY_MAX,
154
+ label="✍️ Max Reply Tokens"
155
+ )
156
+
157
+ # Example questions
158
+ examples = [
159
+ "How do I implement quicksort in Python?",
160
+ "Show me a C# example using LINQ to group items.",
161
+ "Explain async/await in Python with sample code.",
162
+ "How to connect to SQL Server using C#?"
163
+ ]
164
+ with gr.Row():
165
+ example_dropdown = gr.Dropdown(
166
+ choices=examples,
167
+ label="πŸ’‘ Examples"
168
  )
169
+ example_btn = gr.Button("πŸ“₯ Load Example")
170
+ example_btn.click(
171
+ fn=lambda q: q or "",
172
+ inputs=[example_dropdown],
173
+ outputs=[gr.update(component_id="user_input")]
174
+ )
175
 
176
  system_txt = gr.Textbox(
177
  lines=3,
178
  value=(
179
+ "You are CodeBot, an expert software engineer specializing in Python and C#. "
180
+ "Provide detailed, production-grade answers including runnable code snippets."
181
  ),
182
+ label="πŸ’» System Prompt"
183
  )
184
 
185
+ chatbot = gr.Chatbot(
186
+ value=[("", "πŸ‘‹ Hello! I'm CodeBot. How can I help you with code today?")],
187
+ label="πŸ’¬ Conversation",
188
+ height=500
189
+ )
190
+ user_input = gr.Textbox(
191
+ placeholder="Type your question or paste code here...",
192
+ label="πŸ“ Your Message",
193
+ elem_id="user_input"
194
+ )
195
+
196
+ with gr.Row():
197
+ send_btn = gr.Button("πŸš€ Send")
198
+ clear_btn = gr.Button("πŸ—‘οΈ Clear Chat", variant="secondary")
199
 
200
  send_btn.click(
201
  fn=chat_handler,
 
209
  ],
210
  outputs=[chatbot, user_input]
211
  )
212
+ clear_btn.click(
213
+ fn=clear_chat_handler,
214
+ outputs=[chatbot]
215
+ )
216
 
217
  if __name__ == "__main__":
218
  demo.launch()