resumesearch committed on
Commit
5a82bf8
Β·
verified Β·
1 Parent(s): 0c403bd

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +10 -34
app.py CHANGED
@@ -27,10 +27,6 @@ def count_tokens(text: str, model: str) -> int:
27
  return len(enc.encode(text))
28
 
29
  def trim_conversation(convo: list[dict], model: str, max_context: int) -> list[dict]:
30
- """
31
- Slide-window: drop the oldest user/assistant turns until
32
- total tokens + BUFFER_TOKENS ≀ max_context.
33
- """
34
  tokens = [count_tokens(m["content"], model) for m in convo]
35
  total = sum(tokens)
36
  while total + BUFFER_TOKENS > max_context and len(convo) > 2:
@@ -41,11 +37,6 @@ def trim_conversation(convo: list[dict], model: str, max_context: int) -> list[d
41
  return convo
42
 
43
  def safe_chat(convo: list[dict], max_context: int, max_reply: int, model_list: list[str]):
44
- """
45
- 1) Try each model in model_list in order.
46
- 2) On model-not-found: skip to next.
47
- 3) On context-length error: trim history & retry same model once.
48
- """
49
  last_exc = None
50
  for model in model_list:
51
  try:
@@ -57,11 +48,9 @@ def safe_chat(convo: list[dict], max_context: int, max_reply: int, model_list: l
57
  )
58
  except Exception as e:
59
  msg = str(e).lower()
60
- # model-not-found β†’ fallback
61
  if "does not exist" in msg or "model_not_found" in msg or "404" in msg:
62
  last_exc = e
63
  continue
64
- # context-length β†’ trim & retry
65
  if "context length" in msg or "maximum context length" in msg:
66
  trimmed = trim_conversation(convo.copy(), model, max_context)
67
  try:
@@ -74,9 +63,7 @@ def safe_chat(convo: list[dict], max_context: int, max_reply: int, model_list: l
74
  except Exception as e2:
75
  last_exc = e2
76
  continue
77
- # other errors β†’ bubble up
78
  raise
79
- # none succeeded
80
  raise last_exc or RuntimeError("All models failed in safe_chat()")
81
 
82
  def chat_handler(
@@ -87,20 +74,17 @@ def chat_handler(
87
  max_context: int,
88
  max_reply: int
89
  ) -> tuple[list[tuple[str, str]], str]:
90
- """Gradio handler: builds convo, calls safe_chat, updates history."""
91
  if not user_message.strip():
92
  return history, ""
93
  if not client.api_key:
94
  return history, "❌ OPENAI_API_KEY not set."
95
 
96
- # Build conversation payload
97
  convo = [{"role":"system","content":system_prompt}]
98
  for u, b in history or []:
99
  convo.append({"role":"user", "content":u})
100
  convo.append({"role":"assistant", "content":b})
101
  convo.append({"role":"user","content":user_message})
102
 
103
- # Prepare model list: user choice first, then the rest
104
  fallback = [m for m in ALL_MODELS if m != selected_model]
105
  models_to_try = [selected_model] + fallback
106
 
@@ -115,18 +99,13 @@ def chat_handler(
115
  return history, ""
116
 
117
  def clear_chat_handler() -> list:
118
- """Clear all previous chat messages."""
119
  return []
120
 
121
  # β€”β€”β€” Gradio UI β€”β€”β€”
122
- with gr.Blocks(
123
- title="πŸ€– CodeBot: Your Cool Coding Assistant"
124
- ) as demo:
125
-
126
  gr.Markdown(
127
  """
128
  ## CodeBot
129
- A stylish coding assistant with:
130
  - **Model selector**: pick your LLM
131
  - **Context slider**: control how much history to keep
132
  - **Reply slider**: set max response length
@@ -153,27 +132,17 @@ with gr.Blocks(
153
  label="✍️ Max Reply Tokens"
154
  )
155
 
156
- # Example questions
157
  examples = [
158
  "How do I implement quicksort in Python?",
159
  "Show me a C# example using LINQ to group items.",
160
  "Explain async/await in Python with sample code.",
161
  "How to connect to SQL Server using C#?"
162
  ]
 
163
  with gr.Row():
164
- example_dropdown = gr.Dropdown(
165
- choices=examples,
166
- label="πŸ’‘ Examples"
167
- )
168
  example_btn = gr.Button("πŸ“₯ Load Example")
169
 
170
- # Prefill user_input when an example is selected
171
- example_btn.click(
172
- fn=lambda q: q or "",
173
- inputs=[example_dropdown],
174
- outputs=[gr.update(component_id="user_input")]
175
- )
176
-
177
  system_txt = gr.Textbox(
178
  lines=3,
179
  value=(
@@ -194,6 +163,13 @@ with gr.Blocks(
194
  elem_id="user_input"
195
  )
196
 
 
 
 
 
 
 
 
197
  with gr.Row():
198
  send_btn = gr.Button("πŸš€ Send")
199
  clear_btn = gr.Button("πŸ—‘οΈ Clear Chat", variant="secondary")
 
27
  return len(enc.encode(text))
28
 
29
  def trim_conversation(convo: list[dict], model: str, max_context: int) -> list[dict]:
 
 
 
 
30
  tokens = [count_tokens(m["content"], model) for m in convo]
31
  total = sum(tokens)
32
  while total + BUFFER_TOKENS > max_context and len(convo) > 2:
 
37
  return convo
38
 
39
  def safe_chat(convo: list[dict], max_context: int, max_reply: int, model_list: list[str]):
 
 
 
 
 
40
  last_exc = None
41
  for model in model_list:
42
  try:
 
48
  )
49
  except Exception as e:
50
  msg = str(e).lower()
 
51
  if "does not exist" in msg or "model_not_found" in msg or "404" in msg:
52
  last_exc = e
53
  continue
 
54
  if "context length" in msg or "maximum context length" in msg:
55
  trimmed = trim_conversation(convo.copy(), model, max_context)
56
  try:
 
63
  except Exception as e2:
64
  last_exc = e2
65
  continue
 
66
  raise
 
67
  raise last_exc or RuntimeError("All models failed in safe_chat()")
68
 
69
  def chat_handler(
 
74
  max_context: int,
75
  max_reply: int
76
  ) -> tuple[list[tuple[str, str]], str]:
 
77
  if not user_message.strip():
78
  return history, ""
79
  if not client.api_key:
80
  return history, "❌ OPENAI_API_KEY not set."
81
 
 
82
  convo = [{"role":"system","content":system_prompt}]
83
  for u, b in history or []:
84
  convo.append({"role":"user", "content":u})
85
  convo.append({"role":"assistant", "content":b})
86
  convo.append({"role":"user","content":user_message})
87
 
 
88
  fallback = [m for m in ALL_MODELS if m != selected_model]
89
  models_to_try = [selected_model] + fallback
90
 
 
99
  return history, ""
100
 
101
  def clear_chat_handler() -> list:
 
102
  return []
103
 
104
  # β€”β€”β€” Gradio UI β€”β€”β€”
105
+ with gr.Blocks(title="πŸ€– CodeBot: Your Cool Coding Assistant") as demo:
 
 
 
106
  gr.Markdown(
107
  """
108
  ## CodeBot
 
109
  - **Model selector**: pick your LLM
110
  - **Context slider**: control how much history to keep
111
  - **Reply slider**: set max response length
 
132
  label="✍️ Max Reply Tokens"
133
  )
134
 
 
135
  examples = [
136
  "How do I implement quicksort in Python?",
137
  "Show me a C# example using LINQ to group items.",
138
  "Explain async/await in Python with sample code.",
139
  "How to connect to SQL Server using C#?"
140
  ]
141
+
142
  with gr.Row():
143
+ example_dropdown = gr.Dropdown(choices=examples, label="πŸ’‘ Examples")
 
 
 
144
  example_btn = gr.Button("πŸ“₯ Load Example")
145
 
 
 
 
 
 
 
 
146
  system_txt = gr.Textbox(
147
  lines=3,
148
  value=(
 
163
  elem_id="user_input"
164
  )
165
 
166
+ # Wire up example loader to the actual user_input component
167
+ example_btn.click(
168
+ fn=lambda q: q or "",
169
+ inputs=[example_dropdown],
170
+ outputs=[user_input]
171
+ )
172
+
173
  with gr.Row():
174
  send_btn = gr.Button("πŸš€ Send")
175
  clear_btn = gr.Button("πŸ—‘οΈ Clear Chat", variant="secondary")