AIencoder committed on
Commit
9e3de6d
·
verified ·
1 Parent(s): f342e80

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +21 -10
app.py CHANGED
def chat_stream(message, history, model, temperature, max_tokens):
    """Stream a chat completion, yielding the growing message list for the UI.

    Args:
        message: The user's new message text.
        history: Prior conversation as a list of {"role", "content"} dicts,
            or None on the first turn.
        model: Model identifier forwarded to generate_stream.
        temperature: Sampling temperature forwarded to generate_stream.
        max_tokens: Generation cap forwarded to generate_stream.

    Yields:
        The full message list (history + new user turn + partial/final
        assistant turn) after each streamed chunk.
    """
    if not message.strip():
        # Bug fix: the original yielded `history` directly, which is None on
        # the first turn; always hand the UI a list.
        yield history or []
        return

    history = history or []

    # Build ChatML conversation context from the last 10 turns.
    system = "You are Axon, an expert AI coding assistant. Be helpful, concise, and provide working code examples when appropriate."
    # Bug fix: the original indexed msg["role"]/msg["content"], which raises
    # KeyError on malformed entries; .get() tolerates them. Collect parts and
    # join once instead of quadratic `+=` concatenation.
    parts = []
    for msg in history[-10:]:  # Last 10 messages
        if isinstance(msg, dict):
            role = msg.get("role", "")
            content = msg.get("content", "")
            if role in ("user", "assistant"):
                parts.append(f"<|im_start|>{role}\n{content}<|im_end|>\n")
    conversation = "".join(parts)

    prompt = f"<|im_start|>system\n{system}<|im_end|>\n{conversation}<|im_start|>user\n{message}<|im_end|>\n<|im_start|>assistant\n"

    # Add the user message to a fresh list so the caller's history is not
    # mutated while we stream.
    new_history = list(history) + [{"role": "user", "content": message}]

    response = ""
    for chunk in generate_stream(prompt, model, max_tokens, temperature):
        response += chunk
        # Yield history with the partial assistant response so the UI
        # updates as tokens arrive.
        yield new_history + [{"role": "assistant", "content": response}]

    # Final yield with the complete response; surface an explicit message if
    # the stream produced nothing rather than showing an empty bubble.
    if not response:
        response = "❌ No response generated. Please try again."
    yield new_history + [{"role": "assistant", "content": response}]
  def generate_code(description, language, model, max_tokens):
212
  if not description.strip():
 
def chat_stream(message, history, model, temperature, max_tokens):
    """Stream a chat completion, yielding the growing message list for the UI.

    Args:
        message: The user's new message text.
        history: Prior conversation as a list of {"role", "content"} dicts,
            or None on the first turn.
        model: Model identifier forwarded to generate_stream.
        temperature: Sampling temperature forwarded to generate_stream.
        max_tokens: Generation cap forwarded to generate_stream.

    Yields:
        The full message list (history + new user turn + partial/final
        assistant turn) after each streamed chunk.
    """
    if not message.strip():
        # Nothing to send; echo the current (possibly empty) history once.
        yield history or []
        return

    history = history or []

    # Build ChatML conversation context from the last 10 turns.
    system = "You are Axon, an expert AI coding assistant. Be helpful, concise, and provide working code examples when appropriate."
    # Cleanup: the original wrapped this loop in try/except Exception, but
    # dict.get() cannot raise, so the handler was dead code — removed.
    # Collect parts and join once instead of quadratic `+=` concatenation.
    parts = []
    for msg in history[-10:]:
        if isinstance(msg, dict):
            role = msg.get("role", "")
            content = msg.get("content", "")
            if role in ("user", "assistant"):
                parts.append(f"<|im_start|>{role}\n{content}<|im_end|>\n")
    conversation = "".join(parts)

    prompt = f"<|im_start|>system\n{system}<|im_end|>\n{conversation}<|im_start|>user\n{message}<|im_end|>\n<|im_start|>assistant\n"

    # Build new history with the user message; list() copies so the caller's
    # history is not mutated while we stream.
    new_history = list(history) + [{"role": "user", "content": message}]

    response = ""
    for chunk in generate_stream(prompt, model, max_tokens, temperature):
        response += chunk
        # Yield history with the partial assistant response so the UI
        # updates as tokens arrive.
        yield new_history + [{"role": "assistant", "content": response}]

    # Final yield with the complete response; surface an explicit message if
    # the stream produced nothing rather than showing an empty bubble.
    if not response:
        response = "❌ No response generated. Please try again."
    yield new_history + [{"role": "assistant", "content": response}]
222
  def generate_code(description, language, model, max_tokens):
223
  if not description.strip():