ErNewdev0 committed
Commit 035ce41 · verified · 1 Parent(s): db42d03

fix: handle_chat missing

Files changed (1)
  1. app.py +53 -53
app.py CHANGED
@@ -278,69 +278,69 @@ class RepoAnalyzer:
                 continue
         return False, "Tidak dapat membaca file dengan encoding yang didukung"

-async def handle_chat(message, history, provider_choice, model_name, xai_key, gemini_key, selected_files):
-    if not analyzer.current_repo:
-        yield history + [[message, "⚠️ Mohon clone repository terlebih dahulu sebelum mengajukan pertanyaan."]]
-        return
+async def handle_chat(message, history, provider_choice, model_name, xai_key, gemini_key, selected_files):
+    if not analyzer.current_repo:
+        yield history + [[message, "⚠️ Mohon clone repository terlebih dahulu sebelum mengajukan pertanyaan."]]
+        return

-    history = history or []
-    history.append([message, ""])
+    history = history or []
+    history.append([message, ""])

-    try:
+    try:
         # Add context about selected files to the prompt
-        file_context = ""
-        if selected_files:
-            file_context = "\n\nFile yang dipilih:\n"
-            for file in selected_files:
-                content = analyzer.repo_content.get(file, "")
-                escaped_content = content.replace('`', r'\`')
+        file_context = ""
+        if selected_files:
+            file_context = "\n\nFile yang dipilih:\n"
+            for file in selected_files:
+                content = analyzer.repo_content.get(file, "")
+                escaped_content = content.replace('`', r'\`')

-                html = (
-                    '<div class="wrapper-artifact">'
-                    f'<div class="header-artifact">'
-                    f'<span>{file}</span>'
-                    f'<button class="copy-button" onclick="copyToClipboard(`{escaped_content}`)">Copy</button>'
-                    '</div>'
-                    '<div class="content-artifact">'
-                    f'<pre><code>{content}</code></pre>'
-                    '</div>'
-                    '</div>'
-                )
-                file_context += html
+                html = (
+                    '<div class="wrapper-artifact">'
+                    f'<div class="header-artifact">'
+                    f'<span>{file}</span>'
+                    f'<button class="copy-button" onclick="copyToClipboard(`{escaped_content}`)">Copy</button>'
+                    '</div>'
+                    '<div class="content-artifact">'
+                    f'<pre><code>{content}</code></pre>'
+                    '</div>'
+                    '</div>'
+                )
+                file_context += html

-        enhanced_message = f"{message}\n{file_context}"
+        enhanced_message = f"{message}\n{file_context}"

-        full_response = ""
-        if provider_choice == AIProvider.XAI:
-            async for chunk in analyzer.stream_xai_response(enhanced_message, xai_key, model_name):
+        full_response = ""
+        if provider_choice == AIProvider.XAI:
+            async for chunk in analyzer.stream_xai_response(enhanced_message, xai_key, model_name):
                 # Wrap code blocks in custom styling
-                chunk = process_code_blocks(chunk)
-                full_response += chunk
-                await asyncio.sleep(0.08)
-                history[-1][1] = full_response
-                yield history
+                chunk = process_code_blocks(chunk)
+                full_response += chunk
+                await asyncio.sleep(0.08)
+                history[-1][1] = full_response
+                yield history

-        elif provider_choice == AIProvider.GEMINI:
-            async for chunk in analyzer.stream_gemini_response(enhanced_message, gemini_key or DEFAULT_GEMINI_KEY):
-                chunk = process_code_blocks(chunk)
-                full_response += chunk
-                await asyncio.sleep(0.08)
-                history[-1][1] = full_response
-                yield history
+        elif provider_choice == AIProvider.GEMINI:
+            async for chunk in analyzer.stream_gemini_response(enhanced_message, gemini_key or DEFAULT_GEMINI_KEY):
+                chunk = process_code_blocks(chunk)
+                full_response += chunk
+                await asyncio.sleep(0.08)
+                history[-1][1] = full_response
+                yield history

-        else: # OLLAMA
-            response = analyze_with_ollama(model_name, enhanced_message)
-            response = process_code_blocks(response)
-            words = response.split()
-            for i in range(len(words)):
-                full_response = " ".join(words[:i+1])
-                await asyncio.sleep(0.08)
-                history[-1][1] = full_response
-                yield history
+        else: # OLLAMA
+            response = analyze_with_ollama(model_name, enhanced_message)
+            response = process_code_blocks(response)
+            words = response.split()
+            for i in range(len(words)):
+                full_response = " ".join(words[:i+1])
+                await asyncio.sleep(0.08)
+                history[-1][1] = full_response
+                yield history

-    except Exception as e:
-        history[-1][1] = f"⚠️ Error: {str(e)}"
-        yield history
+    except Exception as e:
+        history[-1][1] = f"⚠️ Error: {str(e)}"
+        yield history

 def process_code_blocks(text):
     """Process markdown code blocks to use custom artifact styling"""