ErNewdev0 commited on
Commit
0cfa5f9
·
verified ·
1 Parent(s): 41766c3

chore: fix missing handle_chat function

Browse files
Files changed (1) hide show
  1. app.py +71 -0
app.py CHANGED
@@ -301,6 +301,77 @@ class RepoAnalyzer:
301
  continue
302
  return False, "Tidak dapat membaca file dengan encoding yang didukung"
303
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
304
 
305
  def create_ui():
306
  analyzer = RepoAnalyzer()
 
301
  continue
302
  return False, "Tidak dapat membaca file dengan encoding yang didukung"
303
 
304
async def handle_chat(
    message,
    history,
    provider_choice,
    model_name,
    xai_key,
    gemini_key,
    selected_files,
):
    """Handle one chat interaction with the selected AI provider.

    Streams the assistant reply into the last history entry and yields the
    updated history after every chunk so the UI can render incrementally.

    NOTE(review): relies on ``analyzer``, ``AIProvider``,
    ``DEFAULT_GEMINI_KEY`` and ``analyze_with_ollama`` being in scope at the
    definition site — confirm against the enclosing file.

    Args:
        message: The user's new chat message.
        history: Existing chat history as a list of role/content dicts
            (may be None on the first turn).
        provider_choice: Which backend to use (XAI, GEMINI, or OLLAMA).
        model_name: Model identifier for XAI/OLLAMA backends.
        xai_key: API key for the XAI backend.
        gemini_key: API key for Gemini; falls back to DEFAULT_GEMINI_KEY.
        selected_files: Repo file paths to inline as extra context.

    Yields:
        The updated history list after each streamed chunk.
    """
    history = history or []

    # A repository must be cloned first; otherwise reply with a warning.
    if not analyzer.current_repo:
        history.append({"role": "user", "content": message})
        history.append({
            "role": "assistant",
            "content": "⚠️ Mohon clone repository terlebih dahulu sebelum mengajukan pertanyaan."
        })
        yield history
        return

    history.append({"role": "user", "content": message})
    # Placeholder assistant entry that streaming below fills in.
    history.append({"role": "assistant", "content": ""})

    try:
        # Inline the content of any user-selected repo files as context.
        file_context = ""
        if selected_files:
            file_context = "\n\nFile yang dipilih:\n"
            for file in selected_files:
                content = analyzer.repo_content.get(file, "")
                if content:  # only include files that actually exist
                    file_context += f"\n{file}:\n```\n{content}\n```\n"

        enhanced_message = f"{message}\n{file_context}"

        full_response = ""
        if provider_choice == AIProvider.XAI:
            async for chunk in analyzer.stream_xai_response(
                enhanced_message, xai_key, model_name
            ):
                full_response += chunk
                # Small delay between chunks for readability.
                await asyncio.sleep(0.05)
                history[-1]["content"] = full_response
                yield history

        elif provider_choice == AIProvider.GEMINI:
            async for chunk in analyzer.stream_gemini_response(
                enhanced_message, gemini_key or DEFAULT_GEMINI_KEY
            ):
                full_response += chunk
                # Small delay between chunks for readability.
                await asyncio.sleep(0.05)
                history[-1]["content"] = full_response
                yield history

        else:  # OLLAMA: non-streaming call, so simulate streaming word by word
            response = analyze_with_ollama(model_name, enhanced_message)
            for word in response.split():
                # Incremental append instead of re-joining the whole prefix
                # (" ".join(words[:i + 1]) per word was accidentally O(n^2)).
                full_response = word if not full_response else f"{full_response} {word}"
                await asyncio.sleep(0.05)
                history[-1]["content"] = full_response
                yield history

    except Exception as e:
        # UI boundary: surface any backend failure as an assistant message.
        history[-1]["content"] = f"⚠️ Error: {str(e)}"
        yield history
375
 
376
  def create_ui():
377
  analyzer = RepoAnalyzer()