Spaces:
Sleeping
Sleeping
fix: maybe this fixes "analyzer is not defined"
Browse files
app.py
CHANGED
|
@@ -15,6 +15,7 @@ import dotenv
|
|
| 15 |
|
| 16 |
# Load environment variables
|
| 17 |
dotenv.load_dotenv()
|
|
|
|
| 18 |
|
| 19 |
# Metadata
|
| 20 |
CURRENT_TIME = "2025-05-23 12:57:22"
|
|
@@ -301,6 +302,7 @@ class RepoAnalyzer:
|
|
| 301 |
continue
|
| 302 |
return False, "Tidak dapat membaca file dengan encoding yang didukung"
|
| 303 |
|
|
|
|
| 304 |
async def handle_chat(
|
| 305 |
message,
|
| 306 |
history,
|
|
@@ -309,12 +311,13 @@ async def handle_chat(
|
|
| 309 |
xai_key,
|
| 310 |
gemini_key,
|
| 311 |
selected_files,
|
|
|
|
| 312 |
):
|
| 313 |
"""Menangani interaksi chat dengan model AI"""
|
| 314 |
if not analyzer.current_repo:
|
| 315 |
new_message = {
|
| 316 |
"role": "assistant",
|
| 317 |
-
"content": "⚠️ Mohon clone repository terlebih dahulu sebelum mengajukan pertanyaan."
|
| 318 |
}
|
| 319 |
history = history or []
|
| 320 |
history.append({"role": "user", "content": message})
|
|
@@ -364,7 +367,7 @@ async def handle_chat(
|
|
| 364 |
# Simulate streaming for OLLAMA with delay
|
| 365 |
words = response.split()
|
| 366 |
for i in range(len(words)):
|
| 367 |
-
full_response = " ".join(words[:i + 1])
|
| 368 |
await asyncio.sleep(0.05)
|
| 369 |
history[-1]["content"] = full_response
|
| 370 |
yield history
|
|
@@ -373,8 +376,10 @@ async def handle_chat(
|
|
| 373 |
history[-1]["content"] = f"⚠️ Error: {str(e)}"
|
| 374 |
yield history
|
| 375 |
|
|
|
|
| 376 |
def create_ui():
|
| 377 |
-
analyzer
|
|
|
|
| 378 |
current_time = datetime.now().strftime("%Y-%m-%d %H:%M:%S")
|
| 379 |
|
| 380 |
with gr.Blocks(title="Open Repo AI", theme=gr.themes.Soft()) as app:
|
|
|
|
| 15 |
|
| 16 |
# Load environment variables
|
| 17 |
dotenv.load_dotenv()
|
| 18 |
+
analyzer = RepoAnalyzer()
|
| 19 |
|
| 20 |
# Metadata
|
| 21 |
CURRENT_TIME = "2025-05-23 12:57:22"
|
|
|
|
| 302 |
continue
|
| 303 |
return False, "Tidak dapat membaca file dengan encoding yang didukung"
|
| 304 |
|
| 305 |
+
|
| 306 |
async def handle_chat(
|
| 307 |
message,
|
| 308 |
history,
|
|
|
|
| 311 |
xai_key,
|
| 312 |
gemini_key,
|
| 313 |
selected_files,
|
| 314 |
+
analyzer=analyzer,
|
| 315 |
):
|
| 316 |
"""Menangani interaksi chat dengan model AI"""
|
| 317 |
if not analyzer.current_repo:
|
| 318 |
new_message = {
|
| 319 |
"role": "assistant",
|
| 320 |
+
"content": "⚠️ Mohon clone repository terlebih dahulu sebelum mengajukan pertanyaan.",
|
| 321 |
}
|
| 322 |
history = history or []
|
| 323 |
history.append({"role": "user", "content": message})
|
|
|
|
| 367 |
# Simulate streaming for OLLAMA with delay
|
| 368 |
words = response.split()
|
| 369 |
for i in range(len(words)):
|
| 370 |
+
full_response = " ".join(words[: i + 1])
|
| 371 |
await asyncio.sleep(0.05)
|
| 372 |
history[-1]["content"] = full_response
|
| 373 |
yield history
|
|
|
|
| 376 |
history[-1]["content"] = f"⚠️ Error: {str(e)}"
|
| 377 |
yield history
|
| 378 |
|
| 379 |
+
|
| 380 |
def create_ui():
|
| 381 |
+
# Gunakan analyzer global
|
| 382 |
+
global analyzer
|
| 383 |
current_time = datetime.now().strftime("%Y-%m-%d %H:%M:%S")
|
| 384 |
|
| 385 |
with gr.Blocks(title="Open Repo AI", theme=gr.themes.Soft()) as app:
|