Mustafa-albakkar committed on
Commit
cf85179
·
verified ·
1 Parent(s): 2aef244

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +95 -17
app.py CHANGED
@@ -1,18 +1,47 @@
1
  # ============================================================
2
  # CodeAgent - Sub-agent for code generation and execution
3
- # Author: Mustafa Albakkar
4
- # Compatible with Hugging Face Spaces (Gradio-based)
5
  # ============================================================
6
 
7
- import os, json, atexit, logging, traceback, io, sys, contextlib, tempfile
 
 
 
 
 
 
 
 
 
 
8
  import gradio as gr
9
  from llama_cpp import Llama
10
 
11
  # ------------------------------------------------------------
12
- # 🔧 إعداد التسجيل (Logs)
13
  # ------------------------------------------------------------
14
- logging.basicConfig(level=logging.INFO)
 
 
 
 
15
  logger = logging.getLogger("CodeAgent")
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
16
 
17
  # ------------------------------------------------------------
18
  # 🧠 تحميل النموذج (Qwen2.5-Coder-14B-Instruct-GGUF)
@@ -32,7 +61,6 @@ except Exception as e:
32
  logger.exception("❌ Model load failed: %s", e)
33
  llm = None
34
 
35
-
36
  # ------------------------------------------------------------
37
  # 🧩 دوال مساعدة
38
  # ------------------------------------------------------------
@@ -56,6 +84,27 @@ def compute_safe_max_tokens(prompt: str, max_ctx=4096):
56
  return 256
57
 
58
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
59
  # ------------------------------------------------------------
60
  # 🧮 أداة تنفيذ الكود بأمان
61
  # ------------------------------------------------------------
@@ -73,13 +122,15 @@ def execute_python_code(code_str: str) -> str:
73
  try:
74
  # إعادة توجيه stdout و stderr
75
  with contextlib.redirect_stdout(buffer), contextlib.redirect_stderr(buffer):
 
76
  local_env = {}
 
77
  exec(code_str, {}, local_env)
78
  result = buffer.getvalue().strip()
79
  if not result:
80
  result = "✅ Code executed successfully (no printed output)."
81
  return result
82
- except Exception as e:
83
  tb = traceback.format_exc()
84
  return f"❌ Code execution error:\n{tb}"
85
  finally:
@@ -87,13 +138,13 @@ def execute_python_code(code_str: str) -> str:
87
 
88
 
89
  # ------------------------------------------------------------
90
- # 🎯 الدالة الأساسية للتوليد والتنفيذ
91
  # ------------------------------------------------------------
92
  def generate_and_execute_fn(prompt: str) -> str:
93
  """
94
  - توليد الكود البرمجي باستخدام نموذج Qwen2.5-Coder
95
  - تنفيذ الكود مباشرة
96
- - إعادة النتائج إلى الوكيل الأساسي
97
  """
98
  try:
99
  prompt = safe_text(prompt)
@@ -101,6 +152,7 @@ def generate_and_execute_fn(prompt: str) -> str:
101
  return "⚠️ No prompt provided."
102
 
103
  if llm is None:
 
104
  return "❌ Model not loaded."
105
 
106
  # --- تحضير إدخال النموذج بصيغة محادثة ---
@@ -116,6 +168,10 @@ def generate_and_execute_fn(prompt: str) -> str:
116
 
117
  max_tokens = compute_safe_max_tokens(formatted_prompt)
118
 
 
 
 
 
119
  # --- تنفيذ التوليد ---
120
  try:
121
  out = llm(
@@ -131,30 +187,52 @@ def generate_and_execute_fn(prompt: str) -> str:
131
  temperature=0.2,
132
  stop=["<|im_end|>"]
133
  )
 
 
 
134
 
135
  # --- استخراج النص الناتج ---
136
  if isinstance(out, dict):
137
- text = out.get("choices", [{}])[0].get("text", "")
 
 
 
 
138
  else:
139
  text = str(out)
140
 
141
- if not text.strip():
 
 
142
  return "⚠️ Empty response from model."
143
 
144
- logger.info(f"🤖 Generated code:\n{text}")
 
 
 
 
 
145
 
146
  # --- تنفيذ الكود وإعادة النتيجة ---
147
- exec_result = execute_python_code(text)
 
 
 
 
 
 
 
 
148
  final_output = (
149
  f"🧠 **Prompt:** {prompt}\n\n"
150
- f"💻 **Generated Code:**\n{text}\n\n"
151
  f"🧾 **Execution Result:**\n{exec_result}"
152
  )
153
  return final_output
154
 
155
- except Exception as e:
156
- logger.exception("Generation/Execution error: %s", e)
157
- return f"❌ Error: {e}"
158
 
159
 
160
  # ------------------------------------------------------------
 
1
  # ============================================================
2
  # CodeAgent - Sub-agent for code generation and execution
3
+ # Modified to log inputs & outputs (file + jsonl)
4
+ # Author: Mustafa Albakkar (modified)
5
  # ============================================================
6
 
7
+ import os
8
+ import json
9
+ import atexit
10
+ import logging
11
+ import traceback
12
+ import io
13
+ import sys
14
+ import contextlib
15
+ import tempfile
16
+ from logging.handlers import RotatingFileHandler
17
+
18
  import gradio as gr
19
  from llama_cpp import Llama
20
 
21
# ------------------------------------------------------------
# 🔧 Logging setup — console + rotating file
# ------------------------------------------------------------
# Log directory is overridable via CODEAGENT_LOG_DIR; created up front so
# both the rotating log file and the JSONL record file have somewhere to go.
LOG_DIR = os.environ.get("CODEAGENT_LOG_DIR", "logs")
os.makedirs(LOG_DIR, exist_ok=True)
log_file = os.path.join(LOG_DIR, "codeagent.log")
jsonl_file = os.path.join(LOG_DIR, "records.jsonl")

logger = logging.getLogger("CodeAgent")
logger.setLevel(logging.INFO)

# Guard against duplicate handlers: logging.getLogger returns the same
# singleton every time, and Gradio/Spaces can re-execute this module on
# reload — without the guard every log line would be emitted once per reload.
if not logger.handlers:
    # One shared formatter for both handlers (they used identical formats).
    _formatter = logging.Formatter("%(asctime)s [%(levelname)s] %(name)s: %(message)s")

    # Console handler
    ch = logging.StreamHandler(sys.stdout)
    ch.setLevel(logging.INFO)
    ch.setFormatter(_formatter)
    logger.addHandler(ch)

    # Rotating file handler: 5 MB per file, up to 5 backups
    fh = RotatingFileHandler(log_file, maxBytes=5 * 1024 * 1024, backupCount=5, encoding="utf-8")
    fh.setLevel(logging.INFO)
    fh.setFormatter(_formatter)
    logger.addHandler(fh)
45
 
46
  # ------------------------------------------------------------
47
  # 🧠 تحميل النموذج (Qwen2.5-Coder-14B-Instruct-GGUF)
 
61
  logger.exception("❌ Model load failed: %s", e)
62
  llm = None
63
 
 
64
  # ------------------------------------------------------------
65
  # 🧩 دوال مساعدة
66
  # ------------------------------------------------------------
 
84
  return 256
85
 
86
 
87
def append_log_record(prompt: str, model_raw: str, code_clean: str, exec_result: str, extra: dict = None, path: str = None):
    """Append one JSON line (JSONL) describing a single request, for later processing.

    Each line records the user prompt, the raw model output, the cleaned
    code, the execution result, and an ISO-8601 UTC timestamp.

    Args:
        prompt: The user prompt as sent to the model.
        model_raw: Raw text returned by the model.
        code_clean: Code after stripping markdown fences.
        exec_result: Captured output of executing the code.
        extra: Optional extra key/value pairs merged into the record.
        path: Target JSONL file; defaults to the module-level ``jsonl_file``.

    Never raises: any failure is logged and swallowed so that record
    keeping can never break the main request path.
    """
    # Local import: datetime is not in this file's top-level imports.
    from datetime import datetime, timezone

    try:
        record = {
            # Explicit timestamp — replaces the previous hack of building a
            # throwaway LogRecord just to call Formatter.formatTime() on it.
            "ts": datetime.now(timezone.utc).isoformat(),
            "prompt": prompt,
            "model_raw": model_raw,
            "code_clean": code_clean,
            "exec_result": exec_result,
        }
        if extra:
            record.update(extra)
        target = path if path is not None else jsonl_file
        with open(target, "a", encoding="utf-8") as f:
            f.write(json.dumps(record, ensure_ascii=False) + "\n")
    except Exception:
        logger.exception("Failed to append JSONL record.")
106
+
107
+
108
  # ------------------------------------------------------------
109
  # 🧮 أداة تنفيذ الكود بأمان
110
  # ------------------------------------------------------------
 
122
  try:
123
  # إعادة توجيه stdout و stderr
124
  with contextlib.redirect_stdout(buffer), contextlib.redirect_stderr(buffer):
125
+ # نحدد local_env فقط (لا نعطي globals حساسة)
126
  local_env = {}
127
+ # تنفيذ الكود — ضع في اعتبارك مخاطر exec عند تشغيل كود غير موثوق
128
  exec(code_str, {}, local_env)
129
  result = buffer.getvalue().strip()
130
  if not result:
131
  result = "✅ Code executed successfully (no printed output)."
132
  return result
133
+ except Exception:
134
  tb = traceback.format_exc()
135
  return f"❌ Code execution error:\n{tb}"
136
  finally:
 
138
 
139
 
140
  # ------------------------------------------------------------
141
+ # 🎯 الدالة الأساسية للتوليد والتنفيذ (مع لوق مفصّل)
142
  # ------------------------------------------------------------
143
  def generate_and_execute_fn(prompt: str) -> str:
144
  """
145
  - توليد الكود البرمجي باستخدام نموذج Qwen2.5-Coder
146
  - تنفيذ الكود مباشرة
147
+ - تسجيل المدخلات والمخرجات
148
  """
149
  try:
150
  prompt = safe_text(prompt)
 
152
  return "⚠️ No prompt provided."
153
 
154
  if llm is None:
155
+ logger.error("Model not loaded when requested.")
156
  return "❌ Model not loaded."
157
 
158
  # --- تحضير إدخال النموذج بصيغة محادثة ---
 
168
 
169
  max_tokens = compute_safe_max_tokens(formatted_prompt)
170
 
171
+ # سجل المدخل (prompt)
172
+ logger.info("🔔 New request received.")
173
+ logger.info("➡️ Prompt: %s", prompt)
174
+
175
  # --- تنفيذ التوليد ---
176
  try:
177
  out = llm(
 
187
  temperature=0.2,
188
  stop=["<|im_end|>"]
189
  )
190
+ except Exception:
191
+ logger.exception("Model generation failed.")
192
+ return "❌ Model generation failed."
193
 
194
  # --- استخراج النص الناتج ---
195
  if isinstance(out, dict):
196
+ # قد يختلف هيكل الإرجاع باختلاف binding
197
+ try:
198
+ text = out.get("choices", [{}])[0].get("text", "") or out.get("text", "")
199
+ except Exception:
200
+ text = json.dumps(out, ensure_ascii=False)
201
  else:
202
  text = str(out)
203
 
204
+ model_raw = text or ""
205
+ if not model_raw.strip():
206
+ logger.warning("Empty response from model.")
207
  return "⚠️ Empty response from model."
208
 
209
+ logger.info("🧾 Model raw output (truncated 1000 chars):\n%s", model_raw[:1000])
210
+
211
+ # --- تنظيف النص لاستخراج الكود فقط ---
212
+ # إذا كان النموذج يلف الكود بثلاثي backticks، نفرّغها
213
+ code_candidate = model_raw.replace("```python", "").replace("```", "").strip()
214
+ logger.info("🛠 Cleaned code (first 1000 chars):\n%s", code_candidate[:1000])
215
 
216
  # --- تنفيذ الكود وإعادة النتيجة ---
217
+ exec_result = execute_python_code(code_candidate)
218
+ logger.info("📤 Execution result (truncated 2000 chars):\n%s", exec_result[:2000])
219
+
220
+ # إضافة سجل JSONL
221
+ try:
222
+ append_log_record(prompt=prompt, model_raw=model_raw, code_clean=code_candidate, exec_result=exec_result)
223
+ except Exception:
224
+ logger.exception("Failed to write jsonl record.")
225
+
226
  final_output = (
227
  f"🧠 **Prompt:** {prompt}\n\n"
228
+ f"💻 **Generated Code:**\n{code_candidate}\n\n"
229
  f"🧾 **Execution Result:**\n{exec_result}"
230
  )
231
  return final_output
232
 
233
+ except Exception:
234
+ logger.exception("Generation/Execution error")
235
+ return "❌ Internal error during generation/execution."
236
 
237
 
238
  # ------------------------------------------------------------