Mustafa-albakkar committed on
Commit
4fb27a7
·
verified ·
1 Parent(s): 54ceb43

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +25 -16
app.py CHANGED
@@ -8,9 +8,11 @@ Updates:
8
  ✅ Prints all questions and corresponding results at the end
9
  ✅ Maintains modular design and existing architecture
10
  """
 
 
 
11
  from langchain_community.llms import LlamaCpp
12
  from llama_cpp import Llama
13
- import os
14
  import re
15
  import json
16
  import requests
@@ -25,7 +27,6 @@ from langchain_core.prompts import PromptTemplate
25
  from langchain.agents import create_react_agent, AgentExecutor
26
  from datasets import load_dataset
27
  from huggingface_hub import login
28
- import os
29
  import threading
30
  import logging
31
  logging.getLogger("httpx").setLevel(logging.WARNING)
@@ -40,11 +41,11 @@ model_path = hf_hub_download(
40
  llm = LlamaCpp(
41
  model_path=model_path,
42
  n_ctx=10000,
43
- n_threads=8,
44
  n_gpu_layers=0,
45
  temperature=0.65,
46
  top_p=0.9,
47
- max_tokens=200,
48
  n_batch=64,
49
  verbose=False
50
  )
@@ -356,19 +357,27 @@ class GaiaRunner:
356
  answers = []
357
  results_log = []
358
 
359
- with ThreadPoolExecutor(max_workers=1) as executor:
360
- futures = {}
361
- for q in questions_data:
362
- task_id = q.get("task_id")
363
- qtext = q.get("question")
364
- attach = q.get("file_name")
365
- file_path = None
366
- if attach and attach.strip(): # تأكد أن الحقل غير فارغ
367
- file_path = self.download_gaia_attachment(q)
368
- else:
369
- file_path = None
370
- futures[executor.submit(self.run_on_question, qtext, file_path)] = (task_id, qtext)
 
 
 
 
 
 
 
371
 
 
372
  for fut in as_completed(futures):
373
  task_id, qtext = futures[fut]
374
  ans = fut.result()
 
8
  ✅ Prints all questions and corresponding results at the end
9
  ✅ Maintains modular design and existing architecture
10
  """
11
+ import os
12
+ os.environ["LLAMA_BLAS"] = "1"
13
+ os.environ["LLAMA_BLAS_VENDOR"] = "OpenBLAS"
14
  from langchain_community.llms import LlamaCpp
15
  from llama_cpp import Llama
 
16
  import re
17
  import json
18
  import requests
 
27
  from langchain.agents import create_react_agent, AgentExecutor
28
  from datasets import load_dataset
29
  from huggingface_hub import login
 
30
  import threading
31
  import logging
32
  logging.getLogger("httpx").setLevel(logging.WARNING)
 
41
  llm = LlamaCpp(
42
  model_path=model_path,
43
  n_ctx=10000,
44
+ n_threads=4,
45
  n_gpu_layers=0,
46
  temperature=0.65,
47
  top_p=0.9,
48
+ max_tokens=100,
49
  n_batch=64,
50
  verbose=False
51
  )
 
357
  answers = []
358
  results_log = []
359
 
360
+ futures = [] # لتخزين النتائج النهائية
361
+
362
+ for q in questions_data:
363
+ task_id = q.get("task_id")
364
+ qtext = q.get("question")
365
+ attach = q.get("file_name")
366
+
367
+ # تحميل المرفق فقط إذا كان موجودًا فعلاً
368
+ if attach and attach.strip():
369
+ file_path = self.download_gaia_attachment(q)
370
+ else:
371
+ file_path = None
372
+
373
+ # تنفيذ الدالة مباشرة (تسلسليًا)
374
+ print(f"🔹 Running question {task_id}: {qtext[:60]}...")
375
+ futures = self.run_on_question(qtext, file_path)
376
+
377
+ # حفظ النتيجة بشكل مشابه لاستخدام futures سابقًا
378
+ futures.append((task_id, qtext, result))
379
 
380
+ print("✅ All questions processed.")
381
  for fut in as_completed(futures):
382
  task_id, qtext = futures[fut]
383
  ans = fut.result()