taha454 committed on
Commit
c6e4140
·
verified ·
1 Parent(s): e073b61

Update agent.py

Browse files
Files changed (1) hide show
  1. agent.py +36 -15
agent.py CHANGED
@@ -14,6 +14,34 @@ from langchain_experimental.tools import PythonREPLTool
14
  import ast, json
15
 
16
  from langchain_community.tools import DuckDuckGoSearchRun
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
17
 
18
 
19
  ########## State ############
@@ -80,11 +108,8 @@ def get_code(state:InfoState) -> InfoState:
80
  f"- Import modules only if needed (e.g. datetime, math)"
81
  )
82
 
83
- # 2️⃣ Call Gemini
84
- model = ChatGoogleGenerativeAI(model="gemini-2.5-flash")
85
- response = model.invoke([HumanMessage(content=prompt)]).content.strip()
86
-
87
- state["answer_code"] = response
88
 
89
 
90
  return state
@@ -125,9 +150,7 @@ def preprocess_text(state: dict) -> InfoState:
125
  )
126
 
127
 
128
- # 2️⃣ Call Gemini
129
- model = ChatGoogleGenerativeAI(model="gemini-2.5-flash")
130
- response = model.invoke([HumanMessage(content=prompt)]).content.strip()
131
 
132
  # 3️⃣ Try to safely parse
133
  try:
@@ -166,13 +189,12 @@ def get_answer(state: InfoState) -> InfoState :
166
  "- Do not include tool names, prefixes, or metadata.\n"
167
  "- If the context contains partial hints, you can infer the answer from general knowledge of the same topic.\n"
168
  "- If the question asks about an attached file or audio, reply briefly that you cannot access attachments or audio files."
169
- "- If absolutely nothing is relevant, reply: I'm not sure because the question depends on a file or missing data.\n\n"
170
  "- Final answer should be complete text not part of answer"
171
  "Final Answer:"
172
  )
173
 
174
- model = ChatGoogleGenerativeAI(model="gemini-2.5-flash")
175
- state["final_answer"] = (model.invoke([HumanMessage(content=prompt)]).content)
176
 
177
  return state
178
 
@@ -196,9 +218,8 @@ def get_type(state: InfoState) -> InfoState:
196
  - Output nothing else. No punctuation, no quotes, no explanation.
197
  - If unsure, default to LLM.
198
  """
199
-
200
- model = ChatGoogleGenerativeAI(model="gemini-2.5-flash")
201
- state["answer_type"] = (model.invoke([HumanMessage(content=prompt)]).content)
202
 
203
  return state
204
 
@@ -275,4 +296,4 @@ def ask(compiled_graph,question):
275
 
276
  })
277
 
278
- return legitimate_result['final_answer']
 
14
  import ast, json
15
 
16
  from langchain_community.tools import DuckDuckGoSearchRun
17
+ import os
18
+ from langchain_huggingface import HuggingFaceEndpoint , ChatHuggingFace
19
+ from langchain import LLMChain, PromptTemplate
20
+
21
def get_gpt_and_answer(question):
    """Query an open-weight chat model on the Hugging Face Inference API.

    Builds a remote text-generation endpoint for ``openai/gpt-oss-120b``,
    wraps it in a chat interface, formats *question* into a simple
    Question/Answer prompt, and returns the model's generated answer.

    Parameters
    ----------
    question : str
        The user question to send to the model.

    Returns
    -------
    str
        The model's generated answer text.

    Raises
    ------
    KeyError
        If the ``HUGGINGFACEHUB_API_TOKEN`` environment variable is not set.
    """
    # Remote text-generation endpoint on the Hugging Face Inference API.
    # NOTE(review): the endpoint and chain are rebuilt on every call; if this
    # becomes a hot path, hoist construction to module level.
    endpoint = HuggingFaceEndpoint(
        repo_id="openai/gpt-oss-120b",
        task="text-generation",
        huggingfacehub_api_token=os.environ["HUGGINGFACEHUB_API_TOKEN"],
        provider="auto",  # let Hugging Face pick an available inference provider
    )

    # Wrap the raw endpoint in a chat interface so the prompt is sent with the
    # model's proper chat template.
    chat_model = ChatHuggingFace(llm=endpoint)

    template = """
Question: {question}
Answer:
"""
    prompt = PromptTemplate.from_template(template)

    # LCEL composition (prompt | model) replaces the deprecated LLMChain.
    # The chat model returns an AIMessage; .content is the answer text,
    # equivalent to LLMChain's result['text'].
    chain = prompt | chat_model
    result = chain.invoke({"question": question})
    return result.content
45
 
46
 
47
  ########## State ############
 
108
  f"- Import modules only if needed (e.g. datetime, math)"
109
  )
110
 
111
+
112
+ state["answer_code"] = get_gpt_and_answer(prompt).strip()
 
 
 
113
 
114
 
115
  return state
 
150
  )
151
 
152
 
153
+ response = get_gpt_and_answer(prompt).strip()
 
 
154
 
155
  # 3️⃣ Try to safely parse
156
  try:
 
189
  "- Do not include tool names, prefixes, or metadata.\n"
190
  "- If the context contains partial hints, you can infer the answer from general knowledge of the same topic.\n"
191
  "- If the question asks about an attached file or audio, reply briefly that you cannot access attachments or audio files."
192
+ "- If the context lacks key details or references a file, start with: I'm not sure because the question depends on missing data or an attached file. Then, add what you reasonably know about the topic."
193
  "- Final answer should be complete text not part of answer"
194
  "Final Answer:"
195
  )
196
 
197
+ state["final_answer"] = get_gpt_and_answer(prompt)
 
198
 
199
  return state
200
 
 
218
  - Output nothing else. No punctuation, no quotes, no explanation.
219
  - If unsure, default to LLM.
220
  """
221
+
222
+ state["answer_type"] = get_gpt_and_answer(prompt)
 
223
 
224
  return state
225
 
 
296
 
297
  })
298
 
299
+ return legitimate_result['final_answer'] #,legitimate_result