jinysun committed on
Commit
ed7660a
·
verified ·
1 Parent(s): 79ef66b

Update agent.py

Browse files
Files changed (1) hide show
  1. agent.py +22 -14
agent.py CHANGED
@@ -12,11 +12,15 @@ from prompts import FORMAT_INSTRUCTIONS, QUESTION_PROMPT, QUESTION_PROMPT1, SUFF
12
  from tools import make_tools
13
 
14
  from rmrkl import ChatZeroShotAgent, RetryAgentExecutor
15
-
16
  import base64
17
  from io import BytesIO
18
  from PIL import Image
 
19
  from langchain_openai import ChatOpenAI , OpenAI
 
 
 
20
 
21
  def convert_to_base64(pil_image):
22
  buffered = BytesIO()
@@ -25,7 +29,7 @@ def convert_to_base64(pil_image):
25
  return img_str
26
 
27
  def _make_llm(model, temp, api_key, streaming: bool = False):
28
- if model.startswith("claude") or model.startswith("gpt-3"):
29
  llm = OpenAI(
30
  temperature=temp,
31
  model_name=model,
@@ -33,7 +37,7 @@ max_tokens = 5000,
33
  openai_api_key=api_key,
34
  base_url="https://www.dmxapi.com/v1"
35
  )
36
- elif model.startswith("gpt-4o-2024-11-20") or model.startswith("deepseek"):
37
  llm = ChatOpenAI(model=model,
38
  temperature = 0.1,
39
 
@@ -42,7 +46,10 @@ max_tokens = 5000,
42
  callbacks=[StreamingStdOutCallbackHandler()],
43
  openai_api_key=api_key,base_url="https://www.dmxapi.com/v1"
44
  )
45
-
 
 
 
46
  else:
47
  raise ValueError(f"Invalid model name: {model}")
48
  return llm
@@ -53,7 +60,7 @@ class TeLLAgent:
53
  self,
54
  tools=None,
55
  model1: str = "deepseek-ai/DeepSeek-R1",
56
- model2: str = "deepseek-ai/DeepSeek-V3",
57
  tools_model="gpt-4o-2024-11-20",
58
  temp=0.1,
59
  max_iterations=50,
@@ -99,15 +106,15 @@ class TeLLAgent:
99
  tools,
100
  suffix=SUFFIX,
101
  format_instructions=FORMAT_INSTRUCTIONS,
102
- question_prompt=QUESTION_PROMPT,return_intermediate_steps=True ,handle_parsing_errors=True
103
  ),
104
  verbose=True,
105
- max_iterations=max_iterations , return_intermediate_steps=True ,handle_parsing_errors=True
106
  )
107
 
108
 
109
  def run(self, prompt):
110
-
111
  outputs = self.agent_executor1.invoke( {"input": prompt})
112
  if outputs["intermediate_steps"] ==[]:
113
  prompt = str(' ' +outputs["input"]+ ' ' + outputs["output"].split('Action:')[0] )
@@ -115,11 +122,12 @@ class TeLLAgent:
115
  else:
116
  prompt = str(' ' + outputs["input"] + ' ' + outputs["intermediate_steps"][0][0].log.split('Action:')[0])
117
  outputs = self.agent_executor2.invoke( {"input": prompt})
118
- return outputs
119
-
120
  if __name__ == '__main__':
121
- chem_model = TeLLAgent( temp=0.1, streaming=False,
122
  openai_api_key =r'sk-itPrztYm9F6XZZpsBMJB9O7Vq0pYUABVVBSoThuBxEGTnDik',
123
- image_path= r"C:\Users\BM109X32G-10GPU-02\Pictures\1735356359936.jpg"
124
- )
125
- chem_model.run(r"""what is Y20""")
 
 
12
  from tools import make_tools
13
 
14
  from rmrkl import ChatZeroShotAgent, RetryAgentExecutor
15
+ from langchain_ollama import OllamaLLM
16
  import base64
17
  from io import BytesIO
18
  from PIL import Image
19
+
20
  from langchain_openai import ChatOpenAI , OpenAI
21
+ from langchain.agents import load_tools, initialize_agent, AgentType
22
+ from langchain.llms import OpenAI
23
+
24
 
25
  def convert_to_base64(pil_image):
26
  buffered = BytesIO()
 
29
  return img_str
30
 
31
  def _make_llm(model, temp, api_key, streaming: bool = False):
32
+ if model.startswith("claude") :
33
  llm = OpenAI(
34
  temperature=temp,
35
  model_name=model,
 
37
  openai_api_key=api_key,
38
  base_url="https://www.dmxapi.com/v1"
39
  )
40
+ elif model.startswith("gpt") or model.startswith("deepseek"):
41
  llm = ChatOpenAI(model=model,
42
  temperature = 0.1,
43
 
 
46
  callbacks=[StreamingStdOutCallbackHandler()],
47
  openai_api_key=api_key,base_url="https://www.dmxapi.com/v1"
48
  )
49
+ elif model.startswith("llama") :
50
+ llm = OllamaLLM(model=model,
51
+ temperature = 0.1,
52
+ )
53
  else:
54
  raise ValueError(f"Invalid model name: {model}")
55
  return llm
 
60
  self,
61
  tools=None,
62
  model1: str = "deepseek-ai/DeepSeek-R1",
63
+ model2: str = "gpt-4o-2024-11-20",
64
  tools_model="gpt-4o-2024-11-20",
65
  temp=0.1,
66
  max_iterations=50,
 
106
  tools,
107
  suffix=SUFFIX,
108
  format_instructions=FORMAT_INSTRUCTIONS,
109
+ question_prompt=QUESTION_PROMPT ,handle_parsing_errors=True
110
  ),
111
  verbose=True,
112
+ max_iterations=max_iterations ,handle_parsing_errors=True
113
  )
114
 
115
 
116
  def run(self, prompt):
117
+ prompt = prompt + ' ' + str(self.file_path) + ' ' + str(self.image_path)
118
  outputs = self.agent_executor1.invoke( {"input": prompt})
119
  if outputs["intermediate_steps"] ==[]:
120
  prompt = str(' ' +outputs["input"]+ ' ' + outputs["output"].split('Action:')[0] )
 
122
  else:
123
  prompt = str(' ' + outputs["input"] + ' ' + outputs["intermediate_steps"][0][0].log.split('Action:')[0])
124
  outputs = self.agent_executor2.invoke( {"input": prompt})
125
+ return outputs['output']
126
+
127
  if __name__ == '__main__':
128
+ chem_model = TeLLAgent( temp=0.1, streaming= True,
129
  openai_api_key =r'sk-itPrztYm9F6XZZpsBMJB9O7Vq0pYUABVVBSoThuBxEGTnDik',
130
+ image_path= r"C:\Users\BM109X32G-10GPU-02\Desktop\acceptor\1.png" ,file_path = r"..." )
131
+ # A = chem_model.run(r"""who are you""")
132
+ # A = chem_model.run(r"""I want to know some basic chemical properties, HOMO /LUMO and PCE values of molecules CC(C)CCCC(C)CCC1=C(/C=C\2/C(=C(C#N)C#N)C3=C(C=C(C(=C3)F)F)C2=O)SC4=C1N(CCC(C)CCCC(C)C)C5=C4C6=NSN=C6C7=C5N(CCC(C)CCCC(C)C)C8=C7SC(=C8CCC(C)CCCC(C)C)/C=C\9/C(=C(C#N)C#N)C%10=C(C=C(C(=C%10)F)F)C9=O""")
133
+ a = chem_model.run(r""" Compare the PCE and similarity of acceptor Y6 and acceptor in image""")