import os
from langchain_community.agent_toolkits.load_tools import load_tools
from langchain.base_language import BaseLanguageModel
from tool import *
def drug_tools(llm: BaseLanguageModel, api_keys: dict = None, verbose=True, image_path = r"...", file_path = r"..."):
    """Assemble the tool list for the drug-discovery agent.

    Args:
        llm: Language model passed to tools that need one (e.g. codewriter).
        api_keys: Optional mapping of API keys; falls back to environment
            variables (SERP_API_KEY, OPENAI_API_KEY, SEMANTIC_SCHOLAR_API_KEY,
            CHEMSPACE_API_KEY) for any key not supplied.
        verbose: Accepted for interface compatibility; currently unused.
        image_path: Path handed to Imageanalysis when not None.
            NOTE(review): the default r"..." is truthy, so the image tool is
            added even when no real path is given — confirm intended.
        file_path: Path handed to pdfreader when not None (same caveat).

    Returns:
        A list of instantiated tools for the agent.
    """
    # Avoid the shared-mutable-default pitfall: never mutate a dict default.
    if api_keys is None:
        api_keys = {}
    serp_api_key = api_keys.get("SERP_API_KEY") or os.getenv("SERP_API_KEY")
    openai_api_key = api_keys.get("OPENAI_API_KEY") or os.getenv("OPENAI_API_KEY")
    semantic_scholar_api_key = api_keys.get("SEMANTIC_SCHOLAR_API_KEY") or os.getenv(
        "SEMANTIC_SCHOLAR_API_KEY"
    )
    chemspace_api_key = api_keys.get("CHEMSPACE_API_KEY") or os.getenv(
        "CHEMSPACE_API_KEY"
    )
    # Built-in LangChain tools.
    all_tools = load_tools(
        [
            "wikipedia",
            "human",
        ]
    )
    # Project tools (constructors imported via `from tool import *`).
    all_tools += [
        rag(openai_api_key),
        codewriter(llm=llm, openai_api_key=openai_api_key),
        graphconverter(),
        Query2SMILES(chemspace_api_key),
        Mol2SMILES(chemspace_api_key),
        Query2CAS(),
        SMILES2Name(),
        SMILES2SAScore(),
        SMILES2LogP(),
        SMILES2Properties(),
        MolSimilarity(),
        SMILES2Weight(),
        FuncGroups(),
        QMGen(),
        molgen(),
        dap_predictor(),
        druglike(),
        ADMETLab(),
    ]
    # Optional tools, enabled only when their credentials/inputs exist.
    if serp_api_key:
        all_tools += [WebSearch(serp_api_key)]
    if file_path is not None:
        all_tools += [pdfreader(file_path)]
    if image_path is not None:
        all_tools += [Imageanalysis(image_path)]
    return all_tools
def make_tools(llm: BaseLanguageModel, api_keys: dict = None, verbose=True, image_path = r"...", file_path = r"..."):
    """Assemble the tool list for the materials/OPV agent.

    Differs from drug_tools by including the donor/acceptor/HOMO-LUMO
    predictors and dap_screen instead of the drug-likeness tools.

    Args:
        llm: Language model passed to tools that need one (e.g. codewriter).
        api_keys: Optional mapping of API keys; falls back to environment
            variables (SERP_API_KEY, OPENAI_API_KEY, SEMANTIC_SCHOLAR_API_KEY,
            CHEMSPACE_API_KEY) for any key not supplied.
        verbose: Accepted for interface compatibility; currently unused.
        image_path: Path handed to Imageanalysis when not None.
            NOTE(review): the default r"..." is truthy, so the image tool is
            added even when no real path is given — confirm intended.
        file_path: Path handed to pdfreader when not None (same caveat).

    Returns:
        A list of instantiated tools for the agent.
    """
    # Avoid the shared-mutable-default pitfall: never mutate a dict default.
    if api_keys is None:
        api_keys = {}
    serp_api_key = api_keys.get("SERP_API_KEY") or os.getenv("SERP_API_KEY")
    openai_api_key = api_keys.get("OPENAI_API_KEY") or os.getenv("OPENAI_API_KEY")
    semantic_scholar_api_key = api_keys.get("SEMANTIC_SCHOLAR_API_KEY") or os.getenv(
        "SEMANTIC_SCHOLAR_API_KEY"
    )
    chemspace_api_key = api_keys.get("CHEMSPACE_API_KEY") or os.getenv(
        "CHEMSPACE_API_KEY"
    )
    # Built-in LangChain tools.
    all_tools = load_tools(
        [
            "wikipedia",
            "human",
        ]
    )
    # Project tools (constructors imported via `from tool import *`).
    all_tools += [
        rag(openai_api_key),
        codewriter(llm=llm, openai_api_key=openai_api_key),
        graphconverter(),
        Query2SMILES(chemspace_api_key),
        Mol2SMILES(chemspace_api_key),
        Query2CAS(),
        SMILES2Name(),
        SMILES2SAScore(),
        SMILES2LogP(),
        SMILES2Properties(),
        MolSimilarity(),
        SMILES2Weight(),
        FuncGroups(),
        donor_predictor(),
        acceptor_predictor(),
        homolumo_predictor(),
        dap_screen(),
        molgen(),
        dap_predictor(),
    ]
    # Optional tools, enabled only when their credentials/inputs exist.
    if serp_api_key:
        all_tools += [WebSearch(serp_api_key)]
    if file_path is not None:
        all_tools += [pdfreader(file_path)]
    if image_path is not None:
        all_tools += [Imageanalysis(image_path)]
    return all_tools