Kartikeyabe22 commited on
Commit
840618d
·
0 Parent(s):

Initial commit

Browse files
Files changed (5) hide show
  1. .gitignore +1 -0
  2. app.py +52 -0
  3. how_to_run.txt +1 -0
  4. requirements.txt +32 -0
  5. tools_agents.ipynb +477 -0
.gitignore ADDED
@@ -0,0 +1 @@
 
 
1
+ .env
app.py ADDED
@@ -0,0 +1,52 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
"""Streamlit chat app: a LangChain agent that can search the web,
Wikipedia, and arXiv, streaming its reasoning via StreamlitCallbackHandler."""

import os

import streamlit as st
from dotenv import load_dotenv
from langchain.agents import initialize_agent, AgentType
from langchain.callbacks import StreamlitCallbackHandler
from langchain_community.tools import ArxivQueryRun, WikipediaQueryRun, DuckDuckGoSearchRun
from langchain_community.utilities import ArxivAPIWrapper, WikipediaAPIWrapper
from langchain_groq import ChatGroq

# Load variables from a local .env file (e.g. a default GROQ_API_KEY),
# so the sidebar key is optional when one is configured in the environment.
load_dotenv()

## Arxiv and Wikipedia tools (results truncated to keep agent context small)
arxiv_wrapper = ArxivAPIWrapper(top_k_results=1, doc_content_chars_max=200)
arxiv = ArxivQueryRun(api_wrapper=arxiv_wrapper)

api_wrapper = WikipediaAPIWrapper(top_k_results=1, doc_content_chars_max=200)
wiki = WikipediaQueryRun(api_wrapper=api_wrapper)

search = DuckDuckGoSearchRun(name="Search")


st.title("🔎 LangChain - Chat with search")
"""
In this example, we're using `StreamlitCallbackHandler` to display the thoughts and actions of an agent in an interactive Streamlit app.
Try more LangChain 🤝 Streamlit Agent examples at [github.com/langchain-ai/streamlit-agent](https://github.com/langchain-ai/streamlit-agent).
"""

## Sidebar for settings
st.sidebar.title("Settings")
api_key = st.sidebar.text_input("Enter your Groq API Key:", type="password")
# Fall back to the environment if the user left the field empty.
if not api_key:
    api_key = os.getenv("GROQ_API_KEY", "")

if "messages" not in st.session_state:
    # FIX: role was misspelled "assisstant", inconsistent with the
    # 'assistant' role appended after each agent response below.
    st.session_state["messages"] = [
        {"role": "assistant", "content": "Hi,I'm a chatbot who can search the web. How can I help you?"}
    ]

# Replay the conversation so far.
for msg in st.session_state.messages:
    st.chat_message(msg["role"]).write(msg["content"])

if prompt := st.chat_input(placeholder="What is machine learning?"):
    st.session_state.messages.append({"role": "user", "content": prompt})
    st.chat_message("user").write(prompt)

    # Guard: ChatGroq raises an opaque auth error without a key.
    if not api_key:
        st.info("Please add your Groq API key in the sidebar to continue.")
        st.stop()

    llm = ChatGroq(groq_api_key=api_key, model_name="llama-3.1-8b-instant", streaming=True)
    tools = [search, arxiv, wiki]

    # FIX: the keyword is `handle_parsing_errors` (was misspelled
    # `handling_parsing_errors`, so parsing errors were not handled).
    search_agent = initialize_agent(
        tools,
        llm,
        agent=AgentType.ZERO_SHOT_REACT_DESCRIPTION,
        handle_parsing_errors=True,
    )

    with st.chat_message("assistant"):
        # Streams the agent's intermediate thoughts/actions into the container.
        st_cb = StreamlitCallbackHandler(st.container(), expand_new_thoughts=False)
        response = search_agent.run(st.session_state.messages, callbacks=[st_cb])
        st.session_state.messages.append({"role": "assistant", "content": response})
        st.write(response)
how_to_run.txt ADDED
@@ -0,0 +1 @@
 
 
1
+ This project was developed and run inside the "Project1" virtual environment. Activate that environment (or install the packages in requirements.txt), then run the app with: streamlit run app.py
requirements.txt ADDED
@@ -0,0 +1,32 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ langchain
2
+ python-dotenv
3
+ ipykernel
4
+ langchain-community
5
+ pypdf
6
+ bs4
7
+ arxiv
8
+ pymupdf
9
+ wikipedia
10
+ langchain-text-splitters
11
+ langchain-openai
12
+ chromadb
13
+ sentence_transformers
14
+ langchain_huggingface
15
+ faiss-cpu
16
+ langchain_chroma
17
+ duckdb
18
+ pandas
19
+ openai
20
+ langchain-groq
21
+ duckduckgo-search
22
+ pymupdf
23
+ arxiv
24
+ wikipedia
25
+ mysql-connector-python
26
+ SQLAlchemy
27
+ validators==0.28.1
28
+ youtube_transcript_api
29
+ unstructured
30
+ pytube
31
+ numexpr
32
+ huggingface_hub
tools_agents.ipynb ADDED
@@ -0,0 +1,477 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "cells": [
3
+ {
4
+ "cell_type": "markdown",
5
+ "metadata": {},
6
+ "source": [
7
+ "### Search Engine With Tools And Agents"
8
+ ]
9
+ },
10
+ {
11
+ "cell_type": "code",
12
+ "execution_count": 1,
13
+ "metadata": {},
14
+ "outputs": [],
15
+ "source": [
16
+ "## Arxiv--Research\n",
17
+ "## Tools creation\n",
18
+ "from langchain_community.tools import ArxivQueryRun,WikipediaQueryRun\n",
19
+ "from langchain_community.utilities import WikipediaAPIWrapper,ArxivAPIWrapper"
20
+ ]
21
+ },
22
+ {
23
+ "cell_type": "code",
24
+ "execution_count": 2,
25
+ "metadata": {},
26
+ "outputs": [
27
+ {
28
+ "data": {
29
+ "text/plain": [
30
+ "'wikipedia'"
31
+ ]
32
+ },
33
+ "execution_count": 2,
34
+ "metadata": {},
35
+ "output_type": "execute_result"
36
+ }
37
+ ],
38
+ "source": [
39
+ "## Used the inbuilt tool of wikipedia\n",
40
+ "api_wrapper_wiki=WikipediaAPIWrapper(top_k_results=1,doc_content_chars_max=250)\n",
41
+ "wiki=WikipediaQueryRun(api_wrapper=api_wrapper_wiki)\n",
42
+ "wiki.name"
43
+ ]
44
+ },
45
+ {
46
+ "cell_type": "code",
47
+ "execution_count": 3,
48
+ "metadata": {},
49
+ "outputs": [
50
+ {
51
+ "name": "stdout",
52
+ "output_type": "stream",
53
+ "text": [
54
+ "arxiv\n"
55
+ ]
56
+ }
57
+ ],
58
+ "source": [
59
+ "api_wrapper_arxiv=ArxivAPIWrapper(top_k_results=1,doc_content_chars_max=250)\n",
60
+ "arxiv=ArxivQueryRun(api_wrapper=api_wrapper_arxiv)\n",
61
+ "print(arxiv.name)"
62
+ ]
63
+ },
64
+ {
65
+ "cell_type": "code",
66
+ "execution_count": 4,
67
+ "metadata": {},
68
+ "outputs": [],
69
+ "source": [
70
+ "tools=[wiki,arxiv]"
71
+ ]
72
+ },
73
+ {
74
+ "cell_type": "code",
75
+ "execution_count": 7,
76
+ "metadata": {},
77
+ "outputs": [
78
+ {
79
+ "name": "stderr",
80
+ "output_type": "stream",
81
+ "text": [
82
+ "c:\\AI\\Krish Naik Gen AI\\Project1\\.venv\\Lib\\site-packages\\sentence_transformers\\cross_encoder\\CrossEncoder.py:13: TqdmExperimentalWarning: Using `tqdm.autonotebook.tqdm` in notebook mode. Use `tqdm.tqdm` instead to force console mode (e.g. in jupyter console)\n",
83
+ " from tqdm.autonotebook import tqdm, trange\n"
84
+ ]
85
+ }
86
+ ],
87
+ "source": [
88
+ "import os\n",
89
+ "from dotenv import load_dotenv\n",
90
+ "load_dotenv() \n",
91
+ "#load all the environment variables\n",
92
+ "os.environ['HF_TOKEN'] = os.getenv(\"HF_TOKEN\")\n",
93
+ "from langchain_huggingface import HuggingFaceEmbeddings\n",
94
+ "embeddings=HuggingFaceEmbeddings(model_name=\"all-MiniLM-L6-v2\")"
95
+ ]
96
+ },
97
+ {
98
+ "cell_type": "code",
99
+ "execution_count": 8,
100
+ "metadata": {},
101
+ "outputs": [
102
+ {
103
+ "name": "stderr",
104
+ "output_type": "stream",
105
+ "text": [
106
+ "USER_AGENT environment variable not set, consider setting it to identify your requests.\n"
107
+ ]
108
+ }
109
+ ],
110
+ "source": [
111
+ "## Custom tools[RAG Tool]\n",
112
+ "from langchain_community.document_loaders import WebBaseLoader\n",
113
+ "from langchain_community.vectorstores import FAISS\n",
114
+ "from langchain_text_splitters import RecursiveCharacterTextSplitter"
115
+ ]
116
+ },
117
+ {
118
+ "cell_type": "code",
119
+ "execution_count": 9,
120
+ "metadata": {},
121
+ "outputs": [
122
+ {
123
+ "data": {
124
+ "text/plain": [
125
+ "VectorStoreRetriever(tags=['FAISS', 'HuggingFaceEmbeddings'], vectorstore=<langchain_community.vectorstores.faiss.FAISS object at 0x0000028274D63CD0>, search_kwargs={})"
126
+ ]
127
+ },
128
+ "execution_count": 9,
129
+ "metadata": {},
130
+ "output_type": "execute_result"
131
+ }
132
+ ],
133
+ "source": [
134
+ "loader=WebBaseLoader(\"https://docs.smith.langchain.com/\")\n",
135
+ "docs=loader.load()\n",
136
+ "documents=RecursiveCharacterTextSplitter(chunk_size=1000,chunk_overlap=200).split_documents(docs)\n",
137
+ "vectordb=FAISS.from_documents(documents,embeddings)\n",
138
+ "retriever=vectordb.as_retriever()\n",
139
+ "retriever"
140
+ ]
141
+ },
142
+ {
143
+ "cell_type": "code",
144
+ "execution_count": 10,
145
+ "metadata": {},
146
+ "outputs": [
147
+ {
148
+ "data": {
149
+ "text/plain": [
150
+ "'langsmith-search'"
151
+ ]
152
+ },
153
+ "execution_count": 10,
154
+ "metadata": {},
155
+ "output_type": "execute_result"
156
+ }
157
+ ],
158
+ "source": [
159
+ "from langchain.tools.retriever import create_retriever_tool\n",
160
+ "retriever_tool=create_retriever_tool(retriever,\"langsmith-search\",\"Search any information about Langsmith \")\n",
161
+ "retriever_tool.name"
162
+ ]
163
+ },
164
+ {
165
+ "cell_type": "code",
166
+ "execution_count": 11,
167
+ "metadata": {},
168
+ "outputs": [],
169
+ "source": [
170
+ "tools=[wiki,arxiv,retriever_tool]"
171
+ ]
172
+ },
173
+ {
174
+ "cell_type": "code",
175
+ "execution_count": 12,
176
+ "metadata": {},
177
+ "outputs": [
178
+ {
179
+ "data": {
180
+ "text/plain": [
181
+ "[WikipediaQueryRun(api_wrapper=WikipediaAPIWrapper(wiki_client=<module 'wikipedia' from 'c:\\\\AI\\\\Krish Naik Gen AI\\\\Project1\\\\.venv\\\\Lib\\\\site-packages\\\\wikipedia\\\\__init__.py'>, top_k_results=1, lang='en', load_all_available_meta=False, doc_content_chars_max=250)),\n",
182
+ " ArxivQueryRun(api_wrapper=ArxivAPIWrapper(arxiv_search=<class 'arxiv.Search'>, arxiv_exceptions=(<class 'arxiv.ArxivError'>, <class 'arxiv.UnexpectedEmptyPageError'>, <class 'arxiv.HTTPError'>), top_k_results=1, ARXIV_MAX_QUERY_LENGTH=300, continue_on_failure=False, load_max_docs=100, load_all_available_meta=False, doc_content_chars_max=250)),\n",
183
+ " Tool(name='langsmith-search', description='Search any information about Langsmith ', args_schema=<class 'langchain_core.tools.retriever.RetrieverInput'>, func=functools.partial(<function _get_relevant_documents at 0x0000028247DF2F20>, retriever=VectorStoreRetriever(tags=['FAISS', 'HuggingFaceEmbeddings'], vectorstore=<langchain_community.vectorstores.faiss.FAISS object at 0x0000028274D63CD0>, search_kwargs={}), document_prompt=PromptTemplate(input_variables=['page_content'], input_types={}, partial_variables={}, template='{page_content}'), document_separator='\\n\\n'), coroutine=functools.partial(<function _aget_relevant_documents at 0x0000028247DF3060>, retriever=VectorStoreRetriever(tags=['FAISS', 'HuggingFaceEmbeddings'], vectorstore=<langchain_community.vectorstores.faiss.FAISS object at 0x0000028274D63CD0>, search_kwargs={}), document_prompt=PromptTemplate(input_variables=['page_content'], input_types={}, partial_variables={}, template='{page_content}'), document_separator='\\n\\n'))]"
184
+ ]
185
+ },
186
+ "execution_count": 12,
187
+ "metadata": {},
188
+ "output_type": "execute_result"
189
+ }
190
+ ],
191
+ "source": [
192
+ "tools"
193
+ ]
194
+ },
195
+ {
196
+ "cell_type": "code",
197
+ "execution_count": null,
198
+ "metadata": {},
199
+ "outputs": [],
200
+ "source": [
201
+ "## Run all this tools with Agents and LLM Models\n",
202
+ "\n",
203
+ "## Tools, LLM-->AgentExecutor\n",
204
+ "from langchain_groq import ChatGroq\n",
205
+ "from dotenv import load_dotenv\n",
206
+ "import openai\n",
207
+ "load_dotenv()\n",
208
+ "import os\n",
209
+ "\n",
210
+ "groq_api_key=os.getenv(\"GROQ_API_KEY\")\n",
211
+ "openai.api_key=os.getenv(\"OPENAI_API_KEY\")\n",
212
+ "\n",
213
+ "llm=ChatGroq(groq_api_key=groq_api_key,model_name=\"llama-3.1-8b-instant\")"
214
+ ]
215
+ },
216
+ {
217
+ "cell_type": "code",
218
+ "execution_count": 13,
219
+ "metadata": {},
220
+ "outputs": [
221
+ {
222
+ "data": {
223
+ "text/plain": [
224
+ "[SystemMessagePromptTemplate(prompt=PromptTemplate(input_variables=[], template='You are a helpful assistant')),\n",
225
+ " MessagesPlaceholder(variable_name='chat_history', optional=True),\n",
226
+ " HumanMessagePromptTemplate(prompt=PromptTemplate(input_variables=['input'], template='{input}')),\n",
227
+ " MessagesPlaceholder(variable_name='agent_scratchpad')]"
228
+ ]
229
+ },
230
+ "execution_count": 13,
231
+ "metadata": {},
232
+ "output_type": "execute_result"
233
+ }
234
+ ],
235
+ "source": [
236
+ "## Prompt Template\n",
237
+ "from langchain import hub\n",
238
+ "prompt=hub.pull(\"hwchase17/openai-functions-agent\")\n",
239
+ "prompt.messages"
240
+ ]
241
+ },
242
+ {
243
+ "cell_type": "code",
244
+ "execution_count": 15,
245
+ "metadata": {},
246
+ "outputs": [
247
+ {
248
+ "data": {
249
+ "text/plain": [
250
+ "RunnableAssign(mapper={\n",
251
+ " agent_scratchpad: RunnableLambda(lambda x: format_to_openai_tool_messages(x['intermediate_steps']))\n",
252
+ "})\n",
253
+ "| ChatPromptTemplate(input_variables=['agent_scratchpad', 'input'], input_types={'chat_history': typing.List[typing.Union[langchain_core.messages.ai.AIMessage, langchain_core.messages.human.HumanMessage, langchain_core.messages.chat.ChatMessage, langchain_core.messages.system.SystemMessage, langchain_core.messages.function.FunctionMessage, langchain_core.messages.tool.ToolMessage]], 'agent_scratchpad': typing.List[typing.Union[langchain_core.messages.ai.AIMessage, langchain_core.messages.human.HumanMessage, langchain_core.messages.chat.ChatMessage, langchain_core.messages.system.SystemMessage, langchain_core.messages.function.FunctionMessage, langchain_core.messages.tool.ToolMessage]]}, metadata={'lc_hub_owner': 'hwchase17', 'lc_hub_repo': 'openai-functions-agent', 'lc_hub_commit_hash': 'a1655024b06afbd95d17449f21316291e0726f13dcfaf990cc0d18087ad689a5'}, messages=[SystemMessagePromptTemplate(prompt=PromptTemplate(input_variables=[], template='You are a helpful assistant')), MessagesPlaceholder(variable_name='chat_history', optional=True), HumanMessagePromptTemplate(prompt=PromptTemplate(input_variables=['input'], template='{input}')), MessagesPlaceholder(variable_name='agent_scratchpad')])\n",
254
+ "| RunnableBinding(bound=ChatGroq(client=<groq.resources.chat.completions.Completions object at 0x0000016EC8FE4D90>, async_client=<groq.resources.chat.completions.AsyncCompletions object at 0x0000016EC8FE5900>, model_name='Llama3-8b-8192', groq_api_key=SecretStr('**********')), kwargs={'tools': [{'type': 'function', 'function': {'name': 'wikipedia', 'description': 'A wrapper around Wikipedia. Useful for when you need to answer general questions about people, places, companies, facts, historical events, or other subjects. Input should be a search query.', 'parameters': {'type': 'object', 'properties': {'query': {'description': 'query to look up on wikipedia', 'type': 'string'}}, 'required': ['query']}}}, {'type': 'function', 'function': {'name': 'arxiv', 'description': 'A wrapper around Arxiv.org Useful for when you need to answer questions about Physics, Mathematics, Computer Science, Quantitative Biology, Quantitative Finance, Statistics, Electrical Engineering, and Economics from scientific articles on arxiv.org. Input should be a search query.', 'parameters': {'type': 'object', 'properties': {'query': {'description': 'search query to look up', 'type': 'string'}}, 'required': ['query']}}}, {'type': 'function', 'function': {'name': 'langsmith-search', 'description': 'Search any information about Langsmith ', 'parameters': {'type': 'object', 'properties': {'query': {'description': 'query to look up in retriever', 'type': 'string'}}, 'required': ['query']}}}]})\n",
255
+ "| OpenAIToolsAgentOutputParser()"
256
+ ]
257
+ },
258
+ "execution_count": 15,
259
+ "metadata": {},
260
+ "output_type": "execute_result"
261
+ }
262
+ ],
263
+ "source": [
264
+ "## Agents\n",
265
+ "from langchain.agents import create_openai_tools_agent\n",
266
+ "agent=create_openai_tools_agent(llm,tools,prompt)\n",
267
+ "agent"
268
+ ]
269
+ },
270
+ {
271
+ "cell_type": "code",
272
+ "execution_count": 16,
273
+ "metadata": {},
274
+ "outputs": [
275
+ {
276
+ "data": {
277
+ "text/plain": [
278
+ "AgentExecutor(verbose=True, agent=RunnableMultiActionAgent(runnable=RunnableAssign(mapper={\n",
279
+ " agent_scratchpad: RunnableLambda(lambda x: format_to_openai_tool_messages(x['intermediate_steps']))\n",
280
+ "})\n",
281
+ "| ChatPromptTemplate(input_variables=['agent_scratchpad', 'input'], input_types={'chat_history': typing.List[typing.Union[langchain_core.messages.ai.AIMessage, langchain_core.messages.human.HumanMessage, langchain_core.messages.chat.ChatMessage, langchain_core.messages.system.SystemMessage, langchain_core.messages.function.FunctionMessage, langchain_core.messages.tool.ToolMessage]], 'agent_scratchpad': typing.List[typing.Union[langchain_core.messages.ai.AIMessage, langchain_core.messages.human.HumanMessage, langchain_core.messages.chat.ChatMessage, langchain_core.messages.system.SystemMessage, langchain_core.messages.function.FunctionMessage, langchain_core.messages.tool.ToolMessage]]}, metadata={'lc_hub_owner': 'hwchase17', 'lc_hub_repo': 'openai-functions-agent', 'lc_hub_commit_hash': 'a1655024b06afbd95d17449f21316291e0726f13dcfaf990cc0d18087ad689a5'}, messages=[SystemMessagePromptTemplate(prompt=PromptTemplate(input_variables=[], template='You are a helpful assistant')), MessagesPlaceholder(variable_name='chat_history', optional=True), HumanMessagePromptTemplate(prompt=PromptTemplate(input_variables=['input'], template='{input}')), MessagesPlaceholder(variable_name='agent_scratchpad')])\n",
282
+ "| RunnableBinding(bound=ChatGroq(client=<groq.resources.chat.completions.Completions object at 0x0000016EC8FE4D90>, async_client=<groq.resources.chat.completions.AsyncCompletions object at 0x0000016EC8FE5900>, model_name='Llama3-8b-8192', groq_api_key=SecretStr('**********')), kwargs={'tools': [{'type': 'function', 'function': {'name': 'wikipedia', 'description': 'A wrapper around Wikipedia. Useful for when you need to answer general questions about people, places, companies, facts, historical events, or other subjects. Input should be a search query.', 'parameters': {'type': 'object', 'properties': {'query': {'description': 'query to look up on wikipedia', 'type': 'string'}}, 'required': ['query']}}}, {'type': 'function', 'function': {'name': 'arxiv', 'description': 'A wrapper around Arxiv.org Useful for when you need to answer questions about Physics, Mathematics, Computer Science, Quantitative Biology, Quantitative Finance, Statistics, Electrical Engineering, and Economics from scientific articles on arxiv.org. Input should be a search query.', 'parameters': {'type': 'object', 'properties': {'query': {'description': 'search query to look up', 'type': 'string'}}, 'required': ['query']}}}, {'type': 'function', 'function': {'name': 'langsmith-search', 'description': 'Search any information about Langsmith ', 'parameters': {'type': 'object', 'properties': {'query': {'description': 'query to look up in retriever', 'type': 'string'}}, 'required': ['query']}}}]})\n",
283
+ "| OpenAIToolsAgentOutputParser(), input_keys_arg=[], return_keys_arg=[], stream_runnable=True), tools=[WikipediaQueryRun(api_wrapper=WikipediaAPIWrapper(wiki_client=<module 'wikipedia' from 'e:\\\\UDemy Final\\\\Langchain\\\\venv\\\\lib\\\\site-packages\\\\wikipedia\\\\__init__.py'>, top_k_results=1, lang='en', load_all_available_meta=False, doc_content_chars_max=250)), ArxivQueryRun(api_wrapper=ArxivAPIWrapper(arxiv_search=<class 'arxiv.Search'>, arxiv_exceptions=(<class 'arxiv.ArxivError'>, <class 'arxiv.UnexpectedEmptyPageError'>, <class 'arxiv.HTTPError'>), top_k_results=1, ARXIV_MAX_QUERY_LENGTH=300, continue_on_failure=False, load_max_docs=100, load_all_available_meta=False, doc_content_chars_max=250, arxiv_result=<class 'arxiv.Result'>)), Tool(name='langsmith-search', description='Search any information about Langsmith ', args_schema=<class 'langchain_core.tools.RetrieverInput'>, func=functools.partial(<function _get_relevant_documents at 0x0000016ED39FA440>, retriever=VectorStoreRetriever(tags=['FAISS', 'OpenAIEmbeddings'], vectorstore=<langchain_community.vectorstores.faiss.FAISS object at 0x0000016EFFDD3460>), document_prompt=PromptTemplate(input_variables=['page_content'], template='{page_content}'), document_separator='\\n\\n'), coroutine=functools.partial(<function _aget_relevant_documents at 0x0000016ED39FA5F0>, retriever=VectorStoreRetriever(tags=['FAISS', 'OpenAIEmbeddings'], vectorstore=<langchain_community.vectorstores.faiss.FAISS object at 0x0000016EFFDD3460>), document_prompt=PromptTemplate(input_variables=['page_content'], template='{page_content}'), document_separator='\\n\\n'))])"
284
+ ]
285
+ },
286
+ "execution_count": 16,
287
+ "metadata": {},
288
+ "output_type": "execute_result"
289
+ }
290
+ ],
291
+ "source": [
292
+ "## Agent Executer\n",
293
+ "from langchain.agents import AgentExecutor\n",
294
+ "agent_executor=AgentExecutor(agent=agent,tools=tools,verbose=True)\n",
295
+ "agent_executor"
296
+ ]
297
+ },
298
+ {
299
+ "cell_type": "code",
300
+ "execution_count": 18,
301
+ "metadata": {},
302
+ "outputs": [
303
+ {
304
+ "name": "stdout",
305
+ "output_type": "stream",
306
+ "text": [
307
+ "\n",
308
+ "\n",
309
+ "\u001b[1m> Entering new AgentExecutor chain...\u001b[0m\n",
310
+ "\u001b[32;1m\u001b[1;3m\n",
311
+ "Invoking: `langsmith-search` with `{'query': 'Langsmith'}`\n",
312
+ "\n",
313
+ "\n",
314
+ "\u001b[0m\u001b[38;5;200m\u001b[1;3mGet started with LangSmith | 🦜️🛠️ LangSmith\n",
315
+ "\n",
316
+ "Skip to main contentGo to API DocsSearchGo to AppQuick startTutorialsHow-to guidesConceptsReferencePricingSelf-hostingLangGraph CloudQuick startOn this pageGet started with LangSmithLangSmith is a platform for building production-grade LLM applications. It allows you to closely monitor and evaluate your application, so you can ship quickly and with confidence. Use of LangChain is not necessary - LangSmith works on its own!1. Install LangSmith​PythonTypeScriptpip install -U langsmithyarn add langchain langsmith2. Create an API key​To create an API key head to the Settings page. Then click Create API Key.3. Set up your environment​Shellexport LANGCHAIN_TRACING_V2=trueexport LANGCHAIN_API_KEY=<your-api-key># The below examples use the OpenAI API, though it's not necessary in generalexport OPENAI_API_KEY=<your-openai-api-key>4. Log your first trace​We provide multiple ways to log traces to LangSmith. Below, we'll highlight\n",
317
+ "\n",
318
+ "\"revision_id\": \"beta\" },)import { Client, Run, Example } from \"langsmith\";import { evaluate } from \"langsmith/evaluation\";import { EvaluationResult } from \"langsmith/evaluation\";const client = new Client();// Define dataset: these are your test casesconst datasetName = \"Sample Dataset\";const dataset = await client.createDataset(datasetName, { description: \"A sample dataset in LangSmith.\",});await client.createExamples({ inputs: [ { postfix: \"to LangSmith\" }, { postfix: \"to Evaluations in LangSmith\" }, ], outputs: [ { output: \"Welcome to LangSmith\" }, { output: \"Welcome to Evaluations in LangSmith\" }, ], datasetId: dataset.id,});// Define your evaluatorconst exactMatch = async ( run: Run, example: Example): Promise<EvaluationResult> => { return { key: \"exact_match\", score: run.outputs?.output === example?.outputs?.output, };};await evaluate( (input: { postfix: string }) => ({ output: `Welcome ${input.postfix}` }), { data: datasetName, evaluators:\n",
319
+ "\n",
320
+ "score: run.outputs?.output === example?.outputs?.output, };};await evaluate( (input: { postfix: string }) => ({ output: `Welcome ${input.postfix}` }), { data: datasetName, evaluators: [exactMatch], metadata: { version: \"1.0.0\", revision_id: \"beta\", }, });Learn more about evaluation in the how-to guides.Was this page helpful?You can leave detailed feedback on GitHub.NextTutorials1. Install LangSmith2. Create an API key3. Set up your environment4. Log your first trace5. Run your first evaluationCommunityDiscordTwitterGitHubDocs CodeLangSmith SDKPythonJS/TSMoreHomepageBlogLangChain Python DocsLangChain JS/TS DocsCopyright © 2024 LangChain, Inc.\u001b[0m\u001b[32;1m\u001b[1;3mThe result of the tool call id \"call_6hkd\" is a webpage about LangSmith, a platform for building production-grade LLM applications.\u001b[0m\n",
321
+ "\n",
322
+ "\u001b[1m> Finished chain.\u001b[0m\n"
323
+ ]
324
+ },
325
+ {
326
+ "data": {
327
+ "text/plain": [
328
+ "{'input': 'Tell me about Langsmith',\n",
329
+ " 'output': 'The result of the tool call id \"call_6hkd\" is a webpage about LangSmith, a platform for building production-grade LLM applications.'}"
330
+ ]
331
+ },
332
+ "execution_count": 18,
333
+ "metadata": {},
334
+ "output_type": "execute_result"
335
+ }
336
+ ],
337
+ "source": [
338
+ "agent_executor.invoke({\"input\":\"Tell me about Langsmith\"})"
339
+ ]
340
+ },
341
+ {
342
+ "cell_type": "code",
343
+ "execution_count": 19,
344
+ "metadata": {},
345
+ "outputs": [
346
+ {
347
+ "name": "stdout",
348
+ "output_type": "stream",
349
+ "text": [
350
+ "\n",
351
+ "\n",
352
+ "\u001b[1m> Entering new AgentExecutor chain...\u001b[0m\n",
353
+ "\u001b[32;1m\u001b[1;3m\n",
354
+ "Invoking: `wikipedia` with `{'query': 'machine learning'}`\n",
355
+ "\n",
356
+ "\n",
357
+ "\u001b[0m\u001b[36;1m\u001b[1;3mPage: Machine learning\n",
358
+ "Summary: Machine learning (ML) is a field of study in artificial intelligence concerned with the development and study of statistical algorithms that can learn from data and generalize to unseen data and thus perform tasks with\u001b[0m\u001b[32;1m\u001b[1;3m\n",
359
+ "Invoking: `arxiv` with `{'query': 'machine learning'}`\n",
360
+ "\n",
361
+ "\n",
362
+ "\u001b[0m\u001b[33;1m\u001b[1;3mPublished: 2019-09-08\n",
363
+ "Title: Lecture Notes: Optimization for Machine Learning\n",
364
+ "Authors: Elad Hazan\n",
365
+ "Summary: Lecture notes on optimization for machine learning, derived from a course at\n",
366
+ "Princeton University and tutorials given in MLSS, Buenos Aires, as\u001b[0m\u001b[32;1m\u001b[1;3m\n",
367
+ "Invoking: `langsmith-search` with `{'query': 'Elad Hazan'}`\n",
368
+ "\n",
369
+ "\n",
370
+ "\u001b[0m\u001b[38;5;200m\u001b[1;3mGet started with LangSmith | 🦜️🛠️ LangSmith\n",
371
+ "\n",
372
+ "\"revision_id\": \"beta\" },)import { Client, Run, Example } from \"langsmith\";import { evaluate } from \"langsmith/evaluation\";import { EvaluationResult } from \"langsmith/evaluation\";const client = new Client();// Define dataset: these are your test casesconst datasetName = \"Sample Dataset\";const dataset = await client.createDataset(datasetName, { description: \"A sample dataset in LangSmith.\",});await client.createExamples({ inputs: [ { postfix: \"to LangSmith\" }, { postfix: \"to Evaluations in LangSmith\" }, ], outputs: [ { output: \"Welcome to LangSmith\" }, { output: \"Welcome to Evaluations in LangSmith\" }, ], datasetId: dataset.id,});// Define your evaluatorconst exactMatch = async ( run: Run, example: Example): Promise<EvaluationResult> => { return { key: \"exact_match\", score: run.outputs?.output === example?.outputs?.output, };};await evaluate( (input: { postfix: string }) => ({ output: `Welcome ${input.postfix}` }), { data: datasetName, evaluators:\n",
373
+ "\n",
374
+ "score: run.outputs?.output === example?.outputs?.output, };};await evaluate( (input: { postfix: string }) => ({ output: `Welcome ${input.postfix}` }), { data: datasetName, evaluators: [exactMatch], metadata: { version: \"1.0.0\", revision_id: \"beta\", }, });Learn more about evaluation in the how-to guides.Was this page helpful?You can leave detailed feedback on GitHub.NextTutorials1. Install LangSmith2. Create an API key3. Set up your environment4. Log your first trace5. Run your first evaluationCommunityDiscordTwitterGitHubDocs CodeLangSmith SDKPythonJS/TSMoreHomepageBlogLangChain Python DocsLangChain JS/TS DocsCopyright © 2024 LangChain, Inc.\n",
375
+ "\n",
376
+ "= traceable(async (user_input) => { const result = await client.chat.completions.create({ messages: [{ role: \"user\", content: user_input }], model: \"gpt-3.5-turbo\", }); return result.choices[0].message.content;});await pipeline(\"Hello, world!\")// Out: Hello there! How can I assist you today?View a sample output trace.Learn more about tracing in the how-to guides.5. Run your first evaluation​Evaluation requires a system to test, data to serve as test cases, and optionally evaluators to grade the results. Here we use a built-in accuracy evaluator.PythonTypeScriptfrom langsmith import Clientfrom langsmith.evaluation import evaluateclient = Client()# Define dataset: these are your test casesdataset_name = \"Sample Dataset\"dataset = client.create_dataset(dataset_name, description=\"A sample dataset in LangSmith.\")client.create_examples( inputs=[ {\"postfix\": \"to LangSmith\"}, {\"postfix\": \"to Evaluations in LangSmith\"}, ], outputs=[\u001b[0m\u001b[32;1m\u001b[1;3mThank you for the information about LangSmith! Based on this, I can provide a text response.\n",
377
+ "\n",
378
+ "Machine learning is a field of study in artificial intelligence that involves the development and use of algorithms that can learn from data and improve their performance on a task over time. These algorithms can be used to make predictions, classify objects, and make decisions, and they are widely used in many fields, including computer science, engineering, and healthcare.\n",
379
+ "\n",
380
+ "It's great to see LangSmith as a tool that can assist with machine learning tasks, providing a platform for users to easily evaluate and improve their models. With LangSmith, users can create datasets, define evaluators, and run evaluations to test their models and improve their performance.\n",
381
+ "\n",
382
+ "I hope this response provides a good overview of machine learning and LangSmith!\u001b[0m\n",
383
+ "\n",
384
+ "\u001b[1m> Finished chain.\u001b[0m\n"
385
+ ]
386
+ },
387
+ {
388
+ "data": {
389
+ "text/plain": [
390
+ "{'input': 'What is machine learning',\n",
391
+ " 'output': \"Thank you for the information about LangSmith! Based on this, I can provide a text response.\\n\\nMachine learning is a field of study in artificial intelligence that involves the development and use of algorithms that can learn from data and improve their performance on a task over time. These algorithms can be used to make predictions, classify objects, and make decisions, and they are widely used in many fields, including computer science, engineering, and healthcare.\\n\\nIt's great to see LangSmith as a tool that can assist with machine learning tasks, providing a platform for users to easily evaluate and improve their models. With LangSmith, users can create datasets, define evaluators, and run evaluations to test their models and improve their performance.\\n\\nI hope this response provides a good overview of machine learning and LangSmith!\"}"
392
+ ]
393
+ },
394
+ "execution_count": 19,
395
+ "metadata": {},
396
+ "output_type": "execute_result"
397
+ }
398
+ ],
399
+ "source": [
400
+ "agent_executor.invoke({\"input\":\"What is machine learning\"})"
401
+ ]
402
+ },
403
+ {
404
+ "cell_type": "code",
405
+ "execution_count": 20,
406
+ "metadata": {},
407
+ "outputs": [
408
+ {
409
+ "name": "stdout",
410
+ "output_type": "stream",
411
+ "text": [
412
+ "\n",
413
+ "\n",
414
+ "\u001b[1m> Entering new AgentExecutor chain...\u001b[0m\n",
415
+ "\u001b[32;1m\u001b[1;3m\n",
416
+ "Invoking: `arxiv` with `{'query': '1706.03762'}`\n",
417
+ "\n",
418
+ "\n",
419
+ "\u001b[0m\u001b[33;1m\u001b[1;3mPublished: 2023-08-02\n",
420
+ "Title: Attention Is All You Need\n",
421
+ "Authors: Ashish Vaswani, Noam Shazeer, Niki Parmar, Jakob Uszkoreit, Llion Jones, Aidan N. Gomez, Lukasz Kaiser, Illia Polosukhin\n",
422
+ "Summary: The dominant sequence transduction models are based on c\u001b[0m\u001b[32;1m\u001b[1;3m\n",
423
+ "Invoking: `wikipedia` with `{'query': 'Attention Is All You Need'}`\n",
424
+ "\n",
425
+ "\n",
426
+ "\u001b[0m\u001b[36;1m\u001b[1;3mPage: Attention Is All You Need\n",
427
+ "Summary: \"Attention Is All You Need\" is a 2017 landmark research paper authored by eight scientists working at Google, that introduced a new deep learning architecture known as the transformer based on attention mechan\u001b[0m\u001b[32;1m\u001b[1;3mThe Transformer is a neural network architecture that was first introduced in this paper. It's primarily used for natural language processing tasks, such as machine translation, text summarization, and language modeling. The Transformer architecture is based on self-attention mechanisms, which allow it to focus on specific parts of the input data while processing it. This architecture has been widely adopted in many NLP tasks and has achieved state-of-the-art results in many applications.\u001b[0m\n",
428
+ "\n",
429
+ "\u001b[1m> Finished chain.\u001b[0m\n"
430
+ ]
431
+ },
432
+ {
433
+ "data": {
434
+ "text/plain": [
435
+ "{'input': \"What's the paper 1706.03762 about?\",\n",
436
+ " 'output': \"The Transformer is a neural network architecture that was first introduced in this paper. It's primarily used for natural language processing tasks, such as machine translation, text summarization, and language modeling. The Transformer architecture is based on self-attention mechanisms, which allow it to focus on specific parts of the input data while processing it. This architecture has been widely adopted in many NLP tasks and has achieved state-of-the-art results in many applications.\"}"
437
+ ]
438
+ },
439
+ "execution_count": 20,
440
+ "metadata": {},
441
+ "output_type": "execute_result"
442
+ }
443
+ ],
444
+ "source": [
445
+ "agent_executor.invoke({\"input\":\"What's the paper 1706.03762 about?\"})"
446
+ ]
447
+ },
448
+ {
449
+ "cell_type": "code",
450
+ "execution_count": null,
451
+ "metadata": {},
452
+ "outputs": [],
453
+ "source": []
454
+ }
455
+ ],
456
+ "metadata": {
457
+ "kernelspec": {
458
+ "display_name": "Project1 (.venv)",
459
+ "language": "python",
460
+ "name": "project1-venv"
461
+ },
462
+ "language_info": {
463
+ "codemirror_mode": {
464
+ "name": "ipython",
465
+ "version": 3
466
+ },
467
+ "file_extension": ".py",
468
+ "mimetype": "text/x-python",
469
+ "name": "python",
470
+ "nbconvert_exporter": "python",
471
+ "pygments_lexer": "ipython3",
472
+ "version": "3.11.9"
473
+ }
474
+ },
475
+ "nbformat": 4,
476
+ "nbformat_minor": 2
477
+ }