Spaces:
Configuration error
Configuration error
RISHABH KUMAR committed on
Commit ·
dd4e278
0
Parent(s):
first commit
Browse files- app.py +75 -0
- app2.py +94 -0
- requirements.txt +32 -0
- tools_agents.ipynb +527 -0
app.py
ADDED
|
@@ -0,0 +1,75 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""Streamlit chat app: a ReAct agent with DuckDuckGo, Arxiv and Wikipedia tools.

The user supplies a Groq API key in the sidebar; each chat message is routed
through a zero-shot ReAct agent whose intermediate thoughts are streamed into
the chat via StreamlitCallbackHandler.
"""

import streamlit as st
from langchain_groq import ChatGroq
from langchain_community.utilities import ArxivAPIWrapper, WikipediaAPIWrapper
from langchain_community.tools import ArxivQueryRun, WikipediaQueryRun
from langchain.agents import initialize_agent, AgentType
from langchain_community.callbacks import StreamlitCallbackHandler
from langchain.tools import Tool
from duckduckgo_search import DDGS
import os
from dotenv import load_dotenv

# Load variables from a local .env file (previously imported but never called,
# so GROQ_API_KEY in .env was silently ignored).
load_dotenv()


def duckduckgo_search(query: str) -> str:
    """Search DuckDuckGo and return up to 3 results as 'title: body' lines.

    Returns a human-readable error string instead of raising, so the agent
    can recover from transient search failures (rate limits, network errors).
    """
    try:
        with DDGS() as ddgs:
            results = [r for r in ddgs.text(query, max_results=3)]
        if not results:
            return "No results found"
        return "\n".join(f"{r['title']}: {r['body']}" for r in results)
    except Exception as e:
        return f"Search error: {str(e)}"


# Custom search tool wrapping the error-handling function above.
search_tool = Tool(
    name="DuckDuckGo Search",
    func=duckduckgo_search,
    description="Useful for searching the internet",
)

# Arxiv and Wikipedia tools: one result each, truncated to 200 chars to keep
# the agent's context small.
arxiv_wrapper = ArxivAPIWrapper(top_k_results=1, doc_content_chars_max=200)
arxiv = ArxivQueryRun(api_wrapper=arxiv_wrapper)

wiki_wrapper = WikipediaAPIWrapper(top_k_results=1, doc_content_chars_max=200)
wiki = WikipediaQueryRun(api_wrapper=wiki_wrapper)

st.title("🔎 LangChain - Chat with search")

# Sidebar for settings
st.sidebar.title("Settings")
api_key = st.sidebar.text_input("Enter your Groq API Key:", type="password")

if "messages" not in st.session_state:
    st.session_state["messages"] = [
        {"role": "assistant", "content": "Hi, I'm a chatbot who can search the web. How can I help you?"}
    ]

# Replay the conversation so far on every rerun.
for msg in st.session_state.messages:
    st.chat_message(msg["role"]).write(msg['content'])

if prompt := st.chat_input(placeholder="What is machine learning?"):
    st.session_state.messages.append({"role": "user", "content": prompt})
    st.chat_message("user").write(prompt)

    # Without a key, ChatGroq fails with an opaque auth error — fail early
    # with a clear message instead.
    if not api_key:
        st.info("Please enter your Groq API key in the sidebar to continue.")
        st.stop()

    llm = ChatGroq(groq_api_key=api_key, model_name="Llama3-8b-8192", streaming=True)
    tools = [search_tool, arxiv, wiki]

    search_agent = initialize_agent(
        tools,
        llm,
        agent=AgentType.ZERO_SHOT_REACT_DESCRIPTION,
        handle_parsing_errors=True,
        verbose=True
    )

    with st.chat_message("assistant"):
        # Streams the agent's intermediate reasoning into the chat container.
        st_cb = StreamlitCallbackHandler(st.container(), expand_new_thoughts=False)
        try:
            response = search_agent.invoke(
                {"input": prompt},
                {"callbacks": [st_cb]}
            )["output"]
            st.session_state.messages.append({'role': 'assistant', "content": response})
            st.write(response)
        except Exception as e:
            st.error(f"An error occurred: {str(e)}")
            st.session_state.messages.append({'role': 'assistant', "content": f"Error: {str(e)}"})
|
app2.py
ADDED
|
@@ -0,0 +1,94 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""Streamlit chat app (improved variant): bounded ReAct agent with search tools.

Compared to app.py this version uses the built-in DuckDuckGoSearchRun tool,
injects system-style instructions via the agent prompt prefix, and caps the
agent at 3 iterations so it cannot loop on searches forever.
"""

import streamlit as st
from langchain_groq import ChatGroq
from langchain_community.utilities import ArxivAPIWrapper, WikipediaAPIWrapper
from langchain_community.tools import ArxivQueryRun, WikipediaQueryRun, DuckDuckGoSearchRun
from langchain.agents import initialize_agent, AgentType
from langchain_community.callbacks import StreamlitCallbackHandler
import os
from dotenv import load_dotenv

# Load variables from a local .env file (previously imported but never called).
load_dotenv()

# 1. Create Tools — single result, 200-char cap to keep agent context small.
arxiv_wrapper = ArxivAPIWrapper(top_k_results=1, doc_content_chars_max=200)
arxiv = ArxivQueryRun(api_wrapper=arxiv_wrapper)

wiki_wrapper = WikipediaAPIWrapper(top_k_results=1, doc_content_chars_max=200)
wiki = WikipediaQueryRun(api_wrapper=wiki_wrapper)

search = DuckDuckGoSearchRun(
    name="Search",
    region="us-en",
    safesearch="Off",
    time="y",
    max_results=3
)

# 2. Streamlit UI
st.title("🔎 LangChain - Chat with search (Improved)")

# Streamlit "magic": a bare string literal is rendered as markdown on the page.
"""
In this example, we instruct the agent to search only until it has
enough information, then provide a concise final answer.
"""

st.sidebar.title("Settings")
api_key = st.sidebar.text_input("Enter your Groq API Key:", type="password")

if "messages" not in st.session_state:
    st.session_state["messages"] = [
        {
            "role": "assistant",
            "content": "Hi, I'm a chatbot with access to search tools. Ask me anything!"
        }
    ]

# Replay the conversation so far on every rerun.
for msg in st.session_state.messages:
    st.chat_message(msg["role"]).write(msg["content"])

user_input = st.chat_input(placeholder="Ask me something, e.g. 'What is machine learning?'")

if user_input:
    st.session_state.messages.append({"role": "user", "content": user_input})
    st.chat_message("user").write(user_input)

    # Without a key, ChatGroq fails with an opaque auth error — fail early
    # with a clear message instead.
    if not api_key:
        st.info("Please enter your Groq API key in the sidebar to continue.")
        st.stop()

    # 3. Supply System-Like Instructions via the Agent Prefix
    system_instructions = (
        "You are a concise, knowledgeable assistant. "
        "Use the provided tools ONLY IF absolutely necessary to answer the user's query. "
        "Once you have enough information, STOP searching and compile a final, concise answer. "
        "Do not keep repeating the same queries."
    )

    # 4. Create ChatGroq LLM
    llm = ChatGroq(
        groq_api_key=api_key,
        model_name="Llama3-8b-8192",
        streaming=True
        # No `system_message` here; ChatGroq doesn't support that param
    )

    # 5. Initialize Agent with a prompt prefix
    # max_iterations=3 ensures the agent doesn't get stuck searching forever
    agent = initialize_agent(
        tools=[search, arxiv, wiki],
        llm=llm,
        agent=AgentType.ZERO_SHOT_REACT_DESCRIPTION,
        handle_parsing_errors=True,
        max_iterations=3,
        early_stopping_method="force",
        verbose=False,  # set True if you want more logs in your terminal
        agent_kwargs={
            "prefix": system_instructions,
        }
    )

    # 6. Run the Agent — wrapped in try/except (consistent with app.py) so a
    # tool or LLM failure surfaces as a chat error instead of crashing the app.
    with st.chat_message("assistant"):
        st_cb = StreamlitCallbackHandler(st.container(), expand_new_thoughts=False)
        try:
            response = agent.invoke(
                {"input": user_input},
                callbacks=[st_cb]
            )["output"]

            # Save to session and display
            st.session_state.messages.append({"role": "assistant", "content": response})
            st.write(response)
        except Exception as e:
            st.error(f"An error occurred: {str(e)}")
            st.session_state.messages.append({"role": "assistant", "content": f"Error: {str(e)}"})
|
requirements.txt
ADDED
|
@@ -0,0 +1,32 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
langchain
python-dotenv
ipykernel
langchain-community
pypdf
bs4
arxiv
pymupdf
wikipedia
langchain-text-splitters
langchain-openai
chromadb
sentence_transformers
langchain_huggingface
faiss-cpu
langchain_chroma
duckdb
pandas
openai
langchain-groq
duckduckgo-search
mysql-connector-python
SQLAlchemy
validators==0.28.1
youtube_transcript_api
unstructured
pytube
numexpr
huggingface_hub
|
tools_agents.ipynb
ADDED
|
@@ -0,0 +1,527 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
{
|
| 2 |
+
"cells": [
|
| 3 |
+
{
|
| 4 |
+
"cell_type": "markdown",
|
| 5 |
+
"metadata": {},
|
| 6 |
+
"source": [
|
| 7 |
+
"### Search Engine With Tools And Agents"
|
| 8 |
+
]
|
| 9 |
+
},
|
| 10 |
+
{
|
| 11 |
+
"cell_type": "code",
|
| 12 |
+
"execution_count": 1,
|
| 13 |
+
"metadata": {},
|
| 14 |
+
"outputs": [],
|
| 15 |
+
"source": [
|
| 16 |
+
"## Arxiv--Research\n",
|
| 17 |
+
"## Tools creation\n",
|
| 18 |
+
"from langchain_community.tools import ArxivQueryRun,WikipediaQueryRun\n",
|
| 19 |
+
"from langchain_community.utilities import WikipediaAPIWrapper,ArxivAPIWrapper"
|
| 20 |
+
]
|
| 21 |
+
},
|
| 22 |
+
{
|
| 23 |
+
"cell_type": "code",
|
| 24 |
+
"execution_count": 2,
|
| 25 |
+
"metadata": {},
|
| 26 |
+
"outputs": [
|
| 27 |
+
{
|
| 28 |
+
"data": {
|
| 29 |
+
"text/plain": [
|
| 30 |
+
"'wikipedia'"
|
| 31 |
+
]
|
| 32 |
+
},
|
| 33 |
+
"execution_count": 2,
|
| 34 |
+
"metadata": {},
|
| 35 |
+
"output_type": "execute_result"
|
| 36 |
+
}
|
| 37 |
+
],
|
| 38 |
+
"source": [
|
| 39 |
+
"## Used the inbuilt tool of wikipedia\n",
|
| 40 |
+
"api_wrapper_wiki=WikipediaAPIWrapper(top_k_results=1,doc_content_chars_max=250)\n",
|
| 41 |
+
"wiki=WikipediaQueryRun(api_wrapper=api_wrapper_wiki)\n",
|
| 42 |
+
"wiki.name"
|
| 43 |
+
]
|
| 44 |
+
},
|
| 45 |
+
{
|
| 46 |
+
"cell_type": "code",
|
| 47 |
+
"execution_count": 3,
|
| 48 |
+
"metadata": {},
|
| 49 |
+
"outputs": [
|
| 50 |
+
{
|
| 51 |
+
"name": "stdout",
|
| 52 |
+
"output_type": "stream",
|
| 53 |
+
"text": [
|
| 54 |
+
"arxiv\n"
|
| 55 |
+
]
|
| 56 |
+
}
|
| 57 |
+
],
|
| 58 |
+
"source": [
|
| 59 |
+
"## Create a new tool for Arxiv\n",
|
| 60 |
+
"\n",
|
| 61 |
+
"api_wrapper_arxiv=ArxivAPIWrapper(top_k_results=1,doc_content_chars_max=250)\n",
|
| 62 |
+
"arxiv=ArxivQueryRun(api_wrapper=api_wrapper_arxiv)\n",
|
| 63 |
+
"print(arxiv.name)"
|
| 64 |
+
]
|
| 65 |
+
},
|
| 66 |
+
{
|
| 67 |
+
"cell_type": "code",
|
| 68 |
+
"execution_count": 4,
|
| 69 |
+
"metadata": {},
|
| 70 |
+
"outputs": [],
|
| 71 |
+
"source": [
|
| 72 |
+
"tools=[wiki,arxiv]"
|
| 73 |
+
]
|
| 74 |
+
},
|
| 75 |
+
{
|
| 76 |
+
"cell_type": "code",
|
| 77 |
+
"execution_count": 5,
|
| 78 |
+
"metadata": {},
|
| 79 |
+
"outputs": [
|
| 80 |
+
{
|
| 81 |
+
"name": "stderr",
|
| 82 |
+
"output_type": "stream",
|
| 83 |
+
"text": [
|
| 84 |
+
"USER_AGENT environment variable not set, consider setting it to identify your requests.\n"
|
| 85 |
+
]
|
| 86 |
+
}
|
| 87 |
+
],
|
| 88 |
+
"source": [
|
| 89 |
+
"## Custom tools[RAG Tool]\n",
|
| 90 |
+
"from langchain_community.document_loaders import WebBaseLoader\n",
|
| 91 |
+
"from langchain_community.vectorstores import FAISS\n",
|
| 92 |
+
"from langchain_openai import OpenAIEmbeddings\n",
|
| 93 |
+
"from langchain_text_splitters import RecursiveCharacterTextSplitter"
|
| 94 |
+
]
|
| 95 |
+
},
|
| 96 |
+
{
|
| 97 |
+
"cell_type": "code",
|
| 98 |
+
"execution_count": 6,
|
| 99 |
+
"metadata": {},
|
| 100 |
+
"outputs": [
|
| 101 |
+
{
|
| 102 |
+
"data": {
|
| 103 |
+
"text/plain": [
|
| 104 |
+
"VectorStoreRetriever(tags=['FAISS', 'OpenAIEmbeddings'], vectorstore=<langchain_community.vectorstores.faiss.FAISS object at 0x1216be7b0>, search_kwargs={})"
|
| 105 |
+
]
|
| 106 |
+
},
|
| 107 |
+
"execution_count": 6,
|
| 108 |
+
"metadata": {},
|
| 109 |
+
"output_type": "execute_result"
|
| 110 |
+
}
|
| 111 |
+
],
|
| 112 |
+
"source": [
|
| 113 |
+
"loader=WebBaseLoader(\"https://docs.smith.langchain.com/\")\n",
|
| 114 |
+
"docs=loader.load()\n",
|
| 115 |
+
"documents=RecursiveCharacterTextSplitter(chunk_size=1000,chunk_overlap=200).split_documents(docs)\n",
|
| 116 |
+
"vectordb=FAISS.from_documents(documents,OpenAIEmbeddings())\n",
|
| 117 |
+
"retriever=vectordb.as_retriever()\n",
|
| 118 |
+
"retriever"
|
| 119 |
+
]
|
| 120 |
+
},
|
| 121 |
+
{
|
| 122 |
+
"cell_type": "code",
|
| 123 |
+
"execution_count": 7,
|
| 124 |
+
"metadata": {},
|
| 125 |
+
"outputs": [
|
| 126 |
+
{
|
| 127 |
+
"data": {
|
| 128 |
+
"text/plain": [
|
| 129 |
+
"'langsmith-search'"
|
| 130 |
+
]
|
| 131 |
+
},
|
| 132 |
+
"execution_count": 7,
|
| 133 |
+
"metadata": {},
|
| 134 |
+
"output_type": "execute_result"
|
| 135 |
+
}
|
| 136 |
+
],
|
| 137 |
+
"source": [
|
| 138 |
+
"from langchain.tools.retriever import create_retriever_tool\n",
|
| 139 |
+
"retriever_tool=create_retriever_tool(retriever,\"langsmith-search\",\"Search any information about Langsmith \")\n",
|
| 140 |
+
"\n",
|
| 141 |
+
"retriever_tool.name"
|
| 142 |
+
]
|
| 143 |
+
},
|
| 144 |
+
{
|
| 145 |
+
"cell_type": "code",
|
| 146 |
+
"execution_count": 8,
|
| 147 |
+
"metadata": {},
|
| 148 |
+
"outputs": [],
|
| 149 |
+
"source": [
|
| 150 |
+
"tools=[wiki,arxiv,retriever_tool]"
|
| 151 |
+
]
|
| 152 |
+
},
|
| 153 |
+
{
|
| 154 |
+
"cell_type": "code",
|
| 155 |
+
"execution_count": 9,
|
| 156 |
+
"metadata": {},
|
| 157 |
+
"outputs": [
|
| 158 |
+
{
|
| 159 |
+
"data": {
|
| 160 |
+
"text/plain": [
|
| 161 |
+
"[WikipediaQueryRun(api_wrapper=WikipediaAPIWrapper(wiki_client=<module 'wikipedia' from '/Users/rishabh/anaconda3/envs/venv4/lib/python3.13/site-packages/wikipedia/__init__.py'>, top_k_results=1, lang='en', load_all_available_meta=False, doc_content_chars_max=250)),\n",
|
| 162 |
+
" ArxivQueryRun(api_wrapper=ArxivAPIWrapper(arxiv_search=<class 'arxiv.Search'>, arxiv_exceptions=(<class 'arxiv.ArxivError'>, <class 'arxiv.UnexpectedEmptyPageError'>, <class 'arxiv.HTTPError'>), top_k_results=1, ARXIV_MAX_QUERY_LENGTH=300, continue_on_failure=False, load_max_docs=100, load_all_available_meta=False, doc_content_chars_max=250)),\n",
|
| 163 |
+
" Tool(name='langsmith-search', description='Search any information about Langsmith ', args_schema=<class 'langchain_core.tools.retriever.RetrieverInput'>, func=functools.partial(<function _get_relevant_documents at 0x107bb8ea0>, retriever=VectorStoreRetriever(tags=['FAISS', 'OpenAIEmbeddings'], vectorstore=<langchain_community.vectorstores.faiss.FAISS object at 0x1216be7b0>, search_kwargs={}), document_prompt=PromptTemplate(input_variables=['page_content'], input_types={}, partial_variables={}, template='{page_content}'), document_separator='\\n\\n', response_format='content'), coroutine=functools.partial(<function _aget_relevant_documents at 0x1150d40e0>, retriever=VectorStoreRetriever(tags=['FAISS', 'OpenAIEmbeddings'], vectorstore=<langchain_community.vectorstores.faiss.FAISS object at 0x1216be7b0>, search_kwargs={}), document_prompt=PromptTemplate(input_variables=['page_content'], input_types={}, partial_variables={}, template='{page_content}'), document_separator='\\n\\n', response_format='content'))]"
|
| 164 |
+
]
|
| 165 |
+
},
|
| 166 |
+
"execution_count": 9,
|
| 167 |
+
"metadata": {},
|
| 168 |
+
"output_type": "execute_result"
|
| 169 |
+
}
|
| 170 |
+
],
|
| 171 |
+
"source": [
|
| 172 |
+
"tools"
|
| 173 |
+
]
|
| 174 |
+
},
|
| 175 |
+
{
|
| 176 |
+
"cell_type": "code",
|
| 177 |
+
"execution_count": 10,
|
| 178 |
+
"metadata": {},
|
| 179 |
+
"outputs": [],
|
| 180 |
+
"source": [
|
| 181 |
+
"## Run all this tools with Agents and LLM Models\n",
|
| 182 |
+
"\n",
|
| 183 |
+
"## Tools, LLM-->AgentExecutor\n",
|
| 184 |
+
"from langchain_groq import ChatGroq\n",
|
| 185 |
+
"from dotenv import load_dotenv\n",
|
| 186 |
+
"import openai\n",
|
| 187 |
+
"load_dotenv()\n",
|
| 188 |
+
"import os\n",
|
| 189 |
+
"\n",
|
| 190 |
+
"groq_api_key=os.getenv(\"GROQ_API_KEY\")\n",
|
| 191 |
+
"openai.api_key=os.getenv(\"OPENAI_API_KEY\")\n",
|
| 192 |
+
"\n",
|
| 193 |
+
"llm=ChatGroq(groq_api_key=groq_api_key,model_name=\"Llama3-8b-8192\")"
|
| 194 |
+
]
|
| 195 |
+
},
|
| 196 |
+
{
|
| 197 |
+
"cell_type": "code",
|
| 198 |
+
"execution_count": 11,
|
| 199 |
+
"metadata": {},
|
| 200 |
+
"outputs": [
|
| 201 |
+
{
|
| 202 |
+
"data": {
|
| 203 |
+
"text/plain": [
|
| 204 |
+
"[SystemMessagePromptTemplate(prompt=PromptTemplate(input_variables=[], input_types={}, partial_variables={}, template='You are a helpful assistant'), additional_kwargs={}),\n",
|
| 205 |
+
" MessagesPlaceholder(variable_name='chat_history', optional=True),\n",
|
| 206 |
+
" HumanMessagePromptTemplate(prompt=PromptTemplate(input_variables=['input'], input_types={}, partial_variables={}, template='{input}'), additional_kwargs={}),\n",
|
| 207 |
+
" MessagesPlaceholder(variable_name='agent_scratchpad')]"
|
| 208 |
+
]
|
| 209 |
+
},
|
| 210 |
+
"execution_count": 11,
|
| 211 |
+
"metadata": {},
|
| 212 |
+
"output_type": "execute_result"
|
| 213 |
+
}
|
| 214 |
+
],
|
| 215 |
+
"source": [
|
| 216 |
+
"## Prompt Template\n",
|
| 217 |
+
"from langchain import hub\n",
|
| 218 |
+
"prompt=hub.pull(\"hwchase17/openai-functions-agent\")\n",
|
| 219 |
+
"prompt.messages"
|
| 220 |
+
]
|
| 221 |
+
},
|
| 222 |
+
{
|
| 223 |
+
"cell_type": "code",
|
| 224 |
+
"execution_count": 12,
|
| 225 |
+
"metadata": {},
|
| 226 |
+
"outputs": [
|
| 227 |
+
{
|
| 228 |
+
"data": {
|
| 229 |
+
"text/plain": [
|
| 230 |
+
"RunnableAssign(mapper={\n",
|
| 231 |
+
" agent_scratchpad: RunnableLambda(lambda x: format_to_openai_tool_messages(x['intermediate_steps']))\n",
|
| 232 |
+
"})\n",
|
| 233 |
+
"| ChatPromptTemplate(input_variables=['agent_scratchpad', 'input'], optional_variables=['chat_history'], input_types={'chat_history': list[typing.Annotated[typing.Union[typing.Annotated[langchain_core.messages.ai.AIMessage, Tag(tag='ai')], typing.Annotated[langchain_core.messages.human.HumanMessage, Tag(tag='human')], typing.Annotated[langchain_core.messages.chat.ChatMessage, Tag(tag='chat')], typing.Annotated[langchain_core.messages.system.SystemMessage, Tag(tag='system')], typing.Annotated[langchain_core.messages.function.FunctionMessage, Tag(tag='function')], typing.Annotated[langchain_core.messages.tool.ToolMessage, Tag(tag='tool')], typing.Annotated[langchain_core.messages.ai.AIMessageChunk, Tag(tag='AIMessageChunk')], typing.Annotated[langchain_core.messages.human.HumanMessageChunk, Tag(tag='HumanMessageChunk')], typing.Annotated[langchain_core.messages.chat.ChatMessageChunk, Tag(tag='ChatMessageChunk')], typing.Annotated[langchain_core.messages.system.SystemMessageChunk, Tag(tag='SystemMessageChunk')], typing.Annotated[langchain_core.messages.function.FunctionMessageChunk, Tag(tag='FunctionMessageChunk')], typing.Annotated[langchain_core.messages.tool.ToolMessageChunk, Tag(tag='ToolMessageChunk')]], FieldInfo(annotation=NoneType, required=True, discriminator=Discriminator(discriminator=<function _get_type at 0x10703fec0>, custom_error_type=None, custom_error_message=None, custom_error_context=None))]], 'agent_scratchpad': list[typing.Annotated[typing.Union[typing.Annotated[langchain_core.messages.ai.AIMessage, Tag(tag='ai')], typing.Annotated[langchain_core.messages.human.HumanMessage, Tag(tag='human')], typing.Annotated[langchain_core.messages.chat.ChatMessage, Tag(tag='chat')], typing.Annotated[langchain_core.messages.system.SystemMessage, Tag(tag='system')], typing.Annotated[langchain_core.messages.function.FunctionMessage, Tag(tag='function')], typing.Annotated[langchain_core.messages.tool.ToolMessage, Tag(tag='tool')], 
typing.Annotated[langchain_core.messages.ai.AIMessageChunk, Tag(tag='AIMessageChunk')], typing.Annotated[langchain_core.messages.human.HumanMessageChunk, Tag(tag='HumanMessageChunk')], typing.Annotated[langchain_core.messages.chat.ChatMessageChunk, Tag(tag='ChatMessageChunk')], typing.Annotated[langchain_core.messages.system.SystemMessageChunk, Tag(tag='SystemMessageChunk')], typing.Annotated[langchain_core.messages.function.FunctionMessageChunk, Tag(tag='FunctionMessageChunk')], typing.Annotated[langchain_core.messages.tool.ToolMessageChunk, Tag(tag='ToolMessageChunk')]], FieldInfo(annotation=NoneType, required=True, discriminator=Discriminator(discriminator=<function _get_type at 0x10703fec0>, custom_error_type=None, custom_error_message=None, custom_error_context=None))]]}, partial_variables={'chat_history': []}, metadata={'lc_hub_owner': 'hwchase17', 'lc_hub_repo': 'openai-functions-agent', 'lc_hub_commit_hash': 'a1655024b06afbd95d17449f21316291e0726f13dcfaf990cc0d18087ad689a5'}, messages=[SystemMessagePromptTemplate(prompt=PromptTemplate(input_variables=[], input_types={}, partial_variables={}, template='You are a helpful assistant'), additional_kwargs={}), MessagesPlaceholder(variable_name='chat_history', optional=True), HumanMessagePromptTemplate(prompt=PromptTemplate(input_variables=['input'], input_types={}, partial_variables={}, template='{input}'), additional_kwargs={}), MessagesPlaceholder(variable_name='agent_scratchpad')])\n",
|
| 234 |
+
"| RunnableBinding(bound=ChatGroq(client=<groq.resources.chat.completions.Completions object at 0x121829550>, async_client=<groq.resources.chat.completions.AsyncCompletions object at 0x12182aba0>, model_name='Llama3-8b-8192', model_kwargs={}, groq_api_key=SecretStr('**********')), kwargs={'tools': [{'type': 'function', 'function': {'name': 'wikipedia', 'description': 'A wrapper around Wikipedia. Useful for when you need to answer general questions about people, places, companies, facts, historical events, or other subjects. Input should be a search query.', 'parameters': {'properties': {'query': {'description': 'query to look up on wikipedia', 'type': 'string'}}, 'required': ['query'], 'type': 'object'}}}, {'type': 'function', 'function': {'name': 'arxiv', 'description': 'A wrapper around Arxiv.org Useful for when you need to answer questions about Physics, Mathematics, Computer Science, Quantitative Biology, Quantitative Finance, Statistics, Electrical Engineering, and Economics from scientific articles on arxiv.org. Input should be a search query.', 'parameters': {'properties': {'query': {'description': 'search query to look up', 'type': 'string'}}, 'required': ['query'], 'type': 'object'}}}, {'type': 'function', 'function': {'name': 'langsmith-search', 'description': 'Search any information about Langsmith ', 'parameters': {'properties': {'query': {'description': 'query to look up in retriever', 'type': 'string'}}, 'required': ['query'], 'type': 'object'}}}]}, config={}, config_factories=[])\n",
|
| 235 |
+
"| OpenAIToolsAgentOutputParser()"
|
| 236 |
+
]
|
| 237 |
+
},
|
| 238 |
+
"execution_count": 12,
|
| 239 |
+
"metadata": {},
|
| 240 |
+
"output_type": "execute_result"
|
| 241 |
+
}
|
| 242 |
+
],
|
| 243 |
+
"source": [
|
| 244 |
+
"## Agents\n",
|
| 245 |
+
"from langchain.agents import create_openai_tools_agent\n",
|
| 246 |
+
"agent=create_openai_tools_agent(llm,tools,prompt)\n",
|
| 247 |
+
"agent"
|
| 248 |
+
]
|
| 249 |
+
},
|
| 250 |
+
{
|
| 251 |
+
"cell_type": "code",
|
| 252 |
+
"execution_count": 13,
|
| 253 |
+
"metadata": {},
|
| 254 |
+
"outputs": [
|
| 255 |
+
{
|
| 256 |
+
"data": {
|
| 257 |
+
"text/plain": [
|
| 258 |
+
"AgentExecutor(verbose=True, agent=RunnableMultiActionAgent(runnable=RunnableAssign(mapper={\n",
|
| 259 |
+
" agent_scratchpad: RunnableLambda(lambda x: format_to_openai_tool_messages(x['intermediate_steps']))\n",
|
| 260 |
+
"})\n",
|
| 261 |
+
"| ChatPromptTemplate(input_variables=['agent_scratchpad', 'input'], optional_variables=['chat_history'], input_types={'chat_history': list[typing.Annotated[typing.Union[typing.Annotated[langchain_core.messages.ai.AIMessage, Tag(tag='ai')], typing.Annotated[langchain_core.messages.human.HumanMessage, Tag(tag='human')], typing.Annotated[langchain_core.messages.chat.ChatMessage, Tag(tag='chat')], typing.Annotated[langchain_core.messages.system.SystemMessage, Tag(tag='system')], typing.Annotated[langchain_core.messages.function.FunctionMessage, Tag(tag='function')], typing.Annotated[langchain_core.messages.tool.ToolMessage, Tag(tag='tool')], typing.Annotated[langchain_core.messages.ai.AIMessageChunk, Tag(tag='AIMessageChunk')], typing.Annotated[langchain_core.messages.human.HumanMessageChunk, Tag(tag='HumanMessageChunk')], typing.Annotated[langchain_core.messages.chat.ChatMessageChunk, Tag(tag='ChatMessageChunk')], typing.Annotated[langchain_core.messages.system.SystemMessageChunk, Tag(tag='SystemMessageChunk')], typing.Annotated[langchain_core.messages.function.FunctionMessageChunk, Tag(tag='FunctionMessageChunk')], typing.Annotated[langchain_core.messages.tool.ToolMessageChunk, Tag(tag='ToolMessageChunk')]], FieldInfo(annotation=NoneType, required=True, discriminator=Discriminator(discriminator=<function _get_type at 0x10703fec0>, custom_error_type=None, custom_error_message=None, custom_error_context=None))]], 'agent_scratchpad': list[typing.Annotated[typing.Union[typing.Annotated[langchain_core.messages.ai.AIMessage, Tag(tag='ai')], typing.Annotated[langchain_core.messages.human.HumanMessage, Tag(tag='human')], typing.Annotated[langchain_core.messages.chat.ChatMessage, Tag(tag='chat')], typing.Annotated[langchain_core.messages.system.SystemMessage, Tag(tag='system')], typing.Annotated[langchain_core.messages.function.FunctionMessage, Tag(tag='function')], typing.Annotated[langchain_core.messages.tool.ToolMessage, Tag(tag='tool')], 
typing.Annotated[langchain_core.messages.ai.AIMessageChunk, Tag(tag='AIMessageChunk')], typing.Annotated[langchain_core.messages.human.HumanMessageChunk, Tag(tag='HumanMessageChunk')], typing.Annotated[langchain_core.messages.chat.ChatMessageChunk, Tag(tag='ChatMessageChunk')], typing.Annotated[langchain_core.messages.system.SystemMessageChunk, Tag(tag='SystemMessageChunk')], typing.Annotated[langchain_core.messages.function.FunctionMessageChunk, Tag(tag='FunctionMessageChunk')], typing.Annotated[langchain_core.messages.tool.ToolMessageChunk, Tag(tag='ToolMessageChunk')]], FieldInfo(annotation=NoneType, required=True, discriminator=Discriminator(discriminator=<function _get_type at 0x10703fec0>, custom_error_type=None, custom_error_message=None, custom_error_context=None))]]}, partial_variables={'chat_history': []}, metadata={'lc_hub_owner': 'hwchase17', 'lc_hub_repo': 'openai-functions-agent', 'lc_hub_commit_hash': 'a1655024b06afbd95d17449f21316291e0726f13dcfaf990cc0d18087ad689a5'}, messages=[SystemMessagePromptTemplate(prompt=PromptTemplate(input_variables=[], input_types={}, partial_variables={}, template='You are a helpful assistant'), additional_kwargs={}), MessagesPlaceholder(variable_name='chat_history', optional=True), HumanMessagePromptTemplate(prompt=PromptTemplate(input_variables=['input'], input_types={}, partial_variables={}, template='{input}'), additional_kwargs={}), MessagesPlaceholder(variable_name='agent_scratchpad')])\n",
|
| 262 |
+
"| RunnableBinding(bound=ChatGroq(client=<groq.resources.chat.completions.Completions object at 0x121829550>, async_client=<groq.resources.chat.completions.AsyncCompletions object at 0x12182aba0>, model_name='Llama3-8b-8192', model_kwargs={}, groq_api_key=SecretStr('**********')), kwargs={'tools': [{'type': 'function', 'function': {'name': 'wikipedia', 'description': 'A wrapper around Wikipedia. Useful for when you need to answer general questions about people, places, companies, facts, historical events, or other subjects. Input should be a search query.', 'parameters': {'properties': {'query': {'description': 'query to look up on wikipedia', 'type': 'string'}}, 'required': ['query'], 'type': 'object'}}}, {'type': 'function', 'function': {'name': 'arxiv', 'description': 'A wrapper around Arxiv.org Useful for when you need to answer questions about Physics, Mathematics, Computer Science, Quantitative Biology, Quantitative Finance, Statistics, Electrical Engineering, and Economics from scientific articles on arxiv.org. Input should be a search query.', 'parameters': {'properties': {'query': {'description': 'search query to look up', 'type': 'string'}}, 'required': ['query'], 'type': 'object'}}}, {'type': 'function', 'function': {'name': 'langsmith-search', 'description': 'Search any information about Langsmith ', 'parameters': {'properties': {'query': {'description': 'query to look up in retriever', 'type': 'string'}}, 'required': ['query'], 'type': 'object'}}}]}, config={}, config_factories=[])\n",
|
| 263 |
+
"| OpenAIToolsAgentOutputParser(), input_keys_arg=[], return_keys_arg=[], stream_runnable=True), tools=[WikipediaQueryRun(api_wrapper=WikipediaAPIWrapper(wiki_client=<module 'wikipedia' from '/Users/rishabh/anaconda3/envs/venv4/lib/python3.13/site-packages/wikipedia/__init__.py'>, top_k_results=1, lang='en', load_all_available_meta=False, doc_content_chars_max=250)), ArxivQueryRun(api_wrapper=ArxivAPIWrapper(arxiv_search=<class 'arxiv.Search'>, arxiv_exceptions=(<class 'arxiv.ArxivError'>, <class 'arxiv.UnexpectedEmptyPageError'>, <class 'arxiv.HTTPError'>), top_k_results=1, ARXIV_MAX_QUERY_LENGTH=300, continue_on_failure=False, load_max_docs=100, load_all_available_meta=False, doc_content_chars_max=250)), Tool(name='langsmith-search', description='Search any information about Langsmith ', args_schema=<class 'langchain_core.tools.retriever.RetrieverInput'>, func=functools.partial(<function _get_relevant_documents at 0x107bb8ea0>, retriever=VectorStoreRetriever(tags=['FAISS', 'OpenAIEmbeddings'], vectorstore=<langchain_community.vectorstores.faiss.FAISS object at 0x1216be7b0>, search_kwargs={}), document_prompt=PromptTemplate(input_variables=['page_content'], input_types={}, partial_variables={}, template='{page_content}'), document_separator='\\n\\n', response_format='content'), coroutine=functools.partial(<function _aget_relevant_documents at 0x1150d40e0>, retriever=VectorStoreRetriever(tags=['FAISS', 'OpenAIEmbeddings'], vectorstore=<langchain_community.vectorstores.faiss.FAISS object at 0x1216be7b0>, search_kwargs={}), document_prompt=PromptTemplate(input_variables=['page_content'], input_types={}, partial_variables={}, template='{page_content}'), document_separator='\\n\\n', response_format='content'))])"
|
| 264 |
+
]
|
| 265 |
+
},
|
| 266 |
+
"execution_count": 13,
|
| 267 |
+
"metadata": {},
|
| 268 |
+
"output_type": "execute_result"
|
| 269 |
+
}
|
| 270 |
+
],
|
| 271 |
+
"source": [
|
| 272 |
+
"## Agent Executer\n",
|
| 273 |
+
"from langchain.agents import AgentExecutor\n",
|
| 274 |
+
"agent_executor=AgentExecutor(agent=agent,tools=tools,verbose=True)\n",
|
| 275 |
+
"agent_executor"
|
| 276 |
+
]
|
| 277 |
+
},
|
| 278 |
+
{
|
| 279 |
+
"cell_type": "code",
|
| 280 |
+
"execution_count": 14,
|
| 281 |
+
"metadata": {},
|
| 282 |
+
"outputs": [
|
| 283 |
+
{
|
| 284 |
+
"name": "stdout",
|
| 285 |
+
"output_type": "stream",
|
| 286 |
+
"text": [
|
| 287 |
+
"\n",
|
| 288 |
+
"\n",
|
| 289 |
+
"\u001b[1m> Entering new AgentExecutor chain...\u001b[0m\n",
|
| 290 |
+
"\u001b[32;1m\u001b[1;3m\n",
|
| 291 |
+
"Invoking: `langsmith-search` with `{'query': 'Langsmith'}`\n",
|
| 292 |
+
"\n",
|
| 293 |
+
"\n",
|
| 294 |
+
"\u001b[0m\u001b[38;5;200m\u001b[1;3mGet started with LangSmith | ü¶úÔ∏èüõ†Ô∏è LangSmith\n",
|
| 295 |
+
"\n",
|
| 296 |
+
"LangSmith + LangChain OSSLangSmith integrates seamlessly with LangChain's open source frameworks langchain and langgraph, with no extra instrumentation needed.If you're already using either of these, see the how-to guide for setting up LangSmith with LangChain or setting up LangSmith with LangGraph.\n",
|
| 297 |
+
"Observability‚Äã\n",
|
| 298 |
+
"Observability is important for any software application, but especially so for LLM applications. LLMs are non-deterministic by nature, meaning they can produce unexpected results. This makes them trickier than normal to debug.\n",
|
| 299 |
+
"This is where LangSmith can help! LangSmith has LLM-native observability, allowing you to get meaningful insights from your application. LangSmith’s observability features have you covered throughout all stages of application development - from prototyping, to beta testing, to production.\n",
|
| 300 |
+
"\n",
|
| 301 |
+
"Skip to main contentJoin us at Interrupt: The Agent AI Conference by LangChain on May 13 & 14 in San Francisco!API ReferenceRESTPythonJS/TSSearchRegionUSEUGo to AppGet StartedObservabilityEvaluationPrompt EngineeringDeployment (LangGraph Platform)AdministrationSelf-hostingPricingReferenceCloud architecture and scalabilityAuthz and AuthnAuthentication methodsdata_formatsEvaluationDataset transformationsRegions FAQsdk_referenceGet StartedOn this pageGet started with LangSmith\n",
|
| 302 |
+
"LangSmith is a platform for building production-grade LLM applications.\n",
|
| 303 |
+
"It allows you to closely monitor and evaluate your application, so you can ship quickly and with confidence.\n",
|
| 304 |
+
"ObservabilityAnalyze traces in LangSmith and configure metrics, dashboards, alerts based on these.EvalsEvaluate your application over production traffic — score application performance and get human feedback on your data.Prompt EngineeringIterate on prompts, with automatic version control and collaboration features.\n",
|
| 305 |
+
"\n",
|
| 306 |
+
"Get started by adding tracing to your application.\n",
|
| 307 |
+
"Create dashboards to view key metrics like RPS, error rates and costs.\n",
|
| 308 |
+
"\n",
|
| 309 |
+
"Evals‚Äã\n",
|
| 310 |
+
"The quality and development speed of AI applications depends on high-quality evaluation datasets and metrics to test and optimize your applications on. The LangSmith SDK and UI make building and running high-quality evaluations easy.\n",
|
| 311 |
+
"\n",
|
| 312 |
+
"Get started by creating your first evaluation.\n",
|
| 313 |
+
"Quickly assess the performance of your application using our off-the-shelf evaluators (Python only) as a starting point.\n",
|
| 314 |
+
"Analyze results of evaluations in the LangSmith UI and compare results over time.\n",
|
| 315 |
+
"Easily collect human feedback on your data to improve your application.\n",
|
| 316 |
+
"\n",
|
| 317 |
+
"Prompt Engineering‚Äã\n",
|
| 318 |
+
"While traditional software applications are built by writing code, AI applications involve writing prompts to instruct the LLM on what to do. LangSmith provides a set of tools designed to enable and facilitate prompt engineering to help you find the perfect prompt for your application.\u001b[0m\u001b[32;1m\u001b[1;3mIt seems like Langsmith is a platform for building and managing production-grade Large Language Models (LLMs) applications. It provides tools for observability, evaluation, prompt engineering, and deployment. The platform allows developers to closely monitor and evaluate their applications, making it easier to ship quickly and with confidence.\n",
|
| 319 |
+
"\n",
|
| 320 |
+
"Some of the key features of Langsmith include:\n",
|
| 321 |
+
"\n",
|
| 322 |
+
"* Observability: Langsmith provides LLM-native observability, allowing developers to get meaningful insights from their application. This includes tracing, metrics, dashboards, and alerts.\n",
|
| 323 |
+
"* Evaluation: Langsmith allows developers to evaluate their application over production traffic, scoring its performance and getting human feedback on the data.\n",
|
| 324 |
+
"* Prompt Engineering: Langsmith provides tools for prompt engineering, including automatic version control and collaboration features.\n",
|
| 325 |
+
"* Deployment: Langsmith allows developers to deploy their applications to production, with features like cloud architecture and scalability.\n",
|
| 326 |
+
"\n",
|
| 327 |
+
"Overall, Langsmith seems like a powerful tool for building and managing LLM applications, and its features could be useful for developers who are working with these types of models.\u001b[0m\n",
|
| 328 |
+
"\n",
|
| 329 |
+
"\u001b[1m> Finished chain.\u001b[0m\n"
|
| 330 |
+
]
|
| 331 |
+
},
|
| 332 |
+
{
|
| 333 |
+
"data": {
|
| 334 |
+
"text/plain": [
|
| 335 |
+
"{'input': 'Tell me about Langsmith',\n",
|
| 336 |
+
" 'output': 'It seems like Langsmith is a platform for building and managing production-grade Large Language Models (LLMs) applications. It provides tools for observability, evaluation, prompt engineering, and deployment. The platform allows developers to closely monitor and evaluate their applications, making it easier to ship quickly and with confidence.\\n\\nSome of the key features of Langsmith include:\\n\\n* Observability: Langsmith provides LLM-native observability, allowing developers to get meaningful insights from their application. This includes tracing, metrics, dashboards, and alerts.\\n* Evaluation: Langsmith allows developers to evaluate their application over production traffic, scoring its performance and getting human feedback on the data.\\n* Prompt Engineering: Langsmith provides tools for prompt engineering, including automatic version control and collaboration features.\\n* Deployment: Langsmith allows developers to deploy their applications to production, with features like cloud architecture and scalability.\\n\\nOverall, Langsmith seems like a powerful tool for building and managing LLM applications, and its features could be useful for developers who are working with these types of models.'}"
|
| 337 |
+
]
|
| 338 |
+
},
|
| 339 |
+
"execution_count": 14,
|
| 340 |
+
"metadata": {},
|
| 341 |
+
"output_type": "execute_result"
|
| 342 |
+
}
|
| 343 |
+
],
|
| 344 |
+
"source": [
|
| 345 |
+
"agent_executor.invoke({\"input\":\"Tell me about Langsmith\"})"
|
| 346 |
+
]
|
| 347 |
+
},
|
| 348 |
+
{
|
| 349 |
+
"cell_type": "code",
|
| 350 |
+
"execution_count": 15,
|
| 351 |
+
"metadata": {},
|
| 352 |
+
"outputs": [
|
| 353 |
+
{
|
| 354 |
+
"name": "stdout",
|
| 355 |
+
"output_type": "stream",
|
| 356 |
+
"text": [
|
| 357 |
+
"\n",
|
| 358 |
+
"\n",
|
| 359 |
+
"\u001b[1m> Entering new AgentExecutor chain...\u001b[0m\n",
|
| 360 |
+
"\u001b[32;1m\u001b[1;3m\n",
|
| 361 |
+
"Invoking: `wikipedia` with `{'query': 'Machine learning'}`\n",
|
| 362 |
+
"\n",
|
| 363 |
+
"\n",
|
| 364 |
+
"\u001b[0m\u001b[36;1m\u001b[1;3mPage: Machine learning\n",
|
| 365 |
+
"Summary: Machine learning (ML) is a field of study in artificial intelligence concerned with the development and study of statistical algorithms that can learn from data and generalize to unseen data, and thus perform tasks wit\u001b[0m\u001b[32;1m\u001b[1;3m\n",
|
| 366 |
+
"Invoking: `wikipedia` with `{'query': 'Machine learning#History'}`\n",
|
| 367 |
+
"\n",
|
| 368 |
+
"\n",
|
| 369 |
+
"\u001b[0m\u001b[36;1m\u001b[1;3mPage: Machine learning\n",
|
| 370 |
+
"Summary: Machine learning (ML) is a field of study in artificial intelligence concerned with the development and study of statistical algorithms that can learn from data and generalize to unseen data, and thus perform tasks wit\u001b[0m\u001b[32;1m\u001b[1;3mIt appears that the previous tool call didn't yield any new information. Let me try again.\n",
|
| 371 |
+
"\n",
|
| 372 |
+
"Machine learning (ML) has its roots in artificial intelligence and computational learning theory. The term \"machine learning\" was coined in 1959 by Arthur Samuel, and the term \"learning\" was chosen to emphasize the ability of machines to learn from data without being explicitly programmed.\n",
|
| 373 |
+
"\n",
|
| 374 |
+
"In the 1970s and 1980s, machine learning research focused on developing algorithms that could learn from examples, such as decision trees and neural networks. This was driven in part by the need for computers to learn from the vast amounts of data being generated in fields such as medicine and finance.\n",
|
| 375 |
+
"\n",
|
| 376 |
+
"In the 1990s and 2000s, machine learning saw a resurgence in popularity, driven in part by the availability of large amounts of data and the development of new algorithms such as support vector machines and random forests. This led to the creation of new fields such as bioinformatics and text mining.\n",
|
| 377 |
+
"\n",
|
| 378 |
+
"Today, machine learning is a key component of many artificial intelligence systems, and is used in a wide range of applications, from image and speech recognition to natural language processing and expert systems.\u001b[0m\n",
|
| 379 |
+
"\n",
|
| 380 |
+
"\u001b[1m> Finished chain.\u001b[0m\n"
|
| 381 |
+
]
|
| 382 |
+
},
|
| 383 |
+
{
|
| 384 |
+
"data": {
|
| 385 |
+
"text/plain": [
|
| 386 |
+
"{'input': 'What is machine learning',\n",
|
| 387 |
+
" 'output': 'It appears that the previous tool call didn\\'t yield any new information. Let me try again.\\n\\nMachine learning (ML) has its roots in artificial intelligence and computational learning theory. The term \"machine learning\" was coined in 1959 by Arthur Samuel, and the term \"learning\" was chosen to emphasize the ability of machines to learn from data without being explicitly programmed.\\n\\nIn the 1970s and 1980s, machine learning research focused on developing algorithms that could learn from examples, such as decision trees and neural networks. This was driven in part by the need for computers to learn from the vast amounts of data being generated in fields such as medicine and finance.\\n\\nIn the 1990s and 2000s, machine learning saw a resurgence in popularity, driven in part by the availability of large amounts of data and the development of new algorithms such as support vector machines and random forests. This led to the creation of new fields such as bioinformatics and text mining.\\n\\nToday, machine learning is a key component of many artificial intelligence systems, and is used in a wide range of applications, from image and speech recognition to natural language processing and expert systems.'}"
|
| 388 |
+
]
|
| 389 |
+
},
|
| 390 |
+
"execution_count": 15,
|
| 391 |
+
"metadata": {},
|
| 392 |
+
"output_type": "execute_result"
|
| 393 |
+
}
|
| 394 |
+
],
|
| 395 |
+
"source": [
|
| 396 |
+
"agent_executor.invoke({\"input\":\"What is machine learning\"})"
|
| 397 |
+
]
|
| 398 |
+
},
|
| 399 |
+
{
|
| 400 |
+
"cell_type": "code",
|
| 401 |
+
"execution_count": 16,
|
| 402 |
+
"metadata": {},
|
| 403 |
+
"outputs": [
|
| 404 |
+
{
|
| 405 |
+
"name": "stdout",
|
| 406 |
+
"output_type": "stream",
|
| 407 |
+
"text": [
|
| 408 |
+
"\n",
|
| 409 |
+
"\n",
|
| 410 |
+
"\u001b[1m> Entering new AgentExecutor chain...\u001b[0m\n",
|
| 411 |
+
"\u001b[32;1m\u001b[1;3m\n",
|
| 412 |
+
"Invoking: `arxiv` with `{'query': '1706.03762'}`\n",
|
| 413 |
+
"\n",
|
| 414 |
+
"\n",
|
| 415 |
+
"\u001b[0m\u001b[33;1m\u001b[1;3mPublished: 2023-08-02\n",
|
| 416 |
+
"Title: Attention Is All You Need\n",
|
| 417 |
+
"Authors: Ashish Vaswani, Noam Shazeer, Niki Parmar, Jakob Uszkoreit, Llion Jones, Aidan N. Gomez, Lukasz Kaiser, Illia Polosukhin\n",
|
| 418 |
+
"Summary: The dominant sequence transduction models are based on c\u001b[0m\u001b[32;1m\u001b[1;3mBased on the tool's output, I can see that the paper \"Attention Is All You Need\" was published on August 2, 2023, and it's about sequence transduction models.\u001b[0m\n",
|
| 419 |
+
"\n",
|
| 420 |
+
"\u001b[1m> Finished chain.\u001b[0m\n"
|
| 421 |
+
]
|
| 422 |
+
},
|
| 423 |
+
{
|
| 424 |
+
"data": {
|
| 425 |
+
"text/plain": [
|
| 426 |
+
"{'input': \"What's the paper 1706.03762 about?\",\n",
|
| 427 |
+
" 'output': 'Based on the tool\\'s output, I can see that the paper \"Attention Is All You Need\" was published on August 2, 2023, and it\\'s about sequence transduction models.'}"
|
| 428 |
+
]
|
| 429 |
+
},
|
| 430 |
+
"execution_count": 16,
|
| 431 |
+
"metadata": {},
|
| 432 |
+
"output_type": "execute_result"
|
| 433 |
+
}
|
| 434 |
+
],
|
| 435 |
+
"source": [
|
| 436 |
+
"agent_executor.invoke({\"input\":\"What's the paper 1706.03762 about?\"})"
|
| 437 |
+
]
|
| 438 |
+
},
|
| 439 |
+
{
|
| 440 |
+
"cell_type": "code",
|
| 441 |
+
"execution_count": 17,
|
| 442 |
+
"metadata": {},
|
| 443 |
+
"outputs": [
|
| 444 |
+
{
|
| 445 |
+
"name": "stdout",
|
| 446 |
+
"output_type": "stream",
|
| 447 |
+
"text": [
|
| 448 |
+
"\n",
|
| 449 |
+
"\n",
|
| 450 |
+
"\u001b[1m> Entering new AgentExecutor chain...\u001b[0m\n",
|
| 451 |
+
"\u001b[32;1m\u001b[1;3m\n",
|
| 452 |
+
"Invoking: `wikipedia` with `{'query': 'Dragon Ball Z'}`\n",
|
| 453 |
+
"\n",
|
| 454 |
+
"\n",
|
| 455 |
+
"\u001b[0m\u001b[36;1m\u001b[1;3mPage: Dragon Ball Z\n",
|
| 456 |
+
"Summary: Dragon Ball Z (DBZ) is a Japanese anime television series produced by Toei Animation. Part of the Dragon Ball media franchise, it is the sequel to the 1986 Dragon Ball television series and adapts the latter 325 chapters \u001b[0m\u001b[32;1m\u001b[1;3m\n",
|
| 457 |
+
"Invoking: `wikipedia` with `{'query': 'Dragon Ball Z villains'}`\n",
|
| 458 |
+
"\n",
|
| 459 |
+
"\n",
|
| 460 |
+
"\u001b[0m\u001b[36;1m\u001b[1;3mPage: List of Dragon Ball characters\n",
|
| 461 |
+
"Summary: Dragon Ball is a Japanese media franchise created by Akira Toriyama in 1984. The franchise features an ensemble cast of characters and takes place in the same fictional universe as Toriyama's other work, \u001b[0m\u001b[32;1m\u001b[1;3m\n",
|
| 462 |
+
"Invoking: `wikipedia` with `{'query': 'Dragon Ball Z villains'}`\n",
|
| 463 |
+
"responded: It seems like the previous tool call didn't yield the expected result. Let's try again.\n",
|
| 464 |
+
"\n",
|
| 465 |
+
"\n",
|
| 466 |
+
"\n",
|
| 467 |
+
"\u001b[0m\u001b[36;1m\u001b[1;3mPage: List of Dragon Ball characters\n",
|
| 468 |
+
"Summary: Dragon Ball is a Japanese media franchise created by Akira Toriyama in 1984. The franchise features an ensemble cast of characters and takes place in the same fictional universe as Toriyama's other work, \u001b[0m\u001b[32;1m\u001b[1;3m\n",
|
| 469 |
+
"Invoking: `wikipedia` with `{'query': 'Dragon Ball Z villains'}`\n",
|
| 470 |
+
"responded: It looks like the tool didn't yield the expected result. Let me try again.\n",
|
| 471 |
+
"\n",
|
| 472 |
+
"\n",
|
| 473 |
+
"\n",
|
| 474 |
+
"\u001b[0m\u001b[36;1m\u001b[1;3mPage: List of Dragon Ball characters\n",
|
| 475 |
+
"Summary: Dragon Ball is a Japanese media franchise created by Akira Toriyama in 1984. The franchise features an ensemble cast of characters and takes place in the same fictional universe as Toriyama's other work, \u001b[0m\u001b[32;1m\u001b[1;3mIt seems like the tool is stuck in an infinite loop. Let me try a different approach.\n",
|
| 476 |
+
"\n",
|
| 477 |
+
"According to the summary, Dragon Ball Z has a list of villains. One of the most well-known villains in Dragon Ball Z is Frieza.\u001b[0m\n",
|
| 478 |
+
"\n",
|
| 479 |
+
"\u001b[1m> Finished chain.\u001b[0m\n"
|
| 480 |
+
]
|
| 481 |
+
},
|
| 482 |
+
{
|
| 483 |
+
"data": {
|
| 484 |
+
"text/plain": [
|
| 485 |
+
"{'input': 'What do you know about Dragon Ball Z?',\n",
|
| 486 |
+
" 'output': 'It seems like the tool is stuck in an infinite loop. Let me try a different approach.\\n\\nAccording to the summary, Dragon Ball Z has a list of villains. One of the most well-known villains in Dragon Ball Z is Frieza.'}"
|
| 487 |
+
]
|
| 488 |
+
},
|
| 489 |
+
"execution_count": 17,
|
| 490 |
+
"metadata": {},
|
| 491 |
+
"output_type": "execute_result"
|
| 492 |
+
}
|
| 493 |
+
],
|
| 494 |
+
"source": [
|
| 495 |
+
"agent_executor.invoke({\"input\":\"What do you know about Dragon Ball Z?\"})"
|
| 496 |
+
]
|
| 497 |
+
},
|
| 498 |
+
{
|
| 499 |
+
"cell_type": "code",
|
| 500 |
+
"execution_count": null,
|
| 501 |
+
"metadata": {},
|
| 502 |
+
"outputs": [],
|
| 503 |
+
"source": []
|
| 504 |
+
}
|
| 505 |
+
],
|
| 506 |
+
"metadata": {
|
| 507 |
+
"kernelspec": {
|
| 508 |
+
"display_name": "venv4",
|
| 509 |
+
"language": "python",
|
| 510 |
+
"name": "python3"
|
| 511 |
+
},
|
| 512 |
+
"language_info": {
|
| 513 |
+
"codemirror_mode": {
|
| 514 |
+
"name": "ipython",
|
| 515 |
+
"version": 3
|
| 516 |
+
},
|
| 517 |
+
"file_extension": ".py",
|
| 518 |
+
"mimetype": "text/x-python",
|
| 519 |
+
"name": "python",
|
| 520 |
+
"nbconvert_exporter": "python",
|
| 521 |
+
"pygments_lexer": "ipython3",
|
| 522 |
+
"version": "3.13.1"
|
| 523 |
+
}
|
| 524 |
+
},
|
| 525 |
+
"nbformat": 4,
|
| 526 |
+
"nbformat_minor": 2
|
| 527 |
+
}
|