issue_owner_repo
listlengths
2
2
issue_body
stringlengths
0
261k
issue_title
stringlengths
1
925
issue_comments_url
stringlengths
56
81
issue_comments_count
int64
0
2.5k
issue_created_at
stringlengths
20
20
issue_updated_at
stringlengths
20
20
issue_html_url
stringlengths
37
62
issue_github_id
int64
387k
2.46B
issue_number
int64
1
127k
[ "langchain-ai", "langchain" ]
### Checked other resources - [X] I added a very descriptive title to this issue. - [X] I searched the LangChain documentation with the integrated search. - [X] I used the GitHub search to find a similar question and didn't find it. - [X] I am sure that this is a bug in LangChain rather than my code. ### Example Code ``` from langchain.chains import SequentialChain from langchain.chains.router import MultiPromptChain from langchain.chains.router.llm_router import LLMRouterChain,RouterOutputParser from langchain.prompts import PromptTemplate, ChatPromptTemplate from langchain.chains import LLMChain from langchain.chat_models import ChatVertexAI from langchain.chains.router.multi_prompt_prompt import MULTI_PROMPT_ROUTER_TEMPLATE chat=ChatVertexAI( model_name="chat-bison@001", max_output_tokens=1000, temperature=0.0, top_k=10, top_p=0.8 ) def create_sequential_chain(llm): first_prompt = ChatPromptTemplate.from_template( "Translate the following review to french:" "\n\n{input}" ) # chain 1: input= Review and output= English_Review chain_one = LLMChain(llm=llm, prompt=first_prompt, output_key="French_Review" ) second_prompt = ChatPromptTemplate.from_template( "Can you summarize the following review in 1 sentence:" "\n\n{French_Review}" ) # chain 2: input= English_Review and output= summary chain_two = LLMChain(llm=llm, prompt=second_prompt, output_key="summary" ) # prompt template 3: translate to english third_prompt = ChatPromptTemplate.from_template( "What language is the following review:\n\n{input}" ) # chain 3: input= Review and output= language chain_three = LLMChain(llm=llm, prompt=third_prompt, output_key="language" ) # prompt template 4: follow up message fourth_prompt = ChatPromptTemplate.from_template( "Write a follow up response to the following " "summary in the specified language:" "\n\nSummary: {summary}\n\nLanguage: {language}" ) # chain 4: input= summary, language and output= followup_message chain_four = LLMChain(llm=llm, prompt=fourth_prompt, 
output_key="followup_message" ) # overall_chain: input= Review # and output= English_Review,summary, followup_message sequential_chain = SequentialChain( chains=[chain_one, chain_two, chain_three, chain_four], input_variables=["input"], output_variables=["French_Review", "summary","followup_message"], verbose=True ) return sequential_chain sequential_chain = create_sequential_chain(chat) ####### The sequeential chain works fine sequential_chain_response = sequential_chain("I find the taste mediocre. The foam doesn't hold, it's strange. I buy the same ones in stores and the taste is much better.") print(sequential_chain_response) ######## Doesn't work in the router chain analysis_template = """You are a data analyst. You are great at analyze and summarize product reviews. When you don't know the answer to a question you admit that you don't know. \nFor example: I find the taste mediocre. The foam doesn't hold, it's strange. ""\nHere is a question: {input}""" prompt_infos = [ { "name": "analysis", "description": "great at analyze and summarize product reviews", "prompt_template": analysis_template, "destination_chain": sequential_chain } ] destination_chains = {} for p_info in prompt_infos: name = p_info["name"] destination_chains[name] = p_info["destination_chain"] destinations = [f"{p['name']}: {p['description']}" for p in prompt_infos] destinations_str = "\n".join(destinations) default_prompt = ChatPromptTemplate.from_template("{input}") default_chain = LLMChain(llm=chat, prompt=default_prompt) router_template = MULTI_PROMPT_ROUTER_TEMPLATE.format( destinations=destinations_str ) router_prompt = PromptTemplate( template=router_template, input_variables=["input"], output_parser=RouterOutputParser(), ) router_chain = LLMRouterChain.from_llm(chat, router_prompt) chain = MultiPromptChain(router_chain=router_chain, destination_chains=destination_chains, default_chain=default_chain, verbose=True ) chain.run("I find the taste mediocre. 
The foam doesn't hold, it's strange. I buy the same ones in stores and the taste is much better.") ``` ### Error Message and Stack Trace (if applicable) > Entering new MultiPromptChain chain... /opt/conda/lib/python3.10/site-packages/langchain_core/_api/deprecation.py:117: LangChainDeprecationWarning: The function `run` was deprecated in LangChain 0.1.0 and will be removed in 0.2.0. Use invoke instead. warn_deprecated( /opt/conda/lib/python3.10/site-packages/langchain/chains/llm.py:316: UserWarning: The predict_and_parse method is deprecated, instead pass an output parser directly to LLMChain. warnings.warn( analysis: {'input': "I find the taste mediocre. The foam doesn't hold, it's strange. I buy the same ones in stores and the taste is much better."} > Entering new SequentialChain chain... > Finished chain. > Finished chain. --------------------------------------------------------------------------- ValueError Traceback (most recent call last) Cell In[1], line 112 106 router_chain = LLMRouterChain.from_llm(chat, router_prompt) 107 chain = MultiPromptChain(router_chain=router_chain, 108 destination_chains=destination_chains, 109 default_chain=default_chain, 110 verbose=True 111 ) --> 112 chain.run("I find the taste mediocre. The foam doesn't hold, it's strange. 
I buy the same ones in stores and the taste is much better.") File /opt/conda/lib/python3.10/site-packages/langchain_core/_api/deprecation.py:145, in deprecated.<locals>.deprecate.<locals>.warning_emitting_wrapper(*args, **kwargs) 143 warned = True 144 emit_warning() --> 145 return wrapped(*args, **kwargs) File /opt/conda/lib/python3.10/site-packages/langchain/chains/base.py:538, in Chain.run(self, callbacks, tags, metadata, *args, **kwargs) 536 if len(args) != 1: 537 raise ValueError("`run` supports only one positional argument.") --> 538 return self(args[0], callbacks=callbacks, tags=tags, metadata=metadata)[ 539 _output_key 540 ] 542 if kwargs and not args: 543 return self(kwargs, callbacks=callbacks, tags=tags, metadata=metadata)[ 544 _output_key 545 ] File /opt/conda/lib/python3.10/site-packages/langchain_core/_api/deprecation.py:145, in deprecated.<locals>.deprecate.<locals>.warning_emitting_wrapper(*args, **kwargs) 143 warned = True 144 emit_warning() --> 145 return wrapped(*args, **kwargs) File /opt/conda/lib/python3.10/site-packages/langchain/chains/base.py:363, in Chain.__call__(self, inputs, return_only_outputs, callbacks, tags, metadata, run_name, include_run_info) 331 """Execute the chain. 332 333 Args: (...) 354 `Chain.output_keys`. 
355 """ 356 config = { 357 "callbacks": callbacks, 358 "tags": tags, 359 "metadata": metadata, 360 "run_name": run_name, 361 } --> 363 return self.invoke( 364 inputs, 365 cast(RunnableConfig, {k: v for k, v in config.items() if v is not None}), 366 return_only_outputs=return_only_outputs, 367 include_run_info=include_run_info, 368 ) File /opt/conda/lib/python3.10/site-packages/langchain/chains/base.py:164, in Chain.invoke(self, input, config, **kwargs) 162 raise e 163 run_manager.on_chain_end(outputs) --> 164 final_outputs: Dict[str, Any] = self.prep_outputs( 165 inputs, outputs, return_only_outputs 166 ) 167 if include_run_info: 168 final_outputs[RUN_KEY] = RunInfo(run_id=run_manager.run_id) File /opt/conda/lib/python3.10/site-packages/langchain/chains/base.py:438, in Chain.prep_outputs(self, inputs, outputs, return_only_outputs) 420 def prep_outputs( 421 self, 422 inputs: Dict[str, str], 423 outputs: Dict[str, str], 424 return_only_outputs: bool = False, 425 ) -> Dict[str, str]: 426 """Validate and prepare chain outputs, and save info about this run to memory. 427 428 Args: (...) 436 A dict of the final chain outputs. 437 """ --> 438 self._validate_outputs(outputs) 439 if self.memory is not None: 440 self.memory.save_context(inputs, outputs) File /opt/conda/lib/python3.10/site-packages/langchain/chains/base.py:269, in Chain._validate_outputs(self, outputs) 267 missing_keys = set(self.output_keys).difference(outputs) 268 if missing_keys: --> 269 raise ValueError(f"Missing some output keys: {missing_keys}") ValueError: Missing some output keys: {'text'} ### Description I am trying to use a router chain to route human input to a sequential chain or a default llm. Given the same prompt, the sequential chain worked fine, but the router chain returned "ValueError: Missing some output keys: {'text'}". From the output I can see the error occured after the chain finished, and I don't have any variable named "text". Can you take a look at this? Thank you! 
### System Info System Information ------------------ > OS: Linux > OS Version: #1 SMP Debian 5.10.209-2 (2024-01-31) > Python Version: 3.10.12 | packaged by conda-forge | (main, Jun 23 2023, 22:40:32) [GCC 12.3.0] Package Information ------------------- > langchain_core: 0.1.23 > langchain: 0.1.7 > langchain_community: 0.0.20 > langsmith: 0.0.87 > langchain_google_vertexai: 0.0.5 > langchainplus_sdk: 0.0.20 Packages not installed (Not Necessarily a Problem) -------------------------------------------------- The following packages were not found: > langgraph > langserve
ValueError: Missing some output keys: {'text'} even if the chain finished.
https://api.github.com/repos/langchain-ai/langchain/issues/17816/comments
6
2024-02-20T16:42:11Z
2024-03-11T00:58:07Z
https://github.com/langchain-ai/langchain/issues/17816
2,144,815,632
17,816
[ "langchain-ai", "langchain" ]
i didn't understand please provide me full code _Originally posted by @shraddhaa26 in https://github.com/langchain-ai/langchain/discussions/17801#discussioncomment-8530449_
i didn't understand please provide me full code
https://api.github.com/repos/langchain-ai/langchain/issues/17804/comments
1
2024-02-20T14:03:19Z
2024-02-20T16:04:26Z
https://github.com/langchain-ai/langchain/issues/17804
2,144,455,799
17,804
[ "langchain-ai", "langchain" ]
### Checked other resources - [X] I added a very descriptive title to this issue. - [X] I searched the LangChain documentation with the integrated search. - [X] I used the GitHub search to find a similar question and didn't find it. - [X] I am sure that this is a bug in LangChain rather than my code. ### Example Code llm = GoogleGenerativeAI(   model="gemini-pro",   temperature=0.3,   max_output_tokens=2048,   )   chain = ConversationalRetrievalChain.from_llm(   llm=llm,   chain_type="stuff",   retriever= st.session_state.compression_retriever_reordered,   verbose=True,   combine_docs_chain_kwargs={"prompt": st.session_state.prompt},   return_source_documents=True,   )   conversation = get_conversation_string(st.session_state.messages)       res = chain({"question":user_question,"chat_history":chat_history})   answer =res["answer"] ### Error Message and Stack Trace (if applicable) IndexError: list index out of range Traceback: File "/home/user/.local/lib/python3.10/site-packages/streamlit/runtime/scriptrunner/script_runner.py", line 534, in _run_script exec(code, module.__dict__) File "/home/user/app/app.py", line 210, in <module> res = chain({"question":user_question,"chat_history":chat_history}) File "/home/user/.local/lib/python3.10/site-packages/langchain_core/_api/deprecation.py", line 145, in warning_emitting_wrapper return wrapped(*args, **kwargs) File "/home/user/.local/lib/python3.10/site-packages/langchain/chains/base.py", line 383, in __call__ return self.invoke( File "/home/user/.local/lib/python3.10/site-packages/langchain/chains/base.py", line 168, in invoke raise e File "/home/user/.local/lib/python3.10/site-packages/langchain/chains/base.py", line 158, in invoke self._call(inputs, run_manager=run_manager) File "/home/user/.local/lib/python3.10/site-packages/langchain/chains/conversational_retrieval/base.py", line 166, in _call answer = self.combine_docs_chain.run( File 
"/home/user/.local/lib/python3.10/site-packages/langchain_core/_api/deprecation.py", line 145, in warning_emitting_wrapper return wrapped(*args, **kwargs) File "/home/user/.local/lib/python3.10/site-packages/langchain/chains/base.py", line 555, in run return self(kwargs, callbacks=callbacks, tags=tags, metadata=metadata)[ File "/home/user/.local/lib/python3.10/site-packages/langchain_core/_api/deprecation.py", line 145, in warning_emitting_wrapper return wrapped(*args, **kwargs) File "/home/user/.local/lib/python3.10/site-packages/langchain/chains/base.py", line 383, in __call__ return self.invoke( File "/home/user/.local/lib/python3.10/site-packages/langchain/chains/base.py", line 168, in invoke raise e File "/home/user/.local/lib/python3.10/site-packages/langchain/chains/base.py", line 158, in invoke self._call(inputs, run_manager=run_manager) File "/home/user/.local/lib/python3.10/site-packages/langchain/chains/combine_documents/base.py", line 136, in _call output, extra_return_dict = self.combine_docs( File "/home/user/.local/lib/python3.10/site-packages/langchain/chains/combine_documents/stuff.py", line 244, in combine_docs return self.llm_chain.predict(callbacks=callbacks, **inputs), {} File "/home/user/.local/lib/python3.10/site-packages/langchain/chains/llm.py", line 293, in predict return self(kwargs, callbacks=callbacks)[self.output_key] File "/home/user/.local/lib/python3.10/site-packages/langchain_core/_api/deprecation.py", line 145, in warning_emitting_wrapper return wrapped(*args, **kwargs) File "/home/user/.local/lib/python3.10/site-packages/langchain/chains/base.py", line 383, in __call__ return self.invoke( File "/home/user/.local/lib/python3.10/site-packages/langchain/chains/base.py", line 168, in invoke raise e File "/home/user/.local/lib/python3.10/site-packages/langchain/chains/base.py", line 158, in invoke self._call(inputs, run_manager=run_manager) File "/home/user/.local/lib/python3.10/site-packages/langchain/chains/llm.py", line 104, in 
_call return self.create_outputs(response)[0] File "/home/user/.local/lib/python3.10/site-packages/langchain/chains/llm.py", line 258, in create_outputs result = [ File "/home/user/.local/lib/python3.10/site-packages/langchain/chains/llm.py", line 261, in <listcomp> self.output_key: self.output_parser.parse_result(generation), File "/home/user/.local/lib/python3.10/site-packages/langchain_core/output_parsers/base.py", line 219, in parse_result return self.parse(result[0].text) ### Description im trying to use gemini pro, but got error list index out of range ### System Info python = 3.11 langchain-google-gen-ai = 0.0.9 langchain = 0.1.5
List Index out of range gemini
https://api.github.com/repos/langchain-ai/langchain/issues/17800/comments
9
2024-02-20T13:15:32Z
2024-07-29T16:06:37Z
https://github.com/langchain-ai/langchain/issues/17800
2,144,358,183
17,800
[ "langchain-ai", "langchain" ]
### Checklist - [X] I added a very descriptive title to this issue. - [X] I included a link to the documentation page I am referring to (if applicable). ### Issue with current documentation: https://python.langchain.com/docs/modules/agents/quick_start#retriever https://python.langchain.com/docs/use_cases/chatbots/retrieval#creating-a-retriever https://python.langchain.com/docs/use_cases/chatbots/quickstart#retrievers https://python.langchain.com/docs/get_started/quickstart#server ### Idea or request for content: _No response_
DOC: https://docs.smith.langchain.com/overview -> 404
https://api.github.com/repos/langchain-ai/langchain/issues/17799/comments
2
2024-02-20T13:04:28Z
2024-05-31T23:52:14Z
https://github.com/langchain-ai/langchain/issues/17799
2,144,337,299
17,799
[ "langchain-ai", "langchain" ]
### Checked other resources - [X] I added a very descriptive title to this issue. - [X] I searched the LangChain documentation with the integrated search. - [X] I used the GitHub search to find a similar question and didn't find it. - [X] I am sure that this is a bug in LangChain rather than my code. ### Example Code The code I took comes from https://redis.com/blog/build-ecommerce-chatbot-with-redis/ blogpost. ```py import json from langchain.schema import BaseRetriever from langchain.vectorstores import VectorStore from langchain.schema import Document from pydantic import BaseModel class RedisProductRetriever(BaseRetriever, BaseModel): vectorstore: VectorStore class Config: arbitrary_types_allowed = True def combine_metadata(self, doc) -> str: metadata = doc.metadata return ( "Item Name: " + metadata["item_name"] + ". " + "Item Description: " + metadata["bullet_point"] + ". " + "Item Keywords: " + metadata["item_keywords"] + "." ) def get_relevant_documents(self, query): docs = [] for doc in self.vectorstore.similarity_search(query): content = self.combine_metadata(doc) docs.append(Document( page_content=content, metadata=doc.metadata )) return docs ``` ### Error Message and Stack Trace (if applicable) on class definition it fail with: ``` TypeError: metaclass conflict: the metaclass of a derived class must be a (non-strict) subclass of the metaclasses of all its bases ``` ### Description I'm searching for a way to create a custom retriver but the instruction from the https://redis.com/blog/build-ecommerce-chatbot-with-redis/ blogpost doesn't work. ### System Info ``` langchain==0.1.4 langchain-community==0.0.16 langchain-core==0.1.16 langchain-openai==0.0.5 ``` Python 3.11.7 macOS
metaclass conflict error when trying to set up a custom retriever
https://api.github.com/repos/langchain-ai/langchain/issues/17796/comments
2
2024-02-20T11:58:44Z
2024-05-31T23:46:25Z
https://github.com/langchain-ai/langchain/issues/17796
2,144,212,734
17,796
[ "langchain-ai", "langchain" ]
### Checked other resources - [X] I added a very descriptive title to this issue. - [X] I searched the LangChain documentation with the integrated search. - [X] I used the GitHub search to find a similar question and didn't find it. - [X] I am sure that this is a bug in LangChain rather than my code. ### Example Code The following line of code ```python from langchain.agents import create_react_agent ``` raises the error reported below. ### Error Message and Stack Trace (if applicable) ```python TypeError Traceback (most recent call last) Cell In[6], line 1 ----> 1 from langchain.agents import create_react_agent File ~\anaconda3\envs\langchain_env_for_reviews\lib\site-packages\langchain\agents\__init__.py:34 31 from pathlib import Path 32 from typing import Any ---> 34 from langchain_community.agent_toolkits import ( 35 create_json_agent, 36 create_openapi_agent, 37 create_pbi_agent, 38 create_pbi_chat_agent, 39 create_spark_sql_agent, 40 create_sql_agent, 41 ) 42 from langchain_core._api.path import as_import_path 44 from langchain.agents.agent import ( 45 Agent, 46 AgentExecutor, (...) 
50 LLMSingleActionAgent, 51 ) File ~\anaconda3\envs\langchain_env_for_reviews\lib\site-packages\langchain_community\agent_toolkits\__init__.py:46 44 from langchain_community.agent_toolkits.spark_sql.base import create_spark_sql_agent 45 from langchain_community.agent_toolkits.spark_sql.toolkit import SparkSQLToolkit ---> 46 from langchain_community.agent_toolkits.sql.base import create_sql_agent 47 from langchain_community.agent_toolkits.sql.toolkit import SQLDatabaseToolkit 48 from langchain_community.agent_toolkits.steam.toolkit import SteamToolkit File ~\anaconda3\envs\langchain_env_for_reviews\lib\site-packages\langchain_community\agent_toolkits\sql\base.py:29 19 from langchain_core.prompts.chat import ( 20 ChatPromptTemplate, 21 HumanMessagePromptTemplate, 22 MessagesPlaceholder, 23 ) 25 from langchain_community.agent_toolkits.sql.prompt import ( 26 SQL_FUNCTIONS_SUFFIX, 27 SQL_PREFIX, 28 ) ---> 29 from langchain_community.agent_toolkits.sql.toolkit import SQLDatabaseToolkit 30 from langchain_community.tools.sql_database.tool import ( 31 InfoSQLDatabaseTool, 32 ListSQLDatabaseTool, 33 ) 35 if TYPE_CHECKING: File ~\anaconda3\envs\langchain_env_for_reviews\lib\site-packages\langchain_community\agent_toolkits\sql\toolkit.py:9 7 from langchain_community.agent_toolkits.base import BaseToolkit 8 from langchain_community.tools import BaseTool ----> 9 from langchain_community.tools.sql_database.tool import ( 10 InfoSQLDatabaseTool, 11 ListSQLDatabaseTool, 12 QuerySQLCheckerTool, 13 QuerySQLDataBaseTool, 14 ) 15 from langchain_community.utilities.sql_database import SQLDatabase 18 class SQLDatabaseToolkit(BaseToolkit): File ~\anaconda3\envs\langchain_env_for_reviews\lib\site-packages\langchain_community\tools\sql_database\tool.py:33 29 class _QuerySQLDataBaseToolInput(BaseModel): 30 query: str = Field(..., description="A detailed and correct SQL query.") ---> 33 class QuerySQLDataBaseTool(BaseSQLDatabaseTool, BaseTool): 34 """Tool for querying a SQL database.""" 36 
name: str = "sql_db_query" File ~\anaconda3\envs\langchain_env_for_reviews\lib\site-packages\langchain_community\tools\sql_database\tool.py:47, in QuerySQLDataBaseTool() 36 name: str = "sql_db_query" 37 description: str = """ 38 Execute a SQL query against the database and get back the result.. 39 If the query is not correct, an error message will be returned. 40 If an error is returned, rewrite the query, check the query, and try again. 41 """ 43 def _run( 44 self, 45 query: str, 46 run_manager: Optional[CallbackManagerForToolRun] = None, ---> 47 ) -> Union[str, Sequence[Dict[str, Any]], Result[Any]]: 48 """Execute the query, return the results or an error message.""" 49 return self.db.run_no_throw(query) TypeError: 'type' object is not subscriptable ``` ### Description I'm trying to import the `create_react_agent` function but encounter a `TypeError`. ### System Info System Information ------------------ > OS: Windows > OS Version: 10.0.22631 > Python version: 3.8.17 Package Information ------------------- > langchain_core: 0.1.24 > langchain: 0.1.8 > langchain_community: 0.0.21 > langsmith: 0.1.3 > langchain_openai: 0.0.2 > langchainhub: 0.1.14
Error when importing create_react_agent: TypeError: 'type' object is not subscriptable
https://api.github.com/repos/langchain-ai/langchain/issues/17786/comments
4
2024-02-20T09:32:19Z
2024-02-20T14:55:29Z
https://github.com/langchain-ai/langchain/issues/17786
2,143,940,690
17,786
[ "langchain-ai", "langchain" ]
### Checklist - [X] I added a very descriptive title to this issue. - [X] I included a link to the documentation page I am referring to (if applicable). ### Issue with current documentation: - ### Idea or request for content: I tried RAG using GraphIndexCreator instead of conventional VectorDB. So the process o VectorDB is to load the file, split it using RecursiveCharacterTextSplitter, then save it to FAISS vectordb, so that it'll not exceed the context limit of the LLM. But for the Knowledge Graph i.e. GraphIndexCreator, i've used below code ``` from langchain.indexes import GraphIndexCreator from langchain_openai import OpenAI with open("/content/1.txt") as f: all_text = f.read() text = all_text graph = index_creator.from_text(text) ``` it is returning the below issue, that it is exceeding the context limit ``` BadRequestError Traceback (most recent call last) [<ipython-input-12-e007a66c39f1>](https://localhost:8080/#) in <cell line: 1>() ----> 1 graph = index_creator.from_text(text) 18 frames [/usr/local/lib/python3.10/dist-packages/openai/_base_client.py](https://localhost:8080/#) in _request(self, cast_to, options, remaining_retries, stream, stream_cls) 978 979 log.debug("Re-raising status error") --> 980 raise self._make_status_error_from_response(err.response) from None 981 982 return self._process_response( BadRequestError: Error code: 400 - {'error': {'message': "This model's maximum context length is 4097 tokens, however you requested 17145 tokens (16889 in your prompt; 256 for the completion). Please reduce your prompt; or completion length.", 'type': 'invalid_request_error', 'param': None, 'code': None}} ``` How to overcome this issue?
BadRequestError: Error code: 400 - {'error': {'message': "This model's maximum context length is 4097 tokens when using GraphIndexCreator
https://api.github.com/repos/langchain-ai/langchain/issues/17783/comments
3
2024-02-20T09:09:23Z
2024-02-20T15:42:37Z
https://github.com/langchain-ai/langchain/issues/17783
2,143,893,557
17,783
[ "langchain-ai", "langchain" ]
### Checked other resources - [X] I added a very descriptive title to this issue. - [X] I searched the LangChain documentation with the integrated search. - [X] I used the GitHub search to find a similar question and didn't find it. - [X] I am sure that this is a bug in LangChain rather than my code. ### Example Code ```python from pymongo.mongo_client import MongoClient from pymongo.server_api import ServerApi from langchain_openai.embeddings import AzureOpenAIEmbeddings from langchain.schema import Document from langchain.vectorstores.mongodb_atlas import MongoDBAtlasVectorSearch import os os.environ["OPENAI_API_KEY"] = "asd" os.environ["OPENAI_API_TYPE"] = "azure" os.environ["OPENAI_API_VERSION"] = "2023-05-15" ATLAS_CONNECTION_STRING = "blabla" COLLECTION_NAME = "documents" DB_NAME = "FraDev" embeddings = AzureOpenAIEmbeddings( deployment="text-embedding-ada-002", chunk_size=1, # we need to use one because azure is poop azure_endpoint="asd", ) # Create a new client and connect to the server client = MongoClient(ATLAS_CONNECTION_STRING, server_api=ServerApi("1")) collection = client["FraDev"][COLLECTION_NAME] def create_vector_search(): """ Creates a MongoDBAtlasVectorSearch object using the connection string, database, and collection names, along with the OpenAI embeddings and index configuration. 
:return: MongoDBAtlasVectorSearch object """ vector_search = MongoDBAtlasVectorSearch.from_connection_string( ATLAS_CONNECTION_STRING, f"{DB_NAME}.{COLLECTION_NAME}", embeddings, index_name="default", ) return vector_search docs = [ Document(page_content="foo", metadata={"id": 123, "file": {"name": "test.txt"}}) ] vector_search = MongoDBAtlasVectorSearch.from_documents( documents=docs, embedding=embeddings, collection=collection, index_name="default4", # Use a predefined index name ) vector_search = create_vector_search() results = vector_search.similarity_search_with_score( query="foo", k=1, pre_filter={"name": {"$eq": "test.txt"}} ) print(results) ``` ### Error Message and Stack Trace (if applicable) _No response_ ### Description Should add all the metadata into a `metadata` object inside the document stored into mongo, but it doesn't ![Screenshot 2024-02-20 at 10 08 07](https://github.com/langchain-ai/langchain/assets/15908060/91f465c8-698b-4e97-8f01-1de77774b235) Thanks a lot ### System Info System Information ------------------ > OS: Darwin > OS Version: Darwin Kernel Version 23.3.0: Wed Dec 20 21:33:31 PST 2023; root:xnu-10002.81.5~7/RELEASE_ARM64_T8112 > Python Version: 3.11.6 (main, Nov 2 2023, 04:39:43) [Clang 14.0.3 (clang-1403.0.22.14.1)] Package Information ------------------- > langchain_core: 0.1.23 > langchain: 0.1.7 > langchain_community: 0.0.20 > langsmith: 0.0.87 > langchain_openai: 0.0.6 Packages not installed (Not Necessarily a Problem) -------------------------------------------------- The following packages were not found: > langgraph > langserve
MongoAtlas doesn't add metadata in metadata
https://api.github.com/repos/langchain-ai/langchain/issues/17782/comments
1
2024-02-20T09:08:18Z
2024-05-31T23:46:27Z
https://github.com/langchain-ai/langchain/issues/17782
2,143,890,706
17,782
[ "langchain-ai", "langchain" ]
### Checked other resources - [X] I added a very descriptive title to this issue. - [X] I searched the LangChain documentation with the integrated search. - [X] I used the GitHub search to find a similar question and didn't find it. - [X] I am sure that this is a bug in LangChain rather than my code. ### Example Code from langchain_community.llms import HuggingFaceEndpoint from langchain.chat_models import ChatHuggingFace llm = HuggingFaceEndpoint( repo_id="HuggingFaceH4/zephyr-7b-beta", ) agent = ChatHuggingFace(llm=llm) ### Error Message and Stack Trace (if applicable) --------------------------------------------------------------------------- ValueError Traceback (most recent call last) [/home/ubuntu/benchmark_agents/test_prompt_optimizer.ipynb](https://vscode-remote+ssh-002dremote-002bec2.vscode-resource.vscode-cdn.net/home/ubuntu/benchmark_agents/test_prompt_optimizer.ipynb) Cell 11 line 9 [2](vscode-notebook-cell://ssh-remote%2Bec2/home/ubuntu/benchmark_agents/test_prompt_optimizer.ipynb#X10sdnNjb2RlLXJlbW90ZQ%3D%3D?line=1) from langchain.chat_models import ChatHuggingFace [4](vscode-notebook-cell://ssh-remote%2Bec2/home/ubuntu/benchmark_agents/test_prompt_optimizer.ipynb#X10sdnNjb2RlLXJlbW90ZQ%3D%3D?line=3) llm = HuggingFaceEndpoint( [5](vscode-notebook-cell://ssh-remote%2Bec2/home/ubuntu/benchmark_agents/test_prompt_optimizer.ipynb#X10sdnNjb2RlLXJlbW90ZQ%3D%3D?line=4) repo_id="HuggingFaceH4/zephyr-7b-beta", [6](vscode-notebook-cell://ssh-remote%2Bec2/home/ubuntu/benchmark_agents/test_prompt_optimizer.ipynb#X10sdnNjb2RlLXJlbW90ZQ%3D%3D?line=5) max_new_tokens=100, [7](vscode-notebook-cell://ssh-remote%2Bec2/home/ubuntu/benchmark_agents/test_prompt_optimizer.ipynb#X10sdnNjb2RlLXJlbW90ZQ%3D%3D?line=6) ) ----> [9](vscode-notebook-cell://ssh-remote%2Bec2/home/ubuntu/benchmark_agents/test_prompt_optimizer.ipynb#X10sdnNjb2RlLXJlbW90ZQ%3D%3D?line=8) agent = ChatHuggingFace(llm=llm) File 
[~/langchain_fuse_hf_endpoints/libs/community/langchain_community/chat_models/huggingface.py:55](https://vscode-remote+ssh-002dremote-002bec2.vscode-resource.vscode-cdn.net/home/ubuntu/benchmark_agents/~/langchain_fuse_hf_endpoints/libs/community/langchain_community/chat_models/huggingface.py:55), in ChatHuggingFace.__init__(self, **kwargs) [51](https://vscode-remote+ssh-002dremote-002bec2.vscode-resource.vscode-cdn.net/home/ubuntu/benchmark_agents/~/langchain_fuse_hf_endpoints/libs/community/langchain_community/chat_models/huggingface.py:51) super().__init__(**kwargs) [53](https://vscode-remote+ssh-002dremote-002bec2.vscode-resource.vscode-cdn.net/home/ubuntu/benchmark_agents/~/langchain_fuse_hf_endpoints/libs/community/langchain_community/chat_models/huggingface.py:53) from transformers import AutoTokenizer ---> [55](https://vscode-remote+ssh-002dremote-002bec2.vscode-resource.vscode-cdn.net/home/ubuntu/benchmark_agents/~/langchain_fuse_hf_endpoints/libs/community/langchain_community/chat_models/huggingface.py:55) self._resolve_model_id() [57](https://vscode-remote+ssh-002dremote-002bec2.vscode-resource.vscode-cdn.net/home/ubuntu/benchmark_agents/~/langchain_fuse_hf_endpoints/libs/community/langchain_community/chat_models/huggingface.py:57) self.tokenizer = ( [58](https://vscode-remote+ssh-002dremote-002bec2.vscode-resource.vscode-cdn.net/home/ubuntu/benchmark_agents/~/langchain_fuse_hf_endpoints/libs/community/langchain_community/chat_models/huggingface.py:58) AutoTokenizer.from_pretrained(self.model_id) [59](https://vscode-remote+ssh-002dremote-002bec2.vscode-resource.vscode-cdn.net/home/ubuntu/benchmark_agents/~/langchain_fuse_hf_endpoints/libs/community/langchain_community/chat_models/huggingface.py:59) if self.tokenizer is None [60](https://vscode-remote+ssh-002dremote-002bec2.vscode-resource.vscode-cdn.net/home/ubuntu/benchmark_agents/~/langchain_fuse_hf_endpoints/libs/community/langchain_community/chat_models/huggingface.py:60) else self.tokenizer 
[61](https://vscode-remote+ssh-002dremote-002bec2.vscode-resource.vscode-cdn.net/home/ubuntu/benchmark_agents/~/langchain_fuse_hf_endpoints/libs/community/langchain_community/chat_models/huggingface.py:61) ) File [~/langchain_fuse_hf_endpoints/libs/community/langchain_community/chat_models/huggingface.py:155](https://vscode-remote+ssh-002dremote-002bec2.vscode-resource.vscode-cdn.net/home/ubuntu/benchmark_agents/~/langchain_fuse_hf_endpoints/libs/community/langchain_community/chat_models/huggingface.py:155), in ChatHuggingFace._resolve_model_id(self) [152](https://vscode-remote+ssh-002dremote-002bec2.vscode-resource.vscode-cdn.net/home/ubuntu/benchmark_agents/~/langchain_fuse_hf_endpoints/libs/community/langchain_community/chat_models/huggingface.py:152) self.model_id = endpoint.repository [154](https://vscode-remote+ssh-002dremote-002bec2.vscode-resource.vscode-cdn.net/home/ubuntu/benchmark_agents/~/langchain_fuse_hf_endpoints/libs/community/langchain_community/chat_models/huggingface.py:154) if not self.model_id: --> [155](https://vscode-remote+ssh-002dremote-002bec2.vscode-resource.vscode-cdn.net/home/ubuntu/benchmark_agents/~/langchain_fuse_hf_endpoints/libs/community/langchain_community/chat_models/huggingface.py:155) raise ValueError( [156](https://vscode-remote+ssh-002dremote-002bec2.vscode-resource.vscode-cdn.net/home/ubuntu/benchmark_agents/~/langchain_fuse_hf_endpoints/libs/community/langchain_community/chat_models/huggingface.py:156) "Failed to resolve model_id:" [157](https://vscode-remote+ssh-002dremote-002bec2.vscode-resource.vscode-cdn.net/home/ubuntu/benchmark_agents/~/langchain_fuse_hf_endpoints/libs/community/langchain_community/chat_models/huggingface.py:157) f"Could not find model id for inference server: {endpoint_url}" [158](https://vscode-remote+ssh-002dremote-002bec2.vscode-resource.vscode-cdn.net/home/ubuntu/benchmark_agents/~/langchain_fuse_hf_endpoints/libs/community/langchain_community/chat_models/huggingface.py:158) "Make sure that your 
Hugging Face token has access to the endpoint." [159](https://vscode-remote+ssh-002dremote-002bec2.vscode-resource.vscode-cdn.net/home/ubuntu/benchmark_agents/~/langchain_fuse_hf_endpoints/libs/community/langchain_community/chat_models/huggingface.py:159) ) ValueError: Failed to resolve model_id:Could not find model id for inference server: Make sure that your Hugging Face token has access to the endpoint. ### Description The `model_id` cannot be resolved in [_resolve_model_id](https://github.com/langchain-ai/langchain/blob/865cabff052fe74996bef45faaf00df6f322c215/libs/community/langchain_community/chat_models/huggingface.py#L134), due to the `self.llm` attribute of the ChatHuggingFace object incorrectly being identified as a `HuggingFaceTextGenInference`. But if we switch the order of [this type hint](https://github.com/langchain-ai/langchain/blob/865cabff052fe74996bef45faaf00df6f322c215/libs/community/langchain_community/chat_models/huggingface.py#L45) to `Union[HuggingFaceEndpoint, HuggingFaceTextGenInference, HuggingFaceHub]`, the llm type is correctly detected again. This seems really fucked up. ### System Info langchain==0.1.8 langchain-benchmarks==0.0.10 langchain-community==0.0.21 langchain-core==0.1.24 langchainhub==0.1.14
Cannot resolve model_id on ChatHuggingFace, depending on the order of type hints
https://api.github.com/repos/langchain-ai/langchain/issues/17780/comments
7
2024-02-20T09:04:44Z
2024-03-12T15:36:47Z
https://github.com/langchain-ai/langchain/issues/17780
2,143,880,914
17,780
[ "langchain-ai", "langchain" ]
### Checked other resources - [X] I added a very descriptive title to this issue. - [X] I searched the LangChain documentation with the integrated search. - [X] I used the GitHub search to find a similar question and didn't find it. - [X] I am sure that this is a bug in LangChain rather than my code. ### Example Code ``` import os from langchain_community.llms import HuggingFaceTextGenInference ENDPOINT_URL = "<YOUR_ENDPOINT_URL_HERE>" HF_TOKEN = os.getenv("HUGGINGFACEHUB_API_TOKEN") llm = HuggingFaceTextGenInference( inference_server_url=ENDPOINT_URL, max_new_tokens=512, top_k=50, temperature=0.1, repetition_penalty=1.03, server_kwargs={ "headers": { "Authorization": f"Bearer {HF_TOKEN}", "Content-Type": "application/json", } }, ) ``` ### Error Message and Stack Trace (if applicable) ``` File "/app/test_lang.py", line 36, in chat_model = ChatHuggingFace(llm=llm) ^^^^^^^^^^^^^^^^^^^^^^^^ File "/usr/local/lib/python3.11/site-packages/langchain_community/chat_models/huggingface.py", line 54, in init self._resolve_model_id() File "/usr/local/lib/python3.11/site-packages/langchain_community/chat_models/huggingface.py", line 158, in _resolve_model_id raise ValueError( ValueError: Failed to resolve model_id Could not find model id for inference server provided: http://xx.xx.xx.xxx/ Make sure that your Hugging Face token has access to the endpoint. ``` ### Description I have hosted text-generation-inference on a seperate instance and i am trying to call it from langchain server hosted on another server. But i am getting this error. ### System Info ``` langchain==0.1.7 langchain-community==0.0.20 langchain-core==0.1.23 ```
ValueError: Failed to resolve model_id when calling text-generation-inference service from Langchain
https://api.github.com/repos/langchain-ai/langchain/issues/17779/comments
11
2024-02-20T08:45:14Z
2024-08-04T16:06:35Z
https://github.com/langchain-ai/langchain/issues/17779
2,143,831,835
17,779
[ "langchain-ai", "langchain" ]
### Checked other resources - [X] I added a very descriptive title to this issue. - [X] I searched the LangChain documentation with the integrated search. - [X] I used the GitHub search to find a similar question and didn't find it. - [X] I am sure that this is a bug in LangChain rather than my code. ### Example Code session = boto3.Session() credentials = session.get_credentials() awsauth = AWS4Auth(credentials.access_key, credentials.secret_key, region, 'es', session_token=credentials.token) es_client = Elasticsearch( hosts=[{'host': 'aws Es url ', 'port': 443, 'scheme': 'https'}], http_auth=awsauth, verify_certs=True, use_ssl=True ) ### Error Message and Stack Trace (if applicable) --------------------------------------------------------------------------- TypeError Traceback (most recent call last) Cell In[79], line 1 ----> 1 es_client = Elasticsearch( 2 hosts=[{'host': 'vpc-builddemo-dik6sgzdbwneff6v3ophihop24.us-east-1.es.amazonaws.com', 'port': 443, 'scheme': 'https'}], 3 http_auth=awsauth, 4 verify_certs=True, 5 use_ssl=True 6 ) TypeError: Elasticsearch.__init__() got an unexpected keyword argument 'use_ssl' ### Description 1. I am tring to connect Elasticsearch using my aws credential but not able to connect as it throght error "TypeError: Elasticsearch.__init__() got an unexpected keyword argument 'use_ssl' " 2. if I remove use_ssl then it gets error "ValueError: Using a custom 'requests.auth.AuthBase' class for 'http_auth' must be used with node_class='requests' " ### System Info from langchain_community.vectorstores import ElasticsearchStore from langchain.schema import Document
unable to connect opensearch using Elasticsearch libary
https://api.github.com/repos/langchain-ai/langchain/issues/17777/comments
1
2024-02-20T07:40:37Z
2024-05-31T23:48:54Z
https://github.com/langchain-ai/langchain/issues/17777
2,143,726,236
17,777
[ "langchain-ai", "langchain" ]
### Checked other resources - [X] I added a very descriptive title to this issue. - [X] I searched the LangChain documentation with the integrated search. - [X] I used the GitHub search to find a similar question and didn't find it. - [X] I am sure that this is a bug in LangChain rather than my code. ### Example Code File "/home/house365ai/xxm/langchain/examples/demo.py", line 42, in <module> docsearch = Chroma.from_documents(texts, embedding=embedding) File "/home/house365ai/anaconda3/envs/langchain/lib/python3.10/site-packages/langchain_community/vectorstores/chroma.py", line 778, in from_documents return cls.from_texts( File "/home/house365ai/anaconda3/envs/langchain/lib/python3.10/site-packages/langchain_community/vectorstores/chroma.py", line 736, in from_texts chroma_collection.add_texts( File "/home/house365ai/anaconda3/envs/langchain/lib/python3.10/site-packages/langchain_community/vectorstores/chroma.py", line 275, in add_texts embeddings = self._embedding_function.embed_documents(texts) File "/home/house365ai/anaconda3/envs/langchain/lib/python3.10/site-packages/langchain_community/embeddings/localai.py", line 302, in embed_documents return [self._embedding_func(text, engine=self.deployment) for text in texts] File "/home/house365ai/anaconda3/envs/langchain/lib/python3.10/site-packages/langchain_community/embeddings/localai.py", line 302, in <listcomp> return [self._embedding_func(text, engine=self.deployment) for text in texts] File "/home/house365ai/anaconda3/envs/langchain/lib/python3.10/site-packages/langchain_community/embeddings/localai.py", line 267, in _embedding_func return embed_with_retry( File "/home/house365ai/anaconda3/envs/langchain/lib/python3.10/site-packages/langchain_community/embeddings/localai.py", line 98, in embed_with_retry retry_decorator = _create_retry_decorator(embeddings) File "/home/house365ai/anaconda3/envs/langchain/lib/python3.10/site-packages/langchain_community/embeddings/localai.py", line 45, in _create_retry_decorator 
retry_if_exception_type(openai.error.Timeout) AttributeError: module 'openai' has no attribute 'error' ### Error Message and Stack Trace (if applicable) 2 ### Description File "/home/house365ai/xxm/langchain/examples/demo.py", line 42, in <module> docsearch = Chroma.from_documents(texts, embedding=embedding) File "/home/house365ai/anaconda3/envs/langchain/lib/python3.10/site-packages/langchain_community/vectorstores/chroma.py", line 778, in from_documents return cls.from_texts( File "/home/house365ai/anaconda3/envs/langchain/lib/python3.10/site-packages/langchain_community/vectorstores/chroma.py", line 736, in from_texts chroma_collection.add_texts( File "/home/house365ai/anaconda3/envs/langchain/lib/python3.10/site-packages/langchain_community/vectorstores/chroma.py", line 275, in add_texts embeddings = self._embedding_function.embed_documents(texts) File "/home/house365ai/anaconda3/envs/langchain/lib/python3.10/site-packages/langchain_community/embeddings/localai.py", line 302, in embed_documents return [self._embedding_func(text, engine=self.deployment) for text in texts] File "/home/house365ai/anaconda3/envs/langchain/lib/python3.10/site-packages/langchain_community/embeddings/localai.py", line 302, in <listcomp> return [self._embedding_func(text, engine=self.deployment) for text in texts] File "/home/house365ai/anaconda3/envs/langchain/lib/python3.10/site-packages/langchain_community/embeddings/localai.py", line 267, in _embedding_func return embed_with_retry( File "/home/house365ai/anaconda3/envs/langchain/lib/python3.10/site-packages/langchain_community/embeddings/localai.py", line 98, in embed_with_retry retry_decorator = _create_retry_decorator(embeddings) File "/home/house365ai/anaconda3/envs/langchain/lib/python3.10/site-packages/langchain_community/embeddings/localai.py", line 45, in _create_retry_decorator retry_if_exception_type(openai.error.Timeout) AttributeError: module 'openai' has no attribute 'error' ### System Info File 
"/home/house365ai/xxm/langchain/examples/demo.py", line 42, in <module> docsearch = Chroma.from_documents(texts, embedding=embedding) File "/home/house365ai/anaconda3/envs/langchain/lib/python3.10/site-packages/langchain_community/vectorstores/chroma.py", line 778, in from_documents return cls.from_texts( File "/home/house365ai/anaconda3/envs/langchain/lib/python3.10/site-packages/langchain_community/vectorstores/chroma.py", line 736, in from_texts chroma_collection.add_texts( File "/home/house365ai/anaconda3/envs/langchain/lib/python3.10/site-packages/langchain_community/vectorstores/chroma.py", line 275, in add_texts embeddings = self._embedding_function.embed_documents(texts) File "/home/house365ai/anaconda3/envs/langchain/lib/python3.10/site-packages/langchain_community/embeddings/localai.py", line 302, in embed_documents return [self._embedding_func(text, engine=self.deployment) for text in texts] File "/home/house365ai/anaconda3/envs/langchain/lib/python3.10/site-packages/langchain_community/embeddings/localai.py", line 302, in <listcomp> return [self._embedding_func(text, engine=self.deployment) for text in texts] File "/home/house365ai/anaconda3/envs/langchain/lib/python3.10/site-packages/langchain_community/embeddings/localai.py", line 267, in _embedding_func return embed_with_retry( File "/home/house365ai/anaconda3/envs/langchain/lib/python3.10/site-packages/langchain_community/embeddings/localai.py", line 98, in embed_with_retry retry_decorator = _create_retry_decorator(embeddings) File "/home/house365ai/anaconda3/envs/langchain/lib/python3.10/site-packages/langchain_community/embeddings/localai.py", line 45, in _create_retry_decorator retry_if_exception_type(openai.error.Timeout) AttributeError: module 'openai' has no attribute 'error'
module 'openai' has no attribute 'error'
https://api.github.com/repos/langchain-ai/langchain/issues/17775/comments
3
2024-02-20T06:37:04Z
2024-06-03T01:17:06Z
https://github.com/langchain-ai/langchain/issues/17775
2,143,643,123
17,775
[ "langchain-ai", "langchain" ]
### Checked other resources - [X] I added a very descriptive title to this issue. - [X] I searched the LangChain documentation with the integrated search. - [X] I used the GitHub search to find a similar question and didn't find it. - [X] I am sure that this is a bug in LangChain rather than my code. ### Example Code a ### Error Message and Stack Trace (if applicable) a ### Description Is it possible to create an sql agent to make queries on Google BigQuery on latests versions of langchain?. It was possible on older versions. ### System Info a
SQL Agent for Google Big Query
https://api.github.com/repos/langchain-ai/langchain/issues/17762/comments
6
2024-02-19T20:47:16Z
2024-07-24T07:46:13Z
https://github.com/langchain-ai/langchain/issues/17762
2,143,121,768
17,762
[ "langchain-ai", "langchain" ]
### Checklist - [X] I added a very descriptive title to this issue. - [X] I included a link to the documentation page I am referring to (if applicable). ### Issue with current documentation: Sometimes I come across simple issues in documentation like [this](https://github.com/langchain-ai/langchain/issues/17758) I would like to report. I find going to the github page and filling out an entire issue to be a bit too much effort for these small bugs, and will typically just move on and ignore it ### Idea or request for content: I think it would be nice to have a simple button somewhere on the documentation page to submit simple issues. Personally an easier way to report these kinds of things would give me more of an incentive to do it
Simple submit feedback option in documentation
https://api.github.com/repos/langchain-ai/langchain/issues/17759/comments
1
2024-02-19T20:37:48Z
2024-05-31T23:46:24Z
https://github.com/langchain-ai/langchain/issues/17759
2,143,109,902
17,759
[ "langchain-ai", "langchain" ]
### Checklist - [X] I added a very descriptive title to this issue. - [X] I included a link to the documentation page I am referring to (if applicable). ### Issue with current documentation: The parser links in [here](https://python.langchain.com/docs/modules/model_io/output_parsers#output-parser-types) are no longer valid. I see their URLs got moved, but the links in here have not been updated ### Idea or request for content: _No response_
Output parser links broken
https://api.github.com/repos/langchain-ai/langchain/issues/17758/comments
1
2024-02-19T20:29:53Z
2024-05-31T23:46:24Z
https://github.com/langchain-ai/langchain/issues/17758
2,143,100,260
17,758
[ "langchain-ai", "langchain" ]
### Checklist - [X] I added a very descriptive title to this issue. - [X] I included a link to the documentation page I am referring to (if applicable). ### Issue with current documentation: When trying to follow the links to specific guides about prompt usage on [this docs page](https://python.langchain.com/docs/modules/model_io/prompts), all of the links were broken. Here is the relevant part of the documents: <img width="537" alt="image" src="https://github.com/langchain-ai/langchain/assets/3274/abca83c0-47d5-45d8-8224-26abab4712f7"> ### Idea or request for content: _No response_
DOC: Broken links on Prompts docs page: links to all How-To Guides are broken
https://api.github.com/repos/langchain-ai/langchain/issues/17753/comments
2
2024-02-19T19:22:16Z
2024-06-13T19:46:23Z
https://github.com/langchain-ai/langchain/issues/17753
2,143,011,204
17,753
[ "langchain-ai", "langchain" ]
### Checklist - [X] I added a very descriptive title to this issue. - [X] I included a link to the documentation page I am referring to (if applicable). ### Issue with current documentation: - ### Idea or request for content: below's the code ``` examples = [ { "data": """ cricket is going to be crucial these days, """, "question": "how's cricket going to be?", "answer": "yes", "reason": """ cricket in this country is good """, } ] # Define your prompt template prompt_template = """This template is designed to streamline the process of responding to user queries by focusing on delivering concise and direct answers. Follow these steps for effective use: Carefully read the user's question to fully grasp the nature of the inquiry. Review the provided context, if any, to gather relevant information. Based on the understanding of the question and the context, determine the most appropriate answer. Respond with a simple 'Yes' or 'No', ensuring clarity and precision in addressing the user's query. In cases where no context is provided, abstain from giving an answer. Ensure your response is structured as follows for clarity: User Question: {question} (Repeat the user's question here.) Direct Answer: (Provide a straightforward 'Yes' or 'No' based on the query.) This approach ensures that responses are not only relevant and to the point but also structured in a way that is easy for users to understand.""" example_prompt = PromptTemplate( input_variables=["data", "question", "answer", "reason"], template=prompt_template ) from langchain.prompts.few_shot import FewShotPromptTemplate prompt_template = FewShotPromptTemplate( examples=examples, example_prompt=example_prompt, suffix="Question: {input}", input_variables=["input"], ) ``` is the above correct way of using the examples, example_prompt, input etc in fewshottemplate? If not, what's the right way to use it? Can you help me with the code? If the above code is wrong, return with the correct code
how to add examples and example_prompt to fewshot?
https://api.github.com/repos/langchain-ai/langchain/issues/17741/comments
1
2024-02-19T16:22:52Z
2024-02-20T02:30:13Z
https://github.com/langchain-ai/langchain/issues/17741
2,142,745,040
17,741
[ "langchain-ai", "langchain" ]
### Checklist - [X] I added a very descriptive title to this issue. - [X] I included a link to the documentation page I am referring to (if applicable). ### Issue with current documentation: below are the examples ``` examples = [ { "data": """ cricket is going to be crucial these days, """, "question": "how's cricket going to be?", "answer": "yes", "reason": """ cricket in this country is good """, } ] # Define your prompt template prompt_template = """This template is designed to streamline the process of responding to user queries by focusing on delivering concise and direct answers. Follow these steps for effective use: Carefully read the user's question to fully grasp the nature of the inquiry. Review the provided context, if any, to gather relevant information. Based on the understanding of the question and the context, determine the most appropriate answer. Respond with a simple 'Yes' or 'No', ensuring clarity and precision in addressing the user's query. In cases where no context is provided, abstain from giving an answer. Ensure your response is structured as follows for clarity: User Question: {question} (Repeat the user's question here.) Direct Answer: (Provide a straightforward 'Yes' or 'No' based on the query.) This approach ensures that responses are not only relevant and to the point but also structured in a way that is easy for users to understand.""" example_prompt = PromptTemplate( input_variables=["data", "question", "answer", "reason"], template=prompt_template ) from langchain.prompts.few_shot import FewShotPromptTemplate prompt_template = FewShotPromptTemplate( examples=examples, example_prompt=example_prompt, suffix="Question: {input}", input_variables=["input"], ) ``` is the above correct way of using the examples, example_prompt, input etc in fewshottemplate? If not, what's the right way to use it? Can you help me with the code? ### Idea or request for content: _No response_
how to add examples and example_prompt to fewshort?
https://api.github.com/repos/langchain-ai/langchain/issues/17737/comments
3
2024-02-19T15:35:41Z
2024-02-20T02:30:12Z
https://github.com/langchain-ai/langchain/issues/17737
2,142,649,529
17,737
[ "langchain-ai", "langchain" ]
### Checked other resources - [X] I added a very descriptive title to this issue. - [X] I searched the LangChain documentation with the integrated search. - [X] I used the GitHub search to find a similar question and didn't find it. - [X] I am sure that this is a bug in LangChain rather than my code. ### Example Code ``` docsearch_in_os = OpenSearchVectorSearch( opensearch_url=os.environ.get("OPENSEARCH_URL"), index_name=index_name, embedding_function=bedrock_embeddings, http_auth=auth, timeout=200, use_ssl=True, verify_certs=True, connection_class=RequestsHttpConnection, is_aoss=True, ) retriever = docsearch_in_os.as_retriever() chain = RetrievalQA.from_chain_type( llm=llm, chain_type="stuff", retriever=retriever, chain_type_kwargs={"prompt":PROMPT}, ) result = chain.invoke({"query": user_input}) ``` ### Error Message and Stack Trace (if applicable) result = chain.invoke({"query": user_input}) ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ File "/opt/python/langchain/chains/base.py", line 162, in invoke raise e File "/opt/python/langchain/chains/base.py", line 156, in invoke self._call(inputs, run_manager=run_manager) File "/opt/python/langchain/chains/retrieval_qa/base.py", line 141, in _call docs = self._get_docs(question, run_manager=_run_manager) ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ File "/opt/python/langchain/chains/retrieval_qa/base.py", line 221, in _get_docs return self.retriever.get_relevant_documents( ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ File "/opt/python/langchain_core/retrievers.py", line 224, in get_relevant_documents raise e File "/opt/python/langchain_core/retrievers.py", line 217, in get_relevant_documents result = self._get_relevant_documents( ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ File "/opt/python/langchain_core/vectorstores.py", line 654, in _get_relevant_documents docs = self.vectorstore.similarity_search(query, **self.search_kwargs) ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ File 
"/opt/python/langchain_community/vectorstores/opensearch_vector_search.py", line 516, in similarity_search docs_with_scores = self.similarity_search_with_score(query, k, **kwargs) ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ File "/opt/python/langchain_community/vectorstores/opensearch_vector_search.py", line 543, in similarity_search_with_score documents_with_scores = [ ^ File "/opt/python/langchain_community/vectorstores/opensearch_vector_search.py", line 545, in <listcomp> Document( File "/opt/python/langchain_core/documents/base.py", line 22, in __init__ super().__init__(page_content=page_content, **kwargs) File "/opt/python/langchain_core/load/serializable.py", line 107, in __init__ super().__init__(**kwargs) File "/opt/python/pydantic/v1/main.py", line 341, in __init__ raise validation_error pydantic.v1.error_wrappers.ValidationError: 1 validation error for Document metadata value is not a valid dict (type=type_error.dict) 1 validation error for Document metadata value is not a valid dict (type=type_error.dict) 'ValidationError' object is not subscriptable ### Description I am trying to implement RAG using langchain. The above code works perfectly, when the collection I use is created using langchain `from_documents` function. But when I create the OpenSearch collection using the AWS Bedrock console (from the "Create Knowledge base"), the above code fails and throws the error I have shared. ### System Info I am running this on an AWS lambda function, x86_64 architecture.
value is not a valid dict (type=type_error.dict)
https://api.github.com/repos/langchain-ai/langchain/issues/17736/comments
2
2024-02-19T14:32:34Z
2024-06-08T16:10:15Z
https://github.com/langchain-ai/langchain/issues/17736
2,142,507,312
17,736
[ "langchain-ai", "langchain" ]
### Checked other resources - [X] I added a very descriptive title to this issue. - [X] I searched the LangChain documentation with the integrated search. - [X] I used the GitHub search to find a similar question and didn't find it. - [X] I am sure that this is a bug in LangChain rather than my code. ### Example Code I followed this [langchain examples notebook](https://github.com/langchain-ai/langgraph/blob/main/examples/multi_agent/agent_supervisor.ipynb) to the letter and just changed the llm initialization from ``` from langchain_openai import ChatOpenAI llm = ChatOpenAI(model=<model_name>) ``` to ``` from langchain_community.chat_models import AzureChatOpenAI llm = AzureChatOpenAI( model_name=<model_name>, deployment_name=<deployment_name>, temperature=0, verbose=True, api_key=<api_key>, azure_endpoint=<api_base>, ) ``` I get the error when invoking the graph: ``` for s in graph.stream( { "messages": [ HumanMessage(content="Code hello world and print it to the terminal") ] } ): if "__end__" not in s: print(s) print("----") ``` It works fine when using `ChatOpenAI` but returns an error when using `AzureChatOpenAI` ### Error Message and Stack Trace (if applicable) ``` { "name": "NotFoundError", "message": "Error code: 404 - {'error': {'message': 'Unrecognized request argument supplied: functions', 'type': 'invalid_request_error', 'param': None, 'code': None}}", "stack": "--------------------------------------------------------------------------- NotFoundError Traceback (most recent call last) Cell In[10], line 1 ----> 1 for s in graph.stream( 2 { 3 \"messages\": [ 4 HumanMessage(content=\"Code hello world and print it to the terminal\") 5 ] 6 } 7 ): 8 if \"__end__\" not in s: 9 print(s) File ~/exp-venv/lib/python3.10/site-packages/langgraph/pregel/__init__.py:615, in Pregel.transform(self, input, config, output_keys, input_keys, **kwargs) 606 def transform( 607 self, 608 input: Iterator[Union[dict[str, Any], Any]], (...) 
613 **kwargs: Any, 614 ) -> Iterator[Union[dict[str, Any], Any]]: --> 615 for chunk in self._transform_stream_with_config( 616 input, 617 self._transform, 618 config, 619 output_keys=output_keys, 620 input_keys=input_keys, 621 **kwargs, 622 ): 623 yield chunk File ~/exp-venv/lib/python3.10/site-packages/langchain_core/runnables/base.py:1497, in Runnable._transform_stream_with_config(self, input, transformer, config, run_type, **kwargs) 1495 try: 1496 while True: -> 1497 chunk: Output = context.run(next, iterator) # type: ignore 1498 yield chunk 1499 if final_output_supported: File ~/exp-venv/lib/python3.10/site-packages/langgraph/pregel/__init__.py:355, in Pregel._transform(self, input, run_manager, config, input_keys, output_keys, interrupt) 348 done, inflight = concurrent.futures.wait( 349 futures, 350 return_when=concurrent.futures.FIRST_EXCEPTION, 351 timeout=self.step_timeout, 352 ) 354 # interrupt on failure or timeout --> 355 _interrupt_or_proceed(done, inflight, step) 357 # apply writes to channels 358 _apply_writes( 359 checkpoint, channels, pending_writes, config, step + 1 360 ) File ~/exp-venv/lib/python3.10/site-packages/langgraph/pregel/__init__.py:698, in _interrupt_or_proceed(done, inflight, step) 696 inflight.pop().cancel() 697 # raise the exception --> 698 raise exc 699 # TODO this is where retry of an entire step would happen 701 if inflight: 702 # if we got here means we timed out File /usr/lib/python3.10/concurrent/futures/thread.py:58, in _WorkItem.run(self) 55 return 57 try: ---> 58 result = self.fn(*self.args, **self.kwargs) 59 except BaseException as exc: 60 self.future.set_exception(exc) File ~/exp-venv/lib/python3.10/site-packages/langchain_core/runnables/base.py:4064, in RunnableBindingBase.invoke(self, input, config, **kwargs) 4058 def invoke( 4059 self, 4060 input: Input, 4061 config: Optional[RunnableConfig] = None, 4062 **kwargs: Optional[Any], 4063 ) -> Output: -> 4064 return self.bound.invoke( 4065 input, 4066 
self._merge_configs(config), 4067 **{**self.kwargs, **kwargs}, 4068 ) File ~/exp-venv/lib/python3.10/site-packages/langchain_core/runnables/base.py:2053, in RunnableSequence.invoke(self, input, config) 2051 try: 2052 for i, step in enumerate(self.steps): -> 2053 input = step.invoke( 2054 input, 2055 # mark each step as a child run 2056 patch_config( 2057 config, callbacks=run_manager.get_child(f\"seq:step:{i+1}\") 2058 ), 2059 ) 2060 # finish the root run 2061 except BaseException as e: File ~/exp-venv/lib/python3.10/site-packages/langchain_core/runnables/base.py:4064, in RunnableBindingBase.invoke(self, input, config, **kwargs) 4058 def invoke( 4059 self, 4060 input: Input, 4061 config: Optional[RunnableConfig] = None, 4062 **kwargs: Optional[Any], 4063 ) -> Output: -> 4064 return self.bound.invoke( 4065 input, 4066 self._merge_configs(config), 4067 **{**self.kwargs, **kwargs}, 4068 ) File ~/exp-venv/lib/python3.10/site-packages/langchain_core/language_models/chat_models.py:166, in BaseChatModel.invoke(self, input, config, stop, **kwargs) 155 def invoke( 156 self, 157 input: LanguageModelInput, (...) 161 **kwargs: Any, 162 ) -> BaseMessage: 163 config = ensure_config(config) 164 return cast( 165 ChatGeneration, --> 166 self.generate_prompt( 167 [self._convert_input(input)], 168 stop=stop, 169 callbacks=config.get(\"callbacks\"), 170 tags=config.get(\"tags\"), 171 metadata=config.get(\"metadata\"), 172 run_name=config.get(\"run_name\"), 173 **kwargs, 174 ).generations[0][0], 175 ).message File ~/exp-venv/lib/python3.10/site-packages/langchain_core/language_models/chat_models.py:544, in BaseChatModel.generate_prompt(self, prompts, stop, callbacks, **kwargs) 536 def generate_prompt( 537 self, 538 prompts: List[PromptValue], (...) 
541 **kwargs: Any, 542 ) -> LLMResult: 543 prompt_messages = [p.to_messages() for p in prompts] --> 544 return self.generate(prompt_messages, stop=stop, callbacks=callbacks, **kwargs) File ~/exp-venv/lib/python3.10/site-packages/langchain_core/language_models/chat_models.py:408, in BaseChatModel.generate(self, messages, stop, callbacks, tags, metadata, run_name, **kwargs) 406 if run_managers: 407 run_managers[i].on_llm_error(e, response=LLMResult(generations=[])) --> 408 raise e 409 flattened_outputs = [ 410 LLMResult(generations=[res.generations], llm_output=res.llm_output) 411 for res in results 412 ] 413 llm_output = self._combine_llm_outputs([res.llm_output for res in results]) File ~/exp-venv/lib/python3.10/site-packages/langchain_core/language_models/chat_models.py:398, in BaseChatModel.generate(self, messages, stop, callbacks, tags, metadata, run_name, **kwargs) 395 for i, m in enumerate(messages): 396 try: 397 results.append( --> 398 self._generate_with_cache( 399 m, 400 stop=stop, 401 run_manager=run_managers[i] if run_managers else None, 402 **kwargs, 403 ) 404 ) 405 except BaseException as e: 406 if run_managers: File ~/exp-venv/lib/python3.10/site-packages/langchain_core/language_models/chat_models.py:577, in BaseChatModel._generate_with_cache(self, messages, stop, run_manager, **kwargs) 573 raise ValueError( 574 \"Asked to cache, but no cache found at `langchain.cache`.\" 575 ) 576 if new_arg_supported: --> 577 return self._generate( 578 messages, stop=stop, run_manager=run_manager, **kwargs 579 ) 580 else: 581 return self._generate(messages, stop=stop, **kwargs) File ~/exp-venv/lib/python3.10/site-packages/langchain_community/chat_models/openai.py:439, in ChatOpenAI._generate(self, messages, stop, run_manager, stream, **kwargs) 433 message_dicts, params = self._create_message_dicts(messages, stop) 434 params = { 435 **params, 436 **({\"stream\": stream} if stream is not None else {}), 437 **kwargs, 438 } --> 439 response = self.completion_with_retry( 
440 messages=message_dicts, run_manager=run_manager, **params 441 ) 442 return self._create_chat_result(response) File ~/exp-venv/lib/python3.10/site-packages/langchain_community/chat_models/openai.py:356, in ChatOpenAI.completion_with_retry(self, run_manager, **kwargs) 354 \"\"\"Use tenacity to retry the completion call.\"\"\" 355 if is_openai_v1(): --> 356 return self.client.create(**kwargs) 358 retry_decorator = _create_retry_decorator(self, run_manager=run_manager) 360 @retry_decorator 361 def _completion_with_retry(**kwargs: Any) -> Any: File ~/exp-venv/lib/python3.10/site-packages/openai/_utils/_utils.py:275, in required_args.<locals>.inner.<locals>.wrapper(*args, **kwargs) 273 msg = f\"Missing required argument: {quote(missing[0])}\" 274 raise TypeError(msg) --> 275 return func(*args, **kwargs) File ~/exp-venv/lib/python3.10/site-packages/openai/resources/chat/completions.py:663, in Completions.create(self, messages, model, frequency_penalty, function_call, functions, logit_bias, logprobs, max_tokens, n, presence_penalty, response_format, seed, stop, stream, temperature, tool_choice, tools, top_logprobs, top_p, user, extra_headers, extra_query, extra_body, timeout) 611 @required_args([\"messages\", \"model\"], [\"messages\", \"model\", \"stream\"]) 612 def create( 613 self, (...) 
661 timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, 662 ) -> ChatCompletion | Stream[ChatCompletionChunk]: --> 663 return self._post( 664 \"/chat/completions\", 665 body=maybe_transform( 666 { 667 \"messages\": messages, 668 \"model\": model, 669 \"frequency_penalty\": frequency_penalty, 670 \"function_call\": function_call, 671 \"functions\": functions, 672 \"logit_bias\": logit_bias, 673 \"logprobs\": logprobs, 674 \"max_tokens\": max_tokens, 675 \"n\": n, 676 \"presence_penalty\": presence_penalty, 677 \"response_format\": response_format, 678 \"seed\": seed, 679 \"stop\": stop, 680 \"stream\": stream, 681 \"temperature\": temperature, 682 \"tool_choice\": tool_choice, 683 \"tools\": tools, 684 \"top_logprobs\": top_logprobs, 685 \"top_p\": top_p, 686 \"user\": user, 687 }, 688 completion_create_params.CompletionCreateParams, 689 ), 690 options=make_request_options( 691 extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout 692 ), 693 cast_to=ChatCompletion, 694 stream=stream or False, 695 stream_cls=Stream[ChatCompletionChunk], 696 ) File ~/exp-venv/lib/python3.10/site-packages/openai/_base_client.py:1200, in SyncAPIClient.post(self, path, cast_to, body, options, files, stream, stream_cls) 1186 def post( 1187 self, 1188 path: str, (...) 1195 stream_cls: type[_StreamT] | None = None, 1196 ) -> ResponseT | _StreamT: 1197 opts = FinalRequestOptions.construct( 1198 method=\"post\", url=path, json_data=body, files=to_httpx_files(files), **options 1199 ) -> 1200 return cast(ResponseT, self.request(cast_to, opts, stream=stream, stream_cls=stream_cls)) File ~/exp-venv/lib/python3.10/site-packages/openai/_base_client.py:889, in SyncAPIClient.request(self, cast_to, options, remaining_retries, stream, stream_cls) 880 def request( 881 self, 882 cast_to: Type[ResponseT], (...) 
887 stream_cls: type[_StreamT] | None = None, 888 ) -> ResponseT | _StreamT: --> 889 return self._request( 890 cast_to=cast_to, 891 options=options, 892 stream=stream, 893 stream_cls=stream_cls, 894 remaining_retries=remaining_retries, 895 ) File ~/exp-venv/lib/python3.10/site-packages/openai/_base_client.py:980, in SyncAPIClient._request(self, cast_to, options, remaining_retries, stream, stream_cls) 977 err.response.read() 979 log.debug(\"Re-raising status error\") --> 980 raise self._make_status_error_from_response(err.response) from None 982 return self._process_response( 983 cast_to=cast_to, 984 options=options, (...) 987 stream_cls=stream_cls, 988 ) NotFoundError: Error code: 404 - {'error': {'message': 'Unrecognized request argument supplied: functions', 'type': 'invalid_request_error', 'param': None, 'code': None}}" } ``` ### Description I am trying to use langchain agents (specifically the langgraph implementation in this example notebook: https://github.com/langchain-ai/langgraph/blob/main/examples/multi_agent/agent_supervisor.ipynb. The code is working fine when using `ChatOpenAI` but it fails when using `AzureChatOpenAI` ### System Info ``` langchain==0.1.6 langchain-community==0.0.19 langchain-core==0.1.22 langchain-experimental==0.0.50 langchain-openai==0.0.5 langchainhub==0.1.14 ```
Agents returning an error when using AzureChatOpenAI
https://api.github.com/repos/langchain-ai/langchain/issues/17735/comments
1
2024-02-19T13:56:33Z
2024-05-31T23:46:31Z
https://github.com/langchain-ai/langchain/issues/17735
2,142,433,760
17,735
[ "langchain-ai", "langchain" ]
### Checklist - [X] I added a very descriptive title to this issue. - [X] I included a link to the documentation page I am referring to (if applicable). ### Issue with current documentation: Let's say I have a spreadsheet with 30 rows and need to find specific answers for each one. Typically, the RetrievalQAChain method relies on a retriever to select the top-k results, which can overlook details in some rows. I'm looking to circumvent the retriever step by directly embedding the data, saving it into a vector store, and then extracting answers using the RetrievalQAChain. This approach aims to replicate the benefits of the RAG (Retrieval-Augmented Generation) model without missing out on any information due to the limitations of the retriever. How can this be achieved? ### Idea or request for content: Below is the code ``` # Iterate over the sorted file paths and create a loader for each file loaders = [CSVLoader(file_path=file_path, metadata_columns=['cricket'], encoding="utf-8") for file_path in csv_files_sorted] # Optional: If you need to combine the data from all loaders documents = [] for loader in loaders: data = loader.load() documents.extend(data) # Instantiate the OpenAIEmbeddings class openai = OpenAIEmbeddings() # Create a FAISS vector store from the embeddings vectorstore = FAISS.from_documents(documents, openai) retriever = vector_store.as_retriever(search_kwargs={"k": 5}) # Define your prompt template prompt_template = """Use the following pieces of information to answer the user's question. If you don't know the answer, just say that you don't know, don't try to make up an answer. Context: {context} Question: {question} Only return the helpful answer below and nothing else. If no context, then no answer. 
Helpful Answer:""" # Answer a question related to 'Cricket' category = 'engie' qa_chain = RetrievalQA.from_chain_type(llm=OpenAI(temperature=0.2), chain_type="stuff", retriever=retriever, return_source_documents=True) # Format the prompt using the template context = "" question = "what for it strives?" formatted_prompt = prompt_template.format(context=context, question=question) # Pass the formatted prompt to the RetrievalQA function llm_response = qa_chain(formatted_prompt) process_llm_response(llm_response) ``` Let's say I have a spreadsheet with 30 rows and need to find specific answers for each one. Typically, the RetrievalQAChain method relies on a retriever to select the top-k results, which can overlook details in some rows. I'm looking to circumvent the retriever step by directly embedding the data, saving it into a vector store, and then extracting answers using the RetrievalQAChain. This approach aims to replicate the benefits of the RAG (Retrieval-Augmented Generation) model without missing out on any information due to the limitations of the retriever. can you help me with the code?
how to achieve all the answers for all the rows present in the excel using LLM?
https://api.github.com/repos/langchain-ai/langchain/issues/17731/comments
1
2024-02-19T12:45:12Z
2024-02-20T02:30:12Z
https://github.com/langchain-ai/langchain/issues/17731
2,142,291,204
17,731
[ "langchain-ai", "langchain" ]
### Checklist - [X] I added a very descriptive title to this issue. - [X] I included a link to the documentation page I am referring to (if applicable). ### Issue with current documentation: Let's say I have an Excel file containing 30 rows, and I need to find answers for each row individually. When using the RetrievalQAChain approach, the retriever typically selects only the top-k results, potentially missing information from other rows. To address this, I'd like to bypass the retriever by uploading the Excel data into a vector store and directly query the Large Language Model (LLM) to obtain answers for each of the 30 rows. How can this be accomplished? ### Idea or request for content: Below's the code ``` # Iterate over the sorted file paths and create a loader for each file loaders = [CSVLoader(file_path=file_path, metadata_columns=['cricket'], encoding="utf-8") for file_path in csv_files_sorted] # Optional: If you need to combine the data from all loaders documents = [] for loader in loaders: data = loader.load() documents.extend(data) # Instantiate the OpenAIEmbeddings class openai = OpenAIEmbeddings() # Create a FAISS vector store from the embeddings vectorstore = FAISS.from_documents(documents, openai) retriever = vector_store.as_retriever(search_kwargs={"k": 5}) # Define your prompt template prompt_template = """Use the following pieces of information to answer the user's question. If you don't know the answer, just say that you don't know, don't try to make up an answer. Context: {context} Question: {question} Only return the helpful answer below and nothing else. If no context, then no answer. Helpful Answer:""" # Answer a question related to 'Cricket' category = 'engie' qa_chain = RetrievalQA.from_chain_type(llm=OpenAI(temperature=0.2), chain_type="stuff", retriever=retriever, return_source_documents=True) # Format the prompt using the template context = "" question = "what for it strives?" 
formatted_prompt = prompt_template.format(context=context, question=question) # Pass the formatted prompt to the RetrievalQA function llm_response = qa_chain(formatted_prompt) process_llm_response(llm_response) ``` Let's say I have an Excel file containing 30 rows, and I need to find answers for each row individually. When using the RetrievalQAChain approach, the retriever typically selects only the top-k results, potentially missing information from other rows. To address this, I'd like to bypass the retriever by uploading the Excel data into a vector store and directly query the Large Language Model (LLM) to obtain answers for each of the 30 rows. Can you help me with the code?
how to get the answers for all the rows present in the excel using LLM?
https://api.github.com/repos/langchain-ai/langchain/issues/17730/comments
1
2024-02-19T12:34:13Z
2024-02-20T02:30:11Z
https://github.com/langchain-ai/langchain/issues/17730
2,142,270,974
17,730
[ "langchain-ai", "langchain" ]
### Checklist - [X] I added a very descriptive title to this issue. - [X] I included a link to the documentation page I am referring to (if applicable). ### Issue with current documentation: Let's say I have an Excel file containing 30 rows, and I need to find answers for each row individually. When using the RetrievalQAChain approach, the retriever typically selects only the top-k results, potentially missing information from other rows. To address this, I'd like to bypass the retriever by uploading the Excel data into a vector store and directly query the Large Language Model (LLM) to obtain answers for each of the 30 rows. How can this be accomplished? ### Idea or request for content: Below is the code which i used will upload csv data and get embeddings for data and store them into vectordb. Now, how to use QAChain to get the answer for every row? ``` # Iterate over the sorted file paths and create a loader for each file loaders = [CSVLoader(file_path=file_path, metadata_columns=['cricket'], encoding="utf-8") for file_path in csv_files_sorted] # Optional: If you need to combine the data from all loaders documents = [] for loader in loaders: data = loader.load() documents.extend(data) # Instantiate the OpenAIEmbeddings class openai = OpenAIEmbeddings() # Create a FAISS vector store from the embeddings vectorstore = FAISS.from_documents(documents, openai) ``` Let's say I have an Excel file containing 30 rows, and I need to find answers for each row individually. When using the RetrievalQAChain approach, the retriever typically selects only the top-k results, potentially missing information from other rows. To address this, I'd like to bypass the retriever by uploading the Excel data into a vector store and directly query the Large Language Model (LLM) to obtain answers for each of the 30 rows. How can this be accomplished?
how to get answer for the question without using retriever?
https://api.github.com/repos/langchain-ai/langchain/issues/17729/comments
3
2024-02-19T12:28:39Z
2024-02-20T02:30:11Z
https://github.com/langchain-ai/langchain/issues/17729
2,142,260,464
17,729
[ "langchain-ai", "langchain" ]
### Checked other resources - [X] I added a very descriptive title to this issue. - [X] I searched the LangChain documentation with the integrated search. - [X] I used the GitHub search to find a similar question and didn't find it. - [X] I am sure that this is a bug in LangChain rather than my code. ### Example Code python ... ``` logging.info("Loading documents from Azure Blob Storage.") docs = load_documents_from_blob() logging.info("Splitting the loaded documents into chunks.") splits = partition_text_into_chunks(docs) embeddings = AzureOpenAIEmbeddings( azure_deployment=os.environ["EMBEDDING_MODEL_DEPLOYMENT"], openai_api_version="2023-05-15", ) logging.info("Connecting to Azure Cognitive Search...") vector_store: AzureSearch = AzureSearch( azure_search_endpoint=azure_search_endpoint, azure_search_key=azure_search_key, index_name=index_name, embedding_function=embeddings.embed_query, ) logging.info( "Indexing the split documents into Azure Cognitive Search for documents." ) vector_store.add_documents(documents=splits) ``` ### Error Message and Stack Trace (if applicable) Error: ``` INFO:root:Loading documents from Azure Blob Storage. INFO:root:Preparing to load data from Azure Blob Storage. INFO:pikepdf._core:pikepdf C++ to Python logger bridge initialized INFO:root:Successfully loaded 39 documents from Azure Blob Storage. INFO:root:Splitting the loaded documents into chunks. INFO:root:Initializing text splitter with chunk size of 1000 and overlap of 100 characters. 
WARNING:langchain.text_splitter:Created a chunk of size 1370, which is longer than the specified 1000 WARNING:langchain.text_splitter:Created a chunk of size 1235, which is longer than the specified 1000 WARNING:langchain.text_splitter:Created a chunk of size 2133, which is longer than the specified 1000 WARNING:langchain.text_splitter:Created a chunk of size 6548, which is longer than the specified 1000 WARNING:langchain.text_splitter:Created a chunk of size 1901, which is longer than the specified 1000 WARNING:langchain.text_splitter:Created a chunk of size 5381, which is longer than the specified 1000 WARNING:langchain.text_splitter:Created a chunk of size 2180, which is longer than the specified 1000 WARNING:langchain.text_splitter:Created a chunk of size 1978, which is longer than the specified 1000 WARNING:langchain.text_splitter:Created a chunk of size 3180, which is longer than the specified 1000 WARNING:langchain.text_splitter:Created a chunk of size 3180, which is longer than the specified 1000 WARNING:langchain.text_splitter:Created a chunk of size 3180, which is longer than the specified 1000 WARNING:langchain.text_splitter:Created a chunk of size 6581, which is longer than the specified 1000 WARNING:langchain.text_splitter:Created a chunk of size 2482, which is longer than the specified 1000 WARNING:langchain.text_splitter:Created a chunk of size 1266, which is longer than the specified 1000 WARNING:langchain.text_splitter:Created a chunk of size 1266, which is longer than the specified 1000 WARNING:langchain.text_splitter:Created a chunk of size 1424, which is longer than the specified 1000 WARNING:langchain.text_splitter:Created a chunk of size 1353, which is longer than the specified 1000 WARNING:langchain.text_splitter:Created a chunk of size 1264, which is longer than the specified 1000 WARNING:langchain.text_splitter:Created a chunk of size 1782, which is longer than the specified 1000 WARNING:langchain.text_splitter:Created a chunk of size 1285, 
which is longer than the specified 1000 WARNING:langchain.text_splitter:Created a chunk of size 1317, which is longer than the specified 1000 WARNING:langchain.text_splitter:Created a chunk of size 6141, which is longer than the specified 1000 WARNING:langchain.text_splitter:Created a chunk of size 1719, which is longer than the specified 1000 WARNING:langchain.text_splitter:Created a chunk of size 6119, which is longer than the specified 1000 WARNING:langchain.text_splitter:Created a chunk of size 1025, which is longer than the specified 1000 WARNING:langchain.text_splitter:Created a chunk of size 3017, which is longer than the specified 1000 WARNING:langchain.text_splitter:Created a chunk of size 1080, which is longer than the specified 1000 WARNING:langchain.text_splitter:Created a chunk of size 1140, which is longer than the specified 1000 WARNING:langchain.text_splitter:Created a chunk of size 1365, which is longer than the specified 1000 WARNING:langchain.text_splitter:Created a chunk of size 1166, which is longer than the specified 1000 WARNING:langchain.text_splitter:Created a chunk of size 1006, which is longer than the specified 1000 INFO:root:Successfully split 39 documents. INFO:root:Initializing the Azure Cognitive Search model for documents. INFO:root:Initializing embeddings... INFO:root:Connecting to Azure Cognitive Search... 
INFO:httpx:HTTP Request: POST https://mobiz-gpt-4-deployment.openai.azure.com//openai/deployments/ada-002/embeddings?api-version=2023-05-15 "HTTP/1.1 200 OK" Traceback (most recent call last): File "/home/ayaz/Desktop/dev/env_sqllatest/lib/python3.10/site-packages/langchain_community/vectorstores/azuresearch.py", line 111, in _get_search_client index_client.get_index(name=index_name) File "/home/user/Desktop/dev/env_sqllatest/lib/python3.10/site-packages/azure/core/tracing/decorator.py", line 78, in wrapper_use_tracer return func(*args, **kwargs) File "/home/user/Desktop/dev/env_sqllatest/lib/python3.10/site-packages/azure/search/documents/indexes/_search_index_client.py", line 144, in get_index result = self._client.indexes.get(name, **kwargs) File "/home/user/Desktop/dev/env_sqllatest/lib/python3.10/site-packages/azure/core/tracing/decorator.py", line 78, in wrapper_use_tracer return func(*args, **kwargs) File "/home/user/Desktop/dev/env_sqllatest/lib/python3.10/site-packages/azure/search/documents/indexes/_generated/operations/_indexes_operations.py", line 864, in get map_error(status_code=response.status_code, response=response, error_map=error_map) File "/home/user/Desktop/dev/env_sqllatest/lib/python3.10/site-packages/azure/core/exceptions.py", line 164, in map_error raise error azure.core.exceptions.ResourceNotFoundError: () No index with the name 'apollo-knowledge-base' was found in the service 'knowledge-bot-basic-15'. Code: Message: No index with the name 'apollo-knowledge-base' was found in the service 'knowledge-bot-basic-15'. 
During handling of the above exception, another exception occurred: Traceback (most recent call last): File "/home/user/Desktop/dev/ALL DEV CODE/hybrid-sql-agent/backend/main.py", line 3, in <module> from api.routes import hybrid_agent File "/home/user/Desktop/dev/ALL DEV CODE/hybrid-sql-agent/backend/api/routes/hybrid_agent.py", line 7, in <module> from ai.main import apollo_conversation_chain File "/home/user/Desktop/dev/ALL DEV CODE/hybrid-sql-agent/backend/ai/main.py", line 28, in <module> acs_documents, acs_fewshots = process_and_index_data_to_azure() File "/home/user/Desktop/dev/ALL DEV CODE/hybrid-sql-agent/backend/ai/documents_processing.py", line 486, in process_and_index_data_to_azure acs_documents = configure_azure_search_for_documents() File "/home/user/Desktop/dev/ALL DEV CODE/hybrid-sql-agent/backend/ai/documents_processing.py", line 406, in configure_azure_search_for_documents vector_store: AzureSearch = AzureSearch( File "/home/user/Desktop/dev/env_sqllatest/lib/python3.10/site-packages/langchain_community/vectorstores/azuresearch.py", line 268, in __init__ self.client = _get_search_client( File "/home/user/Desktop/dev/env_sqllatest/lib/python3.10/site-packages/langchain_community/vectorstores/azuresearch.py", line 144, in _get_search_client vector_search = VectorSearch( NameError: name 'VectorSearch' is not defined. Did you mean: 'vector_search'? 
``` ### Description My env spec: i have used latested version of langchain and azure's SDK everything is working but when i try to create index it give me this error ``` langchain==0.1.7 langchain-community==0.0.20 langchain-core==0.1.23 langchain-openai==0.0.6 azure-common==1.1.28 azure-core==1.30.0 azure-identity==1.15.0 azure-search-documents==11.4.0 azure-storage-blob==12.19.0 fastapi==0.109.2 uvicorn==0.27.1 python-dotenv==1.0.1 pandas==2.2.0 unstructured==0.12.4 python-docx==1.1.0 unstructured[pdf] ``` ### System Info `System Information ------------------ > OS: Linux > OS Version: #18~22.04.1-Ubuntu SMP PREEMPT_DYNAMIC Wed Feb 7 11:40:03 UTC 2 > Python Version: 3.10.12 (main, Nov 20 2023, 15:14:05) [GCC 11.4.0] Package Information ------------------- > langchain_core: 0.1.23 > langchain: 0.1.7 > langchain_community: 0.0.20 > langsmith: 0.0.87 > langchain_openai: 0.0.6 Packages not installed (Not Necessarily a Problem) -------------------------------------------------- The following packages were not found: > langgraph > langserve ` **pip freeze output:** `aiohttp==3.9.3 aiosignal==1.3.1 annotated-types==0.6.0 antlr4-python3-runtime==4.9.3 anyio==4.3.0 async-timeout==4.0.3 attrs==23.2.0 azure-common==1.1.28 azure-core==1.30.0 azure-identity==1.15.0 azure-search-documents==11.4.0 azure-storage-blob==12.19.0 backoff==2.2.1 beautifulsoup4==4.12.3 certifi==2024.2.2 cffi==1.16.0 chardet==5.2.0 charset-normalizer==3.3.2 click==8.1.7 coloredlogs==15.0.1 contourpy==1.2.0 cryptography==42.0.3 cycler==0.12.1 dataclasses-json==0.6.4 dataclasses-json-speakeasy==0.5.11 Deprecated==1.2.14 distro==1.9.0 effdet==0.4.1 emoji==2.10.1 exceptiongroup==1.2.0 fastapi==0.109.2 filelock==3.13.1 filetype==1.2.0 flatbuffers==23.5.26 fonttools==4.49.0 frozenlist==1.4.1 fsspec==2024.2.0 greenlet==3.0.3 h11==0.14.0 httpcore==1.0.3 httpx==0.26.0 huggingface-hub==0.20.3 humanfriendly==10.0 idna==3.6 iopath==0.1.10 isodate==0.6.1 Jinja2==3.1.3 joblib==1.3.2 jsonpatch==1.33 
jsonpath-python==1.0.6 jsonpointer==2.4 kiwisolver==1.4.5 langchain==0.1.7 langchain-community==0.0.20 langchain-core==0.1.23 langchain-openai==0.0.6 langdetect==1.0.9 langsmith==0.0.87 layoutparser==0.3.4 lxml==5.1.0 MarkupSafe==2.1.5 marshmallow==3.20.2 matplotlib==3.8.3 mpmath==1.3.0 msal==1.26.0 msal-extensions==1.1.0 multidict==6.0.5 mypy-extensions==1.0.0 networkx==3.2.1 nltk==3.8.1 numpy==1.26.4 nvidia-cublas-cu12==12.1.3.1 nvidia-cuda-cupti-cu12==12.1.105 nvidia-cuda-nvrtc-cu12==12.1.105 nvidia-cuda-runtime-cu12==12.1.105 nvidia-cudnn-cu12==8.9.2.26 nvidia-cufft-cu12==11.0.2.54 nvidia-curand-cu12==10.3.2.106 nvidia-cusolver-cu12==11.4.5.107 nvidia-cusparse-cu12==12.1.0.106 nvidia-nccl-cu12==2.19.3 nvidia-nvjitlink-cu12==12.3.101 nvidia-nvtx-cu12==12.1.105 omegaconf==2.3.0 onnx==1.15.0 onnxruntime==1.15.1 openai==1.12.0 opencv-python==4.9.0.80 packaging==23.2 pandas==2.2.0 pdf2image==1.17.0 pdfminer.six==20221105 pdfplumber==0.10.4 pikepdf==8.13.0 pillow==10.2.0 pillow_heif==0.15.0 portalocker==2.8.2 protobuf==4.25.3 pycocotools==2.0.7 pycparser==2.21 pydantic==2.6.1 pydantic_core==2.16.2 PyJWT==2.8.0 pyparsing==3.1.1 pypdf==4.0.2 pypdfium2==4.27.0 pytesseract==0.3.10 python-dateutil==2.8.2 python-docx==1.1.0 python-dotenv==1.0.1 python-iso639==2024.2.7 python-magic==0.4.27 python-multipart==0.0.9 pytz==2024.1 PyYAML==6.0.1 rapidfuzz==3.6.1 regex==2023.12.25 requests==2.31.0 safetensors==0.4.2 scipy==1.12.0 six==1.16.0 sniffio==1.3.0 soupsieve==2.5 SQLAlchemy==2.0.27 starlette==0.36.3 sympy==1.12 tabulate==0.9.0 tenacity==8.2.3 tiktoken==0.6.0 timm==0.9.12 tokenizers==0.15.2 torch==2.2.0 torchvision==0.17.0 tqdm==4.66.2 transformers==4.37.2 triton==2.2.0 typing-inspect==0.9.0 typing_extensions==4.9.0 tzdata==2024.1 unstructured==0.12.4 unstructured-client==0.18.0 unstructured-inference==0.7.23 unstructured.pytesseract==0.3.12 urllib3==2.2.1 uvicorn==0.27.1 wrapt==1.16.0 yarl==1.9.4 `
AzureSearch giving error during creation of index (NameError: name 'VectorSearch' is not defined. Did you mean: 'vector_search'? )
https://api.github.com/repos/langchain-ai/langchain/issues/17725/comments
3
2024-02-19T11:03:00Z
2024-05-31T23:49:25Z
https://github.com/langchain-ai/langchain/issues/17725
2,142,104,102
17,725
[ "langchain-ai", "langchain" ]
### Checked other resources - [X] I added a very descriptive title to this issue. - [X] I searched the LangChain documentation with the integrated search. - [X] I used the GitHub search to find a similar question and didn't find it. - [X] I am sure that this is a bug in LangChain rather than my code. ### Example Code ``` memory = ConversationBufferWindowMemory( memory_key="memory", return_messages=True, k=config.INTERACTIONS_IN_MEMORY, chat_memory=PostgresChatMessageHistory( connection_string=config.CONNECTION_STRING, session_id=request.args.get("session"), ), ) prompt = ChatPromptTemplate.from_template(GENERAL_SYSTEM_PROMPT) chain = ( { "context": get_collection( request.args.get("collection"), embeddings ).as_retriever(), "input": RunnablePassthrough(), "history": RunnableLambda(memory.load_memory_variables) } | prompt | llm | StrOutputParser() ) ai_response = await chain.ainvoke(request.form.get("input")) ``` ### Error Message and Stack Trace (if applicable) _No response_ ### Description Memory is not referenced when invoking chain. The chain should be able to reference previous messages. 
### System Info aiofiles==23.2.1 aiohttp==3.9.2 aiosignal==1.3.1 aiosqlite==0.17.0 annotated-types==0.6.0 anyio==4.2.0 async-timeout==4.0.3 asyncpg==0.29.0 attrs==23.2.0 black==24.1.1 certifi==2023.11.17 charset-normalizer==3.3.2 click==8.1.7 colorama==0.4.6 dataclasses-json==0.6.3 distro==1.9.0 exceptiongroup==1.2.0 frozenlist==1.4.1 greenlet==3.0.3 h11==0.14.0 html5tagger==1.3.0 httpcore==1.0.2 httptools==0.6.1 httpx==0.26.0 idna==3.6 iso8601==1.1.0 jsonpatch==1.33 jsonpointer==2.4 langchain==0.1.4 langchain-community==0.0.16 langchain-core==0.1.17 langchain-openai==0.0.5 langsmith==0.0.84 marshmallow==3.20.2 multidict==6.0.4 mypy-extensions==1.0.0 numpy==1.26.3 openai==1.10.0 packaging==23.2 pathspec==0.12.1 pgvector==0.2.4 platformdirs==4.1.0 psycopg==3.1.18 psycopg-binary==3.1.18 psycopg-pool==3.2.1 psycopg2-binary==2.9.9 pydantic==2.6.0 pydantic_core==2.16.1 pypdf==4.0.1 pypika-tortoise==0.1.6 pytz==2023.4 PyYAML==6.0.1 regex==2023.12.25 requests==2.31.0 sanic==23.12.1 sanic-routing==23.12.0 sniffio==1.3.0 SQLAlchemy==2.0.25 tenacity==8.2.3 tiktoken==0.5.2 tomli==2.0.1 tortoise-orm==0.20.0 tqdm==4.66.1 tracerite==1.1.1 typing-inspect==0.9.0 typing_extensions==4.9.0 tzdata==2024.1 urllib3==2.1.0 websockets==12.0 yarl==1.9.4
Having trouble implementing memory.
https://api.github.com/repos/langchain-ai/langchain/issues/17719/comments
5
2024-02-19T09:03:23Z
2024-02-19T10:12:10Z
https://github.com/langchain-ai/langchain/issues/17719
2,141,861,804
17,719
[ "langchain-ai", "langchain" ]
### Checked other resources - [X] I added a very descriptive title to this issue. - [X] I searched the LangChain documentation with the integrated search. - [X] I used the GitHub search to find a similar question and didn't find it. - [X] I am sure that this is a bug in LangChain rather than my code. ### Example Code ``` from langserve import RemoteRunnable database_answer = RemoteRunnable("http://localhost:2031/database_answer/") database_answer.invoke({"input": "nba2k23 email address"}, {"configurable": {"session_id": "6666"}}) # get return {'input': 'nba2k23 email address', 'answer': 'nba2k23 email address is nba2kmyteammobilesupport@2k.com。'} ``` behind the RemoteRunnable, the code just like blow, so i will not show up ``` local_embedding = HuggingFaceEmbeddings(model_name="/root/autodl-tmp/bge-large-zh-v1.5", multi_process=True) local_vdb = FAISS.load_local("/xg/packages/database/vector", local_embedding, "default") llm = ChatOpenAI( api_key="EMPTY", base_url="http://127.0.0.1:8000/v1", temperature=0, max_tokens=600, model="qwen-14b-chat" ) prompt_1 = ChatPromptTemplate.from_messages( [ ("system", "you are a helpful assiant"), MessagesPlaceholder(variable_name="history"), ("human", "请根据数据内容作答,若问题与数据无关,则自行作答。数据:{context} 问题:{input}") ] ) retriever = local_vdb.as_retriever( search_type="similarity_score_threshold", search_kwargs={"k": 3, 'score_threshold': 0.3} ) database_answer = RunnableWithMessageHistory( create_retrieval_chain(retriever, create_stuff_documents_chain(llm, prompt_1)), RedisChatMessageHistory, input_messages_key="input", history_messages_key="history", output_messages_key="answer" ) database_answer.invoke({"input": "nba2k23 email address"}, {"configurable": {"session_id": "asdasd"}}) # get return {'input': 'nba2k23 email address', 'history': [], 'context': [Document(page_content='# 【 NBA 2K23 】游戏官方联系方式是什么\n参考资料( https://too.st/80p )\n欲联系客服,请发邮件至 nba2kmyteammobilesupport@2k.com。', metadata={'一级标题': '【 NBA 2K23 】游戏官方联系方式是什么'}), 
Document(page_content='# 【 NBA 2K23 】注册教程( 怎么注册 )\n参考资料( https://too.st/80i )\nGoogle( 谷歌账号 ):登录【 谷歌官网 】> 点击【 注册谷歌账号 】> 输入所要求填写的信息( 注册时会有填空框 )> 按照指引操作即可。Xbox( 微软账号 ):百度搜索 "Xbox 官网" 进入官网( 港服网页后缀带 "HK",国服则带 "CN" )> 按照提示【 创建账户 】即可。PS( 索尼 PlayStation 账号 ):百度搜索 "PlayStation 官网" 进入官网( 港服网页后缀带 "HK" )> 按照提示【 创建账户 】即可。', metadata={'一级标题': '【 NBA 2K23 】注册教程( 怎么注册 )'}), Document(page_content='# 【 NBA 2K23 】登不上怎么办( 怎么登录 )\n参考资料( https://too.st/80j )\n可能的原因:Xbox、PS、Google 平台服务器问题、自身网络环境不稳定、未安装 "谷歌套件"、使用【 Xbox账号 】 登录时未进行验证。方法一:使用【 网络加速器 】,如 biubiu 加速器 > 加速【 NBA 2K MyTEAM 】> 重启游戏 > 再次登录( 可多次尝试 )。方法二;在光环助手 APP 下载【 谷歌安装器 】,启动安装器安装谷歌套件并重启游戏。方法三;切换登录账号,如【 Google 账号 】无法登录,则换成【 Xbox 账号】或【 PS 账号 】再登录试试。方法四:WIFI 换成数据网络或数据网络换成 WIFI 之后再重启游戏( 可多次尝试 ),不行可再尝试切换加速游戏或节点。方法五:换个时间段再试( 如早上时间段无法登录,则等到下午或晚上再试 )。', metadata={'一级标题': '【 NBA 2K23 】登不上怎么办( 怎么登录 )'})], 'answer': 'nba2k23 email address is nba2kmyteammobilesupport@2k.com。'} ``` ### Error Message and Stack Trace (if applicable) no error, just not my expect ### Description use RemoteRunnable invoke & local invoke, the return information not the same amount ### System Info python 3.9.18 ubuntu 20.04
use RemoteRunnable invoke & local invoke, the return information not the same amount
https://api.github.com/repos/langchain-ai/langchain/issues/17703/comments
12
2024-02-18T15:42:58Z
2024-04-24T10:15:20Z
https://github.com/langchain-ai/langchain/issues/17703
2,141,049,422
17,703
[ "langchain-ai", "langchain" ]
### Checked other resources - [X] I added a very descriptive title to this issue. - [X] I searched the LangChain documentation with the integrated search. - [X] I used the GitHub search to find a similar question and didn't find it. - [X] I am sure that this is a bug in LangChain rather than my code. ### Example Code ``` # Self file import os import sys current_dir = os.path.dirname(os.path.abspath(__file__)) # 获取当前文件的绝对路径 parent_dir = os.path.dirname(current_dir) # 获取父目录的路径 sys.path.append(parent_dir) # 把父目录添加到 sys.path from packages.core.api_invoke import return_mode_choose, base_answer, database_answer, database_answer_nolimit, clean_history, better_query # Basic from fastapi import FastAPI from fastapi.responses import RedirectResponse from langserve import add_routes app = FastAPI( title="XiaoGuangLLM", version="1.2.0" ) add_routes( app, return_mode_choose, path="/return_mode_choose", disabled_endpoints=["playground"] ) add_routes( app, base_answer, path="/base_answer", disabled_endpoints=["playground"] ) add_routes( app, database_answer, path="/database_answer", disabled_endpoints=["playground"] ) add_routes( app, database_answer_nolimit, path="/database_answer_nolimit", disabled_endpoints=["playground"] ) add_routes( app, clean_history, path="/clean_history", disabled_endpoints=["playground"] ) add_routes( app, better_query, path="/better_query", disabled_endpoints=["playground"] ) @app.get("/") async def redirect_root_to_docs(): return RedirectResponse("/docs") if __name__ == "__main__": import uvicorn uvicorn.run(app, host="0.0.0.0", port=8000) ``` ### Error Message and Stack Trace (if applicable) ``` INFO: 123.185.63.166:0 - "POST /base_answer/invoke HTTP/1.1" 500 Internal Server Error ERROR: Exception in ASGI application RuntimeError: super(): __class__ cell not found The above exception was the direct cause of the following exception: Traceback (most recent call last): File 
"/root/miniconda3/envs/xg/lib/python3.9/site-packages/uvicorn/protocols/http/httptools_impl.py", line 426, in run_asgi result = await app( # type: ignore[func-returns-value] File "/root/miniconda3/envs/xg/lib/python3.9/site-packages/uvicorn/middleware/proxy_headers.py", line 84, in __call__ return await self.app(scope, receive, send) File "/root/miniconda3/envs/xg/lib/python3.9/site-packages/fastapi/applications.py", line 1054, in __call__ await super().__call__(scope, receive, send) File "/root/miniconda3/envs/xg/lib/python3.9/site-packages/starlette/applications.py", line 123, in __call__ await self.middleware_stack(scope, receive, send) File "/root/miniconda3/envs/xg/lib/python3.9/site-packages/starlette/middleware/errors.py", line 186, in __call__ raise exc File "/root/miniconda3/envs/xg/lib/python3.9/site-packages/starlette/middleware/errors.py", line 164, in __call__ await self.app(scope, receive, _send) File "/root/miniconda3/envs/xg/lib/python3.9/site-packages/starlette/middleware/exceptions.py", line 62, in __call__ await wrap_app_handling_exceptions(self.app, conn)(scope, receive, send) File "/root/miniconda3/envs/xg/lib/python3.9/site-packages/starlette/_exception_handler.py", line 64, in wrapped_app raise exc File "/root/miniconda3/envs/xg/lib/python3.9/site-packages/starlette/_exception_handler.py", line 53, in wrapped_app await app(scope, receive, sender) File "/root/miniconda3/envs/xg/lib/python3.9/site-packages/starlette/routing.py", line 758, in __call__ await self.middleware_stack(scope, receive, send) File "/root/miniconda3/envs/xg/lib/python3.9/site-packages/starlette/routing.py", line 778, in app await route.handle(scope, receive, send) File "/root/miniconda3/envs/xg/lib/python3.9/site-packages/starlette/routing.py", line 299, in handle await self.app(scope, receive, send) File "/root/miniconda3/envs/xg/lib/python3.9/site-packages/starlette/routing.py", line 79, in app await wrap_app_handling_exceptions(app, request)(scope, receive, send) File 
"/root/miniconda3/envs/xg/lib/python3.9/site-packages/starlette/_exception_handler.py", line 64, in wrapped_app raise exc File "/root/miniconda3/envs/xg/lib/python3.9/site-packages/starlette/_exception_handler.py", line 53, in wrapped_app await app(scope, receive, sender) File "/root/miniconda3/envs/xg/lib/python3.9/site-packages/starlette/routing.py", line 74, in app response = await func(request) File "/root/miniconda3/envs/xg/lib/python3.9/site-packages/fastapi/routing.py", line 299, in app raise e File "/root/miniconda3/envs/xg/lib/python3.9/site-packages/fastapi/routing.py", line 294, in app raw_response = await run_endpoint_function( File "/root/miniconda3/envs/xg/lib/python3.9/site-packages/fastapi/routing.py", line 191, in run_endpoint_function return await dependant.call(**values) File "/root/miniconda3/envs/xg/lib/python3.9/site-packages/langserve/server.py", line 464, in invoke return await api_handler.invoke(request) File "/root/miniconda3/envs/xg/lib/python3.9/site-packages/langserve/api_handler.py", line 720, in invoke output=self._serializer.dumpd(output), File "/root/miniconda3/envs/xg/lib/python3.9/site-packages/langserve/serialization.py", line 164, in dumpd return orjson.loads(orjson.dumps(obj, default=default)) TypeError: Type is not JSON serializable: ModelMetaclass ``` ### Description is anyone knows how to fix this? ### System Info ubuntu 20.4 LTS
langchain serve TypeError: Type is not JSON serializable: ModelMetaclass
https://api.github.com/repos/langchain-ai/langchain/issues/17700/comments
10
2024-02-18T11:19:23Z
2024-02-18T15:35:05Z
https://github.com/langchain-ai/langchain/issues/17700
2,140,935,720
17,700
[ "langchain-ai", "langchain" ]
### Checked other resources - [X] I added a very descriptive title to this issue. - [X] I searched the LangChain documentation with the integrated search. - [X] I used the GitHub search to find a similar question and didn't find it. - [X] I am sure that this is a bug in LangChain rather than my code. ### Example Code vectordb = Chroma.from_documents(documents=chunks , embedding=embeddings) I am using this code to solve problem but I am not able to solve its showing ttributeError Traceback (most recent call last) [<ipython-input-46-6f2359e6b5b4>](https://ii3889p2cn-496ff2e9c6d22116-0-colab.googleusercontent.com/outputframe.html?vrz=colab_20240214-060113_RC00_606917857#) in <cell line: 1>() ----> 1 vectordb = Chroma.from_documents(documents=chunks 2 , embedding=embeddings) 1 frames [/usr/local/lib/python3.10/dist-packages/langchain_community/vectorstores/chroma.py](https://ii3889p2cn-496ff2e9c6d22116-0-colab.googleusercontent.com/outputframe.html?vrz=colab_20240214-060113_RC00_606917857#) in <listcomp>(.0) 774 Chroma: Chroma vectorstore. 775 """ --> 776 texts = [doc.page_content for doc in documents] 777 metadatas = [doc.metadata for doc in documents] 778 return cls.from_texts( AttributeError: 'str' object has no attribute 'page_content' ### Error Message and Stack Trace (if applicable) _No response_ ### Description I am using langchain to create chunks and perform vector embedding throughcgroma db , How can I change it into ### System Info I am using Mac
'str' object has no attribute 'page_content'
https://api.github.com/repos/langchain-ai/langchain/issues/17699/comments
1
2024-02-18T10:58:13Z
2024-06-01T00:19:31Z
https://github.com/langchain-ai/langchain/issues/17699
2,140,924,488
17,699
[ "langchain-ai", "langchain" ]
### Checked other resources - [X] I added a very descriptive title to this issue. - [X] I searched the LangChain documentation with the integrated search. - [X] I used the GitHub search to find a similar question and didn't find it. - [X] I am sure that this is a bug in LangChain rather than my code. ### Example Code Im using a standard SelfQueryRetriver to extract relevant documents (car listings) that match a user query. Its has been working pretty well but recently it started giving me errors (stack trace attached). ise ` retriever = SelfQueryRetriever.from_llm( llm, vectordb, document_content_description, metadata_field_info, verbose=True )` ### Error Message and Stack Trace (if applicable) OutputParserException('Parsing text\n```json\n{\n "query": "with bluetooth and a reversing camera recent",\n "filter": "or(eq(\\"vehicle_type\\", \\"Hatchback\\"), eq(\\"vehicle_type\\", \\"Sedan\\")), in(\\"location\\", [\\"Westgate\\", \\"North Shore\\", \\"Otahuhu\\", \\"Penrose\\", \\"Botany\\", \\"Manukau\\"])"\n}\n```\n raised following error:\nUnexpected token Token(\'COMMA\', \',\') at line 1, column 65.\nExpected one of: \n\t* $END\n')Traceback (most recent call last): File "/home/dharshana/.local/share/virtualenvs/tina-virtual-assistant-eLldwkZS/lib/python3.11/site-packages/lark/parsers/lalr_parser_state.py", line 77, in feed_token action, arg = states[state][token.type] ~~~~~~~~~~~~~^^^^^^^^^^^^ KeyError: 'COMMA' During handling of the above exception, another exception occurred: Traceback (most recent call last): File "/home/dharshana/.local/share/virtualenvs/tina-virtual-assistant-eLldwkZS/lib/python3.11/site-packages/langchain/chains/query_constructor/base.py", line 56, in parse parsed["filter"] = self.ast_parse(parsed["filter"]) ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ File "/home/dharshana/.local/share/virtualenvs/tina-virtual-assistant-eLldwkZS/lib/python3.11/site-packages/lark/lark.py", line 658, in parse return self.parser.parse(text, start=start, 
on_error=on_error) ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ File "/home/dharshana/.local/share/virtualenvs/tina-virtual-assistant-eLldwkZS/lib/python3.11/site-packages/lark/parser_frontends.py", line 104, in parse return self.parser.parse(stream, chosen_start, **kw) ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ File "/home/dharshana/.local/share/virtualenvs/tina-virtual-assistant-eLldwkZS/lib/python3.11/site-packages/lark/parsers/lalr_parser.py", line 42, in parse return self.parser.parse(lexer, start) ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ File "/home/dharshana/.local/share/virtualenvs/tina-virtual-assistant-eLldwkZS/lib/python3.11/site-packages/lark/parsers/lalr_parser.py", line 88, in parse return self.parse_from_state(parser_state) ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ File "/home/dharshana/.local/share/virtualenvs/tina-virtual-assistant-eLldwkZS/lib/python3.11/site-packages/lark/parsers/lalr_parser.py", line 111, in parse_from_state raise e File "/home/dharshana/.local/share/virtualenvs/tina-virtual-assistant-eLldwkZS/lib/python3.11/site-packages/lark/parsers/lalr_parser.py", line 102, in parse_from_state state.feed_token(token) File "/home/dharshana/.local/share/virtualenvs/tina-virtual-assistant-eLldwkZS/lib/python3.11/site-packages/lark/parsers/lalr_parser_state.py", line 80, in feed_token raise UnexpectedToken(token, expected, state=self, interactive_parser=None) lark.exceptions.UnexpectedToken: Unexpected token Token('COMMA', ',') at line 1, column 65. 
Expected one of: * $END During handling of the above exception, another exception occurred: Traceback (most recent call last): File "/home/dharshana/.local/share/virtualenvs/tina-virtual-assistant-eLldwkZS/lib/python3.11/site-packages/langchain_core/runnables/base.py", line 1246, in _call_with_config context.run( File "/home/dharshana/.local/share/virtualenvs/tina-virtual-assistant-eLldwkZS/lib/python3.11/site-packages/langchain_core/runnables/config.py", line 326, in call_func_with_variable_args return func(input, **kwargs) # type: ignore[call-arg] ^^^^^^^^^^^^^^^^^^^^^ File "/home/dharshana/.local/share/virtualenvs/tina-virtual-assistant-eLldwkZS/lib/python3.11/site-packages/langchain_core/output_parsers/base.py", line 168, in <lambda> lambda inner_input: self.parse_result( ^^^^^^^^^^^^^^^^^^ File "/home/dharshana/.local/share/virtualenvs/tina-virtual-assistant-eLldwkZS/lib/python3.11/site-packages/langchain_core/output_parsers/base.py", line 219, in parse_result return self.parse(result[0].text) ^^^^^^^^^^^^^^^^^^^^^^^^^^ File "/home/dharshana/.local/share/virtualenvs/tina-virtual-assistant-eLldwkZS/lib/python3.11/site-packages/langchain/chains/query_constructor/base.py", line 63, in parse raise OutputParserException( langchain_core.exceptions.OutputParserException: Parsing text ```json { "query": "with bluetooth and a reversing camera recent", "filter": "or(eq(\"vehicle_type\", \"Hatchback\"), eq(\"vehicle_type\", \"Sedan\")), in(\"location\", [\"Westgate\", \"North Shore\", \"Otahuhu\", \"Penrose\", \"Botany\", \"Manukau\"])" } ``` raised following error: Unexpected token Token('COMMA', ',') at line 1, column 65. Expected one of: * $END ### Description im getting lark.exceptions.UnexpectedToken: Unexpected token Token('COMMA', ',') at line 1, column 65. Expected one of: * $END Seems its not happy with a COMMA. 
Im not entirely sure if the cause of the error is change in the Pinecone query api or an update in langchain version ### System Info "langchain": { "hashes": [ "sha256:29d95f12afe9690953820970205dba3b098ee1f7531e80eb18c1236d3feda921", "sha256:b40fbe2b65360afe6c0d5bbf37e79469f990779460640edde5b906175c49807e" ], "index": "pypi", "version": "==0.1.7" }, "langchain-community": { "hashes": [ "sha256:bd112b5813702919c50f89b1afa2b63adf1da89999df4842b327ee11220f8c39", "sha256:c56c48bc77d24e1fc399a9ee9a637d96e3b2ff952e3a080b5a41820d9d00fb3c" ], "index": "pypi", "version": "==0.0.20" }, "langchain-core": { "hashes": [ "sha256:34359cc8b6f8c3d45098c54a6a9b35c9f538ef58329cd943a2249d6d7b4e5806", "sha256:d42fac013c39a8b0bcd7e337a4cb6c17c16046c60d768f89df582ad73ec3c5cb" ], "markers": "python_full_version >= '3.8.1' and python_version < '4.0'", "version": "==0.1.23" }, "langchain-openai": { "hashes": [ "sha256:2ef040e4447a26a9d3bd45dfac9cefa00797ea58555a3d91ab4f88699eb3a005", "sha256:f5c4ebe46f2c8635c8f0c26cc8df27700aacafea025410e418d5a080039974dd" ], "index": "pypi", "version": "==0.0.6" },
Error in StructuredQueryOutputParser using SelfQueryRetriever with Pinecone
https://api.github.com/repos/langchain-ai/langchain/issues/17696/comments
3
2024-02-18T07:42:21Z
2024-07-14T16:06:02Z
https://github.com/langchain-ai/langchain/issues/17696
2,140,798,399
17,696
[ "langchain-ai", "langchain" ]
### Checked other resources - [X] I added a very descriptive title to this issue. - [X] I searched the LangChain documentation with the integrated search. - [X] I used the GitHub search to find a similar question and didn't find it. - [X] I am sure that this is a bug in LangChain rather than my code. ### Example Code Example code that doesn't work: ``` from langchain_community.tools.google_lens import GoogleLensQueryRun from langchain_community.utilities.google_lens import GoogleLensAPIWrapper SERPAPI_API_KEY = "api_key_here" tool = GoogleLensQueryRun(api_wrapper=GoogleLensAPIWrapper()) # Runs google lens on an image of Danny Devito tool.run("https://i.imgur.com/HBrB8p0.png") ``` This is the code from langchain.utilities.google_lens that is incorrect: ``` if len(responseValue["knowledge_graph"]) > 0: subject = responseValue["knowledge_graph"][0] xs += f"Subject:{subject['title']}({subject['subtitle']})\n" xs += f"Link to subject:{subject['link']}\n\n" xs += "Related Images:\n\n" for image in responseValue["visual_matches"]: xs += f"Title: {image['title']}\n" xs += f"Source({image['source']}): {image['link']}\n" xs += f"Image: {image['thumbnail']}\n\n" xs += ( "Reverse Image Search" + f"Link: {responseValue['reverse_image_search']['link']}\n" ) print(xs) ``` ### Error Message and Stack Trace (if applicable) Traceback (most recent call last): File "/Users/simonquach/Documents/vs-code/treehacks/google-lens.py", line 77, in <module> tool.run("https://i.imgur.com/HBrB8p0.png") File "/Users/simonquach/Documents/vs-code/treehacks/.venv/lib/python3.12/site-packages/langchain_core/tools.py", line 373, in run raise e File "/Users/simonquach/Documents/vs-code/treehacks/.venv/lib/python3.12/site-packages/langchain_core/tools.py", line 345, in run self._run(*tool_args, run_manager=run_manager, **tool_kwargs) File "/Users/simonquach/Documents/vs-code/treehacks/.venv/lib/python3.12/site-packages/langchain_community/tools/google_lens/tool.py", line 29, in _run return 
self.api_wrapper.run(query) ^^^^^^^^^^^^^^^^^^^^^^^^^^^ File "/Users/simonquach/Documents/vs-code/treehacks/.venv/lib/python3.12/site-packages/langchain_community/utilities/google_lens.py", line 67, in run if len(responseValue["knowledge_graph"]) > 0: ~~~~~~~~~~~~~^^^^^^^^^^^^^^^^^^^ KeyError: 'knowledge_graph' ### Description I encountered a KeyError while using the GoogleLensQueryRun tool from the langchain_community package, specifically when attempting to run a Google Lens search on an image URL. The issue arises within the langchain_community.utilities.google_lens module, during the handling of the API response. The problematic code within langchain_community.utilities.google_lens attempts to access a knowledge_graph key in the response. However, this results in a KeyError if the knowledge_graph key is not present in the response. It seems that the code does not account for scenarios where the knowledge_graph key might be missing from the Google Lens API response. ### System Info langchain==0.1.5 langchain-community==0.0.17 langchain-core==0.1.18 mac Python 3.12.1
No "knowledge_graph" property in Google Lens API call from SerpAPI
https://api.github.com/repos/langchain-ai/langchain/issues/17690/comments
1
2024-02-17T23:04:21Z
2024-06-01T00:21:19Z
https://github.com/langchain-ai/langchain/issues/17690
2,140,650,229
17,690
[ "langchain-ai", "langchain" ]
### Checked other resources - [X] I added a very descriptive title to this issue. - [X] I searched the LangChain documentation with the integrated search. - [X] I used the GitHub search to find a similar question and didn't find it. - [X] I am sure that this is a bug in LangChain rather than my code. ### Example Code ```python import asyncio import os from langchain_community.chat_models import ChatCohere from langchain_core.messages import SystemMessage, HumanMessage, AIMessage from langchain_core.output_parsers import StrOutputParser async def idk(provider, model, messages, api_key): if provider == 'cohere': llm = ChatCohere(cohere_api_key=api_key, model_name=model, temperature=0) else: raise Exception("Provider not supported") output_parser = StrOutputParser() chain = llm | output_parser # This works for chunk in chain.stream(messages): print(chunk, end="") print() print('---------------') print() # This works then breaks async for chunk in chain.astream(messages): print(chunk, end="") # yield chunk messages = [ SystemMessage("You are world class mathematician."), HumanMessage("Whats 10 + 10?"), AIMessage("10 + 10 is"), HumanMessage("What?") ] provider_inputs = [ { 'provider': 'cohere', 'api_key': os.environ.get('COHERE_API_KEY'), 'model': 'command' } ] for x in provider_inputs: print(f"Running inputs for {x['provider']}") asyncio.run( idk( provider=x['provider'], messages=messages, model=x['model'], api_key=x['api_key'] ) ) print() print() ``` ### Error Message and Stack Trace (if applicable) Unclosed client session client_session: <aiohttp.client.ClientSession object at 0x1046d6d30> Unclosed connector connections: ['[(<aiohttp.client_proto.ResponseHandler object at 0x1046ccc40>, 2.635331583)]'] connector: <aiohttp.connector.TCPConnector object at 0x103474130> Fatal error on SSL transport protocol: <asyncio.sslproto.SSLProtocol object at 0x1046d6eb0> transport: <_SelectorSocketTransport closing fd=11> Traceback (most recent call last): File 
"/Library/Developer/CommandLineTools/Library/Frameworks/Python3.framework/Versions/3.9/lib/python3.9/asyncio/selector_events.py", line 918, in write n = self._sock.send(data) OSError: [Errno 9] Bad file descriptor During handling of the above exception, another exception occurred: Traceback (most recent call last): File "/Library/Developer/CommandLineTools/Library/Frameworks/Python3.framework/Versions/3.9/lib/python3.9/asyncio/sslproto.py", line 684, in _process_write_backlog self._transport.write(chunk) File "/Library/Developer/CommandLineTools/Library/Frameworks/Python3.framework/Versions/3.9/lib/python3.9/asyncio/selector_events.py", line 924, in write self._fatal_error(exc, 'Fatal write error on socket transport') File "/Library/Developer/CommandLineTools/Library/Frameworks/Python3.framework/Versions/3.9/lib/python3.9/asyncio/selector_events.py", line 719, in _fatal_error self._force_close(exc) File "/Library/Developer/CommandLineTools/Library/Frameworks/Python3.framework/Versions/3.9/lib/python3.9/asyncio/selector_events.py", line 731, in _force_close self._loop.call_soon(self._call_connection_lost, exc) File "/Library/Developer/CommandLineTools/Library/Frameworks/Python3.framework/Versions/3.9/lib/python3.9/asyncio/base_events.py", line 746, in call_soon self._check_closed() File "/Library/Developer/CommandLineTools/Library/Frameworks/Python3.framework/Versions/3.9/lib/python3.9/asyncio/base_events.py", line 510, in _check_closed raise RuntimeError('Event loop is closed') RuntimeError: Event loop is closed ### Description I'm trying to use the langchain ChatCohere integration to asynchronously stream responses back to the user. I noticed invoke, ainvoke, and stream work fine, but astream does not. 
Swapping ChatCohere with Google and OpenAIs langchain modules worked fine in this same scenario ### System Info System Information ------------------ > OS: Darwin > OS Version: Darwin Kernel Version 23.2.0: Wed Nov 15 21:59:33 PST 2023; root:xnu-10002.61.3~2/RELEASE_ARM64_T8112 > Python Version: 3.9.6 (default, Nov 10 2023, 13:38:27) [Clang 15.0.0 (clang-1500.1.0.2.5)] Package Information ------------------- > langchain_core: 0.1.23 > langchain: 0.0.354 > langchain_community: 0.0.20 > langsmith: 0.0.87 > langchain_google_genai: 0.0.9 > langchain_mistralai: 0.0.4 > langchain_openai: 0.0.6 Packages not installed (Not Necessarily a Problem) -------------------------------------------------- The following packages were not found: > langgraph > langserve
ChatCohere async stream operation breaks after each run
https://api.github.com/repos/langchain-ai/langchain/issues/17687/comments
1
2024-02-17T20:31:21Z
2024-06-01T00:20:13Z
https://github.com/langchain-ai/langchain/issues/17687
2,140,574,863
17,687
[ "langchain-ai", "langchain" ]
### Checked other resources - [X] I added a very descriptive title to this issue. - [X] I searched the LangChain documentation with the integrated search. - [X] I used the GitHub search to find a similar question and didn't find it. - [X] I am sure that this is a bug in LangChain rather than my code. ### Example Code ```python import asyncio import os from langchain_community.chat_models import ChatAnyscale from langchain_core.messages import SystemMessage, HumanMessage, AIMessage from langchain_core.output_parsers import StrOutputParser async def idk(provider, model, messages, api_key): if provider == 'anyscale': llm = ChatAnyscale(anyscale_api_key=api_key, model_name=model, temperature=0) else: raise Exception("Provider not supported") output_parser = StrOutputParser() chain = llm | output_parser # This works for chunk in chain.stream(messages): print(chunk, end="") # This does not work async for chunk in chain.astream(messages): print(chunk, end="") # yield chunk messages = [ SystemMessage("You are world class mathematician."), HumanMessage("Whats 10 + 10?"), AIMessage("10 + 10 is"), HumanMessage("What?") ] provider_inputs = [ { 'provider': 'anyscale', 'api_key': os.environ.get('ANYSCALE_API_KEY'), 'model': 'mistralai/Mixtral-8x7B-Instruct-v0.1' } ] for x in provider_inputs: print(f"Running inputs for {x['provider']}") asyncio.run( idk( provider=x['provider'], messages=messages, model=x['model'], api_key=x['api_key'] ) ) print() print() ``` ### Error Message and Stack Trace (if applicable) Traceback (most recent call last): File "/Users/julianshalaby/Desktop/LLM_Server/lc_errror.py", line 45, in <module> asyncio.run( File "/Library/Developer/CommandLineTools/Library/Frameworks/Python3.framework/Versions/3.9/lib/python3.9/asyncio/runners.py", line 44, in run return loop.run_until_complete(main) File "/Library/Developer/CommandLineTools/Library/Frameworks/Python3.framework/Versions/3.9/lib/python3.9/asyncio/base_events.py", line 642, in run_until_complete return 
future.result() File "/Users/julianshalaby/Desktop/LLM_Server/lc_errror.py", line 23, in idk async for chunk in chain.astream(messages): File "/Users/julianshalaby/Desktop/LLM_Server/venv/lib/python3.9/site-packages/langchain_core/runnables/base.py", line 2449, in astream async for chunk in self.atransform(input_aiter(), config, **kwargs): File "/Users/julianshalaby/Desktop/LLM_Server/venv/lib/python3.9/site-packages/langchain_core/runnables/base.py", line 2432, in atransform async for chunk in self._atransform_stream_with_config( File "/Users/julianshalaby/Desktop/LLM_Server/venv/lib/python3.9/site-packages/langchain_core/runnables/base.py", line 1600, in _atransform_stream_with_config chunk = cast(Output, await py_anext(iterator)) File "/Users/julianshalaby/Desktop/LLM_Server/venv/lib/python3.9/site-packages/langchain_core/runnables/base.py", line 2402, in _atransform async for output in final_pipeline: File "/Users/julianshalaby/Desktop/LLM_Server/venv/lib/python3.9/site-packages/langchain_core/output_parsers/transform.py", line 60, in atransform async for chunk in self._atransform_stream_with_config( File "/Users/julianshalaby/Desktop/LLM_Server/venv/lib/python3.9/site-packages/langchain_core/runnables/base.py", line 1560, in _atransform_stream_with_config final_input: Optional[Input] = await py_anext(input_for_tracing, None) File "/Users/julianshalaby/Desktop/LLM_Server/venv/lib/python3.9/site-packages/langchain_core/utils/aiter.py", line 62, in anext_impl return await __anext__(iterator) File "/Users/julianshalaby/Desktop/LLM_Server/venv/lib/python3.9/site-packages/langchain_core/utils/aiter.py", line 97, in tee_peer item = await iterator.__anext__() File "/Users/julianshalaby/Desktop/LLM_Server/venv/lib/python3.9/site-packages/langchain_core/runnables/base.py", line 1071, in atransform async for output in self.astream(final, config, **kwargs): File 
"/Users/julianshalaby/Desktop/LLM_Server/venv/lib/python3.9/site-packages/langchain_core/language_models/chat_models.py", line 308, in astream raise e File "/Users/julianshalaby/Desktop/LLM_Server/venv/lib/python3.9/site-packages/langchain_core/language_models/chat_models.py", line 292, in astream async for chunk in self._astream( File "/Users/julianshalaby/Desktop/LLM_Server/venv/lib/python3.9/site-packages/langchain_community/chat_models/openai.py", line 488, in _astream async for chunk in await acompletion_with_retry( File "/Users/julianshalaby/Desktop/LLM_Server/venv/lib/python3.9/site-packages/langchain_community/chat_models/openai.py", line 105, in acompletion_with_retry return await llm.async_client.create(**kwargs) AttributeError: 'NoneType' object has no attribute 'create' ### Description I'm trying to use the langchain ChatAnyscale integration to asynchronously stream responses back to the user. I noticed invoke and stream work fine, but ainvoke and astream do not. Swapping ChatAnyscale with Google and OpenAIs langchain modules worked fine in this same scenario ### System Info System Information ------------------ > OS: Darwin > OS Version: Darwin Kernel Version 23.2.0: Wed Nov 15 21:59:33 PST 2023; root:xnu-10002.61.3~2/RELEASE_ARM64_T8112 > Python Version: 3.9.6 (default, Nov 10 2023, 13:38:27) [Clang 15.0.0 (clang-1500.1.0.2.5)] Package Information ------------------- > langchain_core: 0.1.23 > langchain: 0.0.354 > langchain_community: 0.0.20 > langsmith: 0.0.87 > langchain_google_genai: 0.0.9 > langchain_openai: 0.0.6 Packages not installed (Not Necessarily a Problem) -------------------------------------------------- The following packages were not found: > langgraph > langserve
ChatAnyscale async operation not functional
https://api.github.com/repos/langchain-ai/langchain/issues/17685/comments
2
2024-02-17T20:11:19Z
2024-06-01T00:07:42Z
https://github.com/langchain-ai/langchain/issues/17685
2,140,550,648
17,685
[ "langchain-ai", "langchain" ]
Getting an error on "from langchain.chains import RetrievalQA"; error message is cannot import name 'NeptuneRdfGraph' from 'langchain_community.graphs' . using langchain version 0.1.7 _Originally posted by @NarayananParthasarathy in https://github.com/langchain-ai/langchain/issues/2725#issuecomment-1950262407_
Getting an error on "from langchain.chains import RetrievalQA"; error message is cannot import name 'NeptuneRdfGraph' from 'langchain_community.graphs' . using langchain version 0.1.7
https://api.github.com/repos/langchain-ai/langchain/issues/17680/comments
7
2024-02-17T17:26:06Z
2024-05-05T13:53:12Z
https://github.com/langchain-ai/langchain/issues/17680
2,140,342,672
17,680
[ "langchain-ai", "langchain" ]
### Checked other resources - [X] I added a very descriptive title to this issue. - [X] I searched the LangChain documentation with the integrated search. - [X] I used the GitHub search to find a similar question and didn't find it. - [X] I am sure that this is a bug in LangChain rather than my code. ### Example Code When I run the following code: ``` embeddings: OpenAIEmbeddings = OpenAIEmbeddings(deployment="text-embedding-ada-002", chunk_size=1) index_name: str = "langchain-example" vector_store: AzureSearch = AzureSearch( azure_search_endpoint=os.environ.get("SEARCH_ENDPOINT"), azure_search_key=os.environ.get("SEARCH_API_KEY"), index_name=index_name, embedding_function=embeddings.embed_query, ) ``` I get the following Error. ``` Traceback (most recent call last): File "D:\pythonProject2\main.py", line 11, in <module> vector_store: AzureSearch = AzureSearch( ^^^^^^^^^^^^ File "D:\pythonProject2\.venv\Lib\site-packages\langchain_community\vectorstores\azuresearch.py", line 268, in __init__ self.client = _get_search_client( ^^^^^^^^^^^^^^^^^^^ File "D:\pythonProject2\.venv\Lib\site-packages\langchain_community\vectorstores\azuresearch.py", line 84, in _get_search_client from azure.search.documents.indexes.models import ( ImportError: cannot import name 'ExhaustiveKnnAlgorithmConfiguration' from 'azure.search.documents.indexes.models' (D:\pythonProject2\.venv\Lib\s ite-packages\azure\search\documents\indexes\models\__init__.py) ``` ExhaustiveKnnAlgorithmConfiguration was removed as it seems. I used `azure-search-documents==11.4.0b8`. Downgrading and upgrading will result in a warning: ``` Successfully installed azure-search-documents-11.4.0 PS D:\pythonProject2> python .\main.py vector_search_configuration is not a known attribute of class <class 'azure.search.documents.indexes.models._index.SearchField'> and will be igno red ``` I tried different versions of LangChain from 0.1.0 to 0.1.7 and all resulted in the same issue. Any ideas for a workaround or a solution? 
It´s probably not a known issue yet. ### Error Message and Stack Trace (if applicable) _No response_ ### Description I expect to be able to instantiate AzureSearch without any errors or warnings. ### System Info langchain-openai==0.0.6 azure-identity==1.15.0 azure-search-documents==11.4.0 langchain==0.1.7 langchain-community==0.0.20 langchain-core==0.1.23 langchain-openai==0.0.6 langsmith==0.0.87
LangChain does not work with AzureSearch anymore due to ImportError
https://api.github.com/repos/langchain-ai/langchain/issues/17679/comments
6
2024-02-17T17:23:20Z
2024-05-12T07:48:02Z
https://github.com/langchain-ai/langchain/issues/17679
2,140,339,097
17,679
[ "langchain-ai", "langchain" ]
### Checked other resources - [X] I added a very descriptive title to this issue. - [X] I searched the LangChain documentation with the integrated search. - [X] I used the GitHub search to find a similar question and didn't find it. - [X] I am sure that this is a bug in LangChain rather than my code. ### Example Code local_embedding = HuggingFaceEmbeddings(model_name=embedding_path) local_vdb = FAISS.load_local(vector_path, local_embedding, "default") ### Error Message and Stack Trace (if applicable) _No response_ ### Description ![CleanShot 2024-02-18 at 00 47 51](https://github.com/langchain-ai/langchain/assets/154310050/3caeb068-28b8-46c7-bab8-7fbe1a4b6e1a) i find when i try to get something from faiss, the gpu memory raise up, that's normal. but when the work is down, i mean i reterive already, but gpu memory not falling down ( even i closed the interface "gradio web" ), that the problem is. i'm building a gradio web app for my commpany, many people will use, when oneperson use to get something from faiss, the embedding model will use another memory, i mean if embedding working will use 2gib ( suppose ),so two person call = 4gib, three person call = 6gib, not 2gib, 2.3 gib.... it cost too many resources, so, how i can mannully stop the embedding model, when the work is down and release the gpu memory. tha's very important to me, thanks for your help. 🌹 ### System Info python 3.9.18 langchain lastest ubuntu 20.04 lts
CUDA memory won't release with HuggingFaceEmbeddings + local embedding model
https://api.github.com/repos/langchain-ai/langchain/issues/17678/comments
6
2024-02-17T17:08:34Z
2024-02-19T13:35:28Z
https://github.com/langchain-ai/langchain/issues/17678
2,140,320,363
17,678
[ "langchain-ai", "langchain" ]
### Checked other resources - [X] I added a very descriptive title to this issue. - [X] I searched the LangChain documentation with the integrated search. - [X] I used the GitHub search to find a similar question and didn't find it. - [X] I am sure that this is a bug in LangChain rather than my code. ### Example Code ``` pdf_path = './pdf/2402.10021v1.pdf' loader = PyPDFium2Loader(pdf_path) documents = loader.load() ```` ### Error Message and Stack Trace (if applicable) corrupted size vs. prev_size [1] 619773 abort (core dumped) ### Description When I am trying to load pdf of https://arxiv.org/abs/2402.10021v1, this error occurs. However, when I load other pdfs, there's no errors. I also tried to load this pdf directed by fitz using the following code, there is no error. ``` import fitz pdf_path = './pdf/2402.10021v1.pdf' pdf_document = fitz.open(pdf_path) text = "" for page_number in range(len(pdf_document)): page = pdf_document.load_page(page_number) text += page.get_text() print(text) ``` ### System Info langchain==0.1.5 langchain-community==0.0.17 langchain-core==0.1.18 pypdfium2==4.26.0 ubantu 20.04 Python 3.10.13
[Bug]error: corrupted size vs. prev_size occurs during loading pdf by PyPDFium2Loader
https://api.github.com/repos/langchain-ai/langchain/issues/17667/comments
1
2024-02-17T01:27:13Z
2024-06-08T16:10:10Z
https://github.com/langchain-ai/langchain/issues/17667
2,139,724,060
17,667
[ "langchain-ai", "langchain" ]
### Checked other resources - [X] I added a very descriptive title to this issue. - [X] I searched the LangChain documentation with the integrated search. - [X] I used the GitHub search to find a similar question and didn't find it. - [X] I am sure that this is a bug in LangChain rather than my code. ### Example Code ```python from langchain_community.vectorstores.elasticsearch import ElasticsearchStore from langchain_community.document_loaders import DirectoryLoader from elasticsearch import Elasticsearch loader = DirectoryLoader('../', glob="**/*.pdf", show_progress=True) docs = loader.load() print(docs[0]) es_connection = Elasticsearch( hosts=['https://XXXXXXX.es.us-central1.gcp.cloud.es.io'], basic_auth=('XXXXX', 'XXXXX') ) vector_store = ElasticsearchStore( index_name="test-elser", es_connection=es_connection, strategy=ElasticsearchStore.SparseVectorRetrievalStrategy( model_id=".elser_model_2_linux-x86_64" ), ) vector_store.add_documents(docs) ``` ### Error Message and Stack Trace (if applicable) > First error reason: Could not find trained model [.elser_model_1] > Traceback (most recent call last): > File "/Users/gustavollermalylarrain/Documents/proyectos/labs/langchain-elser/langchain_elser/vector_store.py", line 23, in <module> > vector_store.add_documents(docs) > File "/Users/gustavollermalylarrain/Documents/proyectos/labs/langchain-elser/.venv/lib/python3.11/site-packages/langchain_core/vectorstores.py", line 119, in add_documents > return self.add_texts(texts, metadatas, **kwargs) > ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ > File "/Users/gustavollermalylarrain/Documents/proyectos/labs/langchain-elser/.venv/lib/python3.11/site-packages/langchain_community/vectorstores/elasticsearch.py", line 1040, in add_texts > return self.__add( > ^^^^^^^^^^^ > File "/Users/gustavollermalylarrain/Documents/proyectos/labs/langchain-elser/.venv/lib/python3.11/site-packages/langchain_community/vectorstores/elasticsearch.py", line 998, in __add > raise e > File 
"/Users/gustavollermalylarrain/Documents/proyectos/labs/langchain-elser/.venv/lib/python3.11/site-packages/langchain_community/vectorstores/elasticsearch.py", line 981, in __add > success, failed = bulk( > ^^^^^ > File "/Users/gustavollermalylarrain/Documents/proyectos/labs/langchain-elser/.venv/lib/python3.11/site-packages/elasticsearch/helpers/actions.py", line 521, in bulk > for ok, item in streaming_bulk( > File "/Users/gustavollermalylarrain/Documents/proyectos/labs/langchain-elser/.venv/lib/python3.11/site-packages/elasticsearch/helpers/actions.py", line 436, in streaming_bulk > for data, (ok, info) in zip( > File "/Users/gustavollermalylarrain/Documents/proyectos/labs/langchain-elser/.venv/lib/python3.11/site-packages/elasticsearch/helpers/actions.py", line 355, in _process_bulk_chunk > yield from gen > File "/Users/gustavollermalylarrain/Documents/proyectos/labs/langchain-elser/.venv/lib/python3.11/site-packages/elasticsearch/helpers/actions.py", line 274, in _process_bulk_chunk_success > raise BulkIndexError(f"{len(errors)} document(s) failed to index.", errors) > elasticsearch.helpers.BulkIndexError: 2 document(s) failed to index. ### Description The ELSER ingestion is not working if I use a different elser model id than the default. I tried with both `ElasticsearchStore.from_documents` and `ElasticsearchStore.add_documents` with no luck ### System Info System Information ------------------ > OS: Darwin > OS Version: Darwin Kernel Version 23.3.0: Wed Dec 20 21:30:44 PST 2023; root:xnu-10002.81.5~7/RELEASE_ARM64_T6000 > Python Version: 3.11.6 (main, Nov 2 2023, 04:39:43) [Clang 14.0.3 (clang-1403.0.22.14.1)] Package Information ------------------- > langchain_core: 0.1.23 > langchain: 0.1.7 > langchain_community: 0.0.20 > langsmith: 0.0.87 Packages not installed (Not Necessarily a Problem) -------------------------------------------------- The following packages were not found: > langgraph > langserve
[Elasticsearch VectorStore] Could not find trained model
https://api.github.com/repos/langchain-ai/langchain/issues/17665/comments
4
2024-02-17T00:34:26Z
2024-07-05T09:19:14Z
https://github.com/langchain-ai/langchain/issues/17665
2,139,676,380
17,665
[ "langchain-ai", "langchain" ]
### Checked other resources - [X] I added a very descriptive title to this issue. - [X] I searched the LangChain documentation with the integrated search. - [X] I used the GitHub search to find a similar question and didn't find it. - [X] I am sure that this is a bug in LangChain rather than my code. ### Example Code loader = GoogleDriveLoader( document_ids=document_list, recursive=True, file_loader_cls=UnstructuredFileIOLoader, file_loader_kwargs={"mode": "elements"} ) data = loader.load() ### Error Message and Stack Trace (if applicable) Code output is provided in the following. There is supposed to be header info indicating that the title is "One-Pager for Generative Classifier Results" and a few section other names. ``` [{'content': '\ufeffOne-Pager for Generative Classifier Results\r\nData\r\nHate Speech Dataset from Kaggle (comments from Twitter)\r\nMethod\r\nIn the system prompt, ask GPT4 to summarize in one word YES or NO whether a comment is offensive language or not.\r\n\r\n\r\nReturn the top 5 log probabilities of the next token. Since YES or Yes is just one token, we calibrate the probability using \\sum P(<token>.upper() == YES) as the offensive language score.\r\nExperiments\r\nWe created the validation dataset that the ratio between positive and negative samples is around 1:2. Specifically, there are 206 positive samples and 416 negative samples. We let GPT4 to generate offensive language score as a classifier\r\nHistograms of positive and negative samples\r\nBelow are hisgoram and 1-CDF plots for positive and negative samples under zero-shot setup.\r\n \r\n\r\n\r\n\r\nZero-shot v.s. Few-shots\r\nBelow is the figure comparing the zero-shot classifier and the few-shots classifier. 
Specifically, we randomly select 10 offensive languages (outside of the validation dataset) and provide them as examples in the system prompt.\r\n\r\n\r\nWe shall see that the few shot classifier outperforms zero-shot classifier, especially in reducing the volume of false positives.\r\n \r\n\r\n\r\n\r\nNext steps\r\n* Calibrate precision through manual label correction on FPs.\r\n* Precision curve with respect to pos-neg ratio in the validation datasets.\r\n* Comparison of GPT3.5 and GPT4.', 'metadata': {'name': 'Generative Classifier', 'id': '1CMkmfv2CTy9qx3gAwiDOhdYUPjv2-5WOfGYodiXEr_I', 'version': '45', 'modifiedTime': '2024-02-16T21:55:15.296Z', 'createdTime': '2024-01-12T02:37:57.462Z', 'webViewLink': 'https://docs.google.com/document/d/1CMkmfv2CTy9qx3gAwiDOhdYUPjv2-5WOfGYodiXEr_I/edit?usp=drivesdk', 'type': 'google_doc', 'url': 'https://docs.google.com/document/d/1CMkmfv2CTy9qx3gAwiDOhdYUPjv2-5WOfGYodiXEr_I/edit?usp=drivesdk'}}] ``` ### Description I followed the instructions provided in GoogleDriveLoader and passed additional parameters indicating the google document should be parsed as elements so that I get header information of each text chunk. However, the loader does not work as expected. It still concatenate all plain texts together into one LangChain Document. What should I do to parse the header/section name in a google document? ### System Info langchain_community.__version__ == 0.0.19
Google documents not parsed as elements
https://api.github.com/repos/langchain-ai/langchain/issues/17664/comments
2
2024-02-17T00:30:21Z
2024-06-08T16:10:05Z
https://github.com/langchain-ai/langchain/issues/17664
2,139,672,570
17,664
[ "langchain-ai", "langchain" ]
### Checked other resources - [X] I added a very descriptive title to this issue. - [X] I searched the LangChain documentation with the integrated search. - [X] I used the GitHub search to find a similar question and didn't find it. - [X] I am sure that this is a bug in LangChain rather than my code. ### Example Code One cannot set params, in my particular case, to 0 for the VertexAI models as they evaluate to False and are ignored. This is not a problem for PaLM models (i.e. `text-bison`) as the [default temperature](https://cloud.google.com/vertex-ai/docs/generative-ai/model-reference/text) is `0.0`, however this is an issue for Gemini Pro as the [default temperature for text](https://cloud.google.com/vertex-ai/docs/generative-ai/model-reference/gemini) is 0.9. MRE ```python from langchain_google_vertexai import VertexAI llm = VertexAI(model_name='gemini-pro', project='test', temperature=0.0) print(llm._default_params) ``` You'll see that temperature is unset and so will use the Google API's default when generating. ### Error Message and Stack Trace (if applicable) _No response_ ### Description * I'm trying to set the temperature of `gemini-pro` to 0.0 and am unsuccessful, so its using the default of 0.9 This is a nefarious bug because users may not realise their temperature settings are being ignored...and typically a temperature of 0.0 is done for a very particular reason. I am submitting a PR to fix this. ### System Info Package Information ------------------- > langchain_core: 0.1.23 > langchain: 0.1.7 > langchain_community: 0.0.20 > langsmith: 0.0.87 > langchain_google_vertexai: 0.0.5
Can't set params to 0 on VertexAI models
https://api.github.com/repos/langchain-ai/langchain/issues/17658/comments
1
2024-02-16T22:42:10Z
2024-06-01T00:19:19Z
https://github.com/langchain-ai/langchain/issues/17658
2,139,587,257
17,658
[ "langchain-ai", "langchain" ]
### Checklist - [X] I added a very descriptive title to this issue. - [X] I included a link to the documentation page I am referring to (if applicable). ### Issue with current documentation: In the quickstart docs [(here)](https://github.com/langchain-ai/langchain/blob/master/docs/docs/get_started/quickstart.mdx) , there is a grammatical error on line 61. - Original: `This allows you interact in a chat manner with this LLM, so it remembers previous questions.` ### Idea or request for content: Grammar added: `This allows you to interact in a chat manner with this LLM, so it remembers previous questions.` There should be a "to" between "you" and "interact".
DOC: Grammatical Error in quickstart.mdx
https://api.github.com/repos/langchain-ai/langchain/issues/17657/comments
1
2024-02-16T22:34:11Z
2024-02-16T22:46:53Z
https://github.com/langchain-ai/langchain/issues/17657
2,139,579,508
17,657
[ "langchain-ai", "langchain" ]
### Checked other resources - [X] I added a very descriptive title to this issue. - [X] I searched the LangChain documentation with the integrated search. - [X] I used the GitHub search to find a similar question and didn't find it. - [X] I am sure that this is a bug in LangChain rather than my code. ### Example Code ```python from langchain_community.chat_models import BedrockChat b = BedrockChat(model_id="anthropic.claude-v2", model_kwargs={"temperature": 0.1}) ``` ### Error Message and Stack Trace (if applicable) If none AWS env variables are set, you see: > ValidationError: 1 validation error for BedrockChat > __root__ > Could not load credentials to authenticate with AWS client. Please check that credentials in the specified profile name are valid. (type=value_error) While it is actually: > Did not find region_name, please add an environment variable `AWS_DEFAULT_REGION` which contains it, or pass `region_name` as a named parameter ### Description If no ENV variables are set, you get misleading error message. Langchain is attempting to propagate true root cause by using 'raise from'. The problem is that this error is happening inside pydantic validation. And pydantic effectively [re-wraps errors](https://github.com/pydantic/pydantic/blob/12ebdfc6790ab0c29cc8aefd1d97dd04603eb7cb/pydantic/v1/main.py#L1030) loosing __context__ and __cause__ info. Only top-level error message is left. That is occasionally misleading when the problem is not about AWS creds mismatch. AmazonComprehendModerationChain, BedrockEmbeddings, BedrockBase, AmazonTextractPDFLoader, BedrockChat, Bedrock classes are affected. ### System Info platform-independent
AWS errors propagation is broken in Bedrock classes constructors validation
https://api.github.com/repos/langchain-ai/langchain/issues/17654/comments
1
2024-02-16T21:58:01Z
2024-06-01T00:07:41Z
https://github.com/langchain-ai/langchain/issues/17654
2,139,543,299
17,654
[ "langchain-ai", "langchain" ]
### Checked other resources - [X] I added a very descriptive title to this issue. - [X] I searched the LangChain documentation with the integrated search. - [X] I used the GitHub search to find a similar question and didn't find it. - [X] I am sure that this is a bug in LangChain rather than my code. ### Example Code loader = Docx2txtLoader(filename) docs = loader.load() ### Error Message and Stack Trace (if applicable) Traceback (most recent call last): File "C:\Users\MarkRamsey\PycharmProjects\ri_genaipro_source\RI_GenAIPro_Data_Pipeline.py", line 57, in <module> from loaders_local import * File "C:\Users\MarkRamsey\PycharmProjects\ri_genaipro_source\loaders_local.py", line 1, in <module> from langchain_community.document_loaders import Docx2txtLoader File "C:\Users\MarkRamsey\PycharmProjects\ri_genaipro_source\.venv\Lib\site-packages\langchain_community\document_loaders\__init__.py", line 163, in <module> from langchain_community.document_loaders.pebblo import PebbloSafeLoader File "C:\Users\MarkRamsey\PycharmProjects\ri_genaipro_source\.venv\Lib\site-packages\langchain_community\document_loaders\pebblo.py", line 5, in <module> import pwd ModuleNotFoundError: No module named 'pwd' ### Description the document_loaders\pebblo.py module is using pwd which is only valid in Linux, so it fails on windows ### System Info platform Windows 11 python 3.11.8
Use of pwd in document loaders causes failure in Windows with langchain_community 0.0.20
https://api.github.com/repos/langchain-ai/langchain/issues/17651/comments
3
2024-02-16T20:20:11Z
2024-04-27T21:50:46Z
https://github.com/langchain-ai/langchain/issues/17651
2,139,392,957
17,651
[ "langchain-ai", "langchain" ]
### Checked other resources - [X] I added a very descriptive title to this issue. - [X] I searched the LangChain documentation with the integrated search. - [X] I used the GitHub search to find a similar question and didn't find it. - [X] I am sure that this is a bug in LangChain rather than my code. ### Example Code ```python vectorstore = Pinecone.from_existing_index(index_name="primary", embedding=embedding) vectorstore.as_retriever(search_kwargs={"score_threshold": .80}) ``` ### Error Message and Stack Trace (if applicable) ```python TypeError: Pinecone.similarity_search_with_score() got an unexpected keyword argument 'score_threshold' ``` ### Description No score threshold filter available on Pinecone in Langchain. ### System Info Langchain v0.1.7 Python v3.11.5 Windows 10
Pinecone No Score_Threshold Argument
https://api.github.com/repos/langchain-ai/langchain/issues/17650/comments
7
2024-02-16T20:09:33Z
2024-06-08T16:10:01Z
https://github.com/langchain-ai/langchain/issues/17650
2,139,369,620
17,650
[ "langchain-ai", "langchain" ]
### Checked other resources - [X] I added a very descriptive title to this issue. - [X] I searched the LangChain documentation with the integrated search. - [X] I used the GitHub search to find a similar question and didn't find it. - [X] I am sure that this is a bug in LangChain rather than my code. ### Example Code from pymilvus import connections , utility import os os.environ["OPENAI_API_TYPE"] = "azure" os.environ["AZURE_OPENAI_ENDPOINT"] = "set this to end point" os.environ["AZURE_OPENAI_API_KEY"] = "azure openai api key" os.environ["OPENAI_API_VERSION"] = "2023-05-15" from langchain_openai import AzureOpenAIEmbeddings embeddings = AzureOpenAIEmbeddings( azure_deployment="text-embedding-ada-002", openai_api_version="2023-05-15" ) text = "This is a test query." query_result = embeddings.embed_query(text) print(query_result) ### Error Message and Stack Trace (if applicable) Traceback (most recent call last): File "test.py", line 14, in <module> embeddings = AzureOpenAIEmbeddings( File "../lib/python3.10/site-packages/pydantic/v1/main.py", line 341, in __init__ raise validation_error pydantic.v1.error_wrappers.ValidationError: 1 validation error for AzureOpenAIEmbeddings __root__ As of openai>=1.0.0, if `deployment` (or alias `azure_deployment`) is specified then `openai_api_base` (or alias `base_url`) should not be. Instead use `deployment` (or alias `azure_deployment`) and `azure_endpoint`. (type=value_error) ### Description I am trying to use the langchain_openai library for embedding but I also need to import and use pymilvus. When doing so the embedding doesn't work. see the error. I use the exact code from the langchain library website. ### System Info Name: langchain-openai Version: 0.0.6 Name: pymilvus Version: 2.3.6
AzureOpenAIEmbeddings gives an error if pymilvus is imported before
https://api.github.com/repos/langchain-ai/langchain/issues/17646/comments
2
2024-02-16T16:52:13Z
2024-02-25T20:03:42Z
https://github.com/langchain-ai/langchain/issues/17646
2,138,993,189
17,646
[ "langchain-ai", "langchain" ]
### Checked other resources - [X] I added a very descriptive title to this issue. - [X] I searched the LangChain documentation with the integrated search. - [X] I used the GitHub search to find a similar question and didn't find it. - [X] I am sure that this is a bug in LangChain rather than my code. ### Example Code from langchain.text_splitter import RecursiveCharacterTextSplitter text_to_split = 'any text can be put here if I am splitting from_tiktoken_encoder and have a chunk_overlap greater than 0 it will not work. The start_index metadata will have intermittant -1 values in it.' text_splitter = RecursiveCharacterTextSplitter(length_function=len, is_separator_regex=False).from_tiktoken_encoder( chunk_size=20, chunk_overlap=10, ) split_texts = text_splitter.create_documents([text_to_split]) ### Error Message and Stack Trace (if applicable) _No response_ ### Description Basically the error comes if you are splitting "from_tiktoken_encoder" rather than splitting by character count, and if you are specifying a chunk_overlap greater than 0. The error is caused by line 150 of text_splitter.py: offset = index + previous_chunk_len - self._chunk_overlap It won't calculate the correct offset because out self._chunk_overlap is specified as a token count, but that line in the code is calculating offset as a number of characters. 
### System Info aiohttp==3.9.3 aiosignal==1.3.1 anyio==3.5.0 argon2-cffi==21.3.0 argon2-cffi-bindings==21.2.0 asttokens==2.0.5 async-timeout==4.0.3 attrs==22.1.0 backcall==0.2.0 beautifulsoup4==4.11.1 black==22.6.0 bleach==4.1.0 blinker==1.4 boto3==1.24.28 botocore==1.27.96 certifi==2022.12.7 cffi==1.15.1 chardet==4.0.0 charset-normalizer==2.0.4 click==8.0.4 comm==0.1.2 contourpy==1.0.5 cryptography==39.0.1 cycler==0.11.0 Cython==0.29.32 databricks-sdk==0.1.6 dataclasses-json==0.6.4 dbus-python==1.2.18 debugpy==1.6.7 decorator==5.1.1 defusedxml==0.7.1 distlib==0.3.7 distro==1.7.0 distro-info==1.1+ubuntu0.2 docopt==0.6.2 docstring-to-markdown==0.11 entrypoints==0.4 executing==0.8.3 facets-overview==1.1.1 fastjsonschema==2.19.0 filelock==3.12.4 fonttools==4.25.0 frozenlist==1.4.1 googleapis-common-protos==1.61.0 greenlet==3.0.3 grpcio==1.48.2 grpcio-status==1.48.1 h11==0.14.0 httpcore==1.0.3 httplib2==0.20.2 httpx==0.26.0 idna==3.4 importlib-metadata==4.6.4 ipykernel==6.25.0 ipython==8.14.0 ipython-genutils==0.2.0 ipywidgets==7.7.2 jedi==0.18.1 jeepney==0.7.1 Jinja2==3.1.2 jmespath==0.10.0 joblib==1.2.0 jsonpatch==1.33 jsonpointer==2.4 jsonschema==4.17.3 jupyter-client==7.3.4 jupyter-server==1.23.4 jupyter_core==5.2.0 jupyterlab-pygments==0.1.2 jupyterlab-widgets==1.0.0 keyring==23.5.0 kiwisolver==1.4.4 langchain==0.1.7 langchain-community==0.0.20 langchain-core==0.1.23 langsmith==0.0.87 launchpadlib==1.10.16 lazr.restfulclient==0.14.4 lazr.uri==1.0.6 lxml==4.9.1 MarkupSafe==2.1.1 marshmallow==3.20.2 matplotlib==3.7.0 matplotlib-inline==0.1.6 mccabe==0.7.0 mistune==0.8.4 more-itertools==8.10.0 multidict==6.0.5 mypy-extensions==0.4.3 nbclassic==0.5.2 nbclient==0.5.13 nbconvert==6.5.4 nbformat==5.7.0 nest-asyncio==1.5.6 nodeenv==1.8.0 notebook==6.5.2 notebook_shim==0.2.2 num2words==0.5.13 numpy==1.23.5 oauthlib==3.2.0 openai==1.12.0 packaging==23.2 pandas==1.5.3 pandocfilters==1.5.0 parso==0.8.3 pathspec==0.10.3 patsy==0.5.3 pexpect==4.8.0 pickleshare==0.7.5 
Pillow==9.4.0 platformdirs==2.5.2 plotly==5.9.0 pluggy==1.0.0 prometheus-client==0.14.1 prompt-toolkit==3.0.36 protobuf==4.24.0 psutil==5.9.0 psycopg2==2.9.3 ptyprocess==0.7.0 pure-eval==0.2.2 pyarrow==8.0.0 pyarrow-hotfix==0.5 pycparser==2.21 pydantic==1.10.6 pyflakes==3.1.0 Pygments==2.11.2 PyGObject==3.42.1 PyJWT==2.3.0 pyodbc==4.0.32 pyparsing==3.0.9 pyright==1.1.294 pyrsistent==0.18.0 python-apt==2.4.0+ubuntu2 python-dateutil==2.8.2 python-lsp-jsonrpc==1.1.1 python-lsp-server==1.8.0 pytoolconfig==1.2.5 pytz==2022.7 PyYAML==6.0.1 pyzmq==23.2.0 regex==2023.12.25 requests==2.28.1 rope==1.7.0 s3transfer==0.6.2 scikit-learn==1.1.1 scipy==1.10.0 seaborn==0.12.2 SecretStorage==3.3.1 Send2Trash==1.8.0 six==1.16.0 sniffio==1.2.0 soupsieve==2.3.2.post1 SQLAlchemy==2.0.27 ssh-import-id==5.11 stack-data==0.2.0 statsmodels==0.13.5 tenacity==8.1.0 terminado==0.17.1 threadpoolctl==2.2.0 tiktoken==0.6.0 tinycss2==1.2.1 tokenize-rt==4.2.1 tomli==2.0.1 tornado==6.1 tqdm==4.66.2 traitlets==5.7.1 typing-inspect==0.9.0 typing_extensions==4.9.0 ujson==5.4.0 unattended-upgrades==0.1 urllib3==1.26.14 virtualenv==20.16.7 wadllib==1.3.6 wcwidth==0.2.5 webencodings==0.5.1 websocket-client==0.58.0 whatthepatch==1.0.2 widgetsnbextension==3.6.1 yapf==0.33.0 yarl==1.9.4 zipp==1.0.0
langchain.textsplitter "add_start_index" option broken for create_documents() when splitting text by token count rather than character count
https://api.github.com/repos/langchain-ai/langchain/issues/17642/comments
1
2024-02-16T14:43:15Z
2024-05-31T23:49:27Z
https://github.com/langchain-ai/langchain/issues/17642
2,138,764,620
17,642
[ "langchain-ai", "langchain" ]
### Checked other resources - [X] I added a very descriptive title to this issue. - [X] I searched the LangChain documentation with the integrated search. - [X] I used the GitHub search to find a similar question and didn't find it. - [X] I am sure that this is a bug in LangChain rather than my code. ### Example Code I tried to run code from langchain [doc](https://python.langchain.com/docs/integrations/vectorstores/faiss#similarity-search-with-filtering) where is called similarity search with filter, but the results are differend than in documentation. ```python from langchain_community.vectorstores import FAISS from langchain.schema import Document list_of_documents = [ Document(page_content="foo", metadata=dict(page=1)), Document(page_content="bar", metadata=dict(page=1)), Document(page_content="foo", metadata=dict(page=2)), Document(page_content="barbar", metadata=dict(page=2)), Document(page_content="foo", metadata=dict(page=3)), Document(page_content="bar burr", metadata=dict(page=3)), Document(page_content="foo", metadata=dict(page=4)), Document(page_content="bar bruh", metadata=dict(page=4)), ] db = FAISS.from_documents(list_of_documents, embeddings) results_with_scores = db.similarity_search_with_score("foo", filter=dict(page=1)) # Or with a callable: # results_with_scores = db.similarity_search_with_score("foo", filter=lambda d: d["page"] == 1) for doc, score in results_with_scores: print(f"Content: {doc.page_content}, Metadata: {doc.metadata}, Score: {score}") ``` My results are same as results without filtering. 
``` Content: foo, Metadata: {'page': 1}, Score: 5.159960813797904e-15 Content: foo, Metadata: {'page': 2}, Score: 5.159960813797904e-15 Content: foo, Metadata: {'page': 3}, Score: 5.159960813797904e-15 Content: foo, Metadata: {'page': 4}, Score: 5.159960813797904e-15 ``` ### Error Message and Stack Trace (if applicable) _No response_ ### Description I'm migrating from langchain==0.0.349 to new langchain 0.1.X and filtering worked just fine in version 0.0.349 ### System Info faiss-cpu==1.7.4 langchain==0.1.6 langchain-community==0.0.19 langchain-core==0.1.23 langchain-openai==0.0.6 windows 10
FAISS vectorstore filter not working
https://api.github.com/repos/langchain-ai/langchain/issues/17633/comments
4
2024-02-16T13:00:42Z
2024-08-05T16:07:30Z
https://github.com/langchain-ai/langchain/issues/17633
2,138,581,516
17,633
[ "langchain-ai", "langchain" ]
### Checked other resources - [X] I added a very descriptive title to this issue. - [X] I searched the LangChain documentation with the integrated search. - [X] I used the GitHub search to find a similar question and didn't find it. - [X] I am sure that this is a bug in LangChain rather than my code. ### Example Code ``` python from langchain.sql_database import SQLDatabase from langchain_experimental.sql import SQLDatabaseChain from langchain.chat_models import AzureChatOpenAI DB = SQLDatabase.from_uri(SQLCONNECTION, schema=SQLSCHEMA, include_tables=[SQL_TBL1, SQL_TBL2]) LLM = AzureChatOpenAI(model=OPENAI_MODEL_NAME, temperature=0, openai_api_key=OPENAI_API_KEY, openai_api_version=OPENAI_DEPLOYMENT_VERSION, azure_endpoint=OPENAI_DEPLOYMENT_ENDPOINT, deployment_name=OPENAI_DEPLOYMENT_NAME) db_chain = SQLDatabaseChain(llm=LLM, database=DB, verbose=True) db_chain.run(USERQUESTION) ``` ### Error Message and Stack Trace (if applicable) N/A ### Description Each time a question is asked, the SQL query is created and remains pending on the server even after the answer is returned to the user. The SQL query is only discarded after killing the console where the script runs. ### System Info langchain==0.1.7 langchain-experimental==0.0.51 Windows 11 Pro Python 3.11.4
SQL connection remains active on the server
https://api.github.com/repos/langchain-ai/langchain/issues/17628/comments
6
2024-02-16T11:49:24Z
2024-05-20T14:40:39Z
https://github.com/langchain-ai/langchain/issues/17628
2,138,443,293
17,628
[ "langchain-ai", "langchain" ]
### Checked other resources - [X] I added a very descriptive title to this issue. - [X] I searched the LangChain documentation with the integrated search. - [X] I used the GitHub search to find a similar question and didn't find it. - [X] I am sure that this is a bug in LangChain rather than my code. ### Example Code ```python from langchain_community.utilities import SQLDatabase db = SQLDatabase.from_uri(URI) ``` ### Error Message and Stack Trace (if applicable) ``` { "name": "ImportError", "message": "cannot import name 'string_types' from 'sqlalchemy.util.compat' (/home/airflow/.local/lib/python3.10/site-packages/sqlalchemy/util/compat.py)", "stack": "--------------------------------------------------------------------------- ImportError Traceback (most recent call last) Cell In[3], line 1 ----> 1 db = SQLDatabase.from_uri(URI) File ~/.local/lib/python3.10/site-packages/langchain_community/utilities/sql_database.py:133, in SQLDatabase.from_uri(cls, database_uri, engine_args, **kwargs) 131 \"\"\"Construct a SQLAlchemy engine from URI.\"\"\" 132 _engine_args = engine_args or {} --> 133 return cls(create_engine(database_uri, **_engine_args), **kwargs) File <string>:2, in create_engine(url, **kwargs) File ~/.local/lib/python3.10/site-packages/sqlalchemy/util/deprecations.py:281, in deprecated_params.<locals>.decorate.<locals>.warned(fn, *args, **kwargs) 274 if m in kwargs: 275 _warn_with_version( 276 messages[m], 277 versions[m], 278 version_warnings[m], 279 stacklevel=3, 280 ) --> 281 return fn(*args, **kwargs) File ~/.local/lib/python3.10/site-packages/sqlalchemy/engine/create.py:550, in create_engine(url, **kwargs) 546 u = _url.make_url(url) 548 u, plugins, kwargs = u._instantiate_plugins(kwargs) --> 550 entrypoint = u._get_entrypoint() 551 _is_async = kwargs.pop(\"_is_async\", False) 552 if _is_async: File ~/.local/lib/python3.10/site-packages/sqlalchemy/engine/url.py:758, in URL._get_entrypoint(self) 756 else: 757 name = self.drivername.replace(\"+\", \".\") 
--> 758 cls = registry.load(name) 759 # check for legacy dialects that 760 # would return a module with 'dialect' as the 761 # actual class 762 if ( 763 hasattr(cls, \"dialect\") 764 and isinstance(cls.dialect, type) 765 and issubclass(cls.dialect, Dialect) 766 ): File ~/.local/lib/python3.10/site-packages/sqlalchemy/util/langhelpers.py:372, in PluginLoader.load(self, name) 370 if impl.name == name: 371 self.impls[name] = impl.load --> 372 return impl.load() 374 raise exc.NoSuchModuleError( 375 \"Can't load plugin: %s:%s\" % (self.group, name) 376 ) File /usr/local/lib/python3.10/importlib/metadata/__init__.py:171, in EntryPoint.load(self) 166 \"\"\"Load the entry point from its definition. If only a module 167 is indicated by the value, return that module. Otherwise, 168 return the named object. 169 \"\"\" 170 match = self.pattern.match(self.value) --> 171 module = import_module(match.group('module')) 172 attrs = filter(None, (match.group('attr') or '').split('.')) 173 return functools.reduce(getattr, attrs, module) File /usr/local/lib/python3.10/importlib/__init__.py:126, in import_module(name, package) 124 break 125 level += 1 --> 126 return _bootstrap._gcd_import(name[level:], package, level) File <frozen importlib._bootstrap>:1050, in _gcd_import(name, package, level) File <frozen importlib._bootstrap>:1027, in _find_and_load(name, import_) File <frozen importlib._bootstrap>:1006, in _find_and_load_unlocked(name, import_) File <frozen importlib._bootstrap>:688, in _load_unlocked(spec) File <frozen importlib._bootstrap_external>:883, in exec_module(self, module) File <frozen importlib._bootstrap>:241, in _call_with_frames_removed(f, *args, **kwds) File ~/.local/lib/python3.10/site-packages/snowflake/sqlalchemy/__init__.py:30 10 import importlib.metadata as importlib_metadata 12 from sqlalchemy.types import ( 13 BIGINT, 14 BINARY, (...) 27 VARCHAR, 28 ) ---> 30 from . 
import base, snowdialect 31 from .custom_commands import ( 32 AWSBucket, 33 AzureContainer, (...) 42 PARQUETFormatter, 43 ) 44 from .custom_types import ( 45 ARRAY, 46 BYTEINT, (...) 61 VARIANT, 62 ) File ~/.local/lib/python3.10/site-packages/snowflake/sqlalchemy/base.py:13 11 from sqlalchemy.sql import compiler, expression 12 from sqlalchemy.sql.elements import quoted_name ---> 13 from sqlalchemy.util.compat import string_types 15 from .custom_commands import AWSBucket, AzureContainer, ExternalStage 16 from .util import _set_connection_interpolate_empty_sequences ImportError: cannot import name 'string_types' from 'sqlalchemy.util.compat' (/home/airflow/.local/lib/python3.10/site-packages/sqlalchemy/util/compat.py)" } ``` ### Description When trying to connect to a DB with the SQLDatabase module, I get the error as shown. ### System Info sqlalchemy-2.0.27 langchain-community 0.0.20 langchain 0.1.5
ImportError: cannot import name 'string_types' from 'sqlalchemy.util.compat'
https://api.github.com/repos/langchain-ai/langchain/issues/17616/comments
6
2024-02-16T04:17:58Z
2024-02-27T20:18:02Z
https://github.com/langchain-ai/langchain/issues/17616
2,137,842,219
17,616
[ "langchain-ai", "langchain" ]
### Checked other resources - [X] I added a very descriptive title to this issue. - [X] I searched the LangChain documentation with the integrated search. - [X] I used the GitHub search to find a similar question and didn't find it. - [X] I am sure that this is a bug in LangChain rather than my code. ### Example Code The issues are described in the description. The following code will work to reproduce (1) as long as an index called `azure-search` doesn't already exist. To reproduce (2), the import for `VectorSearch` should be added back in [azuresearch.py](https://github.com/langchain-ai/langchain/pull/15659/files#diff-b691fd57bb6a6d89396c11c8d198f361be2f53e19ef4059904232cd3b5698b77L92) and then the code should be run again. ```python import os from langchain_openai import AzureOpenAIEmbeddings from langchain_community.vectorstores.azuresearch import AzureSearch embeddings = AzureOpenAIEmbeddings( deployment=os.environ["AZURE_EMBEDDINGS_DEPLOYMENT"], chunk_size=1 ) vector_store: AzureSearch = AzureSearch( azure_search_endpoint=os.environ["AZURE_SEARCH_ENDPOINT"], azure_search_key=os.environ["AZURE_SEARCH_KEY"], index_name="azure-search", embedding_function=embeddings.embed_query, ) ``` ### Error Message and Stack Trace (if applicable) _No response_ ### Description In this PR: https://github.com/langchain-ai/langchain/pull/15659, the AzureSearch vectorstore was update to work the latest stable azure-search-documents. In the process, this introduced a few regressions. 1. In the code [here](https://github.com/langchain-ai/langchain/pull/15659/files#diff-b691fd57bb6a6d89396c11c8d198f361be2f53e19ef4059904232cd3b5698b77L92), the import for `VectorSearch` was removed. 
If a search index needs to be created, we run into the following error: ```txt Traceback (most recent call last): File "/home/krpratic/langchain/repro_bug.py", line 14, in <module> vector_store: AzureSearch = AzureSearch( ^^^^^^^^^^^^ File "/home/krpratic/langchain/libs/community/langchain_community/vectorstores/azuresearch.py", line 270, in __init__ self.client = _get_search_client( ^^^^^^^^^^^^^^^^^^^ File "/home/krpratic/langchain/libs/community/langchain_community/vectorstores/azuresearch.py", line 145, in _get_search_client vector_search = VectorSearch( ^^^^^^^^^^^^ NameError: name 'VectorSearch' is not defined. Did you mean: 'vector_search'? ``` 2. If I edit in the code in (1) to add the import for `VectorSearch` back and re-run the code, I get the following error: ```text (InvalidRequestParameter) The request is invalid. Details: definition : The vector field 'content_vector' must have the property 'vectorSearchConfiguration' set. ``` This is due to a change from the beta --> stable version of azure-search-documents where `vector_search_configuration` was renamed to `vector_search_profile_name`: [changelog](https://github.com/Azure/azure-sdk-for-python/blob/main/sdk/search/azure-search-documents/CHANGELOG.md?plain=1#L96) + [code](https://github.com/Azure/azure-sdk-for-python/blob/main/sdk/search/azure-search-documents/azure/search/documents/indexes/models/_index.py#L212). 
To fix, the code should be updated to `vector_search_profile_name="myHnswProfile"` to point to the name of the vector search profile that specifies the algorithm to use when searching the vector field: ```python SearchField( name=FIELDS_CONTENT_VECTOR, type=SearchFieldDataType.Collection(SearchFieldDataType.Single), searchable=True, vector_search_dimensions=len(self.embed_query("Text")), vector_search_profile_name="myHnswProfile", ), ``` ### System Info langchain-cli==0.0.21 langchain-core==0.1.23 langchain-openai==0.0.6 azure-search-documents==11.4.0 linux; (Linux-5.15.133.1-microsoft-standard-WSL2-x86_64-with-glibc2.35) Python v3.11
regressions with AzureSearch vectorstore update to v11.4.0
https://api.github.com/repos/langchain-ai/langchain/issues/17598/comments
1
2024-02-15T22:24:24Z
2024-02-16T17:11:47Z
https://github.com/langchain-ai/langchain/issues/17598
2,137,548,937
17,598
[ "langchain-ai", "langchain" ]
### Checked other resources - [X] I added a very descriptive title to this issue. - [X] I searched the LangChain documentation with the integrated search. - [X] I used the GitHub search to find a similar question and didn't find it. - [X] I am sure that this is a bug in LangChain rather than my code. ### Example Code using `langchain_openai.ChatOpenAI` works without any problems when using [Ollama OpenAI Compatible API](https://registry.ollama.ai/blog/openai-compatibility) ``` python from langchain_openai import ChatOpenAI, OpenAI llm = ChatOpenAI( temperature=0, model_name="phi", openai_api_base="http://localhost:11434/v1", openai_api_key="Not needed for local server", ) print(llm.invoke("Hello, how are you?").content) ``` result: ``` I'm doing well, thank you for asking. How can I assist you today? ``` However, using same code for `langchain_openai.OpenAI` results in an error:' ``` python from langchain_openai import ChatOpenAI, OpenAI llm = OpenAI( temperature=0, model_name="phi", openai_api_base="http://localhost:11434/v1", openai_api_key="Not needed for local server", ) print(llm.invoke("Hello, how are you?").content) ``` results in ``` NotFoundError: 404 page not found ``` I checked that there's no problem with `Ollama` itself or the localhost, I repeated the same expirement many times and it always worked for `ChatOpenAI` and didnt' work for `OpenAI` ### Error Message and Stack Trace (if applicable) ``` { "name": "NotFoundError", "message": "404 page not found", "stack": "--------------------------------------------------------------------------- NotFoundError Traceback (most recent call last) Cell In[3], line 8 1 from langchain_openai import ChatOpenAI, OpenAI 2 llm = OpenAI( 3 temperature=0, 4 model_name=\"phi\", 5 openai_api_base=\"http://localhost:11434/v1\", 6 openai_api_key=\"Not needed for local server\", 7 ) ----> 8 print(llm.invoke(\"Hello, how are you?\").content) File 
~/miniconda3/envs/main/lib/python3.11/site-packages/langchain_core/language_models/llms.py:273, in BaseLLM.invoke(self, input, config, stop, **kwargs) 263 def invoke( 264 self, 265 input: LanguageModelInput, (...) 269 **kwargs: Any, 270 ) -> str: 271 config = ensure_config(config) 272 return ( --> 273 self.generate_prompt( 274 [self._convert_input(input)], 275 stop=stop, 276 callbacks=config.get(\"callbacks\"), 277 tags=config.get(\"tags\"), 278 metadata=config.get(\"metadata\"), 279 run_name=config.get(\"run_name\"), 280 **kwargs, 281 ) 282 .generations[0][0] 283 .text 284 ) File ~/miniconda3/envs/main/lib/python3.11/site-packages/langchain_core/language_models/llms.py:568, in BaseLLM.generate_prompt(self, prompts, stop, callbacks, **kwargs) 560 def generate_prompt( 561 self, 562 prompts: List[PromptValue], (...) 565 **kwargs: Any, 566 ) -> LLMResult: 567 prompt_strings = [p.to_string() for p in prompts] --> 568 return self.generate(prompt_strings, stop=stop, callbacks=callbacks, **kwargs) File ~/miniconda3/envs/main/lib/python3.11/site-packages/langchain_core/language_models/llms.py:741, in BaseLLM.generate(self, prompts, stop, callbacks, tags, metadata, run_name, **kwargs) 725 raise ValueError( 726 \"Asked to cache, but no cache found at `langchain.cache`.\" 727 ) 728 run_managers = [ 729 callback_manager.on_llm_start( 730 dumpd(self), (...) 
739 ) 740 ] --> 741 output = self._generate_helper( 742 prompts, stop, run_managers, bool(new_arg_supported), **kwargs 743 ) 744 return output 745 if len(missing_prompts) > 0: File ~/miniconda3/envs/main/lib/python3.11/site-packages/langchain_core/language_models/llms.py:605, in BaseLLM._generate_helper(self, prompts, stop, run_managers, new_arg_supported, **kwargs) 603 for run_manager in run_managers: 604 run_manager.on_llm_error(e, response=LLMResult(generations=[])) --> 605 raise e 606 flattened_outputs = output.flatten() 607 for manager, flattened_output in zip(run_managers, flattened_outputs): File ~/miniconda3/envs/main/lib/python3.11/site-packages/langchain_core/language_models/llms.py:592, in BaseLLM._generate_helper(self, prompts, stop, run_managers, new_arg_supported, **kwargs) 582 def _generate_helper( 583 self, 584 prompts: List[str], (...) 588 **kwargs: Any, 589 ) -> LLMResult: 590 try: 591 output = ( --> 592 self._generate( 593 prompts, 594 stop=stop, 595 # TODO: support multiple run managers 596 run_manager=run_managers[0] if run_managers else None, 597 **kwargs, 598 ) 599 if new_arg_supported 600 else self._generate(prompts, stop=stop) 601 ) 602 except BaseException as e: 603 for run_manager in run_managers: File ~/miniconda3/envs/main/lib/python3.11/site-packages/langchain_openai/llms/base.py:340, in BaseOpenAI._generate(self, prompts, stop, run_manager, **kwargs) 328 choices.append( 329 { 330 \"text\": generation.text, (...) 337 } 338 ) 339 else: --> 340 response = self.client.create(prompt=_prompts, **params) 341 if not isinstance(response, dict): 342 # V1 client returns the response in an PyDantic object instead of 343 # dict. For the transition period, we deep convert it to dict. 
344 response = response.dict() File ~/miniconda3/envs/main/lib/python3.11/site-packages/openai/_utils/_utils.py:275, in required_args.<locals>.inner.<locals>.wrapper(*args, **kwargs) 273 msg = f\"Missing required argument: {quote(missing[0])}\" 274 raise TypeError(msg) --> 275 return func(*args, **kwargs) File ~/miniconda3/envs/main/lib/python3.11/site-packages/openai/resources/completions.py:506, in Completions.create(self, model, prompt, best_of, echo, frequency_penalty, logit_bias, logprobs, max_tokens, n, presence_penalty, seed, stop, stream, suffix, temperature, top_p, user, extra_headers, extra_query, extra_body, timeout) 478 @required_args([\"model\", \"prompt\"], [\"model\", \"prompt\", \"stream\"]) 479 def create( 480 self, (...) 504 timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, 505 ) -> Completion | Stream[Completion]: --> 506 return self._post( 507 \"/completions\", 508 body=maybe_transform( 509 { 510 \"model\": model, 511 \"prompt\": prompt, 512 \"best_of\": best_of, 513 \"echo\": echo, 514 \"frequency_penalty\": frequency_penalty, 515 \"logit_bias\": logit_bias, 516 \"logprobs\": logprobs, 517 \"max_tokens\": max_tokens, 518 \"n\": n, 519 \"presence_penalty\": presence_penalty, 520 \"seed\": seed, 521 \"stop\": stop, 522 \"stream\": stream, 523 \"suffix\": suffix, 524 \"temperature\": temperature, 525 \"top_p\": top_p, 526 \"user\": user, 527 }, 528 completion_create_params.CompletionCreateParams, 529 ), 530 options=make_request_options( 531 extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout 532 ), 533 cast_to=Completion, 534 stream=stream or False, 535 stream_cls=Stream[Completion], 536 ) File ~/miniconda3/envs/main/lib/python3.11/site-packages/openai/_base_client.py:1200, in SyncAPIClient.post(self, path, cast_to, body, options, files, stream, stream_cls) 1186 def post( 1187 self, 1188 path: str, (...) 
1195 stream_cls: type[_StreamT] | None = None, 1196 ) -> ResponseT | _StreamT: 1197 opts = FinalRequestOptions.construct( 1198 method=\"post\", url=path, json_data=body, files=to_httpx_files(files), **options 1199 ) -> 1200 return cast(ResponseT, self.request(cast_to, opts, stream=stream, stream_cls=stream_cls)) File ~/miniconda3/envs/main/lib/python3.11/site-packages/openai/_base_client.py:889, in SyncAPIClient.request(self, cast_to, options, remaining_retries, stream, stream_cls) 880 def request( 881 self, 882 cast_to: Type[ResponseT], (...) 887 stream_cls: type[_StreamT] | None = None, 888 ) -> ResponseT | _StreamT: --> 889 return self._request( 890 cast_to=cast_to, 891 options=options, 892 stream=stream, 893 stream_cls=stream_cls, 894 remaining_retries=remaining_retries, 895 ) File ~/miniconda3/envs/main/lib/python3.11/site-packages/openai/_base_client.py:980, in SyncAPIClient._request(self, cast_to, options, remaining_retries, stream, stream_cls) 977 err.response.read() 979 log.debug(\"Re-raising status error\") --> 980 raise self._make_status_error_from_response(err.response) from None 982 return self._process_response( 983 cast_to=cast_to, 984 options=options, (...) 987 stream_cls=stream_cls, 988 ) NotFoundError: 404 page not found" } ``` ### Description using `langchain_openai.ChatOpenAI` works without any problems when using [Ollama OpenAI Compatible API](https://registry.ollama.ai/blog/openai-compatibility) However, using same code for `langchain_openai.OpenAI` results in an error. 
I checked that there's no problem with `Ollama` itself or the localhost, I repeated the same expirement many times and it always worked for `ChatOpenAI` and didnt' work for `OpenAI` ### System Info ``` System Information ------------------ > OS: Linux > OS Version: #1 SMP Thu Jan 11 04:09:03 UTC 2024 > Python Version: 3.11.5 (main, Sep 11 2023, 13:54:46) [GCC 11.2.0] Package Information ------------------- > langchain_core: 0.1.22 > langchain: 0.1.7 > langchain_community: 0.0.20 > langsmith: 0.0.87 > langchain_openai: 0.0.6 > langchainplus_sdk: 0.0.20 ```
ChatOpenAI and OpenAI give different behaviors when using local openai_api_base
https://api.github.com/repos/langchain-ai/langchain/issues/17596/comments
1
2024-02-15T21:58:34Z
2024-05-08T22:51:59Z
https://github.com/langchain-ai/langchain/issues/17596
2,137,519,690
17,596
[ "langchain-ai", "langchain" ]
### Checklist - [X] I added a very descriptive title to this issue. - [X] I included a link to the documentation page I am referring to (if applicable). ### Issue with current documentation: - ### Idea or request for content: below is the code ``` child_splitter = RecursiveCharacterTextSplitter(chunk_size=500, chunk_overlap=50) parent_splitter = RecursiveCharacterTextSplitter(chunk_size=1200, chunk_overlap=300) vectorstore = Chroma( collection_name="full_documents", embedding_function=embeddings) store = InMemoryStore() retriever = ParentDocumentRetriever( vectorstore=vectorstore, docstore=store, child_splitter=child_splitter, parent_splitter=parent_splitter ) retriever.add_documents(documents, ids=None) docss = retriever.get_relevant_documents("data related to cricket") ``` `print(docss)` it is only returning 1 output. How to retrieve topk documents?
how to get topk relevant documents using retriever with ParentDocumentRetriever?
https://api.github.com/repos/langchain-ai/langchain/issues/17589/comments
1
2024-02-15T19:14:05Z
2024-02-15T22:10:37Z
https://github.com/langchain-ai/langchain/issues/17589
2,137,248,520
17,589
[ "langchain-ai", "langchain" ]
### Checked other resources - [X] I added a very descriptive title to this issue. - [X] I searched the LangChain documentation with the integrated search. - [X] I used the GitHub search to find a similar question and didn't find it. - [X] I am sure that this is a bug in LangChain rather than my code. ### Example Code When you instantiate an AzureSearch instance with a search type of `semantic_hybrid` and the metadata is not on the list of fields for the `fields` of the vector store or in the index, the `semantic_hybrid_search_with_score_and_rerank` method fails. ### Error Message and Stack Trace (if applicable) ``` Traceback (most recent call last): File "/Users/carlos/PycharmProjects/project/backend/api/app/controllers/session/send_message.py", line 76, in send_message response = agent({"input": prompt}) ^^^^^^^^^^^^^^^^^^^^^^^^ File "/Users/carlos/PycharmProjects/project/backend/api/services/agent.py", line 166, in __call__ return self._executor.invoke(*args, **kwargs) ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ File "/Users/carlos/PycharmProjects/project/.venv/lib/python3.11/site-packages/langchain/chains/base.py", line 162, in invoke raise e File "/Users/carlos/PycharmProjects/project/.venv/lib/python3.11/site-packages/langchain/chains/base.py", line 156, in invoke self._call(inputs, run_manager=run_manager) File "/Users/carlos/PycharmProjects/project/.venv/lib/python3.11/site-packages/langchain/agents/agent.py", line 1391, in _call next_step_output = self._take_next_step( ^^^^^^^^^^^^^^^^^^^^^ File "/Users/carlos/PycharmProjects/project/.venv/lib/python3.11/site-packages/langchain/agents/agent.py", line 1097, in _take_next_step [ File "/Users/carlos/PycharmProjects/project/.venv/lib/python3.11/site-packages/langchain/agents/agent.py", line 1097, in <listcomp> [ File "/Users/carlos/PycharmProjects/project/.venv/lib/python3.11/site-packages/langchain/agents/agent.py", line 1182, in _iter_next_step yield self._perform_agent_action( ^^^^^^^^^^^^^^^^^^^^^^^^^^^ File 
"/Users/carlos/PycharmProjects/project/.venv/lib/python3.11/site-packages/langchain/agents/agent.py", line 1204, in _perform_agent_action observation = tool.run( ^^^^^^^^^ File "/Users/carlos/PycharmProjects/project/.venv/lib/python3.11/site-packages/langchain_core/tools.py", line 401, in run raise e File "/Users/carlos/PycharmProjects/project/.venv/lib/python3.11/site-packages/langchain_core/tools.py", line 358, in run self._run(*tool_args, run_manager=run_manager, **tool_kwargs) File "/Users/carlos/PycharmProjects/project/backend/api/services/agent.py", line 254, in _run docs = self.vectorstore.semantic_hybrid_search(query=query, k=4) ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ File "/Users/carlos/PycharmProjects/project/.venv/lib/python3.11/site-packages/langchain_community/vectorstores/azuresearch.py", line 520, in semantic_hybrid_search docs_and_scores = self.semantic_hybrid_search_with_score_and_rerank( ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ File "/Users/carlos/PycharmProjects/project/.venv/lib/python3.11/site-packages/langchain_community/vectorstores/azuresearch.py", line 582, in semantic_hybrid_search_with_score_and_rerank docs = [ ^ File "/Users/carlos/PycharmProjects/project/.venv/lib/python3.11/site-packages/langchain_community/vectorstores/azuresearch.py", line 606, in <listcomp> json.loads(result["metadata"]).get("key"), ~~~~~~^^^^^^^^^^^^ KeyError: 'metadata' ``` ### Description A fix was introduced in PR #15642 and the bug was re-introduced in PR #15659. For this to work the method should look like this: ```python def semantic_hybrid_search_with_score_and_rerank( self, query: str, k: int = 4, filters: Optional[str] = None ) -> List[Tuple[Document, float, float]]: """Return docs most similar to query with an hybrid query. Args: query: Text to look up documents similar to. k: Number of Documents to return. Defaults to 4. 
Returns: List of Documents most similar to the query and score for each """ from azure.search.documents.models import VectorizedQuery results = self.client.search( search_text=query, vector_queries=[ VectorizedQuery( vector=np.array(self.embed_query(query), dtype=np.float32).tolist(), k_nearest_neighbors=k, fields=FIELDS_CONTENT_VECTOR, ) ], filter=filters, query_type="semantic", semantic_configuration_name=self.semantic_configuration_name, query_caption="extractive", query_answer="extractive", top=k, ) # Get Semantic Answers semantic_answers = results.get_answers() or [] semantic_answers_dict: Dict = {} for semantic_answer in semantic_answers: semantic_answers_dict[semantic_answer.key] = { "text": semantic_answer.text, "highlights": semantic_answer.highlights, } # Convert results to Document objects docs = [ ( Document( page_content=result.pop(FIELDS_CONTENT), metadata={ **( json.loads(result[FIELDS_METADATA]) if FIELDS_METADATA in result else { k: v for k, v in result.items() if k != FIELDS_CONTENT_VECTOR } ), **{ "captions": { "text": result.get("@search.captions", [{}])[0].text, "highlights": result.get("@search.captions", [{}])[ 0 ].highlights, } if result.get("@search.captions") else {}, "answers": semantic_answers_dict.get( json.loads(result[FIELDS_METADATA]).get("key") if FIELDS_METADATA in result else "", "", ), }, }, ), float(result["@search.score"]), float(result["@search.reranker_score"]), ) for result in results ] return docs ``` ### System Info Python 3.11 Langchain 0.1.7
AzureSearch.semantic_hybrid_search_with_score_and_rerank not working when METADATA_FIELD not in index
https://api.github.com/repos/langchain-ai/langchain/issues/17587/comments
2
2024-02-15T19:02:19Z
2024-07-01T16:05:09Z
https://github.com/langchain-ai/langchain/issues/17587
2,137,228,317
17,587
[ "langchain-ai", "langchain" ]
### Checked other resources - [X] I added a very descriptive title to this issue. - [X] I searched the LangChain documentation with the integrated search. - [X] I used the GitHub search to find a similar question and didn't find it. - [X] I am sure that this is a bug in LangChain rather than my code. ### Example Code na ### Error Message and Stack Trace (if applicable) _No response_ ### Description The latest version of langchain_coomunity 0.0.20 is giving an error "NO MODEUL PWD" while using TEXTLOADER() ### System Info Running on windows and using TEXTLOADER
Latest langchain_community is giving an error "No MODULE PWD" while using TEXTLOADER
https://api.github.com/repos/langchain-ai/langchain/issues/17585/comments
2
2024-02-15T17:37:11Z
2024-06-01T00:07:41Z
https://github.com/langchain-ai/langchain/issues/17585
2,137,088,483
17,585
[ "langchain-ai", "langchain" ]
### Checked other resources - [X] I added a very descriptive title to this issue. - [X] I searched the LangChain documentation with the integrated search. - [X] I used the GitHub search to find a similar question and didn't find it. - [X] I am sure that this is a bug in LangChain rather than my code. ### Example Code ``` from langchain_community.llms.huggingface_pipeline import HuggingFacePipeline from transformers import AutoModelForCausalLM, AutoTokenizer, pipeline model_id = "gpt2" tokenizer = AutoTokenizer.from_pretrained(model_id) model = AutoModelForCausalLM.from_pretrained(model_id) pipe = pipeline("text-generation", model=model, tokenizer=tokenizer, max_new_tokens=10) hf = HuggingFacePipeline(pipeline=pipe) from langchain.prompts import PromptTemplate template = """Question: {question} Answer: Let's think step by step.""" prompt = PromptTemplate.from_template(template) chain = prompt | hf question = "What is electroencephalography?" print(chain.invoke({"question": question})) ``` ### Error Message and Stack Trace (if applicable) ``` --------------------------------------------------------------------------- TypeError Traceback (most recent call last) [/mnt/efs/fs2/LangChain-in-Kubernetes/chat-backend](https://vscode-remote+ssh-002dremote-002b7b22686f73744e616d65223a224157532d4543322d41492d32227d.vscode-resource.vscode-cdn.net/mnt/efs/fs2/LangChain-in-Kubernetes/chat-backend) Optimized/test.ipynb Cell 2 line 1 [12](vscode-notebook-cell://ssh-remote%2B7b22686f73744e616d65223a224157532d4543322d41492d32227d/mnt/efs/fs2/LangChain-in-Kubernetes/chat-backend%20Optimized/test.ipynb#W2sdnNjb2RlLXJlbW90ZQ%3D%3D?line=11) template = """Question: {question} [13](vscode-notebook-cell://ssh-remote%2B7b22686f73744e616d65223a224157532d4543322d41492d32227d/mnt/efs/fs2/LangChain-in-Kubernetes/chat-backend%20Optimized/test.ipynb#W2sdnNjb2RlLXJlbW90ZQ%3D%3D?line=12) 
[14](vscode-notebook-cell://ssh-remote%2B7b22686f73744e616d65223a224157532d4543322d41492d32227d/mnt/efs/fs2/LangChain-in-Kubernetes/chat-backend%20Optimized/test.ipynb#W2sdnNjb2RlLXJlbW90ZQ%3D%3D?line=13) Answer: Let's think step by step.""" [15](vscode-notebook-cell://ssh-remote%2B7b22686f73744e616d65223a224157532d4543322d41492d32227d/mnt/efs/fs2/LangChain-in-Kubernetes/chat-backend%20Optimized/test.ipynb#W2sdnNjb2RlLXJlbW90ZQ%3D%3D?line=14) prompt = PromptTemplate.from_template(template) ---> [17](vscode-notebook-cell://ssh-remote%2B7b22686f73744e616d65223a224157532d4543322d41492d32227d/mnt/efs/fs2/LangChain-in-Kubernetes/chat-backend%20Optimized/test.ipynb#W2sdnNjb2RlLXJlbW90ZQ%3D%3D?line=16) chain = prompt | hf [19](vscode-notebook-cell://ssh-remote%2B7b22686f73744e616d65223a224157532d4543322d41492d32227d/mnt/efs/fs2/LangChain-in-Kubernetes/chat-backend%20Optimized/test.ipynb#W2sdnNjb2RlLXJlbW90ZQ%3D%3D?line=18) question = "What is electroencephalography?" [21](vscode-notebook-cell://ssh-remote%2B7b22686f73744e616d65223a224157532d4543322d41492d32227d/mnt/efs/fs2/LangChain-in-Kubernetes/chat-backend%20Optimized/test.ipynb#W2sdnNjb2RlLXJlbW90ZQ%3D%3D?line=20) print(chain.invoke({"question": question})) File [~/.local/lib/python3.9/site-packages/langchain_core/runnables/base.py:436](https://vscode-remote+ssh-002dremote-002b7b22686f73744e616d65223a224157532d4543322d41492d32227d.vscode-resource.vscode-cdn.net/mnt/efs/fs2/LangChain-in-Kubernetes/chat-backend%20Optimized/~/.local/lib/python3.9/site-packages/langchain_core/runnables/base.py:436), in Runnable.__ror__(self, other) [426](https://vscode-remote+ssh-002dremote-002b7b22686f73744e616d65223a224157532d4543322d41492d32227d.vscode-resource.vscode-cdn.net/mnt/efs/fs2/LangChain-in-Kubernetes/chat-backend%20Optimized/~/.local/lib/python3.9/site-packages/langchain_core/runnables/base.py:426) def __ror__( 
[427](https://vscode-remote+ssh-002dremote-002b7b22686f73744e616d65223a224157532d4543322d41492d32227d.vscode-resource.vscode-cdn.net/mnt/efs/fs2/LangChain-in-Kubernetes/chat-backend%20Optimized/~/.local/lib/python3.9/site-packages/langchain_core/runnables/base.py:427) self, [428](https://vscode-remote+ssh-002dremote-002b7b22686f73744e616d65223a224157532d4543322d41492d32227d.vscode-resource.vscode-cdn.net/mnt/efs/fs2/LangChain-in-Kubernetes/chat-backend%20Optimized/~/.local/lib/python3.9/site-packages/langchain_core/runnables/base.py:428) other: Union[ (...) [433](https://vscode-remote+ssh-002dremote-002b7b22686f73744e616d65223a224157532d4543322d41492d32227d.vscode-resource.vscode-cdn.net/mnt/efs/fs2/LangChain-in-Kubernetes/chat-backend%20Optimized/~/.local/lib/python3.9/site-packages/langchain_core/runnables/base.py:433) ], [434](https://vscode-remote+ssh-002dremote-002b7b22686f73744e616d65223a224157532d4543322d41492d32227d.vscode-resource.vscode-cdn.net/mnt/efs/fs2/LangChain-in-Kubernetes/chat-backend%20Optimized/~/.local/lib/python3.9/site-packages/langchain_core/runnables/base.py:434) ) -> RunnableSerializable[Other, Output]: [435](https://vscode-remote+ssh-002dremote-002b7b22686f73744e616d65223a224157532d4543322d41492d32227d.vscode-resource.vscode-cdn.net/mnt/efs/fs2/LangChain-in-Kubernetes/chat-backend%20Optimized/~/.local/lib/python3.9/site-packages/langchain_core/runnables/base.py:435) """Compose this runnable with another object to create a RunnableSequence.""" --> [436](https://vscode-remote+ssh-002dremote-002b7b22686f73744e616d65223a224157532d4543322d41492d32227d.vscode-resource.vscode-cdn.net/mnt/efs/fs2/LangChain-in-Kubernetes/chat-backend%20Optimized/~/.local/lib/python3.9/site-packages/langchain_core/runnables/base.py:436) return RunnableSequence(coerce_to_runnable(other), self) File 
[~/.local/lib/python3.9/site-packages/langchain_core/runnables/base.py:4370](https://vscode-remote+ssh-002dremote-002b7b22686f73744e616d65223a224157532d4543322d41492d32227d.vscode-resource.vscode-cdn.net/mnt/efs/fs2/LangChain-in-Kubernetes/chat-backend%20Optimized/~/.local/lib/python3.9/site-packages/langchain_core/runnables/base.py:4370), in coerce_to_runnable(thing) [4368](https://vscode-remote+ssh-002dremote-002b7b22686f73744e616d65223a224157532d4543322d41492d32227d.vscode-resource.vscode-cdn.net/mnt/efs/fs2/LangChain-in-Kubernetes/chat-backend%20Optimized/~/.local/lib/python3.9/site-packages/langchain_core/runnables/base.py:4368) return cast(Runnable[Input, Output], RunnableParallel(thing)) [4369](https://vscode-remote+ssh-002dremote-002b7b22686f73744e616d65223a224157532d4543322d41492d32227d.vscode-resource.vscode-cdn.net/mnt/efs/fs2/LangChain-in-Kubernetes/chat-backend%20Optimized/~/.local/lib/python3.9/site-packages/langchain_core/runnables/base.py:4369) else: -> [4370](https://vscode-remote+ssh-002dremote-002b7b22686f73744e616d65223a224157532d4543322d41492d32227d.vscode-resource.vscode-cdn.net/mnt/efs/fs2/LangChain-in-Kubernetes/chat-backend%20Optimized/~/.local/lib/python3.9/site-packages/langchain_core/runnables/base.py:4370) raise TypeError( [4371](https://vscode-remote+ssh-002dremote-002b7b22686f73744e616d65223a224157532d4543322d41492d32227d.vscode-resource.vscode-cdn.net/mnt/efs/fs2/LangChain-in-Kubernetes/chat-backend%20Optimized/~/.local/lib/python3.9/site-packages/langchain_core/runnables/base.py:4371) f"Expected a Runnable, callable or dict." 
[4372](https://vscode-remote+ssh-002dremote-002b7b22686f73744e616d65223a224157532d4543322d41492d32227d.vscode-resource.vscode-cdn.net/mnt/efs/fs2/LangChain-in-Kubernetes/chat-backend%20Optimized/~/.local/lib/python3.9/site-packages/langchain_core/runnables/base.py:4372) f"Instead got an unsupported type: {type(thing)}" [4373](https://vscode-remote+ssh-002dremote-002b7b22686f73744e616d65223a224157532d4543322d41492d32227d.vscode-resource.vscode-cdn.net/mnt/efs/fs2/LangChain-in-Kubernetes/chat-backend%20Optimized/~/.local/lib/python3.9/site-packages/langchain_core/runnables/base.py:4373) ) TypeError: Expected a Runnable, callable or dict.Instead got an unsupported type: <class 'langchain.prompts.prompt.PromptTemplate'> ``` ### Description I've tried to replicate the example on the site https://python.langchain.com/docs/integrations/llms/huggingface_pipelines I've installed all dependencies, and I've got that error ### System Info langchain==0.1.7 langchain-community==0.0.20 langchain-core==0.1.23 langchainplus-sdk==0.0.20 pythin 3.9.16 Platform RHEL & CenOS
Hugging Face Local Pipelines EXAMPLE NOT WORKING ON CENTOS
https://api.github.com/repos/langchain-ai/langchain/issues/17584/comments
4
2024-02-15T17:28:25Z
2024-06-01T00:07:26Z
https://github.com/langchain-ai/langchain/issues/17584
2,137,074,153
17,584
[ "langchain-ai", "langchain" ]
### Checklist - [X] I added a very descriptive title to this issue. - [X] I included a link to the documentation page I am referring to (if applicable). ### Issue with current documentation: Unable to retrieve topk relevant documents using ParentDocumentRetriever ### Idea or request for content: below is the code ``` # Define your prompt template prompt_template = """Use the following pieces of information to answer the user's question. If you don't know the answer, just say that you don't know, don't try to make up an answer. Context: {context} Question: {question} Only return the helpful answer below and nothing else. If no context, then no answer. Helpful Answer:""" child_splitter = RecursiveCharacterTextSplitter(chunk_size=500, chunk_overlap=50) parent_splitter = RecursiveCharacterTextSplitter(chunk_size=1200, chunk_overlap=300) vectorstore = Chroma( collection_name="full_documents", embedding_function=embeddings) store = InMemoryStore() retriever = ParentDocumentRetriever( vectorstore=vectorstore, docstore=store, child_splitter=child_splitter, parent_splitter=parent_splitter ) retriever.add_documents(documents, ids=None) retriever.invoke("data related to cricket") ``` in the above code, if you see i wrote a code where ParentDocumentRetriever will invoke. And it is returning only 1 document. How to get topk documents using ParentDocumenRetriever?
how to get topk retrievals for ParentDocumentRetriever using Chroma?
https://api.github.com/repos/langchain-ai/langchain/issues/17583/comments
3
2024-02-15T17:07:34Z
2024-02-15T22:10:36Z
https://github.com/langchain-ai/langchain/issues/17583
2,137,036,523
17,583
[ "langchain-ai", "langchain" ]
### Checked other resources - [X] I added a very descriptive title to this issue. - [X] I searched the LangChain documentation with the integrated search. - [X] I used the GitHub search to find a similar question and didn't find it. - [X] I am sure that this is a bug in LangChain rather than my code. ### Example Code I created three different sets of examples and, for each of them, the related example selector ``` self.example_selector = SemanticSimilarityExampleSelector.from_examples( examples, # one of the three subset HuggingFaceEmbeddings(), Chroma, k=5, ) ``` ### Error Message and Stack Trace (if applicable) _No response_ ### Description I am trying to manage three different SemanticSimilarityExampleSelectors. Ideally, each one of them has different examples to choose from. I do NOT want to mix them. However, using the code i provided, Chroma mixes them and few-shot breaks ### System Info langchain 0.1.6
Managing multiple vector stores separately
https://api.github.com/repos/langchain-ai/langchain/issues/17580/comments
2
2024-02-15T15:53:14Z
2024-06-24T16:07:26Z
https://github.com/langchain-ai/langchain/issues/17580
2,136,882,477
17,580
[ "langchain-ai", "langchain" ]
### Checklist - [X] I added a very descriptive title to this issue. - [X] I included a link to the documentation page I am referring to (if applicable). ### Issue with current documentation: - ### Idea or request for content: below is the code, which will add the data into chroma and define the retriever %%time # query = 'how many are injured and dead in christchurch Mosque?' # Define your prompt template prompt_template = """Use the following pieces of information to answer the user's question. If you don't know the answer, just say that you don't know, don't try to make up an answer. Context: {context} Question: {question} Only return the helpful answer below and nothing else. If no context, then no answer. Helpful Answer:""" ``` child_splitter = RecursiveCharacterTextSplitter(chunk_size=500, chunk_overlap=50) parent_splitter = RecursiveCharacterTextSplitter(chunk_size=1200, chunk_overlap=300) vectorstore = Chroma( collection_name="full_documents", embedding_function=embeddings) store = InMemoryStore() retriever = ParentDocumentRetriever( vectorstore=vectorstore, docstore=store, child_splitter=child_splitter, parent_splitter=parent_splitter ) retriever.add_documents(documents, ids=None) ``` how to use FAISS db instead of Chroma and use retriever to get relevant documents like below? ``` vectorstore = FAISS.from_documents(documents, openai) retriever = vectorstore.as_retriever(search_kwargs={"k": 10}) docs = retriever.get_relevant_documents("data related to cricket?") ``` can you help me with the code?
how to use FAISS for ParentDocumentRetriever for retrieving the documents?
https://api.github.com/repos/langchain-ai/langchain/issues/17576/comments
2
2024-02-15T14:26:13Z
2024-02-15T14:58:47Z
https://github.com/langchain-ai/langchain/issues/17576
2,136,666,207
17,576
[ "langchain-ai", "langchain" ]
### Checked other resources - [X] I added a very descriptive title to this issue. - [X] I searched the LangChain documentation with the integrated search. - [X] I used the GitHub search to find a similar question and didn't find it. - [X] I am sure that this is a bug in LangChain rather than my code. ### Example Code agent = OpenAIAssistantRunnable.create_assistant( name="Weather asssistant", instructions="Very helpful assistnat on any topic but when it comes to weather uses get weather on planet function", tools=tools, model="gpt-4-1106-preview", as_agent=True, ) from langchain.agents import AgentExecutor agent_executor = AgentExecutor(agent=agent, tools=tools) r = agent_executor.invoke({"content": "What's the weather in Mars", "additional_instructions": "Always address the user as king Daniiar"}) # additional_instructions are not getting parsed print(r) r = agent_executor.invoke({"content": "how do you address me?", "thread_id": 'thread_IMlTopZtP9NarAuxXO3Jf9RU'}) print(r) ### Error Message and Stack Trace (if applicable) def _create_run(self, input: dict) -> Any: params = { k: v for k, v in input.items() if k in ("instructions", "model", "tools", "run_metadata") } return self.client.beta.threads.runs.create( input["thread_id"], assistant_id=self.assistant_id, **params, ) this code under langchain.agents.openai_assistant.base.OpenAIAssistnatRunnable does not support additional_instructions parameter. This parameter exists: https://platform.openai.com/docs/api-reference/runs/createRun#runs-createrun-additional_instructions ### Description I need additional_instructions parameter, available in OpenAI SDK but not available in Langchain. 
### System Info System Information ------------------ > OS: Darwin > OS Version: Darwin Kernel Version 21.3.0: Wed Jan 5 21:37:58 PST 2022; root:xnu-8019.80.24~20/RELEASE_ARM64_T6000 > Python Version: 3.11.6 (main, Nov 2 2023, 04:39:40) [Clang 14.0.0 (clang-1400.0.29.202)] Package Information ------------------- > langchain_core: 0.1.23 > langchain: 0.1.7 > langchain_community: 0.0.20 > langsmith: 0.0.87 > langchain_openai: 0.0.6 Packages not installed (Not Necessarily a Problem) -------------------------------------------------- The following packages were not found: > langgraph > langserve
OPenAI Assistnat does not support additional_instructions parameter on create_run method.
https://api.github.com/repos/langchain-ai/langchain/issues/17574/comments
1
2024-02-15T13:32:44Z
2024-06-01T00:08:34Z
https://github.com/langchain-ai/langchain/issues/17574
2,136,539,036
17,574
[ "langchain-ai", "langchain" ]
### Checked other resources - [X] I added a very descriptive title to this issue. - [X] I searched the LangChain documentation with the integrated search. - [X] I used the GitHub search to find a similar question and didn't find it. - [X] I am sure that this is a bug in LangChain rather than my code. ### Example Code ``` from langchain_community.vectorstores import Pinecone from langchain_openai import ChatOpenAI from langchain_openai import OpenAIEmbeddings from langchain.chains import ConversationalRetrievalChain pc = pinecone.Pinecone(api_key=secret['PINECONE_API_KEY'], environment=secret['PINECONE_ENV']) index = pc.Index(secret['PINECONE_INDEX_NAME']) embeddings = OpenAIEmbeddings(secret['OPENAI_API_KEY']) model = ChatOpenAI(model_name='gpt-4-turbo-preview') docsearch = Pinecone.from_existing_index(index_name=secret['PINECONE_INDEX_NAME'], embedding=embeddings, namespace=secret['PINECONE_NAMESPACE']), search_kwargs = {'k': 25, 'namespace': secret['PINECONE_NAMESPACE']} retriever = docsearch.as_retriever(namespace=secret['PINECONE_NAMESPACE'], search_kwargs=search_kwargs) qa = ConversationalRetrievalChain.from_llm(llm=model,retriever=retriever) qa({'question': prompt, 'chat_history': chat}) ``` ### Error Message and Stack Trace (if applicable) ``` 2024-02-15 12:26:09 Traceback (most recent call last): 2024-02-15 12:26:09 File "/app/app.py", line 66, in respond 2024-02-15 12:26:09 top_k_result = pinecone_qa({'question': prompt, 'chat_history': chat}) 2024-02-15 12:26:09 File "/usr/local/lib/python3.10/site-packages/ddtrace/contrib/trace_utils.py", line 343, in wrapper 2024-02-15 12:26:09 return func(mod, pin, wrapped, instance, args, kwargs) 2024-02-15 12:26:09 File "/usr/local/lib/python3.10/site-packages/ddtrace/contrib/langchain/patch.py", line 521, in traced_chain_call 2024-02-15 12:26:09 final_outputs = func(*args, **kwargs) 2024-02-15 12:26:09 File "/usr/local/lib/python3.10/site-packages/langchain_core/_api/deprecation.py", line 145, in 
warning_emitting_wrapper 2024-02-15 12:26:09 return wrapped(*args, **kwargs) 2024-02-15 12:26:09 File "/usr/local/lib/python3.10/site-packages/langchain/chains/base.py", line 363, in __call__ 2024-02-15 12:26:09 return self.invoke( 2024-02-15 12:26:09 File "/usr/local/lib/python3.10/site-packages/langchain/chains/base.py", line 162, in invoke 2024-02-15 12:26:09 raise e 2024-02-15 12:26:09 File "/usr/local/lib/python3.10/site-packages/langchain/chains/base.py", line 156, in invoke 2024-02-15 12:26:09 self._call(inputs, run_manager=run_manager) 2024-02-15 12:26:09 File "/usr/local/lib/python3.10/site-packages/langchain/chains/conversational_retrieval/base.py", line 155, in _call 2024-02-15 12:26:09 docs = self._get_docs(new_question, inputs, run_manager=_run_manager) 2024-02-15 12:26:09 File "/usr/local/lib/python3.10/site-packages/langchain/chains/conversational_retrieval/base.py", line 317, in _get_docs 2024-02-15 12:26:09 docs = self.retriever.get_relevant_documents( 2024-02-15 12:26:09 File "/usr/local/lib/python3.10/site-packages/langchain_core/retrievers.py", line 224, in get_relevant_documents 2024-02-15 12:26:09 raise e 2024-02-15 12:26:09 File "/usr/local/lib/python3.10/site-packages/langchain_core/retrievers.py", line 217, in get_relevant_documents 2024-02-15 12:26:09 result = self._get_relevant_documents( 2024-02-15 12:26:09 File "/usr/local/lib/python3.10/site-packages/langchain_core/vectorstores.py", line 654, in _get_relevant_documents 2024-02-15 12:26:09 docs = self.vectorstore.similarity_search(query, **self.search_kwargs) 2024-02-15 12:26:09 File "/usr/local/lib/python3.10/site-packages/ddtrace/contrib/trace_utils.py", line 343, in wrapper 2024-02-15 12:26:09 return func(mod, pin, wrapped, instance, args, kwargs) 2024-02-15 12:26:09 File "/usr/local/lib/python3.10/site-packages/ddtrace/contrib/langchain/patch.py", line 624, in traced_similarity_search 2024-02-15 12:26:09 instance._index.configuration.server_variables.get("environment", ""), 
2024-02-15 12:26:09 AttributeError: 'Index' object has no attribute 'configuration' ``` ### Description When I run this locally, I get a proper response. When I run it through docker, I get 'Index' object has no attribute 'configuration'. Any thoughts? This has been wrecking my brain all morning ### System Info ``` langchain==0.1.7 langchain-community==0.0.20 pinecone-client==3.0.2 ```
'Index' object has no attribute 'configuration' when running my LangChain application in a docker image
https://api.github.com/repos/langchain-ai/langchain/issues/17571/comments
4
2024-02-15T11:33:36Z
2024-06-01T00:20:28Z
https://github.com/langchain-ai/langchain/issues/17571
2,136,301,630
17,571
[ "langchain-ai", "langchain" ]
### Checked other resources - [X] I added a very descriptive title to this issue. - [X] I searched the LangChain documentation with the integrated search. - [X] I used the GitHub search to find a similar question and didn't find it. - [X] I am sure that this is a bug in LangChain rather than my code. ### Example Code ArgillaCallbackHandler is working for Hugging Face Argilla Space for public not for private as the class don't have variable extra_headers. So the class ArgillaCallbackHandler class __init__ method not taking extra_headers as an variable. ``` class ArgillaCallbackHandler(BaseCallbackHandler): REPO_URL: str = "https://github.com/argilla-io/argilla" ISSUES_URL: str = f"{REPO_URL}/issues" BLOG_URL: str = "https://docs.argilla.io/en/latest/tutorials_and_integrations/integrations/use_argilla_callback_in_langchain.html" # noqa: E501 DEFAULT_API_URL: str = "http://localhost:6900" def __init__( self, dataset_name: str, workspace_name: Optional[str] = None, api_url: Optional[str] = None, api_key: Optional[str] = None, ) -> None: ### Error Message and Stack Trace (if applicable) ![image](https://github.com/langchain-ai/langchain/assets/28830164/b6f22cc9-c8a8-4001-aa44-59f69c3c3135) `╭─────────────────────────────── Traceback (most recent call last) ────────────────────────────────╮ │ /anaconda3/envs/llm/lib/python3.10/site-packages/langchain_community/callbacks/argilla_callback. 
│ │ py:141 in __init__ │ │ │ │ 138 │ │ │ │ 139 │ │ # Connect to Argilla with the provided credentials, if applicable │ │ 140 │ │ try: │ │ ❱ 141 │ │ │ rg.init(api_key=api_key, api_url=api_url) │ │ 142 │ │ except Exception as e: │ │ 143 │ │ │ raise ConnectionError( │ │ 144 │ │ │ │ f"Could not connect to Argilla with exception: '{e}'.\n" │ │ │ │ /anaconda3/envs/llm/lib/python3.10/site-packages/argilla/client/singleton.py:95 in init │ │ │ │ 92 │ │ >>> headers = {"X-Client-id":"id","X-Secret":"secret"} │ │ 93 │ │ >>> rg.init(api_url="http://localhost:9090", api_key="4AkeAPIk3Y", extra_headers │ │ 94 │ """ │ │ ❱ 95 │ ArgillaSingleton.init( │ │ 96 │ │ api_url=api_url, │ │ 97 │ │ api_key=api_key, │ │ 98 │ │ workspace=workspace, │ │ │ │ /anaconda3/envs/llm/lib/python3.10/site-packages/argilla/client/singleton.py:47 in init │ │ │ │ 44 │ ) -> Argilla: │ │ 45 │ │ cls._INSTANCE = None │ │ 46 │ │ │ │ ❱ 47 │ │ cls._INSTANCE = Argilla( │ │ 48 │ │ │ api_url=api_url, │ │ 49 │ │ │ api_key=api_key, │ │ 50 │ │ │ timeout=timeout, │ │ │ │ /anaconda3/envs/llm/lib/python3.10/site-packages/argilla/client/client.py:164 in __init__ │ │ │ │ 161 │ │ │ httpx_extra_kwargs=httpx_extra_kwargs, │ │ 162 │ │ ) │ │ 163 │ │ │ │ ❱ 164 │ │ self._user = users_api.whoami(client=self.http_client) # .parsed │ │ 165 │ │ │ │ 166 │ │ if not workspace and self._user.username == DEFAULT_USERNAME and DEFAULT_USERNAM │ │ 167 │ │ │ warnings.warn( │ │ │ │ /anaconda3/envs/llm/lib/python3.10/site-packages/argilla/client/sdk/users/api.py:39 in whoami │ │ │ │ 36 │ """ │ │ 37 │ url = "/api/me" │ │ 38 │ │ │ ❱ 39 │ response = client.get(url) │ │ 40 │ return UserModel(**response) │ │ 41 │ │ 42 │ │ │ │ /anaconda3/envs/llm/lib/python3.10/site-packages/argilla/client/sdk/client.py:124 in inner │ │ │ │ 121 │ │ @functools.wraps(func) │ │ 122 │ │ def inner(self, *args, **kwargs): │ │ 123 │ │ │ try: │ │ ❱ 124 │ │ │ │ result = func(self, *args, **kwargs) │ │ 125 │ │ │ │ return result │ │ 126 │ │ │ except httpx.ConnectError as err: │ 
│ 127 │ │ │ │ err_str = f"Your Api endpoint at {self.base_url} is not available or not │ │ │ │ /anaconda3/envs/llm/lib/python3.10/site-packages/argilla/client/sdk/client.py:141 in get │ │ │ │ 138 │ │ │ *args, │ │ 139 │ │ │ **kwargs, │ │ 140 │ │ ) │ │ ❱ 141 │ │ return build_raw_response(response).parsed │ │ 142 │ │ │ 143 │ @with_httpx_error_handler │ │ 144 │ def patch(self, path: str, *args, **kwargs): │ │ │ │ /anaconda3/envs/llm/lib/python3.10/site-packages/argilla/client/sdk/_helpers.py:25 in │ │ build_raw_response │ │ │ │ 22 │ │ 23 │ │ 24 def build_raw_response(response: httpx.Response) -> Response[Union[Dict[str, Any], Error │ │ ❱ 25 │ return build_typed_response(response) │ │ 26 │ │ 27 │ │ 28 ResponseType = TypeVar("ResponseType") │ │ │ │ /anaconda3/envs/llm/lib/python3.10/site-packages/argilla/client/sdk/_helpers.py:34 in │ │ build_typed_response │ │ │ │ 31 def build_typed_response( │ │ 32 │ response: httpx.Response, response_type_class: Optional[Type[ResponseType]] = None │ │ 33 ) -> Response[Union[ResponseType, ErrorMessage, HTTPValidationError]]: │ │ ❱ 34 │ parsed_response = check_response(response, expected_response=response_type_class) │ │ 35 │ if response_type_class: │ │ 36 │ │ parsed_response = response_type_class(**parsed_response) │ │ 37 │ return Response( │ │ │ │ /anaconda3/envs/llm/lib/python3.10/site-packages/argilla/client/sdk/_helpers.py:63 in │ │ check_response │ │ │ │ 60 │ │ │ message=message, │ │ 61 │ │ │ response=response.content, │ │ 62 │ │ ) │ │ ❱ 63 │ handle_response_error(response, **kwargs) │ │ 64 │ │ │ │ /anaconda3/envs/llm/lib/python3.10/site-packages/argilla/client/sdk/commons/errors_handler.py:63 │ │ in handle_response_error │ │ │ │ 60 │ │ error_type = GenericApiError │ │ 61 │ else: │ │ 62 │ │ raise HttpResponseError(response=response) │ │ ❱ 63 │ raise error_type(**error_args) │ │ 64 │ ╰──────────────────────────────────────────────────────────────────────────────────────────────────╯ NotFoundApiError: Argilla server returned an 
error with http status: 404. Error details: {'response': '<!DOCTYPE html>\n<html class="">\n<head>\n <meta charset="utf-8"/>\n <meta\n name="viewport"\n content="width=device-width, initial-scale=1.0, user-scalable=no"\n />\n <meta\n name="description"\n content="We’re on a journey to advance and democratize artificial intelligence through open source and open science."\n />\n <meta property="fb:app_id" content="1321688464574422"/>\n <meta name="twitter:card" content="summary_large_image"/>\n <meta name="twitter:site" content="@huggingface"/>\n <meta\n property="og:title"\n content="Hugging Face – The AI community building the future."\n />\n <meta property="og:type" content="website"/>\n\n <title>Hugging Face – The AI community building the future.</title>\n <style>\n body {\n margin: 0;\n }\n\n main {\n background-color: white;\n min-height: 100vh;\n text-align: center;\n font-family: Source Sans Pro, ui-sans-serif, system-ui, -apple-system,\n BlinkMacSystemFont, Segoe UI, Roboto, Helvetica Neue, Arial, Noto Sans,\n sans-serif, Apple Color Emoji, Segoe UI Emoji, Segoe UI Symbol,\n Noto Color Emoji;\n }\n\n img {\n width: 6rem;\n height: 6rem;\n margin: 7rem 1rem 1rem;\n }\n\n h1 {\n font-size: 3.75rem;\n line-height: 1;\n color: rgba(31, 41, 55, 1);\n font-weight: 700;\n box-sizing: border-box;\n margin: 0 auto;\n }\n\n p {\n color: rgba(107, 114, 128, 1);\n font-size: 1.125rem;\n line-height: 1.75rem;\n max-width: 28rem;\n box-sizing: border-box;\n margin: 0 auto;\n }\n\n .dark main {\n background-color: rgb(11, 15, 25);\n }\n\n .dark h1 {\n color: rgb(209, 213, 219);\n }\n\n .dark p {\n color: rgb(156, 163, 175);\n }\n </style>\n <script>\n // On page load or when changing themes, best to add inline in `head` to avoid FOUC\n const key = "_tb_global_settings";\n let theme = window.matchMedia("(prefers-color-scheme: dark)").matches\n ? 
"dark"\n : "light";\n try {\n const storageTheme = JSON.parse(window.localStorage.getItem(key)).theme;\n if (storageTheme) {\n theme = storageTheme === "dark" ? "dark" : "light";\n }\n } catch (e) {\n }\n if (theme === "dark") {\n document.documentElement.classList.add("dark");\n } else {\n document.documentElement.classList.remove("dark");\n }\n </script>\n</head>\n\n<body>\n<main>\n <img\n src="https://huggingface.co/front/assets/huggingface_logo.svg"\n alt=""\n />\n <div>\n <h1>404</h1>\n <p>Sorry, we can’t find the page you are looking for.</p>\n </div>\n</main>\n</body>\n</html>\n'} The above exception was the direct cause of the following exception: ╭─────────────────────────────── Traceback (most recent call last) ────────────────────────────────╮ │ in <module>:2 │ │ │ │ 1 from langchain.callbacks import ArgillaCallbackHandler │ │ ❱ 2 argilla_callback = ArgillaCallbackHandler( │ │ 3 │ dataset_name="slack-search", │ │ 4 │ api_url="https://aniruddhac-argillaslack.hf.space", │ │ 5 │ api_key="owner.apikey", │ │ │ │ /anaconda3/envs/llm/lib/python3.10/site-packages/langchain_community/callbacks/argilla_callback. │ │ py:143 in __init__ │ │ │ │ 140 │ │ try: │ │ 141 │ │ │ rg.init(api_key=api_key, api_url=api_url) │ │ 142 │ │ except Exception as e: │ │ ❱ 143 │ │ │ raise ConnectionError( │ │ 144 │ │ │ │ f"Could not connect to Argilla with exception: '{e}'.\n" │ │ 145 │ │ │ │ "Please check your `api_key` and `api_url`, and make sure that " │ │ 146 │ │ │ │ "the Argilla server is up and running. If the problem persists " │ ╰──────────────────────────────────────────────────────────────────────────────────────────────────╯ ConnectionError: Could not connect to Argilla with exception: 'Argilla server returned an error with http status: 404. 
Error details: {'response': '<!DOCTYPE html>\n<html class="">\n<head>\n <meta charset="utf-8"/>\n <meta\n name="viewport"\n content="width=device-width, initial-scale=1.0, user-scalable=no"\n />\n <meta\n name="description"\n content="We’re on a journey to advance and democratize artificial intelligence through open source and open science."\n />\n <meta property="fb:app_id" content="1321688464574422"/>\n <meta name="twitter:card" content="summary_large_image"/>\n <meta name="twitter:site" content="@huggingface"/>\n <meta\n property="og:title"\n content="Hugging Face – The AI community building the future."\n />\n <meta property="og:type" content="website"/>\n\n <title>Hugging Face – The AI community building the future.</title>\n <style>\n body {\n margin: 0;\n }\n\n main {\n background-color: white;\n min-height: 100vh;\n text-align: center;\n font-family: Source Sans Pro, ui-sans-serif, system-ui, -apple-system,\n BlinkMacSystemFont, Segoe UI, Roboto, Helvetica Neue, Arial, Noto Sans,\n sans-serif, Apple Color Emoji, Segoe UI Emoji, Segoe UI Symbol,\n Noto Color Emoji;\n }\n\n img {\n width: 6rem;\n height: 6rem;\n margin: 7rem 1rem 1rem;\n }\n\n h1 {\n font-size: 3.75rem;\n line-height: 1;\n color: rgba(31, 41, 55, 1);\n font-weight: 700;\n box-sizing: border-box;\n margin: 0 auto;\n }\n\n p {\n color: rgba(107, 114, 128, 1);\n font-size: 1.125rem;\n line-height: 1.75rem;\n max-width: 28rem;\n box-sizing: border-box;\n margin: 0 auto;\n }\n\n .dark main {\n background-color: rgb(11, 15, 25);\n }\n\n .dark h1 {\n color: rgb(209, 213, 219);\n }\n\n .dark p {\n color: rgb(156, 163, 175);\n }\n </style>\n <script>\n // On page load or when changing themes, best to add inline in `head` to avoid FOUC\n const key = "_tb_global_settings";\n let theme = window.matchMedia("(prefers-color-scheme: dark)").matches\n ? 
"dark"\n : "light";\n try {\n const storageTheme = JSON.parse(window.localStorage.getItem(key)).theme;\n if (storageTheme) {\n theme = storageTheme === "dark" ? "dark" : "light";\n }\n } catch (e) {\n }\n if (theme === "dark") {\n document.documentElement.classList.add("dark");\n } else {\n document.documentElement.classList.remove("dark");\n }\n </script>\n</head>\n\n<body>\n<main>\n <img\n src="https://huggingface.co/front/assets/huggingface_logo.svg"\n alt=""\n />\n <div>\n <h1>404</h1>\n <p>Sorry, we can’t find the page you are looking for.</p>\n </div>\n</main>\n</body>\n</html>\n'}'. Please check your `api_key` and `api_url`, and make sure that the Argilla server is up and running. If the problem persists please report it to https://github.com/argilla-io/argilla/issues as an `integration` issue.` ![image](https://github.com/langchain-ai/langchain/assets/28830164/06a9c7ef-a687-47e9-ad20-a019fa577e00) ### Description ![image](https://github.com/langchain-ai/langchain/assets/28830164/87d43d8d-a3db-450c-a894-bf43c5bcee77) We should have below code integrated at https://github.com/langchain-ai/langchain/blob/master/libs/community/langchain_community/callbacks/argilla_callback.py ## Change 1: ![image](https://github.com/langchain-ai/langchain/assets/28830164/d896064b-96a6-4f23-97fd-753872881bb5) Below will be the code to be followed ad change fro the above snapshots: ` def __init__( self, dataset_name: str, workspace_name: Optional[str] = None, api_url: Optional[str] = None, api_key: Optional[str] = None, extra_headers: Optional[str] = None, ) -> None:` ## Change 2: ![image](https://github.com/langchain-ai/langchain/assets/28830164/8fb6ee78-fa0d-41ee-a06e-d23efda034dc) ` # Connect to Argilla with the provided credentials, if applicable try: rg.init(api_key=api_key, api_url=api_url,extra_headers=extra_headers) except Exception as e: raise ConnectionError( f"Could not connect to Argilla with exception: '{e}'.\n" "Please check your `api_key` and `api_url`, and make 
sure that " "the Argilla server is up and running. If the problem persists " f"please report it to {self.ISSUES_URL} as an `integration` issue." ) from e` ### System Info ![image](https://github.com/langchain-ai/langchain/assets/28830164/12715382-1988-45e5-b1ec-b8b9ca2cc7c0) platform : Mac ![image](https://github.com/langchain-ai/langchain/assets/28830164/5fe99ae1-a38b-4841-9ab3-cafd7823daa3) ![image](https://github.com/langchain-ai/langchain/assets/28830164/c540ad08-c37a-49ee-8fe2-da8abc75bf68)
ArgillaCallbackHandler is working for Hugging Face Argilla Space for public not private as the class don't have variable extra_headers.
https://api.github.com/repos/langchain-ai/langchain/issues/17562/comments
2
2024-02-15T06:02:22Z
2024-06-08T16:09:55Z
https://github.com/langchain-ai/langchain/issues/17562
2,135,715,147
17,562
[ "langchain-ai", "langchain" ]
### Checked other resources - [X] I added a very descriptive title to this issue. - [X] I searched the LangChain documentation with the integrated search. - [X] I used the GitHub search to find a similar question and didn't find it. - [X] I am sure that this is a bug in LangChain rather than my code. ### Example Code ``` async def partial_test(self): prompt_template = "Tell me details about {name}. The output should be in {lang}" llm = LLMSelector(self.model).get_language_model() prompt = ChatPromptTemplate.from_messages([ HumanMessagePromptTemplate.from_template(prompt_template, partial_variables={"lang": "Spanish"}), ]) chain = prompt | llm | StrOutputParser() result = await chain.ainvoke({'name': 'Open AI'}) print(result) ``` ### Error Message and Stack Trace (if applicable) Errror: `Input to ChatPromptTemplate is missing variables {'lang'}. Expected: ['lang', 'name'] Received: ['name']\"` ### Description But this works with `PromptTemplate` also work with `ChatPromptTemplate` in previous version ### System Info langchain v0.1.7
partial_variables not working with ChatPromptTemplate (langchain v0.1.9)
https://api.github.com/repos/langchain-ai/langchain/issues/17560/comments
10
2024-02-15T05:20:53Z
2024-06-03T23:22:43Z
https://github.com/langchain-ai/langchain/issues/17560
2,135,668,512
17,560
[ "langchain-ai", "langchain" ]
### Checklist - [X] I added a very descriptive title to this issue. - [X] I included a link to the documentation page I am referring to (if applicable). ### Issue with current documentation: - ### Idea or request for content: below is the code that documentation has for get_prompts() ``` def get_prompts( self, config: Optional[RunnableConfig] = None ) -> List[BasePromptTemplate]: from langchain_core.prompts.base import BasePromptTemplate prompts = [] for _, node in self.get_graph(config=config).nodes.items(): if isinstance(node.data, BasePromptTemplate): prompts.append(node.data) return prompts ``` I just want to know whether the above code will return the template which is running/used inside SelfQueryRetriver else will it return prompts which we've defined? When i tried to run it, it has returned empty list
unable to retrieve prompt template which is already pre-defined for SelfQueryRetriever.
https://api.github.com/repos/langchain-ai/langchain/issues/17558/comments
1
2024-02-15T05:10:10Z
2024-02-16T17:12:55Z
https://github.com/langchain-ai/langchain/issues/17558
2,135,657,667
17,558
[ "langchain-ai", "langchain" ]
### Privileged issue - [X] I am a LangChain maintainer, or was asked directly by a LangChain maintainer to create an issue here. ### Issue Content Add a good README to each of the `libs/partners` packages. See [langchain-google-vertexai](https://github.com/langchain-ai/langchain/blob/master/libs/partners/google-vertexai/README.md) for a good reference.
docs: Make sure all partner packages have README
https://api.github.com/repos/langchain-ai/langchain/issues/17545/comments
6
2024-02-14T19:50:58Z
2024-04-11T21:09:21Z
https://github.com/langchain-ai/langchain/issues/17545
2,135,088,749
17,545
[ "langchain-ai", "langchain" ]
### Checked other resources - [X] I added a very descriptive title to this issue. - [X] I searched the LangChain documentation with the integrated search. - [X] I used the GitHub search to find a similar question and didn't find it. - [X] I am sure that this is a bug in LangChain rather than my code. ### Example Code ``` python class Chat: def __init__(self): self.memory = ConversationBufferMemory(return_messages=True) self.router = self.init_router() self.full_chain = {"router": self.router, "question": lambda x: x["question"]} | self.create_branch() #self.initialiser = self.init_router() def init_followup(self): """ Initialize the follow up chain. :return: Runnable """ return ( RunnablePassthrough.assign( history=RunnableLambda(self.memory.load_memory_variables) | itemgetter("history") ) | prompt | instant ) def init_router(self): """ Initialize the router. :return: Runnable """ chain = (master | instant) return RunnableWithMessageHistory( chain, self.get_by_session_id, input_messages_key="question", history_messages_key="history", ) def get_by_session_id(self, session_id: str) -> BaseChatMessageHistory: if session_id not in store: store[session_id] = self.memory return store[session_id] def create_branch(self): branch = RunnableBranch( (lambda x: "optimization" in x["router"].lower(), optimization), (lambda x: "sustainability" in x["router"].lower(), sustainability), (lambda x: "webpage" in x["router"].lower(), web), (lambda x: "followup" in x["router"].lower(), self.init_followup()), other) return branch def invoke_model(self, query) -> None: result = self.full_chain.invoke({"question":query},config={"configurable": {"session_id": "foo"}}) self._update_memory(query, result) return result def _update_memory(self, inputs, outputs): inputs = {"query" : inputs} if hasattr(outputs, 'content'): self.memory.save_context(inputs, {"output": outputs.content}) else: self.memory.save_context(inputs, {"output": outputs}) def clear_memory(self): self.memory.clear() ### Error 
Message and Stack Trace (if applicable) ```python Traceback (most recent call last): File "/src/models/model.py", line 199, in <module> person = Chat() ^^^^^^ File "/src/models/model.py", line 136, in __init__ self.router = self.init_router() ^^^^^^^^^^^^^^^^^^ File "/src/models/model.py", line 157, in init_router chain = (master | instant) ~~~~~~~^~~~~~~~~ File "/lib/python3.11/site-packages/langchain_core/runnables/base.py", line 436, in __ror__ return RunnableSequence(coerce_to_runnable(other), self) ^^^^^^^^^^^^^^^^^^^^^^^^^ File "lib/python3.11/site-packages/langchain_core/runnables/base.py", line 4370, in coerce_to_runnable raise TypeError( TypeError: Expected a Runnable, callable or dict.Instead got an unsupported type: <class 'str'> ### Description I am attempting to encapsulate all of my logic within a custom class. I am creating a router chain using LCEL. Upon instantiating the class, I get an error stating that a runnable, callable, or dict was expected but it received a string. **Initialisation of the Chat Object** When attempting to initialise the underlying router chain (Line 136): ```Python self.router = self.init_router() ``` _The init_router() function based between lines 151-162 is based on the LangChain web documentation_: ```Python def init_router(self): """ Initialize the router. :return: Runnable """ chain = (master | instant) return RunnableWithMessageHistory( chain, self.get_by_session_id, input_messages_key="question", history_messages_key="history", ) ``` I seem to experience a strange behaviour whereby, if I was to replace "self.router" with the same chain defined outside of my custom class, the solution works. However, when I try to instantiate the router chain within a method of my class, I get the error. I suspect this has something to do with LCEL but I would like some clarification. 
### System Info ``` langchain==0.1.7 langchain-community==0.0.20 langchain-core==0.1.22 langchain-experimental==0.0.50 langchainhub==0.1.14 platform: Mac OS 14.1.1 python version: 3.11.4
Custom class complains of chain being of type Str
https://api.github.com/repos/langchain-ai/langchain/issues/17541/comments
0
2024-02-14T18:57:04Z
2024-05-22T16:09:22Z
https://github.com/langchain-ai/langchain/issues/17541
2,134,989,526
17,541
[ "langchain-ai", "langchain" ]
### Checked other resources - [X] I added a very descriptive title to this issue. - [X] I searched the LangChain documentation with the integrated search. - [X] I used the GitHub search to find a similar question and didn't find it. - [X] I am sure that this is a bug in LangChain rather than my code. ### Example Code Try the following code: ```python from langchain.text_splitter import Language from langchain_community.document_loaders.parsers import LanguageParser parser=LanguageParser(language=Language.PYTHON) ``` ### Error Message and Stack Trace (if applicable) ```sh Traceback (most recent call last): File "/development/test.py", line 4, in <module> parser=LanguageParser(language=Language.PYTHON) File "/development/env/lib/python3.9/site-packages/langchain_community/document_loaders/parsers/language/language_parser.py", line 162, in __init__ raise Exception(f"No parser available for {language}") Exception: No parser available for python ``` ### Description Lines 30 and 33 of `langauge_parser.py` are `from langchain.langchain.text_splitter import Language` which causes an import error. I think these lines should just be `from langchain.text_splitter import Language` ### System Info langchain==0.1.7 langchain-community==0.0.20 langchain-core==0.1.23 langchain-openai==0.0.6 langchainhub==0.1.14 Platform: Mac Python 3.9.6
Import error in language_parser.py during "from langchain.langchain.text_splitter import Language"
https://api.github.com/repos/langchain-ai/langchain/issues/17536/comments
3
2024-02-14T17:14:19Z
2024-02-15T09:42:58Z
https://github.com/langchain-ai/langchain/issues/17536
2,134,810,211
17,536
[ "langchain-ai", "langchain" ]
### Checked other resources - [X] I added a very descriptive title to this issue. - [X] I searched the LangChain documentation with the integrated search. - [X] I used the GitHub search to find a similar question and didn't find it. - [X] I am sure that this is a bug in LangChain rather than my code. ### Example Code Ollama works properly ```python from langchain_community.llms import Ollama llm = Ollama(model="llama2:latest") llm.invoke("Tell me a joke") ``` ChatOllama is not working: ```python from langchain.callbacks.manager import CallbackManager from langchain.callbacks.streaming_stdout import StreamingStdOutCallbackHandler from langchain.chat_models import ChatOllama from langchain.schema import HumanMessage chat_model = ChatOllama(model="llama2:latest",callback_manager=CallbackManager([StreamingStdOutCallbackHandler()])) messages = [HumanMessage(content="Tell me a joke")] chat_model_response = chat_model(messages) ``` ### Error Message and Stack Trace (if applicable) ```ssh --------------------------------------------------------------------------- OllamaEndpointNotFoundError Traceback (most recent call last) Cell In[6], line 8 6 chat_model = ChatOllama(model="llama2:latest",callback_manager=CallbackManager([StreamingStdOutCallbackHandler()])) 7 messages = [HumanMessage(content="Tell me a joke")] ----> 8 chat_model_response = chat_model(messages) File [~/.pyenv/versions/3.11.7/lib/python3.11/site-packages/langchain_core/_api/deprecation.py:145](http://localhost:8888/home/oiaagent/.pyenv/versions/3.11.7/lib/python3.11/site-packages/langchain_core/_api/deprecation.py#line=144), in deprecated.<locals>.deprecate.<locals>.warning_emitting_wrapper(*args, **kwargs) 143 warned = True 144 emit_warning() --> 145 return wrapped(*args, **kwargs) File 
[~/.pyenv/versions/3.11.7/lib/python3.11/site-packages/langchain_core/language_models/chat_models.py:691](http://localhost:8888/home/oiaagent/.pyenv/versions/3.11.7/lib/python3.11/site-packages/langchain_core/language_models/chat_models.py#line=690), in BaseChatModel.__call__(self, messages, stop, callbacks, **kwargs) 683 @deprecated("0.1.7", alternative="invoke", removal="0.2.0") 684 def __call__( 685 self, (...) 689 **kwargs: Any, 690 ) -> BaseMessage: --> 691 generation = self.generate( 692 [messages], stop=stop, callbacks=callbacks, **kwargs 693 ).generations[0][0] 694 if isinstance(generation, ChatGeneration): 695 return generation.message File [~/.pyenv/versions/3.11.7/lib/python3.11/site-packages/langchain_core/language_models/chat_models.py:408](http://localhost:8888/home/oiaagent/.pyenv/versions/3.11.7/lib/python3.11/site-packages/langchain_core/language_models/chat_models.py#line=407), in BaseChatModel.generate(self, messages, stop, callbacks, tags, metadata, run_name, **kwargs) 406 if run_managers: 407 run_managers[i].on_llm_error(e, response=LLMResult(generations=[])) --> 408 raise e 409 flattened_outputs = [ 410 LLMResult(generations=[res.generations], llm_output=res.llm_output) 411 for res in results 412 ] 413 llm_output = self._combine_llm_outputs([res.llm_output for res in results]) File [~/.pyenv/versions/3.11.7/lib/python3.11/site-packages/langchain_core/language_models/chat_models.py:398](http://localhost:8888/home/oiaagent/.pyenv/versions/3.11.7/lib/python3.11/site-packages/langchain_core/language_models/chat_models.py#line=397), in BaseChatModel.generate(self, messages, stop, callbacks, tags, metadata, run_name, **kwargs) 395 for i, m in enumerate(messages): 396 try: 397 results.append( --> 398 self._generate_with_cache( 399 m, 400 stop=stop, 401 run_manager=run_managers[i] if run_managers else None, 402 **kwargs, 403 ) 404 ) 405 except BaseException as e: 406 if run_managers: File 
[~/.pyenv/versions/3.11.7/lib/python3.11/site-packages/langchain_core/language_models/chat_models.py:577](), in BaseChatModel._generate_with_cache(self, messages, stop, run_manager, **kwargs) 573 raise ValueError( 574 "Asked to cache, but no cache found at `langchain.cache`." 575 ) 576 if new_arg_supported: --> 577 return self._generate( 578 messages, stop=stop, run_manager=run_manager, **kwargs 579 ) 580 else: 581 return self._generate(messages, stop=stop, **kwargs) File [~/.pyenv/versions/3.11.7/lib/python3.11/site-packages/langchain_community/chat_models/ollama.py:250](), in ChatOllama._generate(self, messages, stop, run_manager, **kwargs) 226 def _generate( 227 self, 228 messages: List[BaseMessage], (...) 231 **kwargs: Any, 232 ) -> ChatResult: 233 """Call out to Ollama's generate endpoint. 234 235 Args: (...) 247 ]) 248 """ --> 250 final_chunk = self._chat_stream_with_aggregation( 251 messages, 252 stop=stop, 253 run_manager=run_manager, 254 verbose=self.verbose, 255 **kwargs, 256 ) 257 chat_generation = ChatGeneration( 258 message=AIMessage(content=final_chunk.text), 259 generation_info=final_chunk.generation_info, 260 ) 261 return ChatResult(generations=[chat_generation]) File [~/.pyenv/versions/3.11.7/lib/python3.11/site-packages/langchain_community/chat_models/ollama.py:183](http://localhost:8888/home/oiaagent/.pyenv/versions/3.11.7/lib/python3.11/site-packages/langchain_community/chat_models/ollama.py#line=182), in ChatOllama._chat_stream_with_aggregation(self, messages, stop, run_manager, verbose, **kwargs) 174 def _chat_stream_with_aggregation( 175 self, 176 messages: List[BaseMessage], (...) 
180 **kwargs: Any, 181 ) -> ChatGenerationChunk: 182 final_chunk: Optional[ChatGenerationChunk] = None --> 183 for stream_resp in self._create_chat_stream(messages, stop, **kwargs): 184 if stream_resp: 185 chunk = _chat_stream_response_to_chat_generation_chunk(stream_resp) File [~/.pyenv/versions/3.11.7/lib/python3.11/site-packages/langchain_community/chat_models/ollama.py:156](), in ChatOllama._create_chat_stream(self, messages, stop, **kwargs) 147 def _create_chat_stream( 148 self, 149 messages: List[BaseMessage], 150 stop: Optional[List[str]] = None, 151 **kwargs: Any, 152 ) -> Iterator[str]: 153 payload = { 154 "messages": self._convert_messages_to_ollama_messages(messages), 155 } --> 156 yield from self._create_stream( 157 payload=payload, stop=stop, api_url=f"{self.base_url}/api/chat/", **kwargs 158 ) File [~/.pyenv/versions/3.11.7/lib/python3.11/site-packages/langchain_community/llms/ollama.py:233](), in _OllamaCommon._create_stream(self, api_url, payload, stop, **kwargs) 231 if response.status_code != 200: 232 if response.status_code == 404: --> 233 raise OllamaEndpointNotFoundError( 234 "Ollama call failed with status code 404. " 235 "Maybe your model is not found " 236 f"and you should pull the model with `ollama pull {self.model}`." 237 ) 238 else: 239 optional_detail = response.json().get("error") OllamaEndpointNotFoundError: Ollama call failed with status code 404. Maybe your model is not found and you should pull the model with `ollama pull llama2:latest`. ``` ### Description Hello, I am still having the same issue reported in [15147](https://github.com/langchain-ai/langchain/issues/15147) . I tried the same things BharathKumarAI did and even updated ollama, but it is still showing the same error. ### System Info angchain 0.0.322 langsmith 0.0.51 python 3.11.7 ubuntu 20.04.6 LTS ollama list
OllamaEndpointNotFoundError: Ollama call failed with status code 404. Maybe your model is not found and you should pull the model with `ollama pull llama2`.
https://api.github.com/repos/langchain-ai/langchain/issues/17533/comments
6
2024-02-14T15:55:49Z
2024-08-02T08:49:04Z
https://github.com/langchain-ai/langchain/issues/17533
2,134,667,479
17,533
[ "langchain-ai", "langchain" ]
### Checked other resources - [X] I added a very descriptive title to this issue. - [X] I searched the LangChain documentation with the integrated search. - [X] I used the GitHub search to find a similar question and didn't find it. - [X] I am sure that this is a bug in LangChain rather than my code. ### Example Code ```python from typing import Set, Literal from langchain_core.utils.function_calling import convert_to_openai_function class UserInfos(BaseModel): "general information about a user" gender: Literal["male", "female", "other"] preferences: Set[Literal["games", "books"]] ``` ### Error Message and Stack Trace (if applicable) _No response_ ### Description The resulting function is not well defined and missing some properties. ## Output ```json { "name": "UserInfos", "description": "general information about a user", "parameters": { "type": "object", "properties": { "gender": { "enum": [ "male", "female", "other" ], "type": "string" } }, "required": [ "gender", "preferences" ] } } ``` ## Excepted **NOTE**: This is produced by the deprecated `convert_pydantic_to_openai_function` function. 
```json { "name": "UserInfos", "description": "general information about a user", "parameters": { "properties": { "gender": { "enum": [ "male", "female", "other" ], "type": "string" }, "preferences": { "items": { "enum": [ "games", "books" ], "type": "string" }, "type": "array", "uniqueItems": true } }, "required":[ "gender", "preferences" ], "type":"object" } } ``` ### System Info System Information ------------------ > OS: Linux > OS Version: #40~22.04.1-Ubuntu SMP PREEMPT_DYNAMIC Thu Nov 16 10:53:04 UTC 2 > Python Version: 3.10.12 (main, Nov 20 2023, 15:14:05) [GCC 11.4.0] Package Information ------------------- > langchain_core: 0.1.23 > langchain: 0.1.7 > langchain_community: 0.0.20 > langsmith: 0.0.87 > langchain_openai: 0.0.6 Packages not installed (Not Necessarily a Problem) -------------------------------------------------- The following packages were not found: > langgraph > langserve
convert_to_openai_function drop some (nested?) properties
https://api.github.com/repos/langchain-ai/langchain/issues/17531/comments
5
2024-02-14T14:53:30Z
2024-05-22T16:09:17Z
https://github.com/langchain-ai/langchain/issues/17531
2,134,532,581
17,531
[ "langchain-ai", "langchain" ]
### Checked other resources - [X] I added a very descriptive title to this issue. - [X] I searched the LangChain documentation with the integrated search. - [X] I used the GitHub search to find a similar question and didn't find it. - [X] I am sure that this is a bug in LangChain rather than my code. ### Example Code ```python llm = BedrockChat( credentials_profile_name="default", model_id="anthropic.claude-instant-v1", streaming=True, model_kwargs={"temperature": 0.1}, ) prompt = ChatPromptTemplate.from_messages( [ ("system", system_prompt), MessagesPlaceholder(variable_name="history"), ("human", "{input}"), MessagesPlaceholder(variable_name="agent_scratchpad"), ] ) tools = load_tools(["google-search"], llm) image_generation_tool = StructuredTool.from_function( func=image_generation, name="image_generator", description="Use this tool to generate images for the user", return_direct=True, ) tools.append(image_generation_tool) agent = create_json_chat_agent(llm, tools, prompt) history = DynamoDBChatMessageHistory(table_name="LangchainSessionTable", session_id=session_id) memory = ConversationBufferMemory(chat_memory=history, memory_key="history", return_messages=True) agent_executor = AgentExecutor(agent=agent, tools=tools, verbose=True, handle_parsing_errors=True, memory=memory)``` ### Error Message and Stack Trace (if applicable) > Finished chain. 2 validation errors for AIMessage content str type expected (type=type_error.str) content value is not a valid list (type=type_error.list) ### Description I'm using: 1. Langchain 0.5.1 2. Amazon Bedrock / Anthropic Claude Instant 1.2 3. Amazon Dynomo DB for chat history 4. Conversation memory buffer 5. A tool to create an image from Bedrock Stability AI The agent generates the image but when it tries to add it to the conversation history; I get an error. 
### System Info System Information ------------------ > OS: Linux > OS Version: #15~22.04.1-Ubuntu SMP PREEMPT_DYNAMIC Fri Jan 12 18:54:30 UTC 2 > Python Version: 3.10.13 (main, Sep 11 2023, 13:44:35) [GCC 11.2.0] Package Information ------------------- > langchain_core: 0.1.21 > langchain: 0.1.5 > langchain_community: 0.0.19 > langsmith: 0.0.87 > langchain_openai: 0.0.5 > langserve: 0.0.41 Packages not installed (Not Necessarily a Problem) -------------------------------------------------- The following packages were not found: > langgraph
Error with multi-modal chat and agent memory
https://api.github.com/repos/langchain-ai/langchain/issues/17529/comments
3
2024-02-14T14:33:02Z
2024-06-01T00:07:30Z
https://github.com/langchain-ai/langchain/issues/17529
2,134,489,640
17,529
[ "langchain-ai", "langchain" ]
### Checked other resources - [X] I added a very descriptive title to this issue. - [X] I searched the LangChain documentation with the integrated search. - [X] I used the GitHub search to find a similar question and didn't find it. - [X] I am sure that this is a bug in LangChain rather than my code. ### Example Code ```python from langchain_community.document_loaders.athena import AthenaLoader database_name = "database" s3_output_path = "s3://bucket-no-prefix" query="""SELECT CAST(extract(hour FROM current_timestamp) AS INTEGER) AS current_hour, CAST(extract(minute FROM current_timestamp) AS INTEGER) AS current_minute, CAST(extract(second FROM current_timestamp) AS INTEGER) AS current_second; """ profile_name = "AdministratorAccess" loader = AthenaLoader( query=query, database=database_name, s3_output_uri=s3_output_path, profile_name=profile_name, ) documents = loader.load() print(documents) ``` ### Error Message and Stack Trace (if applicable) NoSuchKey: An error occurred (NoSuchKey) when calling the GetObject operation: The specified key does not exist ### Description Athena Loader errors when result s3 bucket uri has no prefix. The Loader instance call results in a "NoSuchKey: An error occurred (NoSuchKey) when calling the GetObject operation: The specified key does not exist." error. If s3_output_path contains a prefix like: ```python s3_output_path = "s3://bucket-with-prefix/prefix" ``` Execution works without an error. 
## Suggested solution Modify: ```python key = "/".join(tokens[1:]) + "/" + query_execution_id + ".csv" ``` to ```python key = "/".join(tokens[1:]) + ("/" if tokens[1:] else "") + query_execution_id + ".csv" ``` https://github.com/langchain-ai/langchain/blob/9e8a3fc4fff8e20ab5d1f113515ded14906eb6f3/libs/community/langchain_community/document_loaders/athena.py#L128 ### System Info System Information ------------------ > OS: Darwin > OS Version: Darwin Kernel Version 22.6.0: Fri Sep 15 13:41:30 PDT 2023; root:xnu-8796.141.3.700.8~1/RELEASE_ARM64_T8103 > Python Version: 3.9.9 (main, Jan 9 2023, 11:42:03) [Clang 14.0.0 (clang-1400.0.29.102)] Package Information ------------------- > langchain_core: 0.1.23 > langchain: 0.1.7 > langchain_community: 0.0.20 > langsmith: 0.0.87 > langchain_openai: 0.0.6 > langchainhub: 0.1.14 Packages not installed (Not Necessarily a Problem) -------------------------------------------------- The following packages were not found: > langgraph > langserve
Athena Loader errors when result s3 bucket uri has no prefix
https://api.github.com/repos/langchain-ai/langchain/issues/17525/comments
3
2024-02-14T12:45:19Z
2024-05-22T16:09:07Z
https://github.com/langchain-ai/langchain/issues/17525
2,134,276,738
17,525
[ "langchain-ai", "langchain" ]
### Checklist - [X] I added a very descriptive title to this issue. - [X] I included a link to the documentation page I am referring to (if applicable). ### Issue with current documentation: - ### Idea or request for content: below is the code ``` retriever = SelfQueryRetriever.from_llm( llm, vectorstore, document_content_description, metadata_field_info, use_original_query=False, enable_limit=True, verbose=True ) retriever.get_prompts() ``` and retriever.get_prompts() is returning [], i..e nothing. But SelfQueryRetriever has in-built template right which has prompts. How see that and change that template?
unable to retrieve the get_prompts for SelfQueryRetriever?
https://api.github.com/repos/langchain-ai/langchain/issues/17524/comments
1
2024-02-14T12:07:25Z
2024-02-14T14:19:07Z
https://github.com/langchain-ai/langchain/issues/17524
2,134,212,565
17,524
[ "langchain-ai", "langchain" ]
### Checked other resources - [X] I added a very descriptive title to this issue. - [X] I searched the LangChain documentation with the integrated search. - [X] I used the GitHub search to find a similar question and didn't find it. - [X] I am sure that this is a bug in LangChain rather than my code. ### Example Code The code and steps on https://python.langchain.com/docs/integrations/providers/modal ### Error Message and Stack Trace (if applicable) ``` File "/[redacted]/langchain-modal-test.py", line 96, in call_api 'output': chain.invoke({"input": prompt, "format_instructions": parser.get_format_instructions()}).content ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ File "/[redacted]/python3.12/site-packages/langchain_core/runnables/base.py", line 2053, in invoke input = step.invoke( ^^^^^^^^^^^^ File "/[redacted]/python3.12/site-packages/langchain_core/language_models/llms.py", line 235, in invoke self.generate_prompt( File "/[redacted]/python3.12/site-packages/langchain_core/language_models/llms.py", line 530, in generate_prompt return self.generate(prompt_strings, stop=stop, callbacks=callbacks, **kwargs) ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ File "/[redacted]/python3.12/site-packages/langchain_core/language_models/llms.py", line 703, in generate output = self._generate_helper( ^^^^^^^^^^^^^^^^^^^^^^ File "/[redacted]/python3.12/site-packages/langchain_core/language_models/llms.py", line 567, in _generate_helper raise e File "/[redacted]/python3.12/site-packages/langchain_core/language_models/llms.py", line 554, in _generate_helper self._generate( File "/[redacted]/python3.12/site-packages/langchain_core/language_models/llms.py", line 1139, in _generate self._call(prompt, stop=stop, run_manager=run_manager, **kwargs) File "/[redacted]/python3.12/site-packages/langchain_community/llms/modal.py", line 95, in _call text = response_json["prompt"] ^^^^^^^^^^^^^ UnboundLocalError: cannot 
access local variable 'response_json' where it is not associated with a value ``` ### Description * I'm trying to use the modal integration as the documentation describes : I published a modal endpoint to prompt a llm, and I can see it is receiving the query and generating the response, but the chain is failing. I believe the response is incorrectly parsed by the modal community package. ### System Info System Information ------------------ > OS: Linux > Python Version: 3.12.1 Package Information ------------------- > langchain_core: 0.1.22 > langchain: 0.1.6 > langchain_community: 0.0.19 > langsmith: 0.0.87 > langchain_openai: 0.0.5 Packages not installed (Not Necessarily a Problem) -------------------------------------------------- The following packages were not found: > langgraph > langserve
Community integration for Modal consistently fails
https://api.github.com/repos/langchain-ai/langchain/issues/17522/comments
2
2024-02-14T11:49:16Z
2024-06-01T00:08:33Z
https://github.com/langchain-ai/langchain/issues/17522
2,134,182,644
17,522
[ "langchain-ai", "langchain" ]
### Checklist - [X] I added a very descriptive title to this issue. - [X] I included a link to the documentation page I am referring to (if applicable). ### Issue with current documentation: - ### Idea or request for content: below is the data ``` from langchain.globals import set_debug set_debug(True) query = "what's the one thing that cricketers to do while they're bowling?" structured_query = StructuredQuery(query=query_template, limit=5) # print(structured_query) docs = retriever.get_relevant_documents(structured_query) ``` when i saw the debug output, it's completing changing the query to just 'cricket' instead of the complete query which i gave like below ``` [chain/end] [1:retriever:Retriever > 2:chain:RunnableSequence > 5:parser:StructuredQueryOutputParser] [1ms] Exiting Parser run with output: { "lc": 1, "type": "not_implemented", "id": [ "langchain", "chains", "query_constructor", "ir", "StructuredQuery" ], "repr": "StructuredQuery(query='Cricket', filter=None, limit=5)" } [chain/end] [1:retriever:Retriever > 2:chain:RunnableSequence] [476ms] Exiting Chain run with output: [outputs] ``` i want to change the prompt which is already present inside the SelfQueryRetriever. Can you help me with that code?
how to set custom prompt for SelfQueryRetriever?
https://api.github.com/repos/langchain-ai/langchain/issues/17521/comments
2
2024-02-14T11:42:21Z
2024-03-16T10:58:40Z
https://github.com/langchain-ai/langchain/issues/17521
2,134,170,530
17,521
[ "langchain-ai", "langchain" ]
### Checked other resources - [X] I added a very descriptive title to this issue. - [X] I searched the LangChain documentation with the integrated search. - [X] I used the GitHub search to find a similar question and didn't find it. - [X] I am sure that this is a bug in LangChain rather than my code. ### Example Code from langchain.agents import initialize_agent, AgentType ### Error Message and Stack Trace (if applicable) ```py File "<some file>", line 7, in <module> from langchain.agents import initialize_agent, AgentType File "/home/rafail/.pyenv/versions/develop/lib/python3.8/site-packages/langchain/agents/__init__.py", line 34, in <module> from langchain_community.agent_toolkits import ( File "/home/rafail/.pyenv/versions/develop/lib/python3.8/site-packages/langchain_community/agent_toolkits/__init__.py", line 45, in <module> from langchain_community.agent_toolkits.sql.base import create_sql_agent File "/home/rafail/.pyenv/versions/develop/lib/python3.8/site-packages/langchain_community/agent_toolkits/sql/base.py", line 29, in <module> from langchain_community.agent_toolkits.sql.toolkit import SQLDatabaseToolkit File "/home/rafail/.pyenv/versions/develop/lib/python3.8/site-packages/langchain_community/agent_toolkits/sql/toolkit.py", line 9, in <module> from langchain_community.tools.sql_database.tool import ( File "/home/rafail/.pyenv/versions/develop/lib/python3.8/site-packages/langchain_community/tools/sql_database/tool.py", line 5, in <module> from sqlalchemy import Result ImportError: cannot import name 'Result' from 'sqlalchemy' (/home/rafail/.pyenv/versions/develop/lib/python3.8/site-packages/sqlalchemy/__init__.py) ``` ### Description Importing langchain causes this issue as "Result" was not directly importable in versions of SQLAlchemy < 2.0.0. 
To resolve, this line could be changed to: `from sqlalchemy.engine import Result` https://github.com/langchain-ai/langchain/blob/9e8a3fc4fff8e20ab5d1f113515ded14906eb6f3/libs/community/langchain_community/tools/sql_database/tool.py#L5C1-L5C5 ### System Info langchain==0.1.6 langchain-community==0.0.20 langchain-core==0.1.23 langchainplus-sdk==0.0.20
Compatibility issue with SQLAlchemy<2
https://api.github.com/repos/langchain-ai/langchain/issues/17519/comments
6
2024-02-14T11:31:19Z
2024-06-01T00:08:33Z
https://github.com/langchain-ai/langchain/issues/17519
2,134,148,532
17,519
[ "langchain-ai", "langchain" ]
### Checklist - [X] I added a very descriptive title to this issue. - [X] I included a link to the documentation page I am referring to (if applicable). ### Issue with current documentation: - ### Idea or request for content: I want to change a prompt which is present inside SelfQueryRetrieval and i wrote below code ``` from langchain.chains.retrieval_qa import SelfQueryRetrieval from langchain.prompts import PromptTemplate class CustomSelfQueryRetrieval(SelfQueryRetrieval): def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs) self.prompt = PromptTemplate( template="Your new prompt template here.\n\n{context}\n\nQuestion: {question}\nHelpful Answer:", input_variables=["context", "question"] ) ``` Now, can you give me an example of how to utilize the above function? Is it same like SelfQueryRetrieval? Can you give a code example?
i want to change the prompt which is present inside the SelfQueryRetrieval
https://api.github.com/repos/langchain-ai/langchain/issues/17518/comments
3
2024-02-14T11:22:07Z
2024-02-14T14:19:06Z
https://github.com/langchain-ai/langchain/issues/17518
2,134,132,681
17,518
[ "langchain-ai", "langchain" ]
### Checklist - [X] I added a very descriptive title to this issue. - [X] I included a link to the documentation page I am referring to (if applicable). ### Issue with current documentation: - ### Idea or request for content: Below is the code which tries to the return the information what's happening within SelfQueryRetrieval and return the relevant documents ``` from langchain.globals import set_debug set_debug(True) query = "what's the one thing that cricketers to do while they're bowling?" structured_query = StructuredQuery(query=query_template, limit=5) # print(structured_query) docs = retriever.get_relevant_documents(structured_query) ``` ``` when i saw the debug output, it's completing changing the query to just 'cricket' instead of the complete query which i gave like below ``` chain/start] [1:retriever:Retriever > 2:chain:RunnableSequence > 5:parser:StructuredQueryOutputParser] Entering Parser run with input: { "input": "```json\n{\n \"query\": \"cricket\",\n \"filter\": \"NO_FILTER\",\n \"limit\": 5\n}\n```" } [chain/end] [1:retriever:Retriever > 2:chain:RunnableSequence > 5:parser:StructuredQueryOutputParser] [1ms] Exiting Parser run with output: { "lc": 1, "type": "not_implemented", "id": [ "langchain", "chains", "query_constructor", "ir", "StructuredQuery" ], "repr": "StructuredQuery(query='cricket', filter=None, limit=5)" } [chain/end] [1:retriever:Retriever > 2:chain:RunnableSequence] [476ms] Exiting Chain run with output: [outputs] ``` if you see above it completely changed the query which will return irrelevant results. Now, I want to set my own prompt to the SelfQueryRetrieval. Can you return the code of how to do it with example?
how to set custom prompt for SelQueryRetrieval?
https://api.github.com/repos/langchain-ai/langchain/issues/17517/comments
4
2024-02-14T10:59:23Z
2024-02-14T14:19:05Z
https://github.com/langchain-ai/langchain/issues/17517
2,134,075,685
17,517
[ "langchain-ai", "langchain" ]
### Checked other resources - [X] I added a very descriptive title to this issue. - [X] I searched the LangChain documentation with the integrated search. - [X] I used the GitHub search to find a similar question and didn't find it. - [X] I am sure that this is a bug in LangChain rather than my code. ### Example Code from langchain.retrievers.multi_query import MultiQueryRetriever MultiQueryRetriever() ### Error Message and Stack Trace (if applicable) venv\Lib\site-packages\langchain\retrievers\__init__.py:37: in <module> from langchain.retrievers.web_research import WebResearchRetriever venv\Lib\site-packages\langchain\retrievers\web_research.py:5: in <module> from langchain_community.document_loaders import AsyncHtmlLoader venv\Lib\site-packages\langchain_community\document_loaders\__init__.py:163: in <module> from langchain_community.document_loaders.pebblo import PebbloSafeLoader venv\Lib\site-packages\langchain_community\document_loaders\pebblo.py:5: in <module> import pwd E ModuleNotFoundError: No module named 'pwd' ### Description There is no `pwd` on windows. ### System Info python 3.11.2 langchain==0.1.7
import pwd on windows
https://api.github.com/repos/langchain-ai/langchain/issues/17514/comments
31
2024-02-14T09:51:15Z
2024-05-22T10:46:00Z
https://github.com/langchain-ai/langchain/issues/17514
2,133,944,378
17,514
[ "langchain-ai", "langchain" ]
### Checklist - [X] I added a very descriptive title to this issue. - [X] I included a link to the documentation page I am referring to (if applicable). ### Issue with current documentation: - ### Idea or request for content: below is the code retriever = SelfQueryRetriever.from_llm( llm, vectorstore, document_content_description, metadata_field_info, use_original_query=False, enable_limit=True, verbose=True ) and below's the function which i'm trying to use which will do de-duplication, ``` def fetch_unique_documents(query_template, company_names, initial_limit, desired_count): company_documents = {} for company_name in company_names: # Format the query with the current company name query = query_template.format(company_names=company_name) unique_docs = [] seen_contents = set() current_limit = initial_limit while len(unique_docs) < desired_count: # structured_query = StructuredQuery(query=query, limit=current_limit) docs = retriever.get_relevant_documents(query) # Keep track of whether we found new unique documents in this iteration found_new_unique = False for doc in docs: if doc.page_content not in seen_contents: unique_docs.append(doc) seen_contents.add(doc.page_content) found_new_unique = True if len(unique_docs) == desired_count: break if not found_new_unique or len(unique_docs) == desired_count: break # Exit if no new unique documents are found or if we've reached the desired count # Increase the limit more aggressively if we are still far from the desired count current_limit += desired_count - len(unique_docs) # Store the results in the dictionary with the company name as the key company_documents[company_name] = unique_docs return company_documents # Example usage company_names = company_names query_template = "Does the company {company_names}, have one?" 
desired_count = 5 initial_limit = 50 # Fetch documents for each company company_documents = fetch_unique_documents(query_template, company_names, initial_limit=desired_count, desired_count=desired_count) ``` in the above, i don't know where to mention the desired_count, as i don't want to use StructuredQuery function, i just want to use normal retriever and get the relevant documents. Can you help me with the code?
how to mention topk in get_relevant_documents while using SelfQueryRetriever?
https://api.github.com/repos/langchain-ai/langchain/issues/17511/comments
1
2024-02-14T07:45:34Z
2024-02-14T14:19:05Z
https://github.com/langchain-ai/langchain/issues/17511
2,133,737,849
17,511
[ "langchain-ai", "langchain" ]
### Checked other resources - [X] I added a very descriptive title to this issue. - [X] I searched the LangChain documentation with the integrated search. - [X] I used the GitHub search to find a similar question and didn't find it. - [X] I am sure that this is a bug in LangChain rather than my code. ### Example Code ``` python pgvector = PGVector(...) # Initialize PGVector with necessary parameters ids_to_delete = [...] # List of ids to delete pgvector.delete(ids_to_delete) ``` ### Error Message and Stack Trace (if applicable) _No response_ ### Description How can I obtain the custom_id of a document from a collection using the source file name, as I want to delete a specific file from the list of files stored in a particular collection? ### System Info I am using postgres
How can I get custom_id of a document from a Collection using Source file name?
https://api.github.com/repos/langchain-ai/langchain/issues/17508/comments
7
2024-02-14T06:10:52Z
2024-02-14T14:24:24Z
https://github.com/langchain-ai/langchain/issues/17508
2,133,616,806
17,508
[ "langchain-ai", "langchain" ]
### Checked other resources - [X] I added a very descriptive title to this issue. - [X] I searched the LangChain documentation with the integrated search. - [X] I used the GitHub search to find a similar question and didn't find it. - [X] I am sure that this is a bug in LangChain rather than my code. ### Example Code Approach 1: def result(question): snowflake_url = get_snowflake_db_uri() db_connection = SQLDatabase.from_uri(snowflake_url, sample_rows_in_table_info=1, include_tables=['table_1'],max_string_length=32000) sql_db_chain = SQLDatabaseChain.from_llm( llm, db_connection, prompt=few_shot_prompt, use_query_checker=True, # must be False for OpenAI model verbose=False, return_intermediate_steps=True ) answer = sql_db_chain(question) return answer["intermediate_steps"][1], answer["result"] Approach2: def llm_answer(question): snowflake_url = get_snowflake_db_uri() db_connection = SQLDatabase.from_uri( snowflake_url, sample_rows_in_table_info=1, include_tables=['maps_route_campaign_report'], view_support=True, max_string_length=30000, ) table_info = db_connection.table_info chat_template = """You are a SQL expert .... 
Use the following format: Question: Question here SQLQuery: SQL Query to run SQLResult: Result of the SQLQuery Answer: Final answer here """ chat_prompt = ChatPromptTemplate.from_messages( [ ('system', chat_template), MessagesPlaceholder(variable_name='history'), ('human', '{input}'), ] ) sql_db_chain = SQLDatabaseChain.from_llm( llm, db_connection, prompt=few_shot_prompt, use_query_checker=True, # must be False for OpenAI model verbose=False, return_intermediate_steps=True ) memory_buffer = ConversationBufferWindowMemory(k=4, return_messages=True) chat = memory_buffer.load_memory_variables({})['history'] prompt = chat_prompt.format(info=table_info, history=chat, input=question) answer = sql_db_chain(prompt) answer_str = str(answer) if not isinstance(answer, str) else answer memory_buffer.save_context({'input': question}, {'answer': answer_str}) sql_query = json.dumps(answer['intermediate_steps'][1]) sql_result = json.dumps(answer['result']) return sql_query, sql_result ### Error Message and Stack Trace (if applicable) None ### Description 1) What is the main difference in llm performance between Approach 1 and Approach 2 for text-to-SQL tasks? 2) Will Approach 2 retain memory if multiple tables are involved? 3) Which approach is best for light weighted deployment with docker and AWS ### System Info boto3==1.34.29 chromadb==0.4.22 huggingface-hub==0.20.3 langchain==0.1.4 langchain-experimental==0.0.49 python-dotenv==1.0.1 sentence_transformers==2.3.0 snowflake-connector-python==3.7.0 snowflake-sqlalchemy==1.5.1 SQLAlchemy==1.4.51 streamlit==1.30.0
Which has better performance for Text2SQL: direct question in SQLDatabaseChain vs question though ChatPromptTemplate
https://api.github.com/repos/langchain-ai/langchain/issues/17506/comments
1
2024-02-14T04:14:09Z
2024-02-14T04:30:21Z
https://github.com/langchain-ai/langchain/issues/17506
2,133,508,496
17,506
[ "langchain-ai", "langchain" ]
### Privileged issue - [X] I am a LangChain maintainer, or was asked directly by a LangChain maintainer to create an issue here. ### Issue Content The goal is to make it easier to run integration tests. Acceptance Criteria: - [ ] Update docker compose file here: https://github.com/langchain-ai/langchain/blob/master/docker/docker-compose.yml to include a database that is used during integration tests and can be spun up locally via docker compose. - [ ] Use non standard port -- just increment by 1 from the previous service in that file - [ ] Update any integration tests that use the given service to use a matching port This is a good first issue for folks with experience in dev ops. Consider git-grepping for existing yml files that contain service configuration.
Expand docker-compose with common dependencies used in integration tests
https://api.github.com/repos/langchain-ai/langchain/issues/17505/comments
0
2024-02-14T03:32:30Z
2024-06-01T00:07:26Z
https://github.com/langchain-ai/langchain/issues/17505
2,133,481,545
17,505
[ "langchain-ai", "langchain" ]
### Checked other resources - [X] I added a very descriptive title to this issue. - [X] I searched the LangChain documentation with the integrated search. - [X] I used the GitHub search to find a similar question and didn't find it. - [X] I am sure that this is a bug in LangChain rather than my code. ### Example Code few_shot_prompt = FewShotPromptTemplate( example_selector=example_selector, example_prompt=example_prompt, prefix=_snowflake_prompt + 'Provide no preamble' + ' Here are some examples:', suffix=PROMPT_SUFFIX, input_variables=['table_info', 'input', 'top_k'], ) snowflake_url = get_snowflake_db_uri() db_connection = SQLDatabase.from_uri( snowflake_url, sample_rows_in_table_info=2, include_tables=['table1'], view_support=True, max_string_length=30000, ) return_op = SQLDatabaseChain.from_llm( llm, db_connection, prompt=few_shot_prompt, use_query_checker=True verbose=False, return_intermediate_steps=True ### Error Message and Stack Trace (if applicable) How to set dialect with SQLDatabaseChain using FewShotPromptTemplate ### Description I want to set the dialect to snowflake in SQLDatabaseChain, provide steps to do so for FewShotPromptTemplate ### System Info boto3==1.34.29 chromadb==0.4.22 huggingface-hub==0.20.3 langchain==0.1.5 langchain-community==0.0.19 langchain-experimental==0.0.50 pip_audit==2.6.0 pre-commit==3.6.0 pylint==2.17.4 pylint_quotes==0.2.3 pylint_pydantic==0.3.2 python-dotenv==1.0.1 sentence_transformers==2.3.0 snowflake-connector-python==3.7.0 snowflake-sqlalchemy==1.5.1 SQLAlchemy==1.4.51 streamlit==1.30.0 watchdog==3.0.0 boto3==1.34.29 chromadb==0.4.22 huggingface-hub==0.20.3
How to set dialect snowflake with SQLDatabaseChain with FewShotPromptTemplate
https://api.github.com/repos/langchain-ai/langchain/issues/17487/comments
11
2024-02-13T21:38:37Z
2024-05-22T16:08:48Z
https://github.com/langchain-ai/langchain/issues/17487
2,133,171,500
17,487
[ "langchain-ai", "langchain" ]
### Checked other resources - [X] I added a very descriptive title to this issue. - [X] I searched the LangChain documentation with the integrated search. - [X] I used the GitHub search to find a similar question and didn't find it. - [X] I am sure that this is a bug in LangChain rather than my code. ### Example Code ``` app = Sanic("app") async def create_collection(col_id, docs, embeddings): return await PGVector.afrom_documents( embedding=embeddings, documents=docs, collection_name=f"col_{col_id}", connection_string=config.CONNECTION_STRING, pre_delete_collection=True, ) @app.post("/collection") async def chat_collection(request): docs = [] session = request.args.get("session") if not session: raise ValueError("Session ID is required.") for file in request.files.getlist("collection"): for page in document_split_chunk(file.body, "pdf"): docs.append(page) await create_collection(session, docs, embeddings) return json("Collection created.") if __name__ == "__main__": app.run(host="127.0.0.1", port=8000, workers=1, debug=True) ``` ### Error Message and Stack Trace (if applicable) ``` Executing <Task pending name='Task-8' coro=<HttpProtocol.connection_task() running at C:\Users\Aidan Stewart\PycharmProjects\x\venv\lib\site-packages\sanic\server\protocols\http_protocol.py:155> wait_for=<Future pending cb=[_chain_future.<locals>._call_check_cancel() at C:\Users\Aidan Stewart\AppData\Local\Programs\Python\Python39\lib\asyncio\futures.py:384, <TaskWakeupMethWrapper object at 0x0000016B66DC5CA0>()] created at C:\Users\Aidan Stewart\AppData\Local\Programs\Python\Python39\lib\asyncio\base_events.py:429> created at C:\Users\Aidan Stewart\PycharmProjects\x\venv\lib\site-packages\sanic\server\protocols\http_protocol.py:283> took 1.016 seconds Traceback (most recent call last): File "C:\Users\Aidan Stewart\PycharmProjects\x\venv\lib\site-packages\langchain_community\vectorstores\pgvector.py", line 253, in create_vector_extension session.execute(statement) File "C:\Users\Aidan 
Stewart\PycharmProjects\x\venv\lib\site-packages\sqlalchemy\orm\session.py", line 2308, in execute return self._execute_internal( File "C:\Users\Aidan Stewart\PycharmProjects\x\venv\lib\site-packages\sqlalchemy\orm\session.py", line 2180, in _execute_internal conn = self._connection_for_bind(bind) File "C:\Users\Aidan Stewart\PycharmProjects\x\venv\lib\site-packages\sqlalchemy\orm\session.py", line 2047, in _connection_for_bind return trans._connection_for_bind(engine, execution_options) File "<string>", line 2, in _connection_for_bind File "C:\Users\Aidan Stewart\PycharmProjects\x\venv\lib\site-packages\sqlalchemy\orm\state_changes.py", line 139, in _go ret_value = fn(self, *arg, **kw) File "C:\Users\Aidan Stewart\PycharmProjects\x\venv\lib\site-packages\sqlalchemy\orm\session.py", line 1143, in _connection_for_bind conn = bind.connect() File "C:\Users\Aidan Stewart\PycharmProjects\x\venv\lib\site-packages\sqlalchemy\engine\base.py", line 3269, in connect return self._connection_cls(self) File "C:\Users\Aidan Stewart\PycharmProjects\x\venv\lib\site-packages\sqlalchemy\engine\base.py", line 145, in __init__ self._dbapi_connection = engine.raw_connection() File "C:\Users\Aidan Stewart\PycharmProjects\x\venv\lib\site-packages\sqlalchemy\engine\base.py", line 3293, in raw_connection return self.pool.connect() File "C:\Users\Aidan Stewart\PycharmProjects\x\venv\lib\site-packages\sqlalchemy\pool\base.py", line 452, in connect return _ConnectionFairy._checkout(self) File "C:\Users\Aidan Stewart\PycharmProjects\x\venv\lib\site-packages\sqlalchemy\pool\base.py", line 1269, in _checkout fairy = _ConnectionRecord.checkout(pool) File "C:\Users\Aidan Stewart\PycharmProjects\x\venv\lib\site-packages\sqlalchemy\pool\base.py", line 716, in checkout rec = pool._do_get() File "C:\Users\Aidan Stewart\PycharmProjects\x\venv\lib\site-packages\sqlalchemy\pool\impl.py", line 148, in _do_get return self._pool.get(wait, self._timeout) File "C:\Users\Aidan 
Stewart\PycharmProjects\x\venv\lib\site-packages\sqlalchemy\util\queue.py", line 309, in get return self.get_nowait() File "C:\Users\Aidan Stewart\PycharmProjects\x\venv\lib\site-packages\sqlalchemy\util\queue.py", line 303, in get_nowait return self._queue.get_nowait() File "C:\Users\Aidan Stewart\PycharmProjects\x\venv\lib\site-packages\sqlalchemy\util\langhelpers.py", line 1146, in __get__ obj.__dict__[self.__name__] = result = self.fget(obj) File "C:\Users\Aidan Stewart\PycharmProjects\x\venv\lib\site-packages\sqlalchemy\util\queue.py", line 278, in _queue queue = asyncio.Queue(maxsize=self.maxsize) File "C:\Users\Aidan Stewart\AppData\Local\Programs\Python\Python39\lib\asyncio\queues.py", line 36, in __init__ self._loop = events.get_event_loop() File "C:\Users\Aidan Stewart\AppData\Local\Programs\Python\Python39\lib\asyncio\events.py", line 642, in get_event_loop raise RuntimeError('There is no current event loop in thread %r.' RuntimeError: There is no current event loop in thread 'asyncio_0'. 
The above exception was the direct cause of the following exception: Traceback (most recent call last): File "C:\Users\Aidan Stewart\PycharmProjects\x\venv\lib\site-packages\sanic\app.py", line 1385, in handle_request response = await response File "C:\Users\Aidan Stewart\PycharmProjects\x\py_orchestrator\blueprints\chat\view.py", line 41, in chat_collection await create_collection(session, files, embeddings) File "C:\Users\Aidan Stewart\PycharmProjects\x\py_orchestrator\common\document_service.py", line 35, in create_collection return await PGVector.afrom_documents( File "C:\Users\Aidan Stewart\PycharmProjects\x\venv\lib\site-packages\langchain_core\vectorstores.py", line 520, in afrom_documents return await cls.afrom_texts(texts, embedding, metadatas=metadatas, **kwargs) File "C:\Users\Aidan Stewart\PycharmProjects\x\venv\lib\site-packages\langchain_core\vectorstores.py", line 542, in afrom_texts return await run_in_executor( File "C:\Users\Aidan Stewart\PycharmProjects\x\venv\lib\site-packages\langchain_core\runnables\config.py", line 493, in run_in_executor return await asyncio.get_running_loop().run_in_executor( File "C:\Users\Aidan Stewart\AppData\Local\Programs\Python\Python39\lib\concurrent\futures\thread.py", line 58, in run result = self.fn(*self.args, **self.kwargs) File "C:\Users\Aidan Stewart\PycharmProjects\x\venv\lib\site-packages\langchain_community\vectorstores\pgvector.py", line 662, in from_texts return cls.__from( File "C:\Users\Aidan Stewart\PycharmProjects\x\venv\lib\site-packages\langchain_community\vectorstores\pgvector.py", line 349, in __from store = cls( File "C:\Users\Aidan Stewart\PycharmProjects\x\venv\lib\site-packages\langchain_community\vectorstores\pgvector.py", line 212, in __init__ self.__post_init__() File "C:\Users\Aidan Stewart\PycharmProjects\x\venv\lib\site-packages\langchain_community\vectorstores\pgvector.py", line 218, in __post_init__ self.create_vector_extension() File "C:\Users\Aidan 
Stewart\PycharmProjects\x\venv\lib\site-packages\langchain_community\vectorstores\pgvector.py", line 256, in create_vector_extension raise Exception(f"Failed to create vector extension: {e}") from e Exception: Failed to create vector extension: There is no current event loop in thread 'asyncio_0'. ``` ### Description When attempting to create a collection for a set of documents, I run into the error demonstrated. It appears that sqlalchemy cannot find the existing async loop created by Sanic when the server is running. Not entirely sure what the cause is nor the solution as it appears to be an internal Langchain issue when attempting to execute SQLAlchemy code in an async executor. ### System Info aiofiles==23.2.1 aiohttp==3.9.2 aiosignal==1.3.1 aiosqlite==0.17.0 annotated-types==0.6.0 anyio==4.2.0 async-timeout==4.0.3 asyncpg==0.29.0 attrs==23.2.0 black==24.1.1 certifi==2023.11.17 charset-normalizer==3.3.2 click==8.1.7 colorama==0.4.6 dataclasses-json==0.6.3 distro==1.9.0 exceptiongroup==1.2.0 frozenlist==1.4.1 greenlet==3.0.3 h11==0.14.0 html5tagger==1.3.0 httpcore==1.0.2 httptools==0.6.1 httpx==0.26.0 idna==3.6 iso8601==1.1.0 jsonpatch==1.33 jsonpointer==2.4 langchain==0.1.4 langchain-community==0.0.16 langchain-core==0.1.17 langchain-openai==0.0.5 langsmith==0.0.84 marshmallow==3.20.2 multidict==6.0.4 mypy-extensions==1.0.0 numpy==1.26.3 openai==1.10.0 packaging==23.2 pathspec==0.12.1 pgvector==0.2.4 platformdirs==4.1.0 psycopg2-binary==2.9.9 pydantic==2.6.0 pydantic_core==2.16.1 pypdf==4.0.1 pypika-tortoise==0.1.6 pytz==2023.4 PyYAML==6.0.1 regex==2023.12.25 requests==2.31.0 sanic==23.12.1 sanic-routing==23.12.0 sniffio==1.3.0 SQLAlchemy==2.0.25 tenacity==8.2.3 tiktoken==0.5.2 tomli==2.0.1 tortoise-orm==0.20.0 tqdm==4.66.1 tracerite==1.1.1 typing-inspect==0.9.0 typing_extensions==4.9.0 urllib3==2.1.0 websockets==12.0 yarl==1.9.4 Windows 10 Python 3.9
RuntimeError: There is no current event loop in thread 'asyncio_0'. Utilizing Sanic as my web framework as choice. Error occurs when attempting to use `PGVector.afrom_documents`
https://api.github.com/repos/langchain-ai/langchain/issues/17485/comments
7
2024-02-13T21:35:23Z
2024-02-13T22:44:00Z
https://github.com/langchain-ai/langchain/issues/17485
2,133,167,447
17,485
[ "langchain-ai", "langchain" ]
### Checked other resources - [X] I added a very descriptive title to this issue. - [X] I searched the LangChain documentation with the integrated search. - [X] I used the GitHub search to find a similar question and didn't find it. - [X] I am sure that this is a bug in LangChain rather than my code. ### Example Code The following code is copied from a [small repository](https://github.com/jkndrkn/pinecone-upsert-error/tree/main) I created that helps reproduce the issue. ```python from os import environ from langchain.text_splitter import RecursiveCharacterTextSplitter from langchain_community.document_loaders import TextLoader from langchain_community.embeddings import HuggingFaceEmbeddings from langchain_community.vectorstores import Pinecone INDEX_NAME = environ["PINECONE_INDEX"] TRIALS = 50 TEXT_PATH = "my_text.txt" loader = TextLoader(TEXT_PATH) documents = loader.load() text_splitter = RecursiveCharacterTextSplitter(chunk_size=500, chunk_overlap=50) docs = text_splitter.split_documents(documents) print("docs length:", len(docs)) embedder = HuggingFaceEmbeddings(model_name="all-mpnet-base-v2") for i in range(0, TRIALS): print("trial: ", i, flush=True) Pinecone.from_documents(docs, embedder, index_name=INDEX_NAME) ``` Please see the project [README.md](https://github.com/jkndrkn/pinecone-upsert-error/blob/main/README.md) for instructions for how to configure and run this code. 
### Error Message and Stack Trace (if applicable) ``` Traceback (most recent call last): File "/Users/jeriksen/opt/anaconda3/envs/pinecone-upsert-error/lib/python3.10/site-packages/urllib3/connection.py", line 198, in _new_conn sock = connection.create_connection( File "/Users/jeriksen/opt/anaconda3/envs/pinecone-upsert-error/lib/python3.10/site-packages/urllib3/util/connection.py", line 60, in create_connection for res in socket.getaddrinfo(host, port, family, socket.SOCK_STREAM): File "/Users/jeriksen/opt/anaconda3/envs/pinecone-upsert-error/lib/python3.10/socket.py", line 955, in getaddrinfo for res in _socket.getaddrinfo(host, port, family, type, proto, flags): socket.gaierror: [Errno 8] nodename nor servname provided, or not known The above exception was the direct cause of the following exception: Traceback (most recent call last): File "/Users/jeriksen/opt/anaconda3/envs/pinecone-upsert-error/lib/python3.10/site-packages/urllib3/connectionpool.py", line 793, in urlopen response = self._make_request( File "/Users/jeriksen/opt/anaconda3/envs/pinecone-upsert-error/lib/python3.10/site-packages/urllib3/connectionpool.py", line 491, in _make_request raise new_e File "/Users/jeriksen/opt/anaconda3/envs/pinecone-upsert-error/lib/python3.10/site-packages/urllib3/connectionpool.py", line 467, in _make_request self._validate_conn(conn) File "/Users/jeriksen/opt/anaconda3/envs/pinecone-upsert-error/lib/python3.10/site-packages/urllib3/connectionpool.py", line 1099, in _validate_conn conn.connect() File "/Users/jeriksen/opt/anaconda3/envs/pinecone-upsert-error/lib/python3.10/site-packages/urllib3/connection.py", line 616, in connect self.sock = sock = self._new_conn() File "/Users/jeriksen/opt/anaconda3/envs/pinecone-upsert-error/lib/python3.10/site-packages/urllib3/connection.py", line 205, in _new_conn raise NameResolutionError(self.host, self, e) from e urllib3.exceptions.NameResolutionError: <urllib3.connection.HTTPSConnection object at 0x7f7d20580430>: Failed to 
resolve 'answers-dev-jde-test-REDACTED.svc.us-east-1-aws.pinecone.io' ([Errno 8] nodename nor servname provided, or not known) The above exception was the direct cause of the following exception: Traceback (most recent call last): File "/Users/jeriksen/bamboohr/ai-labs/pinecone-upsert-error/load_document.py", line 28, in <module> Pinecone.from_documents(docs, embedder, index_name=INDEX_NAME) File "/Users/jeriksen/opt/anaconda3/envs/pinecone-upsert-error/lib/python3.10/site-packages/langchain_core/vectorstores.py", line 508, in from_documents return cls.from_texts(texts, embedding, metadatas=metadatas, **kwargs) File "/Users/jeriksen/opt/anaconda3/envs/pinecone-upsert-error/lib/python3.10/site-packages/langchain_pinecone/vectorstores.py", line 434, in from_texts pinecone.add_texts( File "/Users/jeriksen/opt/anaconda3/envs/pinecone-upsert-error/lib/python3.10/site-packages/langchain_pinecone/vectorstores.py", line 166, in add_texts [res.get() for res in async_res] File "/Users/jeriksen/opt/anaconda3/envs/pinecone-upsert-error/lib/python3.10/site-packages/langchain_pinecone/vectorstores.py", line 166, in <listcomp> [res.get() for res in async_res] File "/Users/jeriksen/opt/anaconda3/envs/pinecone-upsert-error/lib/python3.10/multiprocessing/pool.py", line 774, in get raise self._value File "/Users/jeriksen/opt/anaconda3/envs/pinecone-upsert-error/lib/python3.10/multiprocessing/pool.py", line 125, in worker result = (True, func(*args, **kwds)) File "/Users/jeriksen/opt/anaconda3/envs/pinecone-upsert-error/lib/python3.10/site-packages/pinecone/core/client/api_client.py", line 195, in __call_api response_data = self.request( File "/Users/jeriksen/opt/anaconda3/envs/pinecone-upsert-error/lib/python3.10/site-packages/pinecone/core/client/api_client.py", line 454, in request return self.rest_client.POST(url, File "/Users/jeriksen/opt/anaconda3/envs/pinecone-upsert-error/lib/python3.10/site-packages/pinecone/core/client/rest.py", line 301, in POST return self.request("POST", 
url, File "/Users/jeriksen/opt/anaconda3/envs/pinecone-upsert-error/lib/python3.10/site-packages/pinecone/core/client/rest.py", line 178, in request r = self.pool_manager.request( File "/Users/jeriksen/opt/anaconda3/envs/pinecone-upsert-error/lib/python3.10/site-packages/urllib3/_request_methods.py", line 144, in request return self.request_encode_body( File "/Users/jeriksen/opt/anaconda3/envs/pinecone-upsert-error/lib/python3.10/site-packages/urllib3/_request_methods.py", line 279, in request_encode_body return self.urlopen(method, url, **extra_kw) File "/Users/jeriksen/opt/anaconda3/envs/pinecone-upsert-error/lib/python3.10/site-packages/urllib3/poolmanager.py", line 444, in urlopen response = conn.urlopen(method, u.request_uri, **kw) File "/Users/jeriksen/opt/anaconda3/envs/pinecone-upsert-error/lib/python3.10/site-packages/urllib3/connectionpool.py", line 877, in urlopen return self.urlopen( File "/Users/jeriksen/opt/anaconda3/envs/pinecone-upsert-error/lib/python3.10/site-packages/urllib3/connectionpool.py", line 877, in urlopen return self.urlopen( File "/Users/jeriksen/opt/anaconda3/envs/pinecone-upsert-error/lib/python3.10/site-packages/urllib3/connectionpool.py", line 877, in urlopen return self.urlopen( File "/Users/jeriksen/opt/anaconda3/envs/pinecone-upsert-error/lib/python3.10/site-packages/urllib3/connectionpool.py", line 847, in urlopen retries = retries.increment( File "/Users/jeriksen/opt/anaconda3/envs/pinecone-upsert-error/lib/python3.10/site-packages/urllib3/util/retry.py", line 515, in increment raise MaxRetryError(_pool, url, reason) from reason # type: ignore[arg-type] urllib3.exceptions.MaxRetryError: HTTPSConnectionPool(host='answers-dev-jde-test-REDACTED.svc.us-east-1-aws.pinecone.io', port=443): Max retries exceeded with url: /vectors/upsert (Caused by NameResolutionError("<urllib3.connection.HTTPSConnection object at 0x7f7d20580430>: Failed to resolve 'answers-dev-jde-test-REDACTED.svc.us-east-1-aws.pinecone.io' ([Errno 8] nodename nor 
servname provided, or not known)")) ``` ### Description I am trying to use from `langchain_community.vectorstores.Pinecone` to upsert embeddings to a Pinecone index using `Pinecone.from_documents()`. When I start a script that calls `from_documents()` once for each document it will succeed for the first few runs but then fail and return this error: ``` urllib3.exceptions.MaxRetryError: HTTPSConnectionPool(host='answers-dev-jde-test-REDACTED.svc.us-east-1-aws.pinecone.io', port=443): Max retries exceeded with url: /vectors/upsert (Caused by NameResolutionError("<urllib3.connection.HTTPSConnection object at 0x7f7d20580430>: Failed to resolve 'answers-dev-jde-test-REDACTED.svc.us-east-1-aws.pinecone.io' ([Errno 8] nodename nor servname provided, or not known)")) ``` This is an unexpected error. The index does exist. I have tried this with many different Pinecone indexes of both 1024 dimensions with Cohere embeddings and 768 dimensions with HuggingFace embeddings. I have also tried various document sizes. 
### System Info LangChain dependencies ``` langchain==0.1.6 langchain-community==0.0.19 langchain-core==0.1.22 ``` Here is my environment.yml ``` name: pinecone-upsert-error dependencies: - pip=23.1.2 - python=3.10.13 - pip: - cohere==4.46 - langchain==0.1.6 - pinecone-client[grpc]==3.0.2 ``` Here is the entire output of `pip freeze`: ``` aiohttp==3.9.3 aiosignal==1.3.1 annotated-types==0.6.0 anyio==4.2.0 async-timeout==4.0.3 attrs==23.2.0 backoff==2.2.1 certifi==2024.2.2 charset-normalizer==3.3.2 click==8.1.7 cohere==4.46 dataclasses-json==0.6.4 exceptiongroup==1.2.0 fastavro==1.9.3 filelock==3.13.1 frozenlist==1.4.1 fsspec==2024.2.0 googleapis-common-protos==1.62.0 greenlet==3.0.3 grpc-gateway-protoc-gen-openapiv2==0.1.0 grpcio==1.60.1 huggingface-hub==0.20.3 idna==3.6 importlib-metadata==6.11.0 Jinja2==3.1.3 joblib==1.3.2 jsonpatch==1.33 jsonpointer==2.4 langchain==0.1.6 langchain-community==0.0.19 langchain-core==0.1.22 langsmith==0.0.87 lz4==4.3.3 MarkupSafe==2.1.5 marshmallow==3.20.2 mpmath==1.3.0 multidict==6.0.5 mypy-extensions==1.0.0 networkx==3.2.1 nltk==3.8.1 numpy==1.26.4 packaging==23.2 pillow==10.2.0 pinecone-client==3.0.2 protobuf==3.20.3 pydantic==2.6.1 pydantic_core==2.16.2 PyYAML==6.0.1 regex==2023.12.25 requests==2.31.0 safetensors==0.4.2 scikit-learn==1.4.0 scipy==1.12.0 sentence-transformers==2.3.1 sentencepiece==0.1.99 sniffio==1.3.0 SQLAlchemy==2.0.27 sympy==1.12 tenacity==8.2.3 threadpoolctl==3.2.0 tokenizers==0.15.2 torch==2.2.0 tqdm==4.66.2 transformers==4.37.2 typing-inspect==0.9.0 typing_extensions==4.9.0 urllib3==2.2.0 yarl==1.9.4 zipp==3.17.0 ```
Error in langchain_community.vectorstores.Pinecone: Sending request to /vectors/upsert triggers NameResolutionError
https://api.github.com/repos/langchain-ai/langchain/issues/17474/comments
8
2024-02-13T19:49:05Z
2024-02-14T21:44:26Z
https://github.com/langchain-ai/langchain/issues/17474
2,133,024,664
17,474
[ "langchain-ai", "langchain" ]
### Checklist - [X] I added a very descriptive title to this issue. - [X] I included a link to the documentation page I am referring to (if applicable). ### Issue with current documentation: - ### Idea or request for content: below is the code ``` retriever = vectorStore.as_retriever(search_kwargs= {"k": current_limit, "pre_filter": {"Subject": {"$eq": "sports"}} } ) ``` the above pre_filter should go through the name metadata component i.e. metadata where Subject is sports and should retrieve the document right? But its performing same as ht normal retriever i.e. without pre_filter. May i know the reason why?
why' the pre_filter component not working for retriever while retrieving documents?
https://api.github.com/repos/langchain-ai/langchain/issues/17464/comments
3
2024-02-13T15:24:57Z
2024-02-14T01:34:39Z
https://github.com/langchain-ai/langchain/issues/17464
2,132,577,209
17,464
[ "langchain-ai", "langchain" ]
### Checked other resources - [X] I added a very descriptive title to this issue. - [X] I searched the LangChain documentation with the integrated search. - [X] I used the GitHub search to find a similar question and didn't find it. - [X] I am sure that this is a bug in LangChain rather than my code. ### Example Code ```python def retreival_qa_chain(COLLECTION_NAME): embedding = OpenAIEmbeddings() llm = ChatOpenAI(model="gpt-3.5-turbo-16k",temperature=0.1) vector_store = PGVector( connection_string=CONNECTION_STRING, collection_name=COLLECTION_NAME, embedding_function=embedding ) retriever = vector_store.as_retriever(search_kwargs={"k": 3}) qa = RetrievalQA.from_chain_type( llm=llm, chain_type="stuff", retriever=retriever, return_source_documents=True ) return qa ``` ### Error Message and Stack Trace (if applicable) _No response_ ### Description How can I delete existing collection in pgvector and ensure that when deleting a collection in pgvector, its corresponding entry in another table is also deleted? ### System Info I am using pgvector
How to delete collection in Pgvector with cascade delete?
https://api.github.com/repos/langchain-ai/langchain/issues/17461/comments
1
2024-02-13T12:26:46Z
2024-02-14T01:50:11Z
https://github.com/langchain-ai/langchain/issues/17461
2,132,213,397
17,461
[ "langchain-ai", "langchain" ]
### Checked other resources - [X] I added a very descriptive title to this issue. - [X] I searched the LangChain documentation with the integrated search. - [X] I used the GitHub search to find a similar question and didn't find it. - [X] I am sure that this is a bug in LangChain rather than my code. ### Example Code The following code ``` from langchain_core.runnables import RunnableParallel from langchain.prompts import ChatPromptTemplate chain1 = ChatPromptTemplate.from_template("tell me a joke about {topic}") chain2 = ChatPromptTemplate.from_template("write a short (2 line) poem about {topic}") def test(input) -> int: print(input) return(3) combined = RunnableParallel(joke=chain1, poem=chain2).assign(x=RunnableLambda(test)) ``` The output is correct if you do `combined.invoke({'topic':"love"})` you correctly get ``` {'joke': ChatPromptValue(messages=[HumanMessage(content='tell me a joke about love')]), 'poem': ChatPromptValue(messages=[HumanMessage(content='write a short (2 line) poem about love')]), 'x': 3} ``` however if you check the output schema as follows ``` combined.output_schema.schema() ```` Output is ``` {'title': 'RunnableSequenceOutput', 'type': 'object', 'properties': {'topic': {'title': 'Topic', 'type': 'string'}, 'x': {'title': 'X', 'type': 'integer'}}} ``` `joke ` field is missing in the output schema. This is impacting the langserve api output for the chain as well. ### Error Message and Stack Trace (if applicable) _No response_ ### Description Output schema of the runnable is missing field when RunnableParallel is used in conjunction with assign. ### System Info langchain = "0.1.6" python = "^3.11"
Issue with chain output_schema when a runnableparrallel is invoked with assign.
https://api.github.com/repos/langchain-ai/langchain/issues/17460/comments
2
2024-02-13T11:19:29Z
2024-07-30T16:05:57Z
https://github.com/langchain-ai/langchain/issues/17460
2,132,099,898
17,460
[ "langchain-ai", "langchain" ]
### Checklist - [X] I added a very descriptive title to this issue. - [X] I included a link to the documentation page I am referring to (if applicable). ### Issue with current documentation: - ### Idea or request for content: below is the code which will add only page_content to the embeddings ``` # Instantiate the OpenAIEmbeddings class openai = OpenAIEmbeddings(openai_api_key="") # Generate embeddings for your documents embeddings = openai.embed_documents([doc.page_content for doc in documents]) # Create tuples of text and corresponding embedding text_embeddings = list(zip([doc.page_content for doc in documents], embeddings)) # Create a FAISS vector store from the embeddings vectorStore = FAISS.from_embeddings(text_embeddings, openai) # Create a retriever for the vector database retriever = vectorStore.as_retriever(search_kwargs={"k": 10}) docs = retriever.get_relevant_documents("Data related to cricket") ``` while retrieving the document and printing the output, it'll only return page_content and doesn't return metadata. So, is there any to also add metadata to the vector store and return metadata along with the page_content output?
how to also add metadata along with the page_content to the vector store?
https://api.github.com/repos/langchain-ai/langchain/issues/17459/comments
5
2024-02-13T10:34:22Z
2024-08-01T15:41:18Z
https://github.com/langchain-ai/langchain/issues/17459
2,131,995,849
17,459
[ "langchain-ai", "langchain" ]
### Checklist - [X] I added a very descriptive title to this issue. - [X] I included a link to the documentation page I am referring to (if applicable). ### Issue with current documentation: - ### Idea or request for content: below is the code which will retrieve the documents ``` # Create a retriever for the vector database retriever = vectorStore.as_retriever(search_kwargs={"k": 10}) docs = retriever.get_relevant_documents("data related to cricket") ``` then below's the output when i tried to return the metadata ``` for doc in docs: print(doc.metadata) {} {} {} {} {} {} {} {} {} {} ``` it is not returning metadata, but it is returning page_content. How to also return the metadata along with page_content?
unable to retrieve metadata while retrieving the documents
https://api.github.com/repos/langchain-ai/langchain/issues/17458/comments
5
2024-02-13T10:29:04Z
2024-02-18T04:30:22Z
https://github.com/langchain-ai/langchain/issues/17458
2,131,985,841
17,458
[ "langchain-ai", "langchain" ]
### Checklist - [X] I added a very descriptive title to this issue. - [X] I included a link to the documentation page I am referring to (if applicable). ### Issue with current documentation: - ### Idea or request for content: below is the code how documents[0] look like `Document(page_content='Text: This e Australian Act (Cth).', metadata={'source': '/content/excel_data/2.csv', 'row': 0, 'x1': '296.707803875208', 'y1': '211.071329072118', 'x2': '1436.41797742248', 'y2': '276.853476041928', 'Block Type': 'LAYOUT_TEXT', 'Block ID': '42c93696619b409b80ddc71df32580f2', 'page_num': '1', 'Company': 'BBCI', 'Year': '2020', 'is_answer': '0'})` below's the code which returns the topk unique documents using retriever ``` def fetch_unique_documents_with_metadata(query, initial_limit, desired_count): unique_docs_with_metadata = [] seen_contents = set() current_limit = initial_limit while len(unique_docs_with_metadata) < desired_count: retriever = vectorStore.as_retriever(search_kwargs={"k": current_limit}) docs = retriever.get_relevant_documents(query) # Keep track of whether we found new unique documents in this iteration found_new_unique = False for doc in docs: if doc.page_content not in seen_contents: # Add both page_content and metadata to the unique document list doc_with_metadata = { "page_content": doc.page_content, "metadata": doc.metadata } unique_docs_with_metadata.append(doc_with_metadata) seen_contents.add(doc.page_content) found_new_unique = True if len(unique_docs_with_metadata) == desired_count: break if not found_new_unique or len(unique_docs_with_metadata) == desired_count: break # Exit if no new unique documents are found or if we've reached the desired count # Increase the limit more aggressively if we are still far from the desired count current_limit += desired_count - len(unique_docs_with_metadata) return unique_docs_with_metadata # Example usage with the updated function query = "Does or concerns, including in relation?" 
desired_count = 10 # The number of unique documents you want unique_documents_with_metadata = fetch_unique_documents_with_metadata(query, initial_limit=desired_count, desired_count=desired_count) # Print the unique documents or handle them as needed # for doc in unique_documents_with_metadata: # print(f"Row {doc['metadata']['row']}: {doc['page_content']}") # print(f"Metadata: {doc['metadata']}") len(unique_documents_with_metadata) ``` the output is below ``` {'page_content': 'Text: 5. esources departments or the \n.', 'metadata': {}} ``` unable to extract/return metadata while retrieving the relevant documents. It is not returning metadata present inside the documents, it is only returning page_content. I want to return all the medata which looks like metadata={'source': '/content/excel_data/2.csv', 'row': 0, 'x1': '296.707803875208', 'y1': '211.071329072118', 'x2': '1436.41797742248', 'y2': '276.853476041928', 'Block Type': 'LAYOUT_TEXT', 'Block ID': '42c93696619b409b80ddc71df32580f2', 'page_num': '1', 'Company': 'BBCI', 'Year': '2020', 'is_answer': '0'}) but it is only returning page_content. Can you look into it and help me with the code?
unable to extract/return metadata while retrieving the relevant documents
https://api.github.com/repos/langchain-ai/langchain/issues/17455/comments
4
2024-02-13T10:08:37Z
2024-02-14T01:39:23Z
https://github.com/langchain-ai/langchain/issues/17455
2,131,944,956
17,455
[ "langchain-ai", "langchain" ]
### Checked other resources - [X] I added a very descriptive title to this issue. - [X] I searched the LangChain documentation with the integrated search. - [X] I used the GitHub search to find a similar question and didn't find it. - [X] I am sure that this is a bug in LangChain rather than my code. ### Example Code from langchain.document_loaders import GithubFileLoader loader = GithubFileLoader( repo="langchain-ai/langchain", # the repo name access_token="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", github_api_url="https://api.github.com", file_filter=lambda file_path: file_path.endswith( ".md" ), # load all markdowns files. ) documents = loader.load() print(documents) ### Error Message and Stack Trace (if applicable) _No response_ ### Description raise HTTPError(http_error_msg, response=self) requests.exceptions.HTTPError: 404 Client Error: Not Found for url: https://api.github.com/api/v3/repos/langchain-ai/langchain/git/trees/master?recursive=1 ### System Info System Information ------------------ > OS: Linux > OS Version: #17~22.04.1-Ubuntu SMP PREEMPT_DYNAMIC Tue Jan 16 14:32:32 UTC 2 > Python Version: 3.11.4 (main, Jul 5 2023, 13:45:01) [GCC 11.2.0] Package Information ------------------- > langchain_core: 0.1.22 > langchain: 0.1.6 > langchain_community: 0.0.19 > langsmith: 0.0.87 Packages not installed (Not Necessarily a Problem) -------------------------------------------------- The following packages were not found: > langgraph > langserve
GithubFileLoader API Error
https://api.github.com/repos/langchain-ai/langchain/issues/17453/comments
13
2024-02-13T09:48:58Z
2024-07-23T16:07:26Z
https://github.com/langchain-ai/langchain/issues/17453
2,131,905,108
17,453
[ "langchain-ai", "langchain" ]
### Checklist - [X] I added a very descriptive title to this issue. - [X] I included a link to the documentation page I am referring to (if applicable). ### Issue with current documentation: See here: https://github.com/langchain-ai/langchain/pull/610 no mention of this hypothetical notebook containing an example. and here: https://api.python.langchain.com/en/latest/evaluation/langchain.evaluation.qa.eval_chain.QAEvalChain.html# What does this mean? ![image](https://github.com/langchain-ai/langchain/assets/25189545/258a11a9-8ac9-4cee-bfd3-3557886ece37) It's cryptic. ### Idea or request for content: ```python from langchain import PromptTemplate # Create a custom prompt template incorporating the rubric custom_prompt_template = PromptTemplate( template=f""" Grade the student's answer's against the answer key on a scale of 1 to 10 based on the following criteria: - Accuracy: Is the answer correct? (40%) - Relevance: Is the answer relevant to the question? (30%) - Clarity: Is the answer clearly articulated? (20%) - Grammar: Is the answer grammatically correct? (10%) Provide a numerical score and a brief justification for each category. 
{{input}} Evaluate the following answer based on the criteria outlined in this rubric: {{prompt}} Answer: {{evaluation}} """) # Initialize the QAEvalChain with the custom prompt eval_chain = QAEvalChain.from_llm(llm, prompt=custom_prompt_template, verbose=True) # %% qa_pair = {'query': 'What did the boy do when he was tired?', 'answer': 'He would sleep in her shade.'} student_answer = {'query': 'What did the boy do when he was tired?', 'answer': 'When the boy was tired, he asked the tree for a quiet place to sit and rest, and the tree offered her old stump for him to sit and rest.'} eval_data = zip([qa_pair], [student_answer]) graded_outputs = eval_chain.evaluate(eval_data) ``` I want to do something like that passing custom rubrics for evaluation but have no idea how arguments to the call `.evaluate()` map to `input`, `prompt`, `evaluation` which are deemed required input variables in the documentation. Please when you speak among yourselves remember that users are going to be researching potential questions, as you insist, but if you don't provide more informative speech amongst yourselves, I have to create a new issue like this one.
QAEvalChain custom prompt how do I do this?
https://api.github.com/repos/langchain-ai/langchain/issues/17449/comments
3
2024-02-13T08:52:21Z
2024-05-21T16:09:26Z
https://github.com/langchain-ai/langchain/issues/17449
2,131,806,527
17,449
[ "langchain-ai", "langchain" ]
### Checked other resources - [X] I added a very descriptive title to this issue. - [X] I searched the LangChain documentation with the integrated search. - [X] I used the GitHub search to find a similar question and didn't find it. - [X] I am sure that this is a bug in LangChain rather than my code. ### Example Code from langchain.agents.agent_types import AgentType from langchain_experimental.agents.agent_toolkits.csv.base import create_csv_agent from langchain_openai import OpenAI from langchain_community.embeddings import HuggingFaceInferenceAPIEmbeddings from langchain_experimental.agents.agent_toolkits import create_pandas_dataframe_agent agent = create_csv_agent( llm , 'train.csv', agent_type=AgentType.ZERO_SHOT_REACT_DESCRIPTION, ) agent.run("how many rows are there?")¨ f=pd.read_csv('train.csv',delimiter=';',encoding='Latin-1') print(df.head()) agent = create_pandas_dataframe_agent(llm, df, agent_type="openai-tools", verbose=True) agent.invoke( { "input": "What's the correlation between age and fare? is that greater than the correlation between fare and survival?" 
} ) **## Output ## return func(*args, **kwargs) ^^^^^^^^^^^^^^^^^^^^^ TypeError: Completions.create() got an unexpected keyword argument 'tools'** **and when changing the LLM or agent type i get this error: in create_react_agent raise ValueError(f"Prompt missing required variables: {missing_vars}") ValueError: Prompt missing required variables: {'tools', 'tool_names'}** ### Error Message and Stack Trace (if applicable) line 26, in <module> agent = create_csv_agent(llm ,'train.csv',agent_type=AgentType.ZERO_SHOT_REACT_DESCRIPTION) ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ File "\LocalCache\local-packages\Python311\site-packages\langchain_experimental\agents\agent_toolkits\csv\base.py", line 66, in create_csv_agent return create_pandas_dataframe_agent(llm, df, **kwargs) ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ File "\Local\Packages\PythonSoftwareFoundation.Python.3.11_qbz5n2kfra8p0\LocalCache\local-packages\Python311\site-packages\langchain_experimental\agents\agent_toolkits\pandas\base.py", line 264, in create_pandas_dataframe_agent runnable=create_react_agent(llm, tools, prompt), # type: ignore ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ File "\Local\Packages\PythonSoftwareFoundation.Python.3.11_qbz5n2kfra8p0\LocalCache\local-packages\Python311\site-packages\langchain\agents\react\agent.py", line 97, in create_react_agent raise ValueError(f"Prompt missing required variables: {missing_vars}") ValueError: Prompt missing required variables: {'tool_names', 'tools'} ### Description I am trying to use the CSV agent to query my CSV but I keep getting the error: TypeError: Completions.create() got an unexpected keyword argument 'tools' **( for agent type = openai tools)** and I tried a different agent type and I am having this error **ValueError: Prompt missing required variables: {'tools', 'tool_names'}** (i followed the example from the documentation) if anyone has an idea how to fix this or encountered this issue before 
please reach out! ### System Info OS: Windows OS Version: 10.0.19045 Python Version: 3.11.8 (tags/v3.11.8:db85d51, Feb 6 2024, 22:03:32) [MSC v.1937 64 bit (AMD64)]
Erros with langchain CSV agent and Pandas agent
https://api.github.com/repos/langchain-ai/langchain/issues/17448/comments
7
2024-02-13T08:38:17Z
2024-05-21T16:09:20Z
https://github.com/langchain-ai/langchain/issues/17448
2,131,775,505
17,448
[ "langchain-ai", "langchain" ]
### Checked other resources - [X] I added a very descriptive title to this issue. - [X] I searched the LangChain documentation with the integrated search. - [X] I used the GitHub search to find a similar question and didn't find it. - [X] I am sure that this is a bug in LangChain rather than my code. ### Example Code ```python from langchain_community.tools.tavily_search import TavilySearchResults, TavilyAnswer search = TavilySearchResults() print(search.invoke("what is the weather in SF")) # output: [] tans = TavilyAnswer() print(tans.invoke("What is the weather in SF?")) # output: The current weather in San Francisco is partly cloudy with a temperature of 51.1°F (10.6°C). The wind is coming from the west-northwest at 8.1 mph (13.0 km/h), and the humidity is at 86%. The visibility is 9.0 miles (16.0 km), and the UV index is 1.0. The highest temperature recorded in San Francisco in 2024 was 73°F (23°C) on January 29. ``` ### Error Message and Stack Trace (if applicable) _No response_ ### Description # Issue: I'm trying the [Agents Quickstart example on Tavily](https://python.langchain.com/docs/modules/agents/quick_start). As shown above, "TavilySearchResults" is returning empty response. Upon checking, I found "TavilyAnswer" can return results correctly. Digging further, I found the difference is due to Tavily rest API param "search_depth". "TavilySearchResults" defaults to "advanced" while "TavilyAnswer" defaults to "basic". I tried on https://app.tavily.com/playground, same behavior, search_depth="basic" can return results while "advanced" does not have result. # Possible Solution I'm on Tavily free tier, hope the API tier does not make a difference. I propose either to change default search_depth to "basic" or expose search_depth as variable in TavilySearchResults. 
let me know any option is preferred, I can create a pull request ### System Info System Information ------------------ > OS: Darwin > OS Version: Darwin Kernel Version 23.3.0: Wed Dec 20 21:30:44 PST 2023; root:xnu-10002.81.5~7/RELEASE_ARM64_T6000 > Python Version: 3.9.18 | packaged by conda-forge | (main, Dec 23 2023, 16:35:41) [Clang 16.0.6 ] Package Information ------------------- > langchain_core: 0.1.16 > langchain: 0.1.5 > langchain_community: 0.0.17 > langchain_experimental: 0.0.50 > langchain_openai: 0.0.5 > langchainhub: 0.1.14 Packages not installed (Not Necessarily a Problem) -------------------------------------------------- The following packages were not found: > langgraph > langserve
TavilySearchResults in Agents Quick Start always return empty result
https://api.github.com/repos/langchain-ai/langchain/issues/17447/comments
6
2024-02-13T08:26:36Z
2024-07-04T08:58:21Z
https://github.com/langchain-ai/langchain/issues/17447
2,131,742,229
17,447
[ "langchain-ai", "langchain" ]
### Checked other resources - [X] I added a very descriptive title to this issue. - [X] I searched the LangChain documentation with the integrated search. - [X] I used the GitHub search to find a similar question and didn't find it. - [X] I am sure that this is a bug in LangChain rather than my code. ### Example Code '''python def add_docs_to_chromadb(docpath: str, collection_name: str): try: logging.info("Adding docs to collection") vectordb = delib.connect_chromadb(collection_name = collection_name) if os.path.isfile(docpath): loader = UnstructuredFileLoader(docpath, mode="paged", strategy="hi_res", hi_res_model_name="detectron2_onnx", post_processors=[clean_extra_whitespace]) elif os.path.isdir(docpath): loader = DirectoryLoader(docpath, silent_errors=True, use_multithreading=True, loader_kwargs={"mode": "paged", "strategy":"hi_res", "hi_res_model_name":"detectron2_onnx", "post_processors":[clean_extra_whitespace]}) #loader = DirectoryLoader(docpath, silent_errors=True, use_multithreading=True) else: logging.error(f"Provided path '{docpath}' is not a valid file or folder.") return {"response": f"Provided path '{docpath}' is not a valid file or folder."} logging.info("Connected to db and collection") #loader = DirectoryLoader(docpath) documents = loader.load() logging.info(f"There are {len(documents)} documents are loaded for indexing.") documents = filter_complex_metadata(documents) #text_splitter = TokenTextSplitter(chunk_size=1000, chunk_overlap=200) #text_splitter = RecursiveCharacterTextSplitter(separators=["\n\n", "\n", "\t"], chunk_size=10000, chunk_overlap=3000) text_splitter = RecursiveCharacterTextSplitter.from_tiktoken_encoder(separators=["\n\n", "\n", "\t"],chunk_size=delib.CHUNK_SIZE_TOKENS,chunk_overlap=200) texts = text_splitter.split_documents(documents) logging.info("Adding documents to vectordb..") vectordb.add_documents(documents=texts, embedding=embeddings, persist_directory = db_directory) #vectordb.add_documents(documents=texts) 
#vectordb.persist() logging.info(f"Documents from '{docpath}' indexed successfully.") except Exception as e: logging.error(f"An error occured: {str(e)}") logging.error("An error occured adding to collection: "+ str(e)) #pass ''' add_docs_to_chromadb("home/uploaded/data", "langchain") ### Error Message and Stack Trace (if applicable) 2024-02-13T03:12:26.0575885Z 2024-02-13 03:12:26,052 INFO: Detecting page elements ... 2024-02-13T03:12:26.4798476Z 2024-02-13 03:12:26,479 INFO: Processing entire page OCR with tesseract... 2024-02-13T03:12:33.8568556Z 2024-02-13 03:12:33,851 INFO: Detecting page elements ... 2024-02-13T03:12:33.9356842Z 2024-02-13 03:12:33,927 INFO: Detecting page elements ... 2024-02-13T03:12:34.4394549Z 2024-02-13 03:12:34,438 INFO: Processing entire page OCR with tesseract... 2024-02-13T03:12:41.8113054Z 2024-02-13 03:12:41,804 INFO: Detecting page elements ... 2024-02-13T03:12:42.8076208Z 2024-02-13 03:12:42,807 INFO: Detecting page elements ... 2024-02-13T03:12:47.3426438Z 2024-02-13 03:12:47,328 INFO: Processing entire page OCR with tesseract... 2024-02-13T03:12:49.6169096Z 2024-02-13 03:12:49,608 INFO: Detecting page elements ... 2024-02-13T03:12:50.7754205Z 2024-02-13 03:12:50,767 INFO: Detecting page elements ... 2024-02-13T03:12:58.8977025Z 2024-02-13 03:12:58,891 INFO: Detecting page elements ... 2024-02-13T03:12:59.5277951Z 2024-02-13 03:12:59,527 INFO: Detecting page elements ... 2024-02-13T03:13:00.6122064Z [2024-02-13 03:13:00 +0000] [32703] [INFO] 169.254.130.1:62885 - "GET /docs HTTP/1.1" 200 2024-02-13T03:13:04.4380495Z 2024-02-13 03:13:04,433 INFO: Processing entire page OCR with tesseract... 2024-02-13T03:13:06.8056447Z 2024-02-13 03:13:06,803 INFO: Detecting page elements ... 2024-02-13T03:13:07.5115041Z 2024-02-13 03:13:07,497 INFO: Detecting page elements ... 2024-02-13T03:13:12.0908634Z 2024-02-13 03:13:12,081 INFO: Processing entire page OCR with tesseract... 
2024-02-13T03:13:13.3332086Z 2024-02-13 03:13:13,323 INFO: Detecting page elements ... 2024-02-13T03:13:16.9851600Z 2024-02-13 03:13:16,979 INFO: Detecting page elements ... 2024-02-13T03:13:17.7084602Z 2024-02-13 03:13:17,706 INFO: Processing entire page OCR with tesseract... 2024-02-13T03:13:21.0221346Z 2024-02-13 03:13:21,018 INFO: Detecting page elements ... 2024-02-13T03:13:21.8595109Z 2024-02-13 03:13:21,857 INFO: Processing entire page OCR with tesseract... 2024-02-13T03:13:22.0843506Z 2024-02-13 03:13:22,083 INFO: Processing entire page OCR with tesseract... 2024-02-13T03:13:24.1381747Z 2024-02-13 03:13:24,137 INFO: Detecting page elements ... 2024-02-13T03:13:26.8770925Z 2024-02-13 03:13:26,876 INFO: Processing entire page OCR with tesseract... 2024-02-13T03:13:28.9028391Z 2024-02-13 03:13:28,892 INFO: Detecting page elements ... 2024-02-13T03:13:31.8635158Z 2024-02-13 03:13:31,860 INFO: Detecting page elements ... ### Description I am trying to upload a folder "data" containing some pdf files to chromadb vectorstore, If I do not use the loader_kwargs parameter then it takes little time in indexing but then accuracy of getting the answer from vectordb is not good so I want to use the loader_kwargs to pass the parameters to loader_cls which is by default "UnstructuredFileLoader" but in that case it takes a lot of time which more than expected like for 4 to 5 pdfs it is taking more than two hours and when I uploaded folder containing 130 pdf files and checked after 12 hours and found that indexing could not done and log was freezed. Can you let me know any solution for this, I will be thankful to you. ### System Info System Information ------------------ > OS: Windows > OS Version: 10.0.22621 > Python Version: 3.9.5 (default, May 18 2021, 14:42:02) [MSC v.1916 64 bit (AMD64)] Package Information ------------------- > langchain_core: 0.1.10 > langchain: 0.0.242 > langchain_community: 0.0.12 > langserve: Not Found
OCR with Tesseract takes more than expected time while indexing a pdf file to chromadb using UnstructuredFileLoader
https://api.github.com/repos/langchain-ai/langchain/issues/17444/comments
1
2024-02-13T06:20:29Z
2024-05-21T16:09:15Z
https://github.com/langchain-ai/langchain/issues/17444
2,131,545,246
17,444
[ "langchain-ai", "langchain" ]
### Checked other resources - [X] I added a very descriptive title to this issue. - [X] I searched the LangChain documentation with the integrated search. - [X] I used the GitHub search to find a similar question and didn't find it. - [X] I am sure that this is a bug in LangChain rather than my code. ### Example Code ```json {"page_content": "hallo test", "metadata": {"source": "mysource", "seq_num": 244 }, "type": "Document"} ``` ```py chunked_documents = load_docs_from_jsonl("test.jsonl") embeddings = OpenAIEmbeddings(openai_api_key=OPENAI_API_KEY) collection_name = "test_embeddings_cs" + str(CHUNK_SIZE) db = PGVector.from_documents( embedding=embeddings, documents=chunked_documents, collection_name=collection_name, connection_string=POSTGRES_URL, pre_delete_collection=False, ) ``` ``` DETAIL: Missing "]" after array dimensions. [SQL: INSERT INTO langchain_pg_embedding (collection_id, embedding, document, cmetadata, custom_id, uuid) VALUES (%(collection_id)s::UUID, %(embedding)s, %(document)s, %(cmetadata)s, %(custom_id)s, %(uuid)s::UUID)] [parameters: {'collection_id': UUID('f35bcca7-c797-4e57-a31a-8d82da54542b'), 'embedding': '[-0.02666512080169465,0.0014875975490676566,0.003841705348754905,-0.025984670417264277,-0.01976139198263224,-0.005496757382752562,-0.0229651753288143 ... (32576 characters truncated) ... .016387495109019937,-0.02728886667698162,-0.011532203877076985,-0.013332560400847324,-0.006326055473869786,0.015182533137287487,0.012971071250534016]', 'document': 'hallo test', 'cmetadata': '{"source": "mysource", "seq_num": 244}', 'custom_id': 'ab97f376-c9cf-11ee-88a9-1eae5cf5b7d5', 'uuid': UUID('f21bf17b-07b2-40f2-a33f-16c50f682e7f')}] (Background on this error at: https://sqlalche.me/e/20/9h9h) ``` ### Error Message and Stack Trace (if applicable) _No response_ ### Description I have a very simple code that creates embeddings from my previously prepared documents with PGVector and tries to push it to the PostgreSQL in Vercel. 
Unfortunately it complains with an `DETAIL: Missing "]" after array dimensions.` ### System Info langchain==0.1.6 langchain-community==0.0.19 langchain-core==0.1.22 langchain-openai==0.0.5 mac Python 3.12.1
Error when pushing PGVector embeddings to Vercel
https://api.github.com/repos/langchain-ai/langchain/issues/17428/comments
4
2024-02-12T18:01:04Z
2024-07-03T16:34:06Z
https://github.com/langchain-ai/langchain/issues/17428
2,130,669,648
17,428
[ "langchain-ai", "langchain" ]
### Checklist - [X] I added a very descriptive title to this issue. - [X] I included a link to the documentation page I am referring to (if applicable). ### Issue with current documentation: - ### Idea or request for content: below is the code ``` # Categorize documents documents_dict = { 'amd': DirectoryLoader('/content/amd', glob="*.pdf", loader_cls=PyPDFLoader).load(), 'engie': DirectoryLoader('/content/engie', glob="*.pdf", loader_cls=PyPDFLoader).load(), # Add more categories as needed } # Create a vector database and a retriever for each category vector_stores = {} retrievers = {} class MyTextSplitter(RecursiveCharacterTextSplitter): def split_documents(self, documents): chunks = super().split_documents(documents) for document, chunk in zip(documents, chunks): chunk.metadata['source'] = document.metadata['source'] return chunks text_splitter = MyTextSplitter(chunk_size=1000, chunk_overlap=100) for category, docs in documents_dict.items(): texts = text_splitter.split_documents(docs) vector_store = FAISS.from_documents(texts, embeddings) retriever = vector_store.as_retriever(search_kwargs={"k": 5}) vector_stores[category] = vector_store retrievers[category] = retriever # Answer a question related to 'Cricket' category = 'engie' qa_chain = RetrievalQA.from_chain_type(llm=OpenAI(temperature=0.2), chain_type="stuff", retriever=retrievers[category], return_source_documents=True) # Format the prompt using the template context = "" # question = "what's the final provision of dhl?" question = "what for it strives?" formatted_prompt = prompt_template.format(context=context, question=question) # Pass the formatted prompt to the RetrievalQA function llm_response = qa_chain(formatted_prompt) process_llm_response(llm_response) ``` in the above code, if you see even retrievers has been iterated. Is there any way that we can only use only one retriever to retrieve the data from multiple vector stores? 
As in there should be only one retriever, if category is amd, it should fetch data from amd category vector store and return the answer. Can you help me with this code?
how to use only retriever to retrieve data from multiple knowledge bases?
https://api.github.com/repos/langchain-ai/langchain/issues/17427/comments
1
2024-02-12T17:56:28Z
2024-02-14T01:47:37Z
https://github.com/langchain-ai/langchain/issues/17427
2,130,661,616
17,427
[ "langchain-ai", "langchain" ]
### Checklist - [X] I added a very descriptive title to this issue. - [X] I included a link to the documentation page I am referring to (if applicable). ### Issue with current documentation: - ### Idea or request for content: below is the code ``` # Categorize documents documents_dict = { 'amd': DirectoryLoader('/content/amd', glob="*.pdf", loader_cls=PyPDFLoader).load(), 'engie': DirectoryLoader('/content/engie', glob="*.pdf", loader_cls=PyPDFLoader).load(), # Add more categories as needed } # Create a vector database and a retriever for each category vector_stores = {} retrievers = {} class MyTextSplitter(RecursiveCharacterTextSplitter): def split_documents(self, documents): chunks = super().split_documents(documents) for document, chunk in zip(documents, chunks): chunk.metadata['source'] = document.metadata['source'] return chunks text_splitter = MyTextSplitter(chunk_size=1000, chunk_overlap=100) for category, docs in documents_dict.items(): texts = text_splitter.split_documents(docs) vector_store = Chroma.from_documents(texts, embeddings) retriever = vector_store.as_retriever(search_kwargs={"k": 5}) vector_stores[category] = vector_store retrievers[category] = retriever llm = OpenAI(temperature=0.2) # Create a retriever for the vector database document_content_description = "Description of a corporate document outlining human rights commitments and implementation strategies by an organization, including ethical principles, global agreements, and operational procedures." 
metadata_field_info = [ { "name": "document_type", "description": "The type of document, such as policy statement, modern slavery statement, human rights due diligence manual.", "type": "string", }, { "name": "company_name", "description": "The name of the company that the document pertains to.", "type": "string", }, { "name": "effective_date", "description": "The date when the document or policy became effective.", "type": "date", }, ] category = 'amd' retriever = SelfQueryRetriever.from_llm( llm, vector_stores[category], document_content_description, metadata_field_info, use_original_query=False, verbose=True ) # print(retriever) # Answer a question related to 'Cricket' # category = 'amd' qa_chain = RetrievalQA.from_chain_type(llm=OpenAI(temperature=0.2), chain_type="stuff", retriever=retriever, return_source_documents=True) # Format the prompt using the template context = "" # question = "what's the final provision of dhl?" question = "what for it strives?" formatted_prompt = prompt_template.format(context=context, question=question) # Pass the formatted prompt to the RetrievalQA function llm_response = qa_chain(formatted_prompt) process_llm_response(llm_response) ``` above is the code which should return output from amd document, but it is returning output from engie document. I feel because while using the SelfQueryRetriever, we're not defining retrievers[category] inside the SelfQueryRetriever, maybe that's one of the reasons. And we must utilize both retrievers[category] and vector_stores[category] to get the output from the category we have mentioned. But SelfQueryRetriever won't accept retriever parameter in it. Is there any alternative for this? Can you have a look into it and fix the code?
not able to return correct answer from selected vector category and no option to add retriever category in SelfQueryRetriever
https://api.github.com/repos/langchain-ai/langchain/issues/17426/comments
3
2024-02-12T17:49:38Z
2024-02-14T03:34:59Z
https://github.com/langchain-ai/langchain/issues/17426
2,130,650,621
17,426
[ "langchain-ai", "langchain" ]
### Checklist - [X] I added a very descriptive title to this issue. - [X] I included a link to the documentation page I am referring to (if applicable). ### Issue with current documentation: - ### Idea or request for content: below's the code ``` # Categorize documents documents_dict = { 'amd': DirectoryLoader('/content/amd', glob="*.pdf", loader_cls=PyPDFLoader).load(), 'engie': DirectoryLoader('/content/engie', glob="*.pdf", loader_cls=PyPDFLoader).load(), # Add more categories as needed } # Create a vector database and a retriever for each category vector_stores = {} retrievers = {} class MyTextSplitter(RecursiveCharacterTextSplitter): def split_documents(self, documents): chunks = super().split_documents(documents) for document, chunk in zip(documents, chunks): chunk.metadata['source'] = document.metadata['source'] return chunks text_splitter = MyTextSplitter(chunk_size=1000, chunk_overlap=100) for category, docs in documents_dict.items(): texts = text_splitter.split_documents(docs) vector_store = Chroma.from_documents(texts, embeddings) retriever = vector_store.as_retriever(search_kwargs={"k": 5}) vector_stores[category] = vector_store retrievers[category] = retriever llm = OpenAI(temperature=0.2) # Create a retriever for the vector database document_content_description = "Description of a corporate document outlining human rights commitments and implementation strategies by an organization, including ethical principles, global agreements, and operational procedures." 
metadata_field_info = [ { "name": "document_type", "description": "The type of document, such as policy statement, modern slavery statement, human rights due diligence manual.", "type": "string", }, { "name": "company_name", "description": "The name of the company that the document pertains to.", "type": "string", }, { "name": "effective_date", "description": "The date when the document or policy became effective.", "type": "date", }, ] category = 'amd' retriever = SelfQueryRetriever.from_llm( llm, vector_stores[category], document_content_description, metadata_field_info, use_original_query=False, verbose=True ) # print(retriever) # Answer a question related to 'Cricket' # category = 'amd' qa_chain = RetrievalQA.from_chain_type(llm=OpenAI(temperature=0.2), chain_type="stuff", retriever=retriever, return_source_documents=True) # Format the prompt using the template context = "" # question = "what's the final provision of dhl?" question = "what for it strives?" formatted_prompt = prompt_template.format(context=context, question=question) # Pass the formatted prompt to the RetrievalQA function llm_response = qa_chain(formatted_prompt) process_llm_response(llm_response) ``` above is the code which should return output from amd document, but it is returning output from engie document. I feel because while using the SelfQueryRetriever, we're not defining retrievers[category] anywhere, maybe that's one of the reasons. Can you have a look into it and fix the code?
not able to return answer from selected vector category and no option to add retriever category in SelfQueryRetriever
https://api.github.com/repos/langchain-ai/langchain/issues/17424/comments
1
2024-02-12T16:34:16Z
2024-02-14T03:34:58Z
https://github.com/langchain-ai/langchain/issues/17424
2,130,511,421
17,424
[ "langchain-ai", "langchain" ]
### Checked other resources - [X] I added a very descriptive title to this issue. - [X] I searched the LangChain documentation with the integrated search. - [X] I used the GitHub search to find a similar question and didn't find it. - [X] I am sure that this is a bug in LangChain rather than my code. ### Example Code from langchain_openai import AzureChatOpenAI ### Error Message and Stack Trace (if applicable) It does not authorize the API from bearer token ### Description az login az account get-access-token --resource https://cognitiveservices.azure.com --query "accessToken" -o tsv ref link: https://learn.microsoft.com/en-us/azure/ai-services/openai/how-to/managed-identity ### System Info from langchain_openai import AzureChatOpenAI Windows python 3.11
Bearer authentication of Azure OpenAI instead API_KEY
https://api.github.com/repos/langchain-ai/langchain/issues/17422/comments
1
2024-02-12T16:03:43Z
2024-05-20T16:09:29Z
https://github.com/langchain-ai/langchain/issues/17422
2,130,446,715
17,422
[ "langchain-ai", "langchain" ]
### Checklist - [X] I added a very descriptive title to this issue. - [X] I included a link to the documentation page I am referring to (if applicable). ### Issue with current documentation: - ### Idea or request for content: below's the code ``` # Categorize documents documents_dict = { 'amd': DirectoryLoader('/content/amd', glob="*.pdf", loader_cls=PyPDFLoader).load(), 'engie': DirectoryLoader('/content/engie', glob="*.pdf", loader_cls=PyPDFLoader).load(), # Add more categories as needed } # Create a vector database and a retriever for each category vector_stores = {} retrievers = {} class MyTextSplitter(RecursiveCharacterTextSplitter): def split_documents(self, documents): chunks = super().split_documents(documents) for document, chunk in zip(documents, chunks): chunk.metadata['source'] = document.metadata['source'] return chunks text_splitter = MyTextSplitter(chunk_size=1000, chunk_overlap=100) for category, docs in documents_dict.items(): texts = text_splitter.split_documents(docs) vector_store = Chroma.from_documents(texts, embeddings) retriever = vector_store.as_retriever(search_kwargs={"k": 5}) vector_stores[category] = vector_store retrievers[category] = retriever llm = OpenAI(temperature=0.2) # Create a retriever for the vector database document_content_description = "Description of a corporate document outlining human rights commitments and implementation strategies by an organization, including ethical principles, global agreements, and operational procedures." 
metadata_field_info = [ { "name": "document_type", "description": "The type of document, such as policy statement, modern slavery statement, human rights due diligence manual.", "type": "string", }, { "name": "company_name", "description": "The name of the company that the document pertains to.", "type": "string", }, { "name": "effective_date", "description": "The date when the document or policy became effective.", "type": "date", }, ] category = 'amd' retriever = SelfQueryRetriever.from_llm( llm, vector_stores[category], document_content_description, metadata_field_info, use_original_query=False, verbose=True ) # Answer a question related to 'Cricket' # category = 'amd' qa_chain = RetrievalQA.from_chain_type(llm=OpenAI(temperature=0.2), chain_type="stuff", retriever=retrievers[category], return_source_documents=True) # Format the prompt using the template context = "" # question = "what's the final provision of dhl?" question = "what for it strives?" formatted_prompt = prompt_template.format(context=context, question=question) # Pass the formatted prompt to the RetrievalQA function llm_response = qa_chain(formatted_prompt) process_llm_response(llm_response) ``` above is the code which should output from amd document, but it is returning output from engie document. Can you have a look into it and fix the code?
unable to return answer from selected vector category
https://api.github.com/repos/langchain-ai/langchain/issues/17421/comments
1
2024-02-12T16:00:09Z
2024-02-14T03:34:58Z
https://github.com/langchain-ai/langchain/issues/17421
2,130,439,746
17,421