method_name
stringlengths
1
78
method_body
stringlengths
3
9.66k
full_code
stringlengths
31
10.7k
docstring
stringlengths
4
4.74k
_get_elements
from unstructured.partition.odt import partition_odt return partition_odt(filename=self.file_path, **self.unstructured_kwargs)
def _get_elements(self) ->List: from unstructured.partition.odt import partition_odt return partition_odt(filename=self.file_path, **self.unstructured_kwargs)
null
ignore_chain
"""Whether to ignore chain callbacks.""" return self.ignore_chain_
@property def ignore_chain(self) ->bool: """Whether to ignore chain callbacks.""" return self.ignore_chain_
Whether to ignore chain callbacks.
convert_dict_to_message
"""Convert a dictionary to a LangChain message. Args: _dict: The dictionary. Returns: The LangChain message. """ role = _dict.get('role') if role == 'user': return HumanMessage(content=_dict.get('content', '')) elif role == 'assistant': content = _dict.get('content', '') or '' ...
def convert_dict_to_message(_dict: Mapping[str, Any]) ->BaseMessage: """Convert a dictionary to a LangChain message. Args: _dict: The dictionary. Returns: The LangChain message. """ role = _dict.get('role') if role == 'user': return HumanMessage(content=_dict.get('conte...
Convert a dictionary to a LangChain message. Args: _dict: The dictionary. Returns: The LangChain message.
__init__
"""Create a RedisTag FilterField. Args: field (str): The name of the RedisTag field in the index to be queried against. """ super().__init__(field)
def __init__(self, field: str): """Create a RedisTag FilterField. Args: field (str): The name of the RedisTag field in the index to be queried against. """ super().__init__(field)
Create a RedisTag FilterField. Args: field (str): The name of the RedisTag field in the index to be queried against.
test_load_success_load_max_docs
"""Test that returns the correct answer""" api_client = PubMedLoader(query='chatgpt', load_max_docs=2) docs = api_client.load() print(docs) assert len(docs) == api_client.load_max_docs == 2 assert_docs(docs)
def test_load_success_load_max_docs() ->None: """Test that returns the correct answer""" api_client = PubMedLoader(query='chatgpt', load_max_docs=2) docs = api_client.load() print(docs) assert len(docs) == api_client.load_max_docs == 2 assert_docs(docs)
Test that returns the correct answer
test_sim_search
"""Test end to end construction and simple similarity search.""" hnsw_vec_store = DocArrayHnswSearch.from_texts(texts, FakeEmbeddings(), work_dir=str(tmp_path), n_dim=10, dist_metric=metric, index=True) output = hnsw_vec_store.similarity_search('foo', k=1) assert output == [Document(page_content='foo')]
@pytest.mark.parametrize('metric', ['cosine', 'l2']) def test_sim_search(metric: str, texts: List[str], tmp_path: Path) ->None: """Test end to end construction and simple similarity search.""" hnsw_vec_store = DocArrayHnswSearch.from_texts(texts, FakeEmbeddings(), work_dir=str(tmp_path), n_dim=10, dist_...
Test end to end construction and simple similarity search.
input_keys
"""Return the singular input key. :meta private: """ return [self.input_key]
@property def input_keys(self) ->List[str]: """Return the singular input key. :meta private: """ return [self.input_key]
Return the singular input key. :meta private:
_import_azure_openai
from langchain_community.llms.openai import AzureOpenAI return AzureOpenAI
def _import_azure_openai() ->Any: from langchain_community.llms.openai import AzureOpenAI return AzureOpenAI
null
get_validated_relative_path
"""Resolve a relative path, raising an error if not within the root directory.""" root = root.resolve() full_path = (root / user_path).resolve() if not is_relative_to(full_path, root): raise FileValidationError( f'Path {user_path} is outside of the allowed directory {root}') return full_path
def get_validated_relative_path(root: Path, user_path: str) ->Path: """Resolve a relative path, raising an error if not within the root directory.""" root = root.resolve() full_path = (root / user_path).resolve() if not is_relative_to(full_path, root): raise FileValidationError( f'Pa...
Resolve a relative path, raising an error if not within the root directory.
_get_json_operator
if isinstance(value, str): return '->>' else: return '->'
def _get_json_operator(self, value: Any) ->str: if isinstance(value, str): return '->>' else: return '->'
null
output_keys
"""Return output key. :meta private: """ _output_keys = [self.answer_key, self.sources_answer_key] if self.return_source_documents: _output_keys = _output_keys + ['source_documents'] return _output_keys
@property def output_keys(self) ->List[str]: """Return output key. :meta private: """ _output_keys = [self.answer_key, self.sources_answer_key] if self.return_source_documents: _output_keys = _output_keys + ['source_documents'] return _output_keys
Return output key. :meta private:
load
"""Extract text from Diffbot on all the URLs and return Documents""" docs: List[Document] = list() for url in self.urls: try: data = self._get_diffbot_data(url) text = data['objects'][0]['text'] if 'objects' in data else '' metadata = {'source': url} docs.append(Document(page_content...
def load(self) ->List[Document]: """Extract text from Diffbot on all the URLs and return Documents""" docs: List[Document] = list() for url in self.urls: try: data = self._get_diffbot_data(url) text = data['objects'][0]['text'] if 'objects' in data else '' metadat...
Extract text from Diffbot on all the URLs and return Documents
_get_docs
"""Get docs.""" docs = self.retriever.get_relevant_documents(question, callbacks= run_manager.get_child()) return self._reduce_tokens_below_limit(docs)
def _get_docs(self, question: str, inputs: Dict[str, Any], *, run_manager: CallbackManagerForChainRun) ->List[Document]: """Get docs.""" docs = self.retriever.get_relevant_documents(question, callbacks= run_manager.get_child()) return self._reduce_tokens_below_limit(docs)
Get docs.
save_context
"""Pass."""
def save_context(self, inputs: Dict[str, Any], outputs: Dict[str, str]) ->None: """Pass."""
Pass.
__add__
if isinstance(other, GenerationChunk): generation_info = ({**self.generation_info or {}, **other. generation_info or {}} if self.generation_info is not None or other .generation_info is not None else None) return GenerationChunk(text=self.text + other.text, generation_info= generation_in...
def __add__(self, other: GenerationChunk) ->GenerationChunk: if isinstance(other, GenerationChunk): generation_info = ({**self.generation_info or {}, **other. generation_info or {}} if self.generation_info is not None or other.generation_info is not None else None) return Ge...
null
_load_retrieval_qa
if 'retriever' in kwargs: retriever = kwargs.pop('retriever') else: raise ValueError('`retriever` must be present.') if 'combine_documents_chain' in config: combine_documents_chain_config = config.pop('combine_documents_chain') combine_documents_chain = load_chain_from_config( combine_documents_...
def _load_retrieval_qa(config: dict, **kwargs: Any) ->RetrievalQA: if 'retriever' in kwargs: retriever = kwargs.pop('retriever') else: raise ValueError('`retriever` must be present.') if 'combine_documents_chain' in config: combine_documents_chain_config = config.pop('combine_documen...
null
test_initialization
"""Test integration vectorstore initialization.""" __ModuleName__VectorStore()
def test_initialization() ->None: """Test integration vectorstore initialization.""" __ModuleName__VectorStore()
Test integration vectorstore initialization.
_create_api_controller_tool
"""Expose controller as a tool. The tool is invoked with a plan from the planner, and dynamically creates a controller agent with relevant documentation only to constrain the context. """ base_url = api_spec.servers[0]['url'] def _create_and_run_api_controller_agent(plan_str: str) ->str: pattern = ...
def _create_api_controller_tool(api_spec: ReducedOpenAPISpec, requests_wrapper: RequestsWrapper, llm: BaseLanguageModel) ->Tool: """Expose controller as a tool. The tool is invoked with a plan from the planner, and dynamically creates a controller agent with relevant documentation only to constrain...
Expose controller as a tool. The tool is invoked with a plan from the planner, and dynamically creates a controller agent with relevant documentation only to constrain the context.
parse_issue
"""Create Document objects from a list of GitHub issues.""" metadata = {'url': issue['html_url'], 'title': issue['title'], 'creator': issue['user']['login'], 'created_at': issue['created_at'], 'comments': issue['comments'], 'state': issue['state'], 'labels': [label['name'] for label in issue['labels']], 'as...
def parse_issue(self, issue: dict) ->Document: """Create Document objects from a list of GitHub issues.""" metadata = {'url': issue['html_url'], 'title': issue['title'], 'creator': issue['user']['login'], 'created_at': issue['created_at' ], 'comments': issue['comments'], 'state': issue['state'],...
Create Document objects from a list of GitHub issues.
_chain_type
return 'llm_math_chain'
@property def _chain_type(self) ->str: return 'llm_math_chain'
null
calculator
"""Do math.""" return 'bar'
@tool def calculator(expression: str) ->str: """Do math.""" return 'bar'
Do math.
on_chain_error
if self.__has_valid_config is False: return try: self.__track_event('chain', 'error', run_id=str(run_id), parent_run_id= str(parent_run_id) if parent_run_id else None, error={'message': str(error), 'stack': traceback.format_exc()}, app_id=self.__app_id) except Exception as e: logger.error(f'...
def on_chain_error(self, error: BaseException, *, run_id: UUID, parent_run_id: Union[UUID, None]=None, **kwargs: Any) ->Any: if self.__has_valid_config is False: return try: self.__track_event('chain', 'error', run_id=str(run_id), parent_run_id=str(parent_run_id) if parent_run_id...
null
_run
try: source_path_ = self.get_relative_path(source_path) except FileValidationError: return INVALID_PATH_TEMPLATE.format(arg_name='source_path', value= source_path) try: destination_path_ = self.get_relative_path(destination_path) except FileValidationError: return INVALID_PATH_TEMPLATE.format(ar...
def _run(self, source_path: str, destination_path: str, run_manager: Optional[CallbackManagerForToolRun]=None) ->str: try: source_path_ = self.get_relative_path(source_path) except FileValidationError: return INVALID_PATH_TEMPLATE.format(arg_name='source_path', value= source_path...
null
similarity_search_with_score
"""Run similarity search with **vector distance**. The "scores" returned from this function are the raw vector distances from the query vector. For similarity scores, use ``similarity_search_with_relevance_scores``. Args: query (str): The query text for which to find simila...
def similarity_search_with_score(self, query: str, k: int=4, filter: Optional[RedisFilterExpression]=None, return_metadata: bool=True, ** kwargs: Any) ->List[Tuple[Document, float]]: """Run similarity search with **vector distance**. The "scores" returned from this function are the raw vector ...
Run similarity search with **vector distance**. The "scores" returned from this function are the raw vector distances from the query vector. For similarity scores, use ``similarity_search_with_relevance_scores``. Args: query (str): The query text for which to find similar documents. k (int): The number of doc...
create_prompt
"""Return default prompt.""" return WIKI_PROMPT
@classmethod def create_prompt(cls, tools: Sequence[BaseTool]) ->BasePromptTemplate: """Return default prompt.""" return WIKI_PROMPT
Return default prompt.
from_texts
index = create_index(texts, embeddings) return cls(embeddings=embeddings, index=index, texts=texts, **kwargs)
@classmethod def from_texts(cls, texts: List[str], embeddings: Embeddings, **kwargs: Any ) ->KNNRetriever: index = create_index(texts, embeddings) return cls(embeddings=embeddings, index=index, texts=texts, **kwargs)
null
__init__
"""Initializes the `deepevalCallbackHandler`. Args: implementation_name: Name of the implementation you want. metrics: What metrics do you want to track? Raises: ImportError: if the `deepeval` package is not installed. ConnectionError: if the connection ...
def __init__(self, metrics: List[Any], implementation_name: Optional[str]=None ) ->None: """Initializes the `deepevalCallbackHandler`. Args: implementation_name: Name of the implementation you want. metrics: What metrics do you want to track? Raises: ImportE...
Initializes the `deepevalCallbackHandler`. Args: implementation_name: Name of the implementation you want. metrics: What metrics do you want to track? Raises: ImportError: if the `deepeval` package is not installed. ConnectionError: if the connection to deepeval fails.
_llm_type
"""Return the type of llm.""" return 'rwkv'
@property def _llm_type(self) ->str: """Return the type of llm.""" return 'rwkv'
Return the type of llm.
_identifying_params
return {'responses': self.responses}
@property def _identifying_params(self) ->Dict[str, Any]: return {'responses': self.responses}
null
__init__
self.datastore_url = datastore_url self.api_key = api_key self.top_k = top_k
def __init__(self, datastore_url: str, top_k: Optional[int]=None, api_key: Optional[str]=None): self.datastore_url = datastore_url self.api_key = api_key self.top_k = top_k
null
test_batch
"""Test batch tokens from ChatAnthropicMessages.""" llm = ChatAnthropicMessages(model_name='claude-instant-1.2') result = llm.batch(["I'm Pickle Rick", "I'm not Pickle Rick"]) for token in result: assert isinstance(token.content, str)
def test_batch() ->None: """Test batch tokens from ChatAnthropicMessages.""" llm = ChatAnthropicMessages(model_name='claude-instant-1.2') result = llm.batch(["I'm Pickle Rick", "I'm not Pickle Rick"]) for token in result: assert isinstance(token.content, str)
Test batch tokens from ChatAnthropicMessages.
similarity_search_with_score_by_vector
"""Perform a similarity search with Yellowbrick with vector Args: embedding (List[float]): query embedding k (int, optional): Top K neighbors to retrieve. Defaults to 4. NOTE: Please do not let end-user fill this and always be aware of SQL injection. ...
def similarity_search_with_score_by_vector(self, embedding: List[float], k: int=4, **kwargs: Any) ->List[Tuple[Document, float]]: """Perform a similarity search with Yellowbrick with vector Args: embedding (List[float]): query embedding k (int, optional): Top K neighbors to retr...
Perform a similarity search with Yellowbrick with vector Args: embedding (List[float]): query embedding k (int, optional): Top K neighbors to retrieve. Defaults to 4. NOTE: Please do not let end-user fill this and always be aware of SQL injection. Returns: List[Document, float]: List of Doc...
drop
""" Helper function: Drop data """ from psycopg2 import sql cursor = self._connection.cursor() cursor.execute(sql.SQL('DROP TABLE IF EXISTS {}').format(sql.Identifier(table)) ) self._connection.commit() cursor.close()
def drop(self, table: str) ->None: """ Helper function: Drop data """ from psycopg2 import sql cursor = self._connection.cursor() cursor.execute(sql.SQL('DROP TABLE IF EXISTS {}').format(sql.Identifier (table))) self._connection.commit() cursor.close()
Helper function: Drop data
test_tool_lambda_args_schema
"""Test args schema inference when the tool argument is a lambda function.""" tool = Tool(name='tool', description='A tool', func=lambda tool_input: tool_input) assert tool.args_schema is None expected_args = {'tool_input': {'type': 'string'}} assert tool.args == expected_args
def test_tool_lambda_args_schema() ->None: """Test args schema inference when the tool argument is a lambda function.""" tool = Tool(name='tool', description='A tool', func=lambda tool_input: tool_input) assert tool.args_schema is None expected_args = {'tool_input': {'type': 'string'}} asser...
Test args schema inference when the tool argument is a lambda function.
test_from_documents
"""Test end to end construction and search.""" documents = [Document(page_content='Dogs are tough.', metadata={'a': 1}), Document(page_content='Cats have fluff.', metadata={'b': 1}), Document( page_content='What is a sandwich?', metadata={'c': 1}), Document( page_content='That fence is purple.', metadata={'...
def test_from_documents(self, embedding_openai: Embeddings, collection: Any ) ->None: """Test end to end construction and search.""" documents = [Document(page_content='Dogs are tough.', metadata={'a': 1} ), Document(page_content='Cats have fluff.', metadata={'b': 1}), Document(page_content=...
Test end to end construction and search.
_llm_type
"""Return type of llm.""" return 'octoai_endpoint'
@property def _llm_type(self) ->str: """Return type of llm.""" return 'octoai_endpoint'
Return type of llm.
test_visit_structured_query
query = 'What is the capital of France?' structured_query = StructuredQuery(query=query, filter=None) expected: Tuple[str, Dict] = (query, {}) actual = DEFAULT_TRANSLATOR.visit_structured_query(structured_query) assert expected == actual comp = Comparison(comparator=Comparator.LT, attribute='foo', value=['1', '2']) str...
def test_visit_structured_query() ->None: query = 'What is the capital of France?' structured_query = StructuredQuery(query=query, filter=None) expected: Tuple[str, Dict] = (query, {}) actual = DEFAULT_TRANSLATOR.visit_structured_query(structured_query) assert expected == actual comp = Compariso...
null
test_final_answer_after_parsable_action
llm_output = """ Observation: I can use the `foo` tool to achieve the goal. Action: foo Action Input: bar Final Answer: The best pizza to eat is margaritta """ with pytest.raises(OutputParserException) as exception_info: mrkl_output_parser.parse_folder(llm_output) assert 'Pa...
def test_final_answer_after_parsable_action() ->None: llm_output = """ Observation: I can use the `foo` tool to achieve the goal. Action: foo Action Input: bar Final Answer: The best pizza to eat is margaritta """ with pytest.raises(OutputParserException) as exception_in...
null
_create_query
hits = k doc_embedding_field = self._embedding_field input_embedding_field = self._input_field ranking_function = kwargs['ranking'] if 'ranking' in kwargs else 'default' filter = kwargs['filter'] if 'filter' in kwargs else None approximate = kwargs['approximate'] if 'approximate' in kwargs else False approximate = 'tru...
def _create_query(self, query_embedding: List[float], k: int=4, **kwargs: Any ) ->Dict: hits = k doc_embedding_field = self._embedding_field input_embedding_field = self._input_field ranking_function = kwargs['ranking'] if 'ranking' in kwargs else 'default' filter = kwargs['filter'] if 'filter' ...
null
test_openai_invalid_model_kwargs
with pytest.raises(ValueError): OpenAI(model_kwargs={'model_name': 'foo'})
@pytest.mark.requires('openai') def test_openai_invalid_model_kwargs() ->None: with pytest.raises(ValueError): OpenAI(model_kwargs={'model_name': 'foo'})
null
_agent_type
"""Return Identifier of agent type.""" return AgentType.ZERO_SHOT_REACT_DESCRIPTION
@property def _agent_type(self) ->str: """Return Identifier of agent type.""" return AgentType.ZERO_SHOT_REACT_DESCRIPTION
Return Identifier of agent type.
test_hallucinating
""" Test CPAL approach does not hallucinate when given an invalid entity in the question. The PAL chain would hallucinates here! """ narrative_input = ( 'Jan has three times the number of pets as Marcia.Marcia has two more pets than Cindy.If Cindy has ten pets, how many pets does Ba...
def test_hallucinating(self) ->None: """ Test CPAL approach does not hallucinate when given an invalid entity in the question. The PAL chain would hallucinates here! """ narrative_input = ( 'Jan has three times the number of pets as Marcia.Marcia has two more pets than C...
Test CPAL approach does not hallucinate when given an invalid entity in the question. The PAL chain would hallucinates here!
parse_result
text = result[0].text text = text.strip() if partial: try: return parse_json_markdown(text) except JSONDecodeError: return None else: try: return parse_json_markdown(text) except JSONDecodeError as e: raise OutputParserException(f'Invalid json output: {text}') from e
def parse_result(self, result: List[Generation], *, partial: bool=False) ->Any: text = result[0].text text = text.strip() if partial: try: return parse_json_markdown(text) except JSONDecodeError: return None else: try: return parse_json_markdow...
null
input_keys
"""Will be whatever keys the prompt expects. :meta private: """ return [self.input_key]
@property def input_keys(self) ->List[str]: """Will be whatever keys the prompt expects. :meta private: """ return [self.input_key]
Will be whatever keys the prompt expects. :meta private:
test_create_directory_and_files
"""Test creation of a directory and files in a temporary directory.""" session = BashProcess(strip_newlines=True) temp_dir = tmp_path / 'test_dir' temp_dir.mkdir() commands = [f'touch {temp_dir}/file1.txt', f'touch {temp_dir}/file2.txt', f"echo 'hello world' > {temp_dir}/file2.txt", f'cat {temp_dir}/file2.txt'] out...
@pytest.mark.skipif(sys.platform.startswith('win'), reason= 'Test not supported on Windows') def test_create_directory_and_files(tmp_path: Path) ->None: """Test creation of a directory and files in a temporary directory.""" session = BashProcess(strip_newlines=True) temp_dir = tmp_path / 'test_dir' ...
Test creation of a directory and files in a temporary directory.
add_message
"""Add a Message object to the history. Args: message: A BaseMessage object to store. """ if self.sync and 'id' not in message.additional_kwargs: message.additional_kwargs['id'] = self.message_uuid_method() self.client.Documents.patch_documents(collection=self.collection, workspace ...
def add_message(self, message: BaseMessage) ->None: """Add a Message object to the history. Args: message: A BaseMessage object to store. """ if self.sync and 'id' not in message.additional_kwargs: message.additional_kwargs['id'] = self.message_uuid_method() self.client....
Add a Message object to the history. Args: message: A BaseMessage object to store.
__init__
"""Building a myscale vector store without metadata column embedding (Embeddings): embedding model config (MyScaleSettings): Configuration to MyScale Client must_have_cols (List[str]): column names to be included in query Other keyword arguments will pass into [clickhouse-co...
def __init__(self, embedding: Embeddings, config: Optional[MyScaleSettings] =None, must_have_cols: List[str]=[], **kwargs: Any) ->None: """Building a myscale vector store without metadata column embedding (Embeddings): embedding model config (MyScaleSettings): Configuration to MyScale Client ...
Building a myscale vector store without metadata column embedding (Embeddings): embedding model config (MyScaleSettings): Configuration to MyScale Client must_have_cols (List[str]): column names to be included in query Other keyword arguments will pass into [clickhouse-connect](https://docs.myscale.com/)
is_lc_serializable
return True
@classmethod def is_lc_serializable(self) ->bool: return True
null
query
return list(self.lazy_query(query))
def query(self, query: str) ->List[dict]: return list(self.lazy_query(query))
null
load
"""Loads all cards from the specified Trello board. You can filter the cards, metadata and text included by using the optional parameters. Returns: A list of documents, one for each card in the board. """ try: from bs4 import BeautifulSoup except ImportError as ex:...
def load(self) ->List[Document]: """Loads all cards from the specified Trello board. You can filter the cards, metadata and text included by using the optional parameters. Returns: A list of documents, one for each card in the board. """ try: from bs4 i...
Loads all cards from the specified Trello board. You can filter the cards, metadata and text included by using the optional parameters. Returns: A list of documents, one for each card in the board.
test_single_agent_action_observation
agent_action = AgentAction(tool='Tool1', tool_input='Input1', log='Log1') observation = 'Observation1' intermediate_steps = [(agent_action, observation)] result = format_xml(intermediate_steps) expected_result = ( '<tool>Tool1</tool><tool_input>Input1</tool_input><observation>Observation1</observation>' ) asser...
def test_single_agent_action_observation() ->None: agent_action = AgentAction(tool='Tool1', tool_input='Input1', log='Log1') observation = 'Observation1' intermediate_steps = [(agent_action, observation)] result = format_xml(intermediate_steps) expected_result = ( '<tool>Tool1</tool><tool_in...
null
buffer
"""String buffer of memory.""" return self.buffer_as_messages if self.return_messages else self.buffer_as_str
@property def buffer(self) ->Union[str, List[BaseMessage]]: """String buffer of memory.""" return (self.buffer_as_messages if self.return_messages else self. buffer_as_str)
String buffer of memory.
test_usearch_add_texts
"""Test adding a new document""" texts = ['foo', 'bar', 'baz'] docsearch = USearch.from_texts(texts, FakeEmbeddings()) docsearch.add_texts(['foo']) output = docsearch.similarity_search('foo', k=2) assert output == [Document(page_content='foo'), Document(page_content='foo')]
def test_usearch_add_texts() ->None: """Test adding a new document""" texts = ['foo', 'bar', 'baz'] docsearch = USearch.from_texts(texts, FakeEmbeddings()) docsearch.add_texts(['foo']) output = docsearch.similarity_search('foo', k=2) assert output == [Document(page_content='foo'), Document(page_...
Test adding a new document
validate_chains
"""Validate that chains are all single input/output.""" for chain in values['chains']: if len(chain.input_keys) != 1: raise ValueError( f'Chains used in SimplePipeline should all have one input, got {chain} with {len(chain.input_keys)} inputs.' ) if len(chain.output_keys) != 1: ...
@root_validator() def validate_chains(cls, values: Dict) ->Dict: """Validate that chains are all single input/output.""" for chain in values['chains']: if len(chain.input_keys) != 1: raise ValueError( f'Chains used in SimplePipeline should all have one input, got {chain} with...
Validate that chains are all single input/output.
test_prompt_from_jinja2_template
"""Test prompts can be constructed from a jinja2 template.""" template = """Hello there There is no variable here { Will it get confused{ }? """ prompt = PromptTemplate.from_template(template, template_format='jinja2') expected_prompt = PromptTemplate(template=template, input_variables=[], template_format='jin...
@pytest.mark.requires('jinja2') def test_prompt_from_jinja2_template() ->None: """Test prompts can be constructed from a jinja2 template.""" template = ( 'Hello there\nThere is no variable here {\nWill it get confused{ }? \n ' ) prompt = PromptTemplate.from_template(template, template_for...
Test prompts can be constructed from a jinja2 template.
visit_operation
try: from qdrant_client.http import models as rest except ImportError as e: raise ImportError( 'Cannot import qdrant_client. Please install with `pip install qdrant-client`.' ) from e args = [arg.accept(self) for arg in operation.arguments] operator = {Operator.AND: 'must', Operator.OR: 'should'...
def visit_operation(self, operation: Operation) ->rest.Filter: try: from qdrant_client.http import models as rest except ImportError as e: raise ImportError( 'Cannot import qdrant_client. Please install with `pip install qdrant-client`.' ) from e args = [arg.accept(se...
null
validate_client
"""Validate that the client is of the correct type.""" from metal_sdk.metal import Metal if 'client' in values: client = values['client'] if not isinstance(client, Metal): raise ValueError( f'Got unexpected client, should be of type metal_sdk.metal.Metal. Instead, got {type(client)}' ...
@root_validator(pre=True) def validate_client(cls, values: dict) ->dict: """Validate that the client is of the correct type.""" from metal_sdk.metal import Metal if 'client' in values: client = values['client'] if not isinstance(client, Metal): raise ValueError( f...
Validate that the client is of the correct type.
on_retry
llm_run = self._get_run(run_id) retry_d: Dict[str, Any] = {'slept': retry_state.idle_for, 'attempt': retry_state.attempt_number} if retry_state.outcome is None: retry_d['outcome'] = 'N/A' elif retry_state.outcome.failed: retry_d['outcome'] = 'failed' exception = retry_state.outcome.exception() retry...
def on_retry(self, retry_state: RetryCallState, *, run_id: UUID, **kwargs: Any ) ->Run: llm_run = self._get_run(run_id) retry_d: Dict[str, Any] = {'slept': retry_state.idle_for, 'attempt': retry_state.attempt_number} if retry_state.outcome is None: retry_d['outcome'] = 'N/A' elif ret...
null
test_api_key_masked_when_passed_from_env
mock_response = mock_get.return_value mock_response.status_code = 200 mock_response.json.return_value = {'model_id': '', 'status': 'training_complete'} monkeypatch.setenv('ARCEE_API_KEY', 'secret_api_key') arcee_with_env_var = Arcee(model='DALM-PubMed', arcee_api_url= 'https://localhost', arcee_api_version='ver...
@patch('langchain_community.utilities.arcee.requests.get') def test_api_key_masked_when_passed_from_env(mock_get: MagicMock, capsys: CaptureFixture, monkeypatch: MonkeyPatch) ->None: mock_response = mock_get.return_value mock_response.status_code = 200 mock_response.json.return_value = {'model_id': '', ...
null
observation_prefix
"""Prefix to append the observation with.""" return 'Observation: '
@property def observation_prefix(self) ->str: """Prefix to append the observation with.""" return 'Observation: '
Prefix to append the observation with.
test_konko_model_test
"""Check how ChatKonko manages model_name.""" chat_instance = ChatKonko(model='alpha') assert chat_instance.model == 'alpha' chat_instance = ChatKonko(model='beta') assert chat_instance.model == 'beta'
def test_konko_model_test() ->None: """Check how ChatKonko manages model_name.""" chat_instance = ChatKonko(model='alpha') assert chat_instance.model == 'alpha' chat_instance = ChatKonko(model='beta') assert chat_instance.model == 'beta'
Check how ChatKonko manages model_name.
use_simple_prompt
"""Decides whether to use the simple prompt""" if llm._llm_type and 'anthropic' in llm._llm_type: return True if hasattr(llm, 'model_id') and 'anthropic' in llm.model_id: return True return False
def use_simple_prompt(llm: BaseLanguageModel) ->bool: """Decides whether to use the simple prompt""" if llm._llm_type and 'anthropic' in llm._llm_type: return True if hasattr(llm, 'model_id') and 'anthropic' in llm.model_id: return True return False
Decides whether to use the simple prompt
lazy_parse
"""Lazily parse the blob.""" import io try: import openai except ImportError: raise ImportError( 'openai package not found, please install it with `pip install openai`' ) try: from pydub import AudioSegment except ImportError: raise ImportError( 'pydub package not found, please i...
def lazy_parse(self, blob: Blob) ->Iterator[Document]: """Lazily parse the blob.""" import io try: import openai except ImportError: raise ImportError( 'openai package not found, please install it with `pip install openai`' ) try: from pydub import Aud...
Lazily parse the blob.
test_similarity_search_with_metadata
"""Test end to end construction and search with metadata.""" texts = ['foo', 'bar', 'baz'] metadatas = [{'page': i} for i in range(len(texts))] docsearch = ElasticVectorSearch.from_texts(texts, FakeEmbeddings(), metadatas=metadatas, elasticsearch_url=elasticsearch_url) output = docsearch.similarity_search('foo', k=...
def test_similarity_search_with_metadata(self, elasticsearch_url: str) ->None: """Test end to end construction and search with metadata.""" texts = ['foo', 'bar', 'baz'] metadatas = [{'page': i} for i in range(len(texts))] docsearch = ElasticVectorSearch.from_texts(texts, FakeEmbeddings(), metad...
Test end to end construction and search with metadata.
get_test_api_data
"""Fake api data to use for testing.""" api_docs = """ This API endpoint will search the notes for a user. Endpoint: https://thisapidoesntexist.com GET /api/notes Query parameters: q | string | The search term for notes """ return {'api_docs': api_docs, 'question': 'Search for notes contai...
def get_test_api_data() ->dict: """Fake api data to use for testing.""" api_docs = """ This API endpoint will search the notes for a user. Endpoint: https://thisapidoesntexist.com GET /api/notes Query parameters: q | string | The search term for notes """ return {'api_docs': api_do...
Fake api data to use for testing.
test_openai_predict
llm = ChatOpenAI() mock_client = MagicMock() completed = False def mock_create(*args: Any, **kwargs: Any) ->Any: nonlocal completed completed = True return mock_completion mock_client.create = mock_create with patch.object(llm, 'client', mock_client): res = llm.predict('bar') assert res == 'Bar Baz'...
@pytest.mark.requires('openai') def test_openai_predict(mock_completion: dict) ->None: llm = ChatOpenAI() mock_client = MagicMock() completed = False def mock_create(*args: Any, **kwargs: Any) ->Any: nonlocal completed completed = True return mock_completion mock_client.crea...
null
preprocess_json_input
"""Preprocesses a string to be parsed as json. Replace single backslashes with double backslashes, while leaving already escaped ones intact. Args: input_str: String to be preprocessed Returns: Preprocessed string """ corrected_str = re.sub('(?<!\\\\)\\\\(?!["\\\\/bfnrt]|u[0-9a-fA...
def preprocess_json_input(input_str: str) ->str: """Preprocesses a string to be parsed as json. Replace single backslashes with double backslashes, while leaving already escaped ones intact. Args: input_str: String to be preprocessed Returns: Preprocessed string """ correc...
Preprocesses a string to be parsed as json. Replace single backslashes with double backslashes, while leaving already escaped ones intact. Args: input_str: String to be preprocessed Returns: Preprocessed string
get_format_instructions
return """Your response should be a markdown list, eg: `- foo - bar - baz`"""
def get_format_instructions(self) ->str:
    """Describe the expected output format: a markdown bullet list."""
    example = '`- foo\n- bar\n- baz`'
    return 'Your response should be a markdown list, eg: ' + example
null
embed_documents
"""Compute doc embeddings using a HuggingFace transformer model. Args: texts: The list of texts to embed.s Returns: List of embeddings, one for each text. """ texts = list(map(lambda x: x.replace('\n', ' '), texts)) embeddings = self.client(self.pipeline_ref, texts) if ...
def embed_documents(self, texts: List[str]) ->List[List[float]]: """Compute doc embeddings using a HuggingFace transformer model. Args: texts: The list of texts to embed.s Returns: List of embeddings, one for each text. """ texts = list(map(lambda x: x.replace('...
Compute doc embeddings using a HuggingFace transformer model. Args: texts: The list of texts to embed. Returns: List of embeddings, one for each text.
test_openai_streaming
"""Test streaming tokens from OpenAI.""" llm = ChatOpenAI(max_tokens=10) for token in llm.stream("I'm Pickle Rick"): assert isinstance(token.content, str)
@pytest.mark.scheduled
def test_openai_streaming() ->None:
    """Streaming a chat completion should yield chunks with string content."""
    chat = ChatOpenAI(max_tokens=10)
    for chunk in chat.stream("I'm Pickle Rick"):
        assert isinstance(chunk.content, str)
Test streaming tokens from OpenAI.
__init__
try: from alibabacloud_ha3engine_vector import client, models from alibabacloud_tea_util import models as util_models except ImportError: raise ImportError( 'Could not import alibaba cloud opensearch python package. Please install it with `pip install alibabacloud-ha3engine-vector`.' ) self....
def __init__(self, embedding: Embeddings, config: AlibabaCloudOpenSearchSettings, **kwargs: Any) ->None: try: from alibabacloud_ha3engine_vector import client, models from alibabacloud_tea_util import models as util_models except ImportError: raise ImportError( 'Could not...
null
evaluation_name
return 'json_edit_distance'
@property
def evaluation_name(self) ->str:
    """Name under which this evaluator's results are reported."""
    name = 'json_edit_distance'
    return name
null
_create_api_controller_agent
from langchain.agents.agent import AgentExecutor from langchain.agents.mrkl.base import ZeroShotAgent from langchain.chains.llm import LLMChain get_llm_chain = LLMChain(llm=llm, prompt=PARSING_GET_PROMPT) post_llm_chain = LLMChain(llm=llm, prompt=PARSING_POST_PROMPT) tools: List[BaseTool] = [RequestsGetToolWithParsing(...
def _create_api_controller_agent(api_url: str, api_docs: str, requests_wrapper: RequestsWrapper, llm: BaseLanguageModel) ->Any: from langchain.agents.agent import AgentExecutor from langchain.agents.mrkl.base import ZeroShotAgent from langchain.chains.llm import LLMChain get_llm_chain = LLMChain(llm...
null
force_delete_by_path
"""Force delete dataset by path. Args: path (str): path of the dataset to delete. Raises: ValueError: if deeplake is not installed. """ try: import deeplake except ImportError: raise ValueError( 'Could not import deeplake python package. Please install i...
@classmethod def force_delete_by_path(cls, path: str) ->None: """Force delete dataset by path. Args: path (str): path of the dataset to delete. Raises: ValueError: if deeplake is not installed. """ try: import deeplake except ImportError: rai...
Force delete dataset by path. Args: path (str): path of the dataset to delete. Raises: ValueError: if deeplake is not installed.
other
try: import json except ImportError: raise ImportError( 'json is not installed. Please install it with `pip install json`') params = json.loads(query) jira_function = getattr(self.jira, params['function']) return jira_function(*params.get('args', []), **params.get('kwargs', {}))
def other(self, query: str) ->str:
    """Dispatch an arbitrary Jira client call described by a JSON payload.

    Args:
        query: JSON string with a required 'function' key naming the Jira
            client method to invoke, plus optional 'args' (list) and
            'kwargs' (dict) forwarded to that call.

    Returns:
        Whatever the named Jira client method returns.

    Raises:
        json.JSONDecodeError: if ``query`` is not valid JSON.
        AttributeError: if the named function does not exist on the client.
    """
    # `json` is part of the standard library; the previous try/except
    # ImportError (suggesting `pip install json`) was dead, misleading code.
    import json
    params = json.loads(query)
    jira_function = getattr(self.jira, params['function'])
    return jira_function(*params.get('args', []), **params.get('kwargs', {}))
null
_create_retry_decorator
import openai errors = [openai.error.Timeout, openai.error.APIError, openai.error. APIConnectionError, openai.error.RateLimitError, openai.error. ServiceUnavailableError] return create_base_retry_decorator(error_types=errors, max_retries=llm. max_retries, run_manager=run_manager)
def _create_retry_decorator(llm: Union[BaseOpenAI, OpenAIChat], run_manager: Optional[Union[AsyncCallbackManagerForLLMRun, CallbackManagerForLLMRun] ]=None) ->Callable[[Any], Any]: import openai errors = [openai.error.Timeout, openai.error.APIError, openai.error. APIConnectionError, openai.error...
null
_evaluate_strings
parsed = self._canonicalize(self._parse_json(prediction)) label = self._canonicalize(self._parse_json(reference)) distance = self._string_distance(parsed, label) return {'score': distance}
def _evaluate_strings(self, prediction: str, input: Optional[str]=None, reference: Optional[str]=None, **kwargs: Any) ->dict: parsed = self._canonicalize(self._parse_json(prediction)) label = self._canonicalize(self._parse_json(reference)) distance = self._string_distance(parsed, label) return {'sco...
null
wait_for_all_evaluators
"""Wait for all tracers to finish.""" global _TRACERS for tracer in list(_TRACERS): if tracer is not None: tracer.wait_for_futures()
def wait_for_all_evaluators() ->None:
    """Block until every registered evaluator tracer has flushed its pending futures."""
    global _TRACERS
    # Snapshot the registry so concurrent mutation doesn't break iteration.
    registered = list(_TRACERS)
    for tracer in registered:
        if tracer is not None:
            tracer.wait_for_futures()
Wait for all tracers to finish.
_get_relevant_documents
"""Look up similar documents in Weaviate. query: The query to search for relevant documents of using weviate hybrid search. where_filter: A filter to apply to the query. https://weaviate.io/developers/weaviate/guides/querying/#filtering score: Whether to include the score...
def _get_relevant_documents(self, query: str, *, run_manager: CallbackManagerForRetrieverRun, where_filter: Optional[Dict[str, object ]]=None, score: bool=False, hybrid_search_kwargs: Optional[Dict[str, object]]=None) ->List[Document]: """Look up similar documents in Weaviate. query: The query ...
Look up similar documents in Weaviate. query: The query to search for relevant documents of using weviate hybrid search. where_filter: A filter to apply to the query. https://weaviate.io/developers/weaviate/guides/querying/#filtering score: Whether to include the score, and score explanation in the returned...
test_similarity_search_with_uuids
"""Test end to end construction and search with uuids.""" texts = ['foo', 'bar', 'baz'] uuids = [uuid.uuid5(uuid.NAMESPACE_DNS, 'same-name') for text in texts] metadatas = [{'page': i} for i in range(len(texts))] docsearch = Weaviate.from_texts(texts, embedding_openai, metadatas= metadatas, weaviate_url=weaviate_ur...
@pytest.mark.vcr(ignore_localhost=True) def test_similarity_search_with_uuids(self, weaviate_url: str, embedding_openai: OpenAIEmbeddings) ->None: """Test end to end construction and search with uuids.""" texts = ['foo', 'bar', 'baz'] uuids = [uuid.uuid5(uuid.NAMESPACE_DNS, 'same-name') for text in text...
Test end to end construction and search with uuids.
_get_relevant_documents
"""Get documents relevant for a query.""" from google.cloud.discoveryengine_v1beta import ConverseConversationRequest, TextInput request = ConverseConversationRequest(name=self._client.conversation_path( self.project_id, self.location_id, self.data_store_id, self. conversation_id), serving_config=self._serving_...
def _get_relevant_documents(self, query: str, *, run_manager: CallbackManagerForRetrieverRun) ->List[Document]: """Get documents relevant for a query.""" from google.cloud.discoveryengine_v1beta import ConverseConversationRequest, TextInput request = ConverseConversationRequest(name=self._client. ...
Get documents relevant for a query.
run
if mode == 'jql': return self.search(query) elif mode == 'get_projects': return self.project() elif mode == 'create_issue': return self.issue_create(query) elif mode == 'other': return self.other(query) elif mode == 'create_page': return self.page_create(query) else: raise ValueError(f'Got unexp...
def run(self, mode: str, query: str) ->str: if mode == 'jql': return self.search(query) elif mode == 'get_projects': return self.project() elif mode == 'create_issue': return self.issue_create(query) elif mode == 'other': return self.other(query) elif mode == 'create_...
null
test_singlestoredb_filter_metadata_6
"""Test filtering by other bool""" table_name = 'test_singlestoredb_filter_metadata_6' drop(table_name) docs = [Document(page_content=t, metadata={'index': i, 'category': 'budget', 'is_good': i == 1}) for i, t in enumerate(texts)] docsearch = SingleStoreDB.from_documents(docs, FakeEmbeddings(), distance_strateg...
@pytest.mark.skipif(not singlestoredb_installed, reason= 'singlestoredb not installed') def test_singlestoredb_filter_metadata_6(texts: List[str]) ->None: """Test filtering by other bool""" table_name = 'test_singlestoredb_filter_metadata_6' drop(table_name) docs = [Document(page_content=t, metadata...
Test filtering by other bool
visit_comparison
try: from qdrant_client.http import models as rest except ImportError as e: raise ImportError( 'Cannot import qdrant_client. Please install with `pip install qdrant-client`.' ) from e self._validate_func(comparison.comparator) attribute = self.metadata_key + '.' + comparison.attribute if compari...
def visit_comparison(self, comparison: Comparison) ->rest.FieldCondition: try: from qdrant_client.http import models as rest except ImportError as e: raise ImportError( 'Cannot import qdrant_client. Please install with `pip install qdrant-client`.' ) from e self._vali...
null
test_mistralai_initialization
"""Test ChatMistralAI initialization.""" ChatMistralAI(model='test', mistral_api_key='test')
@pytest.mark.requires('mistralai')
def test_mistralai_initialization() ->None:
    """Constructing ChatMistralAI with a model name and API key must not raise."""
    _ = ChatMistralAI(model='test', mistral_api_key='test')
Test ChatMistralAI initialization.
_searx_api_query
"""Actual request to searx API.""" raw_result = requests.get(self.searx_host, headers=self.headers, params= params, verify=not self.unsecure) if not raw_result.ok: raise ValueError('Searx API returned an error: ', raw_result.text) res = SearxResults(raw_result.text) self._result = res return res
def _searx_api_query(self, params: dict) ->SearxResults: """Actual request to searx API.""" raw_result = requests.get(self.searx_host, headers=self.headers, params =params, verify=not self.unsecure) if not raw_result.ok: raise ValueError('Searx API returned an error: ', raw_result.text) ...
Actual request to searx API.
__getattr__
if name == 'MRKLChain': from langchain.agents import MRKLChain _warn_on_import(name, replacement='langchain.agents.MRKLChain') return MRKLChain elif name == 'ReActChain': from langchain.agents import ReActChain _warn_on_import(name, replacement='langchain.agents.ReActChain') return ReActChain el...
def __getattr__(name: str) ->Any: if name == 'MRKLChain': from langchain.agents import MRKLChain _warn_on_import(name, replacement='langchain.agents.MRKLChain') return MRKLChain elif name == 'ReActChain': from langchain.agents import ReActChain _warn_on_import(name, repla...
null
test_chat_message_partial
template = ChatPromptTemplate.from_messages([('system', 'You are an AI assistant named {name}.'), ('human', "Hi I'm {user}"), ( 'ai', "Hi there, {user}, I'm {name}."), ('human', '{input}')]) template2 = template.partial(user='Lucy', name='R2D2') with pytest.raises(KeyError): template.format_messages(input='...
def test_chat_message_partial() ->None: template = ChatPromptTemplate.from_messages([('system', 'You are an AI assistant named {name}.'), ('human', "Hi I'm {user}" ), ('ai', "Hi there, {user}, I'm {name}."), ('human', '{input}')]) template2 = template.partial(user='Lucy', name='R2D2') with p...
null
query
"""Query Neptune database.""" try: return self.client.execute_open_cypher_query(openCypherQuery=query) except Exception as e: raise NeptuneQueryException({'message': 'An error occurred while executing the query.', 'details': str(e)})
def query(self, query: str, params: Optional[dict]=None) ->Dict[str, Any]:
    """Run an openCypher query against the Neptune database.

    Args:
        query: The openCypher query string to execute.
        params: Optional query parameters. Currently unused by the client
            call; kept for interface compatibility. The previous mutable
            default ``{}`` is replaced by ``None`` (mutable-default
            anti-pattern).

    Returns:
        The result returned by the Neptune client.

    Raises:
        NeptuneQueryException: if the underlying client call fails.
    """
    try:
        return self.client.execute_open_cypher_query(openCypherQuery=query)
    except Exception as e:
        # Chain the original exception so the root cause is preserved.
        raise NeptuneQueryException({'message':
            'An error occurred while executing the query.', 'details':
            str(e)}) from e
Query Neptune database.
from_browser
"""Instantiate the tool.""" lazy_import_playwright_browsers() return cls(sync_browser=sync_browser, async_browser=async_browser)
@classmethod
def from_browser(cls, sync_browser: Optional[SyncBrowser]=None,
    async_browser: Optional[AsyncBrowser]=None) ->BaseBrowserTool:
    """Build the tool from a sync and/or async Playwright browser."""
    lazy_import_playwright_browsers()
    tool = cls(sync_browser=sync_browser, async_browser=async_browser)
    return tool
Instantiate the tool.
_process_llm_result
run_manager.on_text(llm_output, color='green', verbose=self.verbose) llm_output = llm_output.strip() text_match = re.search('^```text(.*?)```', llm_output, re.DOTALL) if text_match: expression = text_match.group(1) output = self._evaluate_expression(expression) run_manager.on_text('\nAnswer: ', verbose=self...
def _process_llm_result(self, llm_output: str, run_manager: CallbackManagerForChainRun) ->Dict[str, str]: run_manager.on_text(llm_output, color='green', verbose=self.verbose) llm_output = llm_output.strip() text_match = re.search('^```text(.*?)```', llm_output, re.DOTALL) if text_match: expr...
null
_get_tables_to_query
"""Get the tables names that need to be queried, after checking they exist.""" if table_names is not None: if isinstance(table_names, list) and len(table_names) > 0 and table_names[0 ] != '': fixed_tables = [fix_table_name(table) for table in table_names] non_existing_tables = [table for tab...
def _get_tables_to_query(self, table_names: Optional[Union[List[str], str]] =None) ->Optional[List[str]]: """Get the tables names that need to be queried, after checking they exist.""" if table_names is not None: if isinstance(table_names, list) and len(table_names ) > 0 and table_names[...
Get the tables names that need to be queried, after checking they exist.
test_openai_batch
"""Test streaming tokens from OpenAI.""" llm = OpenAI(max_tokens=10) result = llm.batch(["I'm Pickle Rick", "I'm not Pickle Rick"]) for token in result: assert isinstance(token, str)
@pytest.mark.scheduled
def test_openai_batch() ->None:
    """Batch completion over multiple prompts should yield string outputs."""
    llm = OpenAI(max_tokens=10)
    outputs = llm.batch(["I'm Pickle Rick", "I'm not Pickle Rick"])
    for out in outputs:
        assert isinstance(out, str)
Test streaming tokens from OpenAI.
on_chain_error
"""Do nothing when LLM chain outputs an error.""" pass
def on_chain_error(self, error: BaseException, **kwargs: Any) ->None:
    """No-op callback: chain errors are intentionally ignored by this handler."""
    return None
Do nothing when LLM chain outputs an error.
test_pgvector_with_metadatas
"""Test end to end construction and search.""" texts = ['foo', 'bar', 'baz'] metadatas = [{'page': str(i)} for i in range(len(texts))] docsearch = PGVector.from_texts(texts=texts, collection_name= 'test_collection', embedding=FakeEmbeddingsWithAdaDimension(), metadatas=metadatas, connection_string=CONNECTION_ST...
def test_pgvector_with_metadatas() ->None: """Test end to end construction and search.""" texts = ['foo', 'bar', 'baz'] metadatas = [{'page': str(i)} for i in range(len(texts))] docsearch = PGVector.from_texts(texts=texts, collection_name= 'test_collection', embedding=FakeEmbeddingsWithAdaDimens...
Test end to end construction and search.
from_texts
"""Construct Vectara wrapper from raw documents. This is intended to be a quick way to get started. Example: .. code-block:: python from langchain_community.vectorstores import Vectara vectara = Vectara.from_texts( texts, ...
@classmethod def from_texts(cls: Type[Vectara], texts: List[str], embedding: Optional[ Embeddings]=None, metadatas: Optional[List[dict]]=None, **kwargs: Any ) ->Vectara: """Construct Vectara wrapper from raw documents. This is intended to be a quick way to get started. Example: ....
Construct Vectara wrapper from raw documents. This is intended to be a quick way to get started. Example: .. code-block:: python from langchain_community.vectorstores import Vectara vectara = Vectara.from_texts( texts, vectara_customer_id=customer_id, vectara_cor...
value
"""The only defined document attribute value or None. According to Amazon Kendra, you can only provide one value for a document attribute. """ if self.DateValue: return self.DateValue if self.LongValue: return self.LongValue if self.StringListValue: return self.StringListValue if sel...
@property def value(self) ->DocumentAttributeValueType: """The only defined document attribute value or None. According to Amazon Kendra, you can only provide one value for a document attribute. """ if self.DateValue: return self.DateValue if self.LongValue: return se...
The only defined document attribute value or None. According to Amazon Kendra, you can only provide one value for a document attribute.
similarity_search
""" Returns the most similar indexed documents to the query text. Args: query (str): The query text for which to find similar documents. k (int): The number of documents to return. Default is 4. Returns: List[Document]: A list of documents that are most simi...
def similarity_search(self, query: str, k: int=4, **kwargs: Any) ->List[ Document]: """ Returns the most similar indexed documents to the query text. Args: query (str): The query text for which to find similar documents. k (int): The number of documents to return. Defaul...
Returns the most similar indexed documents to the query text. Args: query (str): The query text for which to find similar documents. k (int): The number of documents to return. Default is 4. Returns: List[Document]: A list of documents that are most similar to the query text.
test_elasticsearch_indexing_exception_error
"""Test bulk exception logging is giving better hints.""" from elasticsearch.helpers import BulkIndexError docsearch = ElasticsearchStore(embedding=ConsistentFakeEmbeddings(), ** elasticsearch_connection, index_name=index_name) docsearch.client.indices.create(index=index_name, mappings={'properties': { }}, sett...
def test_elasticsearch_indexing_exception_error(self, elasticsearch_connection: dict, index_name: str, caplog: pytest. LogCaptureFixture) ->None: """Test bulk exception logging is giving better hints.""" from elasticsearch.helpers import BulkIndexError docsearch = ElasticsearchStore(embedding=Consis...
Test bulk exception logging is giving better hints.
set_ref
self.ref = ref
def set_ref(self, ref: str) ->None:
    """Record the reference identifier on this instance."""
    setattr(self, 'ref', ref)
null