method_name
stringlengths
1
78
method_body
stringlengths
3
9.66k
full_code
stringlengths
31
10.7k
docstring
stringlengths
4
4.74k
max_marginal_relevance_search_by_vector
"""Return docs selected using the maximal marginal relevance to embedding vector. Maximal marginal relevance optimizes for similarity to query AND diversity among selected documents. Args: embedding (str): Text to look up documents similar to. k (int): N...
def max_marginal_relevance_search_by_vector(self, embedding: List[float], k: int=4, fetch_k: int=20, lambda_mult: float=0.5, filter: Optional[Dict[ str, str]]=None, **kwargs: Any) ->List[Document]: """Return docs selected using the maximal marginal relevance to embedding vector. Maximal...
Return docs selected using the maximal marginal relevance to embedding vector. Maximal marginal relevance optimizes for similarity to query AND diversity among selected documents. Args: embedding (str): Text to look up documents similar to. k (int): Number of Documents to return. Defaults to 4. fe...
_make_request
try: import grpc from yandex.cloud.ai.foundation_models.v1.foundation_models_service_pb2 import TextEmbeddingRequest from yandex.cloud.ai.foundation_models.v1.foundation_models_service_pb2_grpc import EmbeddingsServiceStub except ImportError as e: raise ImportError( 'Please install YandexCloud S...
def _make_request(self: YandexGPTEmbeddings, texts: List[str]): try: import grpc from yandex.cloud.ai.foundation_models.v1.foundation_models_service_pb2 import TextEmbeddingRequest from yandex.cloud.ai.foundation_models.v1.foundation_models_service_pb2_grpc import EmbeddingsServiceStub e...
null
test_filter_list_metadata
documents = [Document(page_content='', metadata={'key1': 'this is a string!', 'key2': ['a', 'list', 'of', 'strings']}), Document (page_content='', metadata={'key1': 'this is another string!', 'key2': {'foo'}}), Document(page_content='', metadata={'key1': 'this is another string!', 'key2': {'foo': 'bar'}...
def test_filter_list_metadata() ->None: documents = [Document(page_content='', metadata={'key1': 'this is a string!', 'key2': ['a', 'list', 'of', 'strings']}), Document(page_content='', metadata={'key1': 'this is another string!', 'key2': {'foo'}}), Document(page_content ='', metadat...
null
test_load_converts_dataframe_columns_to_document_metadata
loader = GeoDataFrameLoader(sample_gdf) docs = loader.load() for i, doc in enumerate(docs): assert doc.metadata['area'] == sample_gdf.loc[i, 'area'] assert doc.metadata['crs'] == sample_gdf.loc[i, 'crs']
@pytest.mark.requires('geopandas') def test_load_converts_dataframe_columns_to_document_metadata(sample_gdf: GeoDataFrame) ->None: loader = GeoDataFrameLoader(sample_gdf) docs = loader.load() for i, doc in enumerate(docs): assert doc.metadata['area'] == sample_gdf.loc[i, 'area'] assert d...
null
on_agent_finish
self.on_agent_finish_common()
def on_agent_finish(self, *args: Any, **kwargs: Any) ->Any: self.on_agent_finish_common()
null
get_tools
"""Get the tools in the toolkit.""" allowed_tools = self.selected_tools or _FILE_TOOLS.keys() tools: List[BaseTool] = [] for tool in allowed_tools: tool_cls = _FILE_TOOLS[tool] tools.append(tool_cls(root_dir=self.root_dir)) return tools
def get_tools(self) ->List[BaseTool]: """Get the tools in the toolkit.""" allowed_tools = self.selected_tools or _FILE_TOOLS.keys() tools: List[BaseTool] = [] for tool in allowed_tools: tool_cls = _FILE_TOOLS[tool] tools.append(tool_cls(root_dir=self.root_dir)) return tools
Get the tools in the toolkit.
from_texts
"""Create a Clarifai vectorstore from a list of texts. Args: user_id (str): User ID. app_id (str): App ID. texts (List[str]): List of texts to add. number_of_docs (Optional[int]): Number of documents to return during vector search. Defaults to None. ...
@classmethod def from_texts(cls, texts: List[str], embedding: Optional[Embeddings]=None, metadatas: Optional[List[dict]]=None, user_id: Optional[str]=None, app_id: Optional[str]=None, number_of_docs: Optional[int]=None, pat: Optional[str]=None, **kwargs: Any) ->Clarifai: """Create a Clarifai vectorstore...
Create a Clarifai vectorstore from a list of texts. Args: user_id (str): User ID. app_id (str): App ID. texts (List[str]): List of texts to add. number_of_docs (Optional[int]): Number of documents to return during vector search. Defaults to None. metadatas (Optional[List[dict]]): Optional list ...
validate_params
"""Validate similarity parameters.""" if values['k'] is None and values['similarity_threshold'] is None: raise ValueError('Must specify one of `k` or `similarity_threshold`.') return values
@root_validator() def validate_params(cls, values: Dict) ->Dict: """Validate similarity parameters.""" if values['k'] is None and values['similarity_threshold'] is None: raise ValueError('Must specify one of `k` or `similarity_threshold`.') return values
Validate similarity parameters.
embeddings
return self.embedding
@property def embeddings(self) ->Optional[Embeddings]: return self.embedding
null
write_key_value_pair
self.dispatch(k) self.write(': ') self.dispatch(v)
def write_key_value_pair(k, v): self.dispatch(k) self.write(': ') self.dispatch(v)
null
similarity_search
"""Perform a similarity search with Yellowbrick Args: query (str): query string k (int, optional): Top K neighbors to retrieve. Defaults to 4. NOTE: Please do not let end-user fill this and always be aware of SQL injection. Returns: Li...
def similarity_search(self, query: str, k: int=4, **kwargs: Any) ->List[ Document]: """Perform a similarity search with Yellowbrick Args: query (str): query string k (int, optional): Top K neighbors to retrieve. Defaults to 4. NOTE: Please do not let end-user fill t...
Perform a similarity search with Yellowbrick Args: query (str): query string k (int, optional): Top K neighbors to retrieve. Defaults to 4. NOTE: Please do not let end-user fill this and always be aware of SQL injection. Returns: List[Document]: List of Documents
test_task_related
time_str = datetime.now().strftime('%d/%m/%Y-%H:%M:%S') task_name = f'Test Task - {time_str}' create_response = json.loads(clickup_wrapper.run(mode='create_task', query= json.dumps({'name': task_name, 'description': 'This is a Test'}))) assert create_response['name'] == task_name task_id = create_response['id'] get...
def test_task_related(clickup_wrapper: ClickupAPIWrapper) ->None: time_str = datetime.now().strftime('%d/%m/%Y-%H:%M:%S') task_name = f'Test Task - {time_str}' create_response = json.loads(clickup_wrapper.run(mode='create_task', query=json.dumps({'name': task_name, 'description': 'This is a Test'}))...
null
__init__
if escape_chars_re: self.escaped_chars_re = escape_chars_re else: self.escaped_chars_re = re.compile(self.DEFAULT_ESCAPED_CHARS)
def __init__(self, escape_chars_re: Optional[Pattern]=None): if escape_chars_re: self.escaped_chars_re = escape_chars_re else: self.escaped_chars_re = re.compile(self.DEFAULT_ESCAPED_CHARS)
null
test_move_file
"""Test the FileMove tool.""" with TemporaryDirectory() as temp_dir: tool = MoveFileTool() source_file = Path(temp_dir) / 'source.txt' destination_file = Path(temp_dir) / 'destination.txt' source_file.write_text('Hello, world!') tool.run({'source_path': str(source_file), 'destination_path': str( ...
def test_move_file() ->None: """Test the FileMove tool.""" with TemporaryDirectory() as temp_dir: tool = MoveFileTool() source_file = Path(temp_dir) / 'source.txt' destination_file = Path(temp_dir) / 'destination.txt' source_file.write_text('Hello, world!') tool.run({'sou...
Test the FileMove tool.
test_deepinfra_call
"""Test valid call to DeepInfra.""" deepinfra_emb = DeepInfraEmbeddings(model_id= 'sentence-transformers/clip-ViT-B-32') r1 = deepinfra_emb.embed_documents([ 'Alpha is the first letter of Greek alphabet', 'Beta is the second letter of Greek alphabet']) assert len(r1) == 2 assert len(r1[0]) == 512 assert len...
def test_deepinfra_call() ->None: """Test valid call to DeepInfra.""" deepinfra_emb = DeepInfraEmbeddings(model_id= 'sentence-transformers/clip-ViT-B-32') r1 = deepinfra_emb.embed_documents([ 'Alpha is the first letter of Greek alphabet', 'Beta is the second letter of Greek alphabet'...
Test valid call to DeepInfra.
_get_relevant_documents
"""Return documents that are relevant to the query.""" current_time = datetime.datetime.now() docs_and_scores = {doc.metadata['buffer_idx']: (doc, self.default_salience) for doc in self.memory_stream[-self.k:]} docs_and_scores.update(self.get_salient_docs(query)) rescored_docs = [(doc, self._get_combined_score(doc,...
def _get_relevant_documents(self, query: str, *, run_manager: CallbackManagerForRetrieverRun) ->List[Document]: """Return documents that are relevant to the query.""" current_time = datetime.datetime.now() docs_and_scores = {doc.metadata['buffer_idx']: (doc, self. default_salience) for doc in se...
Return documents that are relevant to the query.
on_agent_finish_common
self.agent_ends += 1 self.ends += 1
def on_agent_finish_common(self) ->None: self.agent_ends += 1 self.ends += 1
null
test_faiss_search_not_found
"""Test what happens when document is not found.""" texts = ['foo', 'bar', 'baz'] docsearch = FAISS.from_texts(texts, FakeEmbeddings()) docsearch.docstore = InMemoryDocstore({}) with pytest.raises(ValueError): docsearch.similarity_search('foo')
@pytest.mark.requires('faiss') def test_faiss_search_not_found() ->None: """Test what happens when document is not found.""" texts = ['foo', 'bar', 'baz'] docsearch = FAISS.from_texts(texts, FakeEmbeddings()) docsearch.docstore = InMemoryDocstore({}) with pytest.raises(ValueError): docsearch...
Test what happens when document is not found.
parse
try: match = re.search(self.pattern, text.strip()) yaml_str = '' if match: yaml_str = match.group('yaml') else: yaml_str = text json_object = yaml.safe_load(yaml_str) return self.pydantic_object.parse_obj(json_object) except (yaml.YAMLError, ValidationError) as e: name = self...
def parse(self, text: str) ->T: try: match = re.search(self.pattern, text.strip()) yaml_str = '' if match: yaml_str = match.group('yaml') else: yaml_str = text json_object = yaml.safe_load(yaml_str) return self.pydantic_object.parse_obj(json_ob...
null
test_cypher_return_direct
"""Test that chain returns direct results.""" url = os.environ.get('NEO4J_URI') username = os.environ.get('NEO4J_USERNAME') password = os.environ.get('NEO4J_PASSWORD') assert url is not None assert username is not None assert password is not None graph = Neo4jGraph(url=url, username=username, password=password) graph.q...
def test_cypher_return_direct() ->None: """Test that chain returns direct results.""" url = os.environ.get('NEO4J_URI') username = os.environ.get('NEO4J_USERNAME') password = os.environ.get('NEO4J_PASSWORD') assert url is not None assert username is not None assert password is not None g...
Test that chain returns direct results.
create_openai_fn_chain
"""[Legacy] Create an LLM chain that uses OpenAI functions. Args: functions: A sequence of either dictionaries, pydantic.BaseModels classes, or Python functions. If dictionaries are passed in, they are assumed to already be a valid OpenAI functions. If only a single func...
def create_openai_fn_chain(functions: Sequence[Union[Dict[str, Any], Type[ BaseModel], Callable]], llm: BaseLanguageModel, prompt: BasePromptTemplate, *, enforce_single_function_usage: bool=True, output_key: str='function', output_parser: Optional[BaseLLMOutputParser ]=None, **kwargs: Any) ->LLMChain: ...
[Legacy] Create an LLM chain that uses OpenAI functions. Args: functions: A sequence of either dictionaries, pydantic.BaseModels classes, or Python functions. If dictionaries are passed in, they are assumed to already be a valid OpenAI functions. If only a single function is passed in, then...
memory_variables
"""Will always return list of memory variables. :meta private: """ return [self.memory_key]
@property def memory_variables(self) ->List[str]: """Will always return list of memory variables. :meta private: """ return [self.memory_key]
Will always return list of memory variables. :meta private:
_import_yahoo_finance_news
from langchain_community.tools.yahoo_finance_news import YahooFinanceNewsTool return YahooFinanceNewsTool
def _import_yahoo_finance_news() ->Any: from langchain_community.tools.yahoo_finance_news import YahooFinanceNewsTool return YahooFinanceNewsTool
null
get_graph
graph = self.mapper.get_graph(config) input_node = graph.first_node() output_node = graph.last_node() if input_node is not None and output_node is not None: passthrough_node = graph.add_node(_graph_passthrough) graph.add_edge(input_node, passthrough_node) graph.add_edge(passthrough_node, output_node) return...
def get_graph(self, config: (RunnableConfig | None)=None) ->Graph: graph = self.mapper.get_graph(config) input_node = graph.first_node() output_node = graph.last_node() if input_node is not None and output_node is not None: passthrough_node = graph.add_node(_graph_passthrough) graph.add_...
null
add_texts
"""Insert text data into Milvus. Inserting data when the collection has not be made yet will result in creating a new Collection. The data of the first entity decides the schema of the new collection, the dim is extracted from the first embedding and the columns are decided by the first...
def add_texts(self, texts: Iterable[str], metadatas: Optional[List[dict]]= None, timeout: Optional[int]=None, batch_size: int=1000, **kwargs: Any ) ->List[str]: """Insert text data into Milvus. Inserting data when the collection has not be made yet will result in creating a new Collection. ...
Insert text data into Milvus. Inserting data when the collection has not be made yet will result in creating a new Collection. The data of the first entity decides the schema of the new collection, the dim is extracted from the first embedding and the columns are decided by the first metadata dict. Metada keys will ne...
_convert_to_prompt
if isinstance(part, str): return Part.from_text(part) if not isinstance(part, Dict): raise ValueError( f"Message's content is expected to be a dict, got {type(part)}!") if part['type'] == 'text': return Part.from_text(part['text']) elif part['type'] == 'image_url': path = part['image_url']['url'...
def _convert_to_prompt(part: Union[str, Dict]) ->Part: if isinstance(part, str): return Part.from_text(part) if not isinstance(part, Dict): raise ValueError( f"Message's content is expected to be a dict, got {type(part)}!") if part['type'] == 'text': return Part.from_text...
null
_create_table_if_not_exists
self.sql_model_class.metadata.create_all(self.engine)
def _create_table_if_not_exists(self) ->None: self.sql_model_class.metadata.create_all(self.engine)
null
set_llm_cache
"""Set a new LLM cache, overwriting the previous value, if any.""" try: import langchain with warnings.catch_warnings(): warnings.filterwarnings('ignore', message= 'Importing llm_cache from langchain root module is no longer supported' ) langchain.llm_cache = value except...
def set_llm_cache(value: Optional['BaseCache']) ->None: """Set a new LLM cache, overwriting the previous value, if any.""" try: import langchain with warnings.catch_warnings(): warnings.filterwarnings('ignore', message= 'Importing llm_cache from langchain root module ...
Set a new LLM cache, overwriting the previous value, if any.
test_PairwiseStringResultOutputParser_parse
output_parser = PairwiseStringResultOutputParser() text = """I like pie better than cake. [[A]]""" got = output_parser.parse_folder(text) want = {'reasoning': text, 'value': 'A', 'score': 1} assert got.get('reasoning') == want['reasoning'] assert got.get('value') == want['value'] assert got.get('score') == want['score'...
def test_PairwiseStringResultOutputParser_parse() ->None: output_parser = PairwiseStringResultOutputParser() text = 'I like pie better than cake.\n[[A]]' got = output_parser.parse_folder(text) want = {'reasoning': text, 'value': 'A', 'score': 1} assert got.get('reasoning') == want['reasoning'] a...
null
on_chain_error
"""Need to log the error.""" pass
def on_chain_error(self, error: BaseException, **kwargs: Any) ->None: """Need to log the error.""" pass
Need to log the error.
format_property_key
words = s.split() if not words: return s first_word = words[0].lower() capitalized_words = [word.capitalize() for word in words[1:]] return ''.join([first_word] + capitalized_words)
def format_property_key(s: str) ->str: words = s.split() if not words: return s first_word = words[0].lower() capitalized_words = [word.capitalize() for word in words[1:]] return ''.join([first_word] + capitalized_words)
null
test_azure_cognitive_search_get_relevant_documents
"""Test valid call to Azure Cognitive Search.""" retriever = AzureCognitiveSearchRetriever() documents = retriever.get_relevant_documents('what is langchain') for doc in documents: assert isinstance(doc, Document) assert doc.page_content retriever = AzureCognitiveSearchRetriever(top_k=1) documents = retriever.g...
def test_azure_cognitive_search_get_relevant_documents() ->None: """Test valid call to Azure Cognitive Search.""" retriever = AzureCognitiveSearchRetriever() documents = retriever.get_relevant_documents('what is langchain') for doc in documents: assert isinstance(doc, Document) assert do...
Test valid call to Azure Cognitive Search.
clear
"""Clear memory contents.""" self.chat_memory.clear() self.entity_cache.clear() self.entity_store.clear()
def clear(self) ->None: """Clear memory contents.""" self.chat_memory.clear() self.entity_cache.clear() self.entity_store.clear()
Clear memory contents.
_prepare_request_metadata
from google.cloud.contentwarehouse_v1 import RequestMetadata, UserInfo user_info = UserInfo(id=f'user:{user_ldap}') return RequestMetadata(user_info=user_info)
def _prepare_request_metadata(self, user_ldap: str) ->'RequestMetadata': from google.cloud.contentwarehouse_v1 import RequestMetadata, UserInfo user_info = UserInfo(id=f'user:{user_ldap}') return RequestMetadata(user_info=user_info)
null
get_elements_from_api
"""Retrieve a list of elements from the `Unstructured API`.""" if isinstance(file, collections.abc.Sequence) or isinstance(file_path, list): from unstructured.partition.api import partition_multiple_via_api _doc_elements = partition_multiple_via_api(filenames=file_path, files= file, api_key=api_key, api...
def get_elements_from_api(file_path: Union[str, List[str], None]=None, file: Union[IO, Sequence[IO], None]=None, api_url: str= 'https://api.unstructured.io/general/v0/general', api_key: str='', ** unstructured_kwargs: Any) ->List: """Retrieve a list of elements from the `Unstructured API`.""" if isi...
Retrieve a list of elements from the `Unstructured API`.
similarity_search
"""Return docs most similar to query. Args: query: Text to look up documents similar to. k: Number of Documents to return. Defaults to 4. Returns: List of Documents most similar to the query. """ if self._by_text: return self.similarity_search_by_text(qu...
def similarity_search(self, query: str, k: int=4, **kwargs: Any) ->List[ Document]: """Return docs most similar to query. Args: query: Text to look up documents similar to. k: Number of Documents to return. Defaults to 4. Returns: List of Documents most simi...
Return docs most similar to query. Args: query: Text to look up documents similar to. k: Number of Documents to return. Defaults to 4. Returns: List of Documents most similar to the query.
test_default_call
"""Test valid chat call to volc engine.""" chat = VolcEngineMaasChat() response = chat(messages=[HumanMessage(content='Hello')]) assert isinstance(response, BaseMessage) assert isinstance(response.content, str)
def test_default_call() ->None: """Test valid chat call to volc engine.""" chat = VolcEngineMaasChat() response = chat(messages=[HumanMessage(content='Hello')]) assert isinstance(response, BaseMessage) assert isinstance(response.content, str)
Test valid chat call to volc engine.
embed_documents
"""Call out to Voyage Embedding endpoint for embedding search docs. Args: texts: The list of texts to embed. Returns: List of embeddings, one for each text. """ return self._get_embeddings(texts, batch_size=self.batch_size, input_type= 'document')
def embed_documents(self, texts: List[str]) ->List[List[float]]: """Call out to Voyage Embedding endpoint for embedding search docs. Args: texts: The list of texts to embed. Returns: List of embeddings, one for each text. """ return self._get_embeddings(texts, b...
Call out to Voyage Embedding endpoint for embedding search docs. Args: texts: The list of texts to embed. Returns: List of embeddings, one for each text.
test_success
"""Test that call that doesn't run.""" stackexchange = StackExchangeAPIWrapper() output = stackexchange.run('zsh: command not found: python') assert 'zsh: command not found: python' in output
def test_success() ->None: """Test that call that doesn't run.""" stackexchange = StackExchangeAPIWrapper() output = stackexchange.run('zsh: command not found: python') assert 'zsh: command not found: python' in output
Test that call that doesn't run.
similarity_search_with_score_by_vector
sql_query = f""" SELECT text, metadata, distance FROM {self._table} e INNER JOIN vss_{self._table} v on v.rowid = e.rowid WHERE vss_search( v.text_embedding, vss_search_params('{json.dumps(embe...
def similarity_search_with_score_by_vector(self, embedding: List[float], k: int=4, **kwargs: Any) ->List[Tuple[Document, float]]: sql_query = f""" SELECT text, metadata, distance FROM {self._table} e INNER JOIN vss_{self._table...
null
get_from_dict_or_env
"""Get a value from a dictionary or an environment variable.""" if key in data and data[key]: return data[key] else: return get_from_env(key, env_key, default=default)
def get_from_dict_or_env(data: Dict[str, Any], key: str, env_key: str, default: Optional[str]=None) ->str: """Get a value from a dictionary or an environment variable.""" if key in data and data[key]: return data[key] else: return get_from_env(key, env_key, default=default)
Get a value from a dictionary or an environment variable.
__init__
self.json_data = json_data self.status_code = status_code
def __init__(self, json_data: Dict, status_code: int): self.json_data = json_data self.status_code = status_code
null
test_non_presigned_loading
mocker.register_uri(requests_mock.ANY, requests_mock.ANY, status_code=200) loader = LakeFSLoader(lakefs_access_key='lakefs_access_key', lakefs_secret_key='lakefs_secret_key', lakefs_endpoint=self.endpoint) loader.set_repo(self.repo) loader.set_ref(self.ref) loader.set_path(self.path) loader.load()
@requests_mock.Mocker() @pytest.mark.usefixtures('mock_lakefs_client_no_presign_local', 'mock_unstructured_local') def test_non_presigned_loading(self, mocker: Mocker) ->None: mocker.register_uri(requests_mock.ANY, requests_mock.ANY, status_code=200) loader = LakeFSLoader(lakefs_access_key='lakefs_access_ke...
null
test_load_evaluators
"""Test loading evaluators.""" fake_llm = FakeChatModel() embeddings = FakeEmbeddings(size=32) load_evaluators([evaluator_type], llm=fake_llm, embeddings=embeddings) load_evaluators([evaluator_type.value], llm=fake_llm, embeddings=embeddings)
@pytest.mark.requires('rapidfuzz') @pytest.mark.parametrize('evaluator_type', EvaluatorType) def test_load_evaluators(evaluator_type: EvaluatorType) ->None: """Test loading evaluators.""" fake_llm = FakeChatModel() embeddings = FakeEmbeddings(size=32) load_evaluators([evaluator_type], llm=fake_llm, embe...
Test loading evaluators.
initialize_llm_chain
if 'llm_chain' not in values: from langchain.chains.llm import LLMChain values['llm_chain'] = LLMChain(llm=values.get('llm'), prompt= PromptTemplate(template=QUERY_CHECKER, input_variables=['query'])) if values['llm_chain'].prompt.input_variables != ['query']: raise ValueError( "LLM chain fo...
@root_validator(pre=True) def initialize_llm_chain(cls, values: Dict[str, Any]) ->Dict[str, Any]: if 'llm_chain' not in values: from langchain.chains.llm import LLMChain values['llm_chain'] = LLMChain(llm=values.get('llm'), prompt= PromptTemplate(template=QUERY_CHECKER, input_variables=[...
null
lazy_parse
file_path = blob.source if file_path is None: raise ValueError('blob.source cannot be None.') pdf = open(file_path, 'rb') files = {'input': (file_path, pdf, 'application/pdf', {'Expires': '0'})} try: data: Dict[str, Union[str, List[str]]] = {} for param in ['generateIDs', 'consolidateHeader', 'segmentSenten...
def lazy_parse(self, blob: Blob) ->Iterator[Document]: file_path = blob.source if file_path is None: raise ValueError('blob.source cannot be None.') pdf = open(file_path, 'rb') files = {'input': (file_path, pdf, 'application/pdf', {'Expires': '0'})} try: data: Dict[str, Union[str, Li...
null
load_memory_variables
input_key = self._get_prompt_input_key(inputs) query = inputs[input_key] docs = self.retriever.get_relevant_documents(query) return {'chat_history': self.chat_memory.messages[-10:], 'relevant_context': docs}
def load_memory_variables(self, inputs: Dict[str, Any]) ->Dict[str, Any]: input_key = self._get_prompt_input_key(inputs) query = inputs[input_key] docs = self.retriever.get_relevant_documents(query) return {'chat_history': self.chat_memory.messages[-10:], 'relevant_context': docs}
null
test_visit_comparison_range_gt
comp = Comparison(comparator=Comparator.GT, attribute='foo', value=1) expected = {'range': {'metadata.foo': {'gt': 1}}} actual = DEFAULT_TRANSLATOR.visit_comparison(comp) assert expected == actual
def test_visit_comparison_range_gt() ->None: comp = Comparison(comparator=Comparator.GT, attribute='foo', value=1) expected = {'range': {'metadata.foo': {'gt': 1}}} actual = DEFAULT_TRANSLATOR.visit_comparison(comp) assert expected == actual
null
_compute
self._block_back_door_paths() self._set_initial_conditions() self._make_graph() self._sort_entities() self._forward_propagate() self._run_query()
def _compute(self) ->Any: self._block_back_door_paths() self._set_initial_conditions() self._make_graph() self._sort_entities() self._forward_propagate() self._run_query()
null
mock_list_examples
return iter(examples)
def mock_list_examples(*args: Any, **kwargs: Any) ->Iterator[Example]: return iter(examples)
null
test_mistralai_model_param
llm = ChatMistralAI(model='foo') assert llm.model == 'foo'
@pytest.mark.requires('mistralai') def test_mistralai_model_param() ->None: llm = ChatMistralAI(model='foo') assert llm.model == 'foo'
null
test_graph_single_runnable
runnable = StrOutputParser() graph = StrOutputParser().get_graph() first_node = graph.first_node() assert first_node is not None assert first_node.data.schema() == runnable.input_schema.schema() last_node = graph.last_node() assert last_node is not None assert last_node.data.schema() == runnable.output_schema.schema() ...
def test_graph_single_runnable(snapshot: SnapshotAssertion) ->None: runnable = StrOutputParser() graph = StrOutputParser().get_graph() first_node = graph.first_node() assert first_node is not None assert first_node.data.schema() == runnable.input_schema.schema() last_node = graph.last_node() ...
null
foo
"""Add one to the input.""" raise NotImplementedError()
def foo(x: int) ->None: """Add one to the input.""" raise NotImplementedError()
Add one to the input.
_load_dump_file
try: import mwxml except ImportError as e: raise ImportError( "Unable to import 'mwxml'. Please install with `pip install mwxml`." ) from e return mwxml.Dump.from_file(open(self.file_path, encoding=self.encoding))
def _load_dump_file(self): try: import mwxml except ImportError as e: raise ImportError( "Unable to import 'mwxml'. Please install with `pip install mwxml`." ) from e return mwxml.Dump.from_file(open(self.file_path, encoding=self.encoding))
null
_validate_mode
_valid_modes = {'single', 'elements'} if mode not in _valid_modes: raise ValueError( f'Got {mode} for `mode`, but should be one of `{_valid_modes}`')
def _validate_mode(self, mode: str) ->None: _valid_modes = {'single', 'elements'} if mode not in _valid_modes: raise ValueError( f'Got {mode} for `mode`, but should be one of `{_valid_modes}`')
null
similarity_search
"""Return docs most similar to query. Args: query: Text to look up documents similar to. k: Number of Documents to return. Defaults to 4. filter: Filter by metadata. Defaults to None. search_params: Additional search params offset: Off...
def similarity_search(self, query: str, k: int=4, filter: Optional[ MetadataFilter]=None, search_params: Optional[common_types.SearchParams ]=None, offset: int=0, score_threshold: Optional[float]=None, consistency: Optional[common_types.ReadConsistency]=None, **kwargs: Any ) ->List[Document]: """Ret...
Return docs most similar to query. Args: query: Text to look up documents similar to. k: Number of Documents to return. Defaults to 4. filter: Filter by metadata. Defaults to None. search_params: Additional search params offset: Offset of the first result to return. May be used to p...
validate_environment
"""Validate that api key and python package exists in environment.""" mosaicml_api_token = get_from_dict_or_env(values, 'mosaicml_api_token', 'MOSAICML_API_TOKEN') values['mosaicml_api_token'] = mosaicml_api_token return values
@root_validator() def validate_environment(cls, values: Dict) ->Dict: """Validate that api key and python package exists in environment.""" mosaicml_api_token = get_from_dict_or_env(values, 'mosaicml_api_token', 'MOSAICML_API_TOKEN') values['mosaicml_api_token'] = mosaicml_api_token return value...
Validate that api key and python package exists in environment.
test_visit_comparison_range_lt
comp = Comparison(comparator=Comparator.LT, attribute='foo', value=1) expected = {'range': {'metadata.foo': {'lt': 1}}} actual = DEFAULT_TRANSLATOR.visit_comparison(comp) assert expected == actual
def test_visit_comparison_range_lt() ->None: comp = Comparison(comparator=Comparator.LT, attribute='foo', value=1) expected = {'range': {'metadata.foo': {'lt': 1}}} actual = DEFAULT_TRANSLATOR.visit_comparison(comp) assert expected == actual
null
custom_preprocess
return [self.preprocess_msg(m) for m in msg_list]
def custom_preprocess(self, msg_list: Sequence[BaseMessage]) ->List[Dict[ str, str]]: return [self.preprocess_msg(m) for m in msg_list]
null
_create_tool_message
"""Convert agent action and observation into a function message. Args: agent_action: the tool invocation request from the agent observation: the result of the tool invocation Returns: FunctionMessage that corresponds to the original tool invocation """ if not isinstance(observation, ...
def _create_tool_message(agent_action: OpenAIToolAgentAction, observation: str ) ->ToolMessage: """Convert agent action and observation into a function message. Args: agent_action: the tool invocation request from the agent observation: the result of the tool invocation Returns: ...
Convert agent action and observation into a function message. Args: agent_action: the tool invocation request from the agent observation: the result of the tool invocation Returns: FunctionMessage that corresponds to the original tool invocation
test_redis_cache_multi
from upstash_redis import Redis langchain.llm_cache = UpstashRedisCache(redis_=Redis(url=URL, token=TOKEN), ttl=1) llm = FakeLLM() params = llm.dict() params['stop'] = None llm_string = str(sorted([(k, v) for k, v in params.items()])) langchain.llm_cache.update('foo', llm_string, [Generation(text='fizz'), Gener...
def test_redis_cache_multi() ->None: from upstash_redis import Redis langchain.llm_cache = UpstashRedisCache(redis_=Redis(url=URL, token= TOKEN), ttl=1) llm = FakeLLM() params = llm.dict() params['stop'] = None llm_string = str(sorted([(k, v) for k, v in params.items()])) langchain.l...
null
reset_deanonymizer_mapping
"""Reset the deanonymizer mapping""" self._deanonymizer_mapping = DeanonymizerMapping()
def reset_deanonymizer_mapping(self) ->None: """Reset the deanonymizer mapping""" self._deanonymizer_mapping = DeanonymizerMapping()
Reset the deanonymizer mapping
gen_mock_zep_document
from zep_python.document import Document as ZepDocument embedding = [random() for _ in range(embedding_dimensions) ] if embedding_dimensions else None return ZepDocument(uuid=str(uuid4()), collection_name=collection_name, content='Test Document', embedding=embedding, metadata={'key': 'value'})
def gen_mock_zep_document(collection_name: str, embedding_dimensions: Optional[int]=None) ->'ZepDocument': from zep_python.document import Document as ZepDocument embedding = [random() for _ in range(embedding_dimensions) ] if embedding_dimensions else None return ZepDocument(uuid=str(uuid4()), ...
null
_convert_message_to_dict
message_dict: Dict[str, Any] if isinstance(message, ChatMessage): message_dict = {'role': message.role, 'content': message.content} elif isinstance(message, HumanMessage): message_dict = {'role': 'user', 'content': message.content} elif isinstance(message, AIMessage): message_dict = {'role': 'assistant', 'c...
def _convert_message_to_dict(message: BaseMessage) ->dict: message_dict: Dict[str, Any] if isinstance(message, ChatMessage): message_dict = {'role': message.role, 'content': message.content} elif isinstance(message, HumanMessage): message_dict = {'role': 'user', 'content': message.content} ...
null
post
resp = self._post(self.api_url, request) return transform_output_fn(resp) if transform_output_fn else resp
def post(self, request: Any, transform_output_fn: Optional[Callable[...,
    str]]=None) ->Any:
    """Send ``request`` to the configured endpoint and return the response.

    Args:
        request: Payload forwarded verbatim to ``self._post``.
        transform_output_fn: Optional callable applied to the raw response
            before returning; when omitted the raw response is returned.
    """
    raw = self._post(self.api_url, request)
    if transform_output_fn:
        return transform_output_fn(raw)
    return raw
null
_import_huggingface_hub
from langchain_community.llms.huggingface_hub import HuggingFaceHub return HuggingFaceHub
def _import_huggingface_hub() ->Any:
    """Lazily import and return the ``HuggingFaceHub`` LLM class.

    Deferring the import keeps ``langchain_community`` optional until the
    class is actually requested.
    """
    from langchain_community.llms.huggingface_hub import HuggingFaceHub
    return HuggingFaceHub
null
test_tokenization
assert _get_token_ids_default_method('This is a test') == [1212, 318, 257, 1332 ]
def test_tokenization(self) ->None:
    """Check the default tokenizer maps a known sentence to its token ids."""
    expected_ids = [1212, 318, 257, 1332]
    actual_ids = _get_token_ids_default_method('This is a test')
    assert actual_ids == expected_ids
null
test_model_param
"""Test model params works.""" chat = QianfanChatEndpoint() response = chat(model='BLOOMZ-7B', messages=[HumanMessage(content='Hello')]) assert isinstance(response, BaseMessage) assert isinstance(response.content, str)
def test_model_param() ->None:
    """Verify the endpoint accepts a per-call ``model`` override and replies."""
    endpoint = QianfanChatEndpoint()
    greeting = [HumanMessage(content='Hello')]
    reply = endpoint(model='BLOOMZ-7B', messages=greeting)
    assert isinstance(reply, BaseMessage)
    assert isinstance(reply.content, str)
Test model params works.
memory_variables
"""Will always return list of memory variables. :meta private: """ return [self.memory_key]
@property
def memory_variables(self) ->List[str]:
    """Return the single memory variable exposed by this memory.

    :meta private:
    """
    variables = [self.memory_key]
    return variables
Will always return list of memory variables. :meta private:
assign_name
"""Assign name to the run.""" if values.get('name') is None: if 'name' in values['serialized']: values['name'] = values['serialized']['name'] elif 'id' in values['serialized']: values['name'] = values['serialized']['id'][-1] if values.get('events') is None: values['events'] = [] return value...
@root_validator(pre=True) def assign_name(cls, values: dict) ->dict: """Assign name to the run.""" if values.get('name') is None: if 'name' in values['serialized']: values['name'] = values['serialized']['name'] elif 'id' in values['serialized']: values['name'] = values['s...
Assign name to the run.
test_init_with_pipeline_fn
"""Test initialization with a self-hosted HF pipeline.""" gpu = get_remote_instance() llm = SelfHostedPipeline(model_load_fn=load_pipeline, hardware=gpu, model_reqs=model_reqs, inference_fn=inference_fn) output = llm('Say foo:') assert isinstance(output, str)
def test_init_with_pipeline_fn() ->None: """Test initialization with a self-hosted HF pipeline.""" gpu = get_remote_instance() llm = SelfHostedPipeline(model_load_fn=load_pipeline, hardware=gpu, model_reqs=model_reqs, inference_fn=inference_fn) output = llm('Say foo:') assert isinstance(outp...
Test initialization with a self-hosted HF pipeline.
_load_package_modules
"""Recursively load modules of a package based on the file system. Traversal based on the file system makes it easy to determine which of the modules/packages are part of the package vs. 3rd party or built-in. Parameters: package_directory: Path to the package directory. submodule: Optiona...
def _load_package_modules(package_directory: Union[str, Path], submodule: Optional[str]=None) ->Dict[str, ModuleMembers]: """Recursively load modules of a package based on the file system. Traversal based on the file system makes it easy to determine which of the modules/packages are part of the packag...
Recursively load modules of a package based on the file system. Traversal based on the file system makes it easy to determine which of the modules/packages are part of the package vs. 3rd party or built-in. Parameters: package_directory: Path to the package directory. submodule: Optional name of submodule to ...
dependable_usearch_import
""" Import usearch if available, otherwise raise error. """ try: import usearch.index except ImportError: raise ImportError( 'Could not import usearch python package. Please install it with `pip install usearch` ' ) return usearch.index
def dependable_usearch_import() ->Any: """ Import usearch if available, otherwise raise error. """ try: import usearch.index except ImportError: raise ImportError( 'Could not import usearch python package. Please install it with `pip install usearch` ' ) r...
Import usearch if available, otherwise raise error.
test_from_texts_with_metadatas_inner_product
texts = ['Dogs are tough.', 'Cats have fluff.', 'What is a sandwich?', 'The fence is purple.'] metadatas = [{'a': 1}, {'b': 1}, {'c': 1}, {'d': 1, 'e': 2}] vectorstore = AzureCosmosDBVectorSearch.from_texts(texts, azure_openai_embeddings, metadatas=metadatas, collection=collection, index_name=INDEX_NAME) ve...
def test_from_texts_with_metadatas_inner_product(self, azure_openai_embeddings: OpenAIEmbeddings, collection: Any) ->None: texts = ['Dogs are tough.', 'Cats have fluff.', 'What is a sandwich?', 'The fence is purple.'] metadatas = [{'a': 1}, {'b': 1}, {'c': 1}, {'d': 1, 'e': 2}] vectorstore = Azu...
null
_generate_embeddings
"""Generate embeddings using the Embaas API.""" payload = self._generate_payload(texts) try: return self._handle_request(payload) except requests.exceptions.RequestException as e: if e.response is None or not e.response.text: raise ValueError(f'Error raised by embaas embeddings API: {e}') parsed_res...
def _generate_embeddings(self, texts: List[str]) ->List[List[float]]: """Generate embeddings using the Embaas API.""" payload = self._generate_payload(texts) try: return self._handle_request(payload) except requests.exceptions.RequestException as e: if e.response is None or not e.respons...
Generate embeddings using the Embaas API.
embed_documents
"""Embed documents using a Bookend deployed embeddings model. Args: texts: The list of texts to embed. Returns: List of embeddings, one for each text. """ result = [] headers = self.auth_header headers['Content-Type'] = 'application/json; charset=utf-8' params = {'model...
def embed_documents(self, texts: List[str]) ->List[List[float]]: """Embed documents using a Bookend deployed embeddings model. Args: texts: The list of texts to embed. Returns: List of embeddings, one for each text. """ result = [] headers = self.auth_header...
Embed documents using a Bookend deployed embeddings model. Args: texts: The list of texts to embed. Returns: List of embeddings, one for each text.
update
"""Update the documents which have the specified ids. Args: ids: The id list of the updating embedding vector. texts: The texts of the updating documents. metadatas: The metadatas of the updating documents. Returns: the ids of the updated documents. ...
def update(self, ids: List[str], texts: Iterable[str], metadatas: Optional[ List[dict]]=None, **kwargs: Any) ->List[str]: """Update the documents which have the specified ids. Args: ids: The id list of the updating embedding vector. texts: The texts of the updating documents. ...
Update the documents which have the specified ids. Args: ids: The id list of the updating embedding vector. texts: The texts of the updating documents. metadatas: The metadatas of the updating documents. Returns: the ids of the updated documents.
_run
"""Run the tool.""" query = self.api_resource.users().messages().get(userId='me', format='raw', id=message_id) message_data = query.execute() raw_message = base64.urlsafe_b64decode(message_data['raw']) email_msg = email.message_from_bytes(raw_message) subject = email_msg['Subject'] sender = email_msg['From'] messag...
def _run(self, message_id: str, run_manager: Optional[ CallbackManagerForToolRun]=None) ->Dict: """Run the tool.""" query = self.api_resource.users().messages().get(userId='me', format= 'raw', id=message_id) message_data = query.execute() raw_message = base64.urlsafe_b64decode(message_data['...
Run the tool.
plan
"""Given input, decided what to do. Args: intermediate_steps: Steps the LLM has taken to date, along with the observations. callbacks: Callbacks to run. **kwargs: User inputs. Returns: Actions specifying what tool to use. """
@abstractmethod def plan(self, intermediate_steps: List[Tuple[AgentAction, str]], callbacks: Callbacks=None, **kwargs: Any) ->Union[List[AgentAction], AgentFinish]: """Given input, decided what to do. Args: intermediate_steps: Steps the LLM has taken to date, along with the ...
Given input, decide what to do. Args: intermediate_steps: Steps the LLM has taken to date, along with the observations. callbacks: Callbacks to run. **kwargs: User inputs. Returns: Actions specifying what tool to use.
delete
"""Delete by vector IDs. Args: ids: List of ids to delete. delete_all: Delete all records in the table. """ if delete_all: self._delete_all() self.wait_for_indexing(ndocs=0) elif ids is not None: chunk_size = 500 for i in range(0, len(ids), chunk_size): c...
def delete(self, ids: Optional[List[str]]=None, delete_all: Optional[bool]= None, **kwargs: Any) ->None: """Delete by vector IDs. Args: ids: List of ids to delete. delete_all: Delete all records in the table. """ if delete_all: self._delete_all() self...
Delete by vector IDs. Args: ids: List of ids to delete. delete_all: Delete all records in the table.
load
"""Transcribes the audio file and loads the transcript into documents. It uses the AssemblyAI API to transcribe the audio file and blocks until the transcription is finished. """ transcript = self.transcriber.transcribe(self.file_path) if transcript.error: raise ValueError(f'Could not trans...
def load(self) ->List[Document]: """Transcribes the audio file and loads the transcript into documents. It uses the AssemblyAI API to transcribe the audio file and blocks until the transcription is finished. """ transcript = self.transcriber.transcribe(self.file_path) if transcript....
Transcribes the audio file and loads the transcript into documents. It uses the AssemblyAI API to transcribe the audio file and blocks until the transcription is finished.
get_input_schema
if all(s.get_input_schema(config).schema().get('type', 'object') == 'object' for s in self.steps.values()): return create_model(self.get_name('Input'), **{k: (v.annotation, v. default) for step in self.steps.values() for k, v in step. get_input_schema(config).__fields__.items() if k != '__root__...
def get_input_schema(self, config: Optional[RunnableConfig]=None) ->Type[ BaseModel]: if all(s.get_input_schema(config).schema().get('type', 'object') == 'object' for s in self.steps.values()): return create_model(self.get_name('Input'), **{k: (v.annotation, v. default) for step in s...
null
test_cassandra
"""Test end to end construction and search.""" texts = ['foo', 'bar', 'baz'] docsearch = _vectorstore_from_texts(texts) output = docsearch.similarity_search('foo', k=1) assert output == [Document(page_content='foo')]
def test_cassandra() ->None:
    """End-to-end smoke test: index three texts, then search for one of them."""
    corpus = ['foo', 'bar', 'baz']
    store = _vectorstore_from_texts(corpus)
    hits = store.similarity_search('foo', k=1)
    assert hits == [Document(page_content='foo')]
Test end to end construction and search.
load_qa_chain
"""Load question answering chain. Args: llm: Language Model to use in the chain. chain_type: Type of document combining chain to use. Should be one of "stuff", "map_reduce", "map_rerank", and "refine". verbose: Whether chains should be run in verbose mode or not. Note that this ...
def load_qa_chain(llm: BaseLanguageModel, chain_type: str='stuff', verbose: Optional[bool]=None, callback_manager: Optional[BaseCallbackManager]= None, **kwargs: Any) ->BaseCombineDocumentsChain: """Load question answering chain. Args: llm: Language Model to use in the chain. chain_type...
Load question answering chain. Args: llm: Language Model to use in the chain. chain_type: Type of document combining chain to use. Should be one of "stuff", "map_reduce", "map_rerank", and "refine". verbose: Whether chains should be run in verbose mode or not. Note that this applies to all ...
results
"""Silence mypy for accessing this field. :meta private: """ return self.get('results')
@property
def results(self) ->Any:
    """Attribute-style accessor for the ``results`` entry.

    Exists so mypy accepts access to this field.

    :meta private:
    """
    value = self.get('results')
    return value
Silence mypy for accessing this field. :meta private:
get_input_schema
super_schema = super().get_input_schema(config) if super_schema.__custom_root_type__ is not None: from langchain_core.messages import BaseMessage fields: Dict = {} if self.input_messages_key and self.history_messages_key: fields[self.input_messages_key] = Union[str, BaseMessage, Sequence[ ...
def get_input_schema(self, config: Optional[RunnableConfig]=None) ->Type[ BaseModel]: super_schema = super().get_input_schema(config) if super_schema.__custom_root_type__ is not None: from langchain_core.messages import BaseMessage fields: Dict = {} if self.input_messages_key and sel...
null
embed_query
"""Generate a hypothetical document and embedded it.""" var_name = self.llm_chain.input_keys[0] result = self.llm_chain.generate([{var_name: text}]) documents = [generation.text for generation in result.generations[0]] embeddings = self.embed_documents(documents) return self.combine_embeddings(embeddings)
def embed_query(self, text: str) ->List[float]: """Generate a hypothetical document and embedded it.""" var_name = self.llm_chain.input_keys[0] result = self.llm_chain.generate([{var_name: text}]) documents = [generation.text for generation in result.generations[0]] embeddings = self.embed_documents...
Generate a hypothetical document and embed it.
test_singlestoredb_filter_metadata_3
"""Test filtering by two metadata fields""" table_name = 'test_singlestoredb_filter_metadata_3' drop(table_name) docs = [Document(page_content=t, metadata={'index': i, 'category': 'budget' }) for i, t in enumerate(texts)] docsearch = SingleStoreDB.from_documents(docs, FakeEmbeddings(), distance_strategy=Distanc...
@pytest.mark.skipif(not singlestoredb_installed, reason= 'singlestoredb not installed') def test_singlestoredb_filter_metadata_3(texts: List[str]) ->None: """Test filtering by two metadata fields""" table_name = 'test_singlestoredb_filter_metadata_3' drop(table_name) docs = [Document(page_content=t,...
Test filtering by two metadata fields
test_embed_documents_quality
"""Smoke test embedding quality by comparing similar and dissimilar documents.""" model = GoogleGenerativeAIEmbeddings(model=_MODEL) similar_docs = ['Document A', 'Similar Document A'] dissimilar_docs = ['Document A', 'Completely Different Zebra'] similar_embeddings = model.embed_documents(similar_docs) dissimilar_embe...
def test_embed_documents_quality() ->None: """Smoke test embedding quality by comparing similar and dissimilar documents.""" model = GoogleGenerativeAIEmbeddings(model=_MODEL) similar_docs = ['Document A', 'Similar Document A'] dissimilar_docs = ['Document A', 'Completely Different Zebra'] similar_e...
Smoke test embedding quality by comparing similar and dissimilar documents.
_get_default_output_parser
return StructuredChatOutputParserWithRetries.from_llm(llm=llm)
@classmethod
def _get_default_output_parser(cls, llm: Optional[BaseLanguageModel]=None,
    **kwargs: Any) ->AgentOutputParser:
    """Build the default output parser, optionally wired to ``llm`` for retries.

    Extra keyword arguments are accepted for interface compatibility and
    ignored.
    """
    parser = StructuredChatOutputParserWithRetries.from_llm(llm=llm)
    return parser
null
__init__
if not isinstance(urls, list): raise TypeError('urls must be a list') self.urls = urls self.save_dir = save_dir
def __init__(self, urls: List[str], save_dir: str): if not isinstance(urls, list): raise TypeError('urls must be a list') self.urls = urls self.save_dir = save_dir
null
test_get_relevant_documents_with_score
"""Test end to end construction and MRR search.""" from weaviate import Client texts = ['foo', 'bar', 'baz'] metadatas = [{'page': i} for i in range(len(texts))] client = Client(weaviate_url) retriever = WeaviateHybridSearchRetriever(client=client, index_name= f'LangChain_{uuid4().hex}', text_key='text', attributes...
@pytest.mark.vcr(ignore_localhost=True) def test_get_relevant_documents_with_score(self, weaviate_url: str) ->None: """Test end to end construction and MRR search.""" from weaviate import Client texts = ['foo', 'bar', 'baz'] metadatas = [{'page': i} for i in range(len(texts))] client = Client(weavia...
Test end to end construction and MRR search.
__getattr__
"""Get attr name.""" if name == 'create_csv_agent': HERE = Path(__file__).parents[3] here = as_import_path(Path(__file__).parent, relative_to=HERE) old_path = 'langchain.' + here + '.' + name new_path = 'langchain_experimental.' + here + '.' + name raise ImportError( f"""This agent has been ...
def __getattr__(name: str) ->Any: """Get attr name.""" if name == 'create_csv_agent': HERE = Path(__file__).parents[3] here = as_import_path(Path(__file__).parent, relative_to=HERE) old_path = 'langchain.' + here + '.' + name new_path = 'langchain_experimental.' + here + '.' + na...
Get attr name.
delete_collection
""" Just an alias for `clear` (to better align with other VectorStore implementations). """ self.clear()
def delete_collection(self) ->None:
    """Delete all stored data.

    Thin alias for `clear`, kept so this class lines up with the method
    names used by other VectorStore implementations.
    """
    self.clear()
Just an alias for `clear` (to better align with other VectorStore implementations).
_infer_embedding_dimension
"""Infer the embedding dimension from the embedding function.""" assert self.embeddings is not None, 'embedding model is required.' return len(self.embeddings.embed_query('test'))
def _infer_embedding_dimension(self) ->int: """Infer the embedding dimension from the embedding function.""" assert self.embeddings is not None, 'embedding model is required.' return len(self.embeddings.embed_query('test'))
Infer the embedding dimension from the embedding function.
_import_astradb
from langchain_community.vectorstores.astradb import AstraDB return AstraDB
def _import_astradb() ->Any:
    """Lazily import and return the ``AstraDB`` vector store class.

    The deferred import avoids requiring ``langchain_community`` at module
    load time.
    """
    from langchain_community.vectorstores.astradb import AstraDB
    return AstraDB
null
_display_messages
dict_messages = messages_to_dict(messages) for message in dict_messages: yaml_string = yaml.dump(message, default_flow_style=False, sort_keys= False, allow_unicode=True, width=10000, line_break=None) print('\n', '======= start of message =======', '\n\n') print(yaml_string) print('======= end of...
def _display_messages(messages: List[BaseMessage]) ->None: dict_messages = messages_to_dict(messages) for message in dict_messages: yaml_string = yaml.dump(message, default_flow_style=False, sort_keys=False, allow_unicode=True, width=10000, line_break=None) print('\n', '======= start...
null
test_sim_search_with_score_for_ip_metric
""" Test end to end construction and similarity search with score for ip (inner-product) metric. """ hnsw_vec_store = DocArrayHnswSearch.from_texts(texts, FakeEmbeddings(), work_dir=str(tmp_path), n_dim=10, dist_metric='ip') output = hnsw_vec_store.similarity_search_with_score('foo', k=3) assert len(out...
def test_sim_search_with_score_for_ip_metric(texts: List[str], tmp_path: Path ) ->None: """ Test end to end construction and similarity search with score for ip (inner-product) metric. """ hnsw_vec_store = DocArrayHnswSearch.from_texts(texts, FakeEmbeddings(), work_dir=str(tmp_path), n_d...
Test end to end construction and similarity search with score for ip (inner-product) metric.
clear
"""Clear memory contents.""" super().clear() self.moving_summary_buffer = ''
def clear(self) ->None:
    """Clear memory contents.

    Delegates to the parent implementation to drop the stored messages,
    then resets the running summary buffer to an empty string.
    """
    super().clear()
    self.moving_summary_buffer = ''
Clear memory contents.
generate_with_retry
"""Use tenacity to retry the completion call.""" retry_decorator = _create_retry_decorator(llm) @retry_decorator def _generate_with_retry(**_kwargs: Any) ->Any: resp = llm.client.call(**_kwargs) return check_response(resp) return _generate_with_retry(**kwargs)
def generate_with_retry(llm: Tongyi, **kwargs: Any) ->Any: """Use tenacity to retry the completion call.""" retry_decorator = _create_retry_decorator(llm) @retry_decorator def _generate_with_retry(**_kwargs: Any) ->Any: resp = llm.client.call(**_kwargs) return check_response(resp) r...
Use tenacity to retry the completion call.