method_name
stringlengths
1
78
method_body
stringlengths
3
9.66k
full_code
stringlengths
31
10.7k
docstring
stringlengths
4
4.74k
_llm_type
"""Return type of llm.""" return 'huggingface_textgen_inference'
@property def _llm_type(self) ->str: """Return type of llm.""" return 'huggingface_textgen_inference'
Return type of llm.
_run
"""Use the tool.""" image_generator = self.steamship.use_plugin(plugin_handle=self.model_name. value, config={'n': 1, 'size': self.size}) task = image_generator.generate(text=query, append_output_to_file=True) task.wait() blocks = task.output.blocks if len(blocks) > 0: if self.return_urls: return make_image_public(self.steamship, blocks[0]) else: return blocks[0].id raise RuntimeError(f'[{self.name}] Tool unable to generate image!')
def _run(self, query: str, run_manager: Optional[CallbackManagerForToolRun] =None) ->str: """Use the tool.""" image_generator = self.steamship.use_plugin(plugin_handle=self. model_name.value, config={'n': 1, 'size': self.size}) task = image_generator.generate(text=query, append_output_to_file=True) task.wait() blocks = task.output.blocks if len(blocks) > 0: if self.return_urls: return make_image_public(self.steamship, blocks[0]) else: return blocks[0].id raise RuntimeError(f'[{self.name}] Tool unable to generate image!')
Use the tool.
test_together_uses_actual_secret_value_from_secretstr
"""Test that the actual secret value is correctly retrieved.""" llm = Together(together_api_key='secret-api-key', model= 'togethercomputer/RedPajama-INCITE-7B-Base', temperature=0.2, max_tokens=250) assert cast(SecretStr, llm.together_api_key).get_secret_value( ) == 'secret-api-key'
def test_together_uses_actual_secret_value_from_secretstr() ->None: """Test that the actual secret value is correctly retrieved.""" llm = Together(together_api_key='secret-api-key', model= 'togethercomputer/RedPajama-INCITE-7B-Base', temperature=0.2, max_tokens=250) assert cast(SecretStr, llm.together_api_key).get_secret_value( ) == 'secret-api-key'
Test that the actual secret value is correctly retrieved.
from_texts
"""Construct OpenSearchVectorSearch wrapper from raw texts. Example: .. code-block:: python from langchain_community.vectorstores import OpenSearchVectorSearch from langchain_community.embeddings import OpenAIEmbeddings embeddings = OpenAIEmbeddings() opensearch_vector_search = OpenSearchVectorSearch.from_texts( texts, embeddings, opensearch_url="http://localhost:9200" ) OpenSearch by default supports Approximate Search powered by nmslib, faiss and lucene engines recommended for large datasets. Also supports brute force search through Script Scoring and Painless Scripting. Optional Args: vector_field: Document field embeddings are stored in. Defaults to "vector_field". text_field: Document field the text of the document is stored in. Defaults to "text". Optional Keyword Args for Approximate Search: engine: "nmslib", "faiss", "lucene"; default: "nmslib" space_type: "l2", "l1", "cosinesimil", "linf", "innerproduct"; default: "l2" ef_search: Size of the dynamic list used during k-NN searches. Higher values lead to more accurate but slower searches; default: 512 ef_construction: Size of the dynamic list used during k-NN graph creation. Higher values lead to more accurate graph but slower indexing speed; default: 512 m: Number of bidirectional links created for each new element. Large impact on memory consumption. Between 2 and 100; default: 16 Keyword Args for Script Scoring or Painless Scripting: is_appx_search: False """ embeddings = embedding.embed_documents(texts) return cls.from_embeddings(embeddings, texts, embedding, metadatas= metadatas, bulk_size=bulk_size, ids=ids, **kwargs)
@classmethod def from_texts(cls, texts: List[str], embedding: Embeddings, metadatas: Optional[List[dict]]=None, bulk_size: int=500, ids: Optional[List[str]] =None, **kwargs: Any) ->OpenSearchVectorSearch: """Construct OpenSearchVectorSearch wrapper from raw texts. Example: .. code-block:: python from langchain_community.vectorstores import OpenSearchVectorSearch from langchain_community.embeddings import OpenAIEmbeddings embeddings = OpenAIEmbeddings() opensearch_vector_search = OpenSearchVectorSearch.from_texts( texts, embeddings, opensearch_url="http://localhost:9200" ) OpenSearch by default supports Approximate Search powered by nmslib, faiss and lucene engines recommended for large datasets. Also supports brute force search through Script Scoring and Painless Scripting. Optional Args: vector_field: Document field embeddings are stored in. Defaults to "vector_field". text_field: Document field the text of the document is stored in. Defaults to "text". Optional Keyword Args for Approximate Search: engine: "nmslib", "faiss", "lucene"; default: "nmslib" space_type: "l2", "l1", "cosinesimil", "linf", "innerproduct"; default: "l2" ef_search: Size of the dynamic list used during k-NN searches. Higher values lead to more accurate but slower searches; default: 512 ef_construction: Size of the dynamic list used during k-NN graph creation. Higher values lead to more accurate graph but slower indexing speed; default: 512 m: Number of bidirectional links created for each new element. Large impact on memory consumption. Between 2 and 100; default: 16 Keyword Args for Script Scoring or Painless Scripting: is_appx_search: False """ embeddings = embedding.embed_documents(texts) return cls.from_embeddings(embeddings, texts, embedding, metadatas= metadatas, bulk_size=bulk_size, ids=ids, **kwargs)
Construct OpenSearchVectorSearch wrapper from raw texts. Example: .. code-block:: python from langchain_community.vectorstores import OpenSearchVectorSearch from langchain_community.embeddings import OpenAIEmbeddings embeddings = OpenAIEmbeddings() opensearch_vector_search = OpenSearchVectorSearch.from_texts( texts, embeddings, opensearch_url="http://localhost:9200" ) OpenSearch by default supports Approximate Search powered by nmslib, faiss and lucene engines recommended for large datasets. Also supports brute force search through Script Scoring and Painless Scripting. Optional Args: vector_field: Document field embeddings are stored in. Defaults to "vector_field". text_field: Document field the text of the document is stored in. Defaults to "text". Optional Keyword Args for Approximate Search: engine: "nmslib", "faiss", "lucene"; default: "nmslib" space_type: "l2", "l1", "cosinesimil", "linf", "innerproduct"; default: "l2" ef_search: Size of the dynamic list used during k-NN searches. Higher values lead to more accurate but slower searches; default: 512 ef_construction: Size of the dynamic list used during k-NN graph creation. Higher values lead to more accurate graph but slower indexing speed; default: 512 m: Number of bidirectional links created for each new element. Large impact on memory consumption. Between 2 and 100; default: 16 Keyword Args for Script Scoring or Painless Scripting: is_appx_search: False
__enter__
"""Context manager entry point.""" self._client.__enter__() self.prepare_cosmos() return self
def __enter__(self) ->'CosmosDBChatMessageHistory': """Context manager entry point.""" self._client.__enter__() self.prepare_cosmos() return self
Context manager entry point.
add_texts
"""Insert more texts through the embeddings and add to the VectorStore. Args: texts: Iterable of strings to add to the VectorStore. ids: Optional list of ids to associate with the texts. batch_size: Batch size of insertion metadata: Optional column data to be inserted Returns: List of ids from adding the texts into the VectorStore. """ ids = ids or [sha1(t.encode('utf-8')).hexdigest() for t in texts] colmap_ = self.config.column_map transac = [] column_names = {colmap_['id']: ids, colmap_['document']: texts, colmap_[ 'embedding']: self.embedding_function.embed_documents(list(texts))} metadatas = metadatas or [{} for _ in texts] column_names[colmap_['metadata']] = map(json.dumps, metadatas) assert len(set(colmap_) - set(column_names)) >= 0 keys, values = zip(*column_names.items()) try: t = None for v in self.pgbar(zip(*values), desc='Inserting data...', total=len( metadatas)): assert len(v[keys.index(self.config.column_map['embedding'])] ) == self.dim transac.append(v) if len(transac) == batch_size: if t: t.join() t = Thread(target=self._insert, args=[transac, keys]) t.start() transac = [] if len(transac) > 0: if t: t.join() self._insert(transac, keys) return [i for i in ids] except Exception as e: logger.error(f'\x1b[91m\x1b[1m{type(e)}\x1b[0m \x1b[95m{str(e)}\x1b[0m') return []
def add_texts(self, texts: Iterable[str], metadatas: Optional[List[dict]]= None, batch_size: int=32, ids: Optional[Iterable[str]]=None, **kwargs: Any ) ->List[str]: """Insert more texts through the embeddings and add to the VectorStore. Args: texts: Iterable of strings to add to the VectorStore. ids: Optional list of ids to associate with the texts. batch_size: Batch size of insertion metadata: Optional column data to be inserted Returns: List of ids from adding the texts into the VectorStore. """ ids = ids or [sha1(t.encode('utf-8')).hexdigest() for t in texts] colmap_ = self.config.column_map transac = [] column_names = {colmap_['id']: ids, colmap_['document']: texts, colmap_ ['embedding']: self.embedding_function.embed_documents(list(texts))} metadatas = metadatas or [{} for _ in texts] column_names[colmap_['metadata']] = map(json.dumps, metadatas) assert len(set(colmap_) - set(column_names)) >= 0 keys, values = zip(*column_names.items()) try: t = None for v in self.pgbar(zip(*values), desc='Inserting data...', total= len(metadatas)): assert len(v[keys.index(self.config.column_map['embedding'])] ) == self.dim transac.append(v) if len(transac) == batch_size: if t: t.join() t = Thread(target=self._insert, args=[transac, keys]) t.start() transac = [] if len(transac) > 0: if t: t.join() self._insert(transac, keys) return [i for i in ids] except Exception as e: logger.error(f'\x1b[91m\x1b[1m{type(e)}\x1b[0m \x1b[95m{str(e)}\x1b[0m' ) return []
Insert more texts through the embeddings and add to the VectorStore. Args: texts: Iterable of strings to add to the VectorStore. ids: Optional list of ids to associate with the texts. batch_size: Batch size of insertion metadata: Optional column data to be inserted Returns: List of ids from adding the texts into the VectorStore.
test_faiss_with_metadatas_and_list_filter
texts = ['foo', 'bar', 'baz', 'foo', 'qux'] metadatas = [({'page': i} if i <= 3 else {'page': 3}) for i in range(len( texts))] docsearch = FAISS.from_texts(texts, FakeEmbeddings(), metadatas=metadatas) expected_docstore = InMemoryDocstore({docsearch.index_to_docstore_id[0]: Document(page_content='foo', metadata={'page': 0}), docsearch. index_to_docstore_id[1]: Document(page_content='bar', metadata={'page': 1}), docsearch.index_to_docstore_id[2]: Document(page_content='baz', metadata={'page': 2}), docsearch.index_to_docstore_id[3]: Document( page_content='foo', metadata={'page': 3}), docsearch. index_to_docstore_id[4]: Document(page_content='qux', metadata={'page': 3})}) assert docsearch.docstore.__dict__ == expected_docstore.__dict__ output = docsearch.similarity_search('foor', k=1, filter={'page': [0, 1, 2]}) assert output == [Document(page_content='foo', metadata={'page': 0})]
@pytest.mark.requires('faiss') def test_faiss_with_metadatas_and_list_filter() ->None: texts = ['foo', 'bar', 'baz', 'foo', 'qux'] metadatas = [({'page': i} if i <= 3 else {'page': 3}) for i in range( len(texts))] docsearch = FAISS.from_texts(texts, FakeEmbeddings(), metadatas=metadatas) expected_docstore = InMemoryDocstore({docsearch.index_to_docstore_id[0]: Document(page_content='foo', metadata={'page': 0}), docsearch. index_to_docstore_id[1]: Document(page_content='bar', metadata={ 'page': 1}), docsearch.index_to_docstore_id[2]: Document( page_content='baz', metadata={'page': 2}), docsearch. index_to_docstore_id[3]: Document(page_content='foo', metadata={ 'page': 3}), docsearch.index_to_docstore_id[4]: Document( page_content='qux', metadata={'page': 3})}) assert docsearch.docstore.__dict__ == expected_docstore.__dict__ output = docsearch.similarity_search('foor', k=1, filter={'page': [0, 1, 2]}) assert output == [Document(page_content='foo', metadata={'page': 0})]
null
__init__
try: from cassio.history import StoredBlobHistory except (ImportError, ModuleNotFoundError): raise ImportError( 'Could not import cassio python package. Please install it with `pip install cassio`.' ) self.session_id = session_id self.ttl_seconds = ttl_seconds self.blob_history = StoredBlobHistory(session, keyspace, table_name)
def __init__(self, session_id: str, session: Session, keyspace: str, table_name: str=DEFAULT_TABLE_NAME, ttl_seconds: typing.Optional[int]= DEFAULT_TTL_SECONDS) ->None: try: from cassio.history import StoredBlobHistory except (ImportError, ModuleNotFoundError): raise ImportError( 'Could not import cassio python package. Please install it with `pip install cassio`.' ) self.session_id = session_id self.ttl_seconds = ttl_seconds self.blob_history = StoredBlobHistory(session, keyspace, table_name)
null
_generate
"""Call out to Ollama's generate endpoint. Args: prompt: The prompt to pass into the model. stop: Optional list of stop words to use when generating. Returns: The string generated by the model. Example: .. code-block:: python response = ollama("Tell me a joke.") """ generations = [] for prompt in prompts: final_chunk = super()._stream_with_aggregation(prompt, stop=stop, images=images, run_manager=run_manager, verbose=self.verbose, **kwargs) generations.append([final_chunk]) return LLMResult(generations=generations)
def _generate(self, prompts: List[str], stop: Optional[List[str]]=None, images: Optional[List[str]]=None, run_manager: Optional[ CallbackManagerForLLMRun]=None, **kwargs: Any) ->LLMResult: """Call out to Ollama's generate endpoint. Args: prompt: The prompt to pass into the model. stop: Optional list of stop words to use when generating. Returns: The string generated by the model. Example: .. code-block:: python response = ollama("Tell me a joke.") """ generations = [] for prompt in prompts: final_chunk = super()._stream_with_aggregation(prompt, stop=stop, images=images, run_manager=run_manager, verbose=self.verbose, **kwargs) generations.append([final_chunk]) return LLMResult(generations=generations)
Call out to Ollama's generate endpoint. Args: prompt: The prompt to pass into the model. stop: Optional list of stop words to use when generating. Returns: The string generated by the model. Example: .. code-block:: python response = ollama("Tell me a joke.")
test_sql_database_run
"""Test that commands can be run successfully and returned in correct format.""" engine = create_engine('duckdb:///:memory:') metadata_obj.create_all(engine) stmt = insert(user).values(user_id=13, user_name='Harrison') with engine.begin() as conn: conn.execute(stmt) with pytest.warns(Warning) as records: db = SQLDatabase(engine, schema='schema_a') assert len(records) == 1 assert isinstance(records[0].message, Warning) assert records[0].message.args[0 ] == "duckdb-engine doesn't yet support reflection on indices" command = 'select user_name from "user" where user_id = 13' output = db.run(command) expected_output = "[('Harrison',)]" assert output == expected_output
def test_sql_database_run() ->None: """Test that commands can be run successfully and returned in correct format.""" engine = create_engine('duckdb:///:memory:') metadata_obj.create_all(engine) stmt = insert(user).values(user_id=13, user_name='Harrison') with engine.begin() as conn: conn.execute(stmt) with pytest.warns(Warning) as records: db = SQLDatabase(engine, schema='schema_a') assert len(records) == 1 assert isinstance(records[0].message, Warning) assert records[0].message.args[0 ] == "duckdb-engine doesn't yet support reflection on indices" command = 'select user_name from "user" where user_id = 13' output = db.run(command) expected_output = "[('Harrison',)]" assert output == expected_output
Test that commands can be run successfully and returned in correct format.
retriever
return WikipediaRetriever()
@pytest.fixture def retriever() ->WikipediaRetriever: return WikipediaRetriever()
null
test_chat_baichuan_with_model
chat = ChatBaichuan(model='Baichuan2-13B') message = HumanMessage(content='Hello') response = chat([message]) assert isinstance(response, AIMessage) assert isinstance(response.content, str)
def test_chat_baichuan_with_model() ->None: chat = ChatBaichuan(model='Baichuan2-13B') message = HumanMessage(content='Hello') response = chat([message]) assert isinstance(response, AIMessage) assert isinstance(response.content, str)
null
_import_opensearch_vector_search
from langchain_community.vectorstores.opensearch_vector_search import OpenSearchVectorSearch return OpenSearchVectorSearch
def _import_opensearch_vector_search() ->Any: from langchain_community.vectorstores.opensearch_vector_search import OpenSearchVectorSearch return OpenSearchVectorSearch
null
as_tool
return Tool.from_function(func=self._run, name=self.name, description=self. description, args_schema=self.args_schema)
def as_tool(self) ->Tool: return Tool.from_function(func=self._run, name=self.name, description= self.description, args_schema=self.args_schema)
null
test_yellowbrick
"""Test end to end construction and search.""" docsearch = _yellowbrick_vector_from_texts() output = docsearch.similarity_search('foo', k=1) docsearch.drop(YELLOWBRICK_TABLE) assert output == [Document(page_content='foo', metadata={})]
@pytest.mark.requires('yb-vss') def test_yellowbrick() ->None: """Test end to end construction and search.""" docsearch = _yellowbrick_vector_from_texts() output = docsearch.similarity_search('foo', k=1) docsearch.drop(YELLOWBRICK_TABLE) assert output == [Document(page_content='foo', metadata={})]
Test end to end construction and search.
test_init_fail_no_index
with pytest.raises(TypeError): DatabricksVectorSearch()
@pytest.mark.requires('databricks', 'databricks.vector_search') def test_init_fail_no_index() ->None: with pytest.raises(TypeError): DatabricksVectorSearch()
null
test_qdrant_from_texts_stores_embeddings_as_named_vectors
"""Test end to end Qdrant.from_texts stores named vectors if name is provided.""" from qdrant_client import QdrantClient collection_name = uuid.uuid4().hex with tempfile.TemporaryDirectory() as tmpdir: vec_store = Qdrant.from_texts(['lorem', 'ipsum', 'dolor', 'sit', 'amet' ], ConsistentFakeEmbeddings(), collection_name=collection_name, path=str(tmpdir), vector_name=vector_name) del vec_store client = QdrantClient(path=str(tmpdir)) assert 5 == client.count(collection_name).count assert all(vector_name in point.vector for point in client.scroll( collection_name, with_vectors=True)[0])
@pytest.mark.parametrize('vector_name', ['custom-vector']) def test_qdrant_from_texts_stores_embeddings_as_named_vectors(vector_name: str ) ->None: """Test end to end Qdrant.from_texts stores named vectors if name is provided.""" from qdrant_client import QdrantClient collection_name = uuid.uuid4().hex with tempfile.TemporaryDirectory() as tmpdir: vec_store = Qdrant.from_texts(['lorem', 'ipsum', 'dolor', 'sit', 'amet'], ConsistentFakeEmbeddings(), collection_name= collection_name, path=str(tmpdir), vector_name=vector_name) del vec_store client = QdrantClient(path=str(tmpdir)) assert 5 == client.count(collection_name).count assert all(vector_name in point.vector for point in client.scroll( collection_name, with_vectors=True)[0])
Test end to end Qdrant.from_texts stores named vectors if name is provided.
_import_javelin_ai_gateway
from langchain_community.llms.javelin_ai_gateway import JavelinAIGateway return JavelinAIGateway
def _import_javelin_ai_gateway() ->Any: from langchain_community.llms.javelin_ai_gateway import JavelinAIGateway return JavelinAIGateway
null
get_user_agent
from langchain_community import __version__ return f'langchain/{__version__}'
@staticmethod def get_user_agent() ->str: from langchain_community import __version__ return f'langchain/{__version__}'
null
from_llm
"""Convenience method to construct ElasticsearchDatabaseChain from an LLM. Args: llm: The language model to use. database: The Elasticsearch db. query_prompt: The prompt to use for query construction. answer_prompt: The prompt to use for answering user question given data. query_output_parser: The output parser to use for parsing model-generated ES query. Defaults to SimpleJsonOutputParser. **kwargs: Additional arguments to pass to the constructor. """ query_prompt = query_prompt or DSL_PROMPT query_output_parser = query_output_parser or SimpleJsonOutputParser() query_chain = LLMChain(llm=llm, prompt=query_prompt, output_parser= query_output_parser) answer_prompt = answer_prompt or ANSWER_PROMPT answer_chain = LLMChain(llm=llm, prompt=answer_prompt) return cls(query_chain=query_chain, answer_chain=answer_chain, database= database, **kwargs)
@classmethod def from_llm(cls, llm: BaseLanguageModel, database: Elasticsearch, *, query_prompt: Optional[BasePromptTemplate]=None, answer_prompt: Optional[BasePromptTemplate]=None, query_output_parser: Optional[ BaseLLMOutputParser]=None, **kwargs: Any) ->ElasticsearchDatabaseChain: """Convenience method to construct ElasticsearchDatabaseChain from an LLM. Args: llm: The language model to use. database: The Elasticsearch db. query_prompt: The prompt to use for query construction. answer_prompt: The prompt to use for answering user question given data. query_output_parser: The output parser to use for parsing model-generated ES query. Defaults to SimpleJsonOutputParser. **kwargs: Additional arguments to pass to the constructor. """ query_prompt = query_prompt or DSL_PROMPT query_output_parser = query_output_parser or SimpleJsonOutputParser() query_chain = LLMChain(llm=llm, prompt=query_prompt, output_parser= query_output_parser) answer_prompt = answer_prompt or ANSWER_PROMPT answer_chain = LLMChain(llm=llm, prompt=answer_prompt) return cls(query_chain=query_chain, answer_chain=answer_chain, database =database, **kwargs)
Convenience method to construct ElasticsearchDatabaseChain from an LLM. Args: llm: The language model to use. database: The Elasticsearch db. query_prompt: The prompt to use for query construction. answer_prompt: The prompt to use for answering user question given data. query_output_parser: The output parser to use for parsing model-generated ES query. Defaults to SimpleJsonOutputParser. **kwargs: Additional arguments to pass to the constructor.
_import_pubmed
from langchain_community.utilities.pubmed import PubMedAPIWrapper return PubMedAPIWrapper
def _import_pubmed() ->Any: from langchain_community.utilities.pubmed import PubMedAPIWrapper return PubMedAPIWrapper
null
load_pipeline
"""Load pipeline for testing.""" from transformers import AutoModelForCausalLM, AutoTokenizer, pipeline model_id = 'gpt2' tokenizer = AutoTokenizer.from_pretrained(model_id) model = AutoModelForCausalLM.from_pretrained(model_id) pipe = pipeline('text-generation', model=model, tokenizer=tokenizer, max_new_tokens=10) return pipe
def load_pipeline() ->Any: """Load pipeline for testing.""" from transformers import AutoModelForCausalLM, AutoTokenizer, pipeline model_id = 'gpt2' tokenizer = AutoTokenizer.from_pretrained(model_id) model = AutoModelForCausalLM.from_pretrained(model_id) pipe = pipeline('text-generation', model=model, tokenizer=tokenizer, max_new_tokens=10) return pipe
Load pipeline for testing.
verify_schema
""" Args: from_node_labels: labels of the from node relation_type: type of the relation to_node_labels: labels of the to node """ valid_schemas = self.schemas if from_node_labels != []: from_node_labels = [label.strip('`') for label in from_node_labels] valid_schemas = [schema for schema in valid_schemas if schema[0] in from_node_labels] if to_node_labels != []: to_node_labels = [label.strip('`') for label in to_node_labels] valid_schemas = [schema for schema in valid_schemas if schema[2] in to_node_labels] if relation_types != []: relation_types = [type.strip('`') for type in relation_types] valid_schemas = [schema for schema in valid_schemas if schema[1] in relation_types] return valid_schemas != []
def verify_schema(self, from_node_labels: List[str], relation_types: List[ str], to_node_labels: List[str]) ->bool: """ Args: from_node_labels: labels of the from node relation_type: type of the relation to_node_labels: labels of the to node """ valid_schemas = self.schemas if from_node_labels != []: from_node_labels = [label.strip('`') for label in from_node_labels] valid_schemas = [schema for schema in valid_schemas if schema[0] in from_node_labels] if to_node_labels != []: to_node_labels = [label.strip('`') for label in to_node_labels] valid_schemas = [schema for schema in valid_schemas if schema[2] in to_node_labels] if relation_types != []: relation_types = [type.strip('`') for type in relation_types] valid_schemas = [schema for schema in valid_schemas if schema[1] in relation_types] return valid_schemas != []
Args: from_node_labels: labels of the from node relation_type: type of the relation to_node_labels: labels of the to node
lazy_load
""" Lazy load the chat sessions from the iterable of run IDs. This method fetches the runs and converts them to chat sessions on-the-fly, yielding one session at a time. :return: Iterator of chat sessions containing messages. """ from langsmith.schemas import Run for run_obj in self.runs: try: if hasattr(run_obj, 'id'): run = run_obj else: run = self.client.read_run(run_obj) session = self._load_single_chat_session(cast(Run, run)) yield session except ValueError as e: logger.warning(f'Could not load run {run_obj}: {repr(e)}') continue
def lazy_load(self) ->Iterator[ChatSession]: """ Lazy load the chat sessions from the iterable of run IDs. This method fetches the runs and converts them to chat sessions on-the-fly, yielding one session at a time. :return: Iterator of chat sessions containing messages. """ from langsmith.schemas import Run for run_obj in self.runs: try: if hasattr(run_obj, 'id'): run = run_obj else: run = self.client.read_run(run_obj) session = self._load_single_chat_session(cast(Run, run)) yield session except ValueError as e: logger.warning(f'Could not load run {run_obj}: {repr(e)}') continue
Lazy load the chat sessions from the iterable of run IDs. This method fetches the runs and converts them to chat sessions on-the-fly, yielding one session at a time. :return: Iterator of chat sessions containing messages.
_rdf_s_schema
return f"""In the following, each IRI is followed by the local name and optionally its description in parentheses. The RDF graph supports the following node types: {', '.join([self._res_to_str(r, 'cls') for r in classes])} The RDF graph supports the following relationships: {', '.join([self._res_to_str(r, 'rel') for r in relationships])} """
def _rdf_s_schema(classes: List[rdflib.query.ResultRow], relationships: List[rdflib.query.ResultRow]) ->str: return f"""In the following, each IRI is followed by the local name and optionally its description in parentheses. The RDF graph supports the following node types: {', '.join([self._res_to_str(r, 'cls') for r in classes])} The RDF graph supports the following relationships: {', '.join([self._res_to_str(r, 'rel') for r in relationships])} """
null
test_spacy_text_splitter
"""Test splitting by sentence using Spacy.""" text = 'This is sentence one. And this is sentence two.' separator = '|||' splitter = SpacyTextSplitter(separator=separator, pipeline=pipeline) output = splitter.split_text(text) expected_output = [f'This is sentence one.{separator}And this is sentence two.' ] assert output == expected_output
@pytest.mark.parametrize('pipeline', ['sentencizer', 'en_core_web_sm']) def test_spacy_text_splitter(pipeline: str) ->None: """Test splitting by sentence using Spacy.""" text = 'This is sentence one. And this is sentence two.' separator = '|||' splitter = SpacyTextSplitter(separator=separator, pipeline=pipeline) output = splitter.split_text(text) expected_output = [ f'This is sentence one.{separator}And this is sentence two.'] assert output == expected_output
Test splitting by sentence using Spacy.
transform
""" Default implementation of transform, which buffers input and then calls stream. Subclasses should override this method if they can start producing output while input is still being generated. """ final: Input got_first_val = False for chunk in input: if not got_first_val: final = chunk got_first_val = True else: final = final + chunk if got_first_val: yield from self.stream(final, config, **kwargs)
def transform(self, input: Iterator[Input], config: Optional[RunnableConfig ]=None, **kwargs: Optional[Any]) ->Iterator[Output]: """ Default implementation of transform, which buffers input and then calls stream. Subclasses should override this method if they can start producing output while input is still being generated. """ final: Input got_first_val = False for chunk in input: if not got_first_val: final = chunk got_first_val = True else: final = final + chunk if got_first_val: yield from self.stream(final, config, **kwargs)
Default implementation of transform, which buffers input and then calls stream. Subclasses should override this method if they can start producing output while input is still being generated.
get_lc_namespace
"""Get the namespace of the langchain object.""" return ['langchain', 'chat_models', 'fireworks']
@classmethod def get_lc_namespace(cls) ->List[str]: """Get the namespace of the langchain object.""" return ['langchain', 'chat_models', 'fireworks']
Get the namespace of the langchain object.
delete
"""Delete the documents which have the specified ids. Args: ids: The ids of the embedding vectors. **kwargs: Other keyword arguments that subclasses might use. Returns: Optional[bool]: True if deletion is successful. False otherwise, None if not implemented. """ if self.awadb_client is None: raise ValueError('AwaDB client is None!!!') ret: Optional[bool] = None if ids is None or ids.__len__() == 0: return ret ret = self.awadb_client.Delete(ids) return ret
def delete(self, ids: Optional[List[str]]=None, **kwargs: Any) ->Optional[bool ]: """Delete the documents which have the specified ids. Args: ids: The ids of the embedding vectors. **kwargs: Other keyword arguments that subclasses might use. Returns: Optional[bool]: True if deletion is successful. False otherwise, None if not implemented. """ if self.awadb_client is None: raise ValueError('AwaDB client is None!!!') ret: Optional[bool] = None if ids is None or ids.__len__() == 0: return ret ret = self.awadb_client.Delete(ids) return ret
Delete the documents which have the specified ids. Args: ids: The ids of the embedding vectors. **kwargs: Other keyword arguments that subclasses might use. Returns: Optional[bool]: True if deletion is successful. False otherwise, None if not implemented.
ignore_retriever
"""Whether to ignore retriever callbacks.""" return self.ignore_retriever_
@property def ignore_retriever(self) ->bool: """Whether to ignore retriever callbacks.""" return self.ignore_retriever_
Whether to ignore retriever callbacks.
_stream_response_to_generation_chunk
"""Convert a stream response to a generation chunk.""" parsed_response = json.loads(stream_response) generation_info = parsed_response if parsed_response.get('done' ) is True else None return GenerationChunk(text=parsed_response.get('response', ''), generation_info=generation_info)
def _stream_response_to_generation_chunk(stream_response: str ) ->GenerationChunk: """Convert a stream response to a generation chunk.""" parsed_response = json.loads(stream_response) generation_info = parsed_response if parsed_response.get('done' ) is True else None return GenerationChunk(text=parsed_response.get('response', ''), generation_info=generation_info)
Convert a stream response to a generation chunk.
test_seq_dict_prompt_llm
passthrough = mocker.Mock(side_effect=lambda x: x) retriever = FakeRetriever() prompt = SystemMessagePromptTemplate.from_template('You are a nice assistant.') + """Context: {documents} Question: {question}""" chat = FakeListChatModel(responses=['foo, bar']) parser = CommaSeparatedListOutputParser() chain: Runnable = {'question': RunnablePassthrough[str]() | passthrough, 'documents': passthrough | retriever, 'just_to_test_lambda': passthrough } | prompt | chat | parser assert repr(chain) == snapshot assert isinstance(chain, RunnableSequence) assert isinstance(chain.first, RunnableParallel) assert chain.middle == [prompt, chat] assert chain.last == parser assert dumps(chain, pretty=True) == snapshot prompt_spy = mocker.spy(prompt.__class__, 'invoke') chat_spy = mocker.spy(chat.__class__, 'invoke') parser_spy = mocker.spy(parser.__class__, 'invoke') tracer = FakeTracer() assert chain.invoke('What is your name?', dict(callbacks=[tracer])) == ['foo', 'bar'] assert prompt_spy.call_args.args[1] == {'documents': [Document(page_content ='foo'), Document(page_content='bar')], 'question': 'What is your name?', 'just_to_test_lambda': 'What is your name?'} assert chat_spy.call_args.args[1] == ChatPromptValue(messages=[ SystemMessage(content='You are a nice assistant.'), HumanMessage( content= """Context: [Document(page_content='foo'), Document(page_content='bar')] Question: What is your name?""" )]) assert parser_spy.call_args.args[1] == AIMessage(content='foo, bar') assert len([r for r in tracer.runs if r.parent_run_id is None]) == 1 parent_run = next(r for r in tracer.runs if r.parent_run_id is None) assert len(parent_run.child_runs) == 4 map_run = parent_run.child_runs[0] assert map_run.name == 'RunnableParallel<question,documents,just_to_test_lambda>' assert len(map_run.child_runs) == 3
@freeze_time('2023-01-01')
def test_seq_dict_prompt_llm(mocker: MockerFixture, snapshot: SnapshotAssertion
    ) ->None:
    """End-to-end check of a dict-of-runnables -> prompt -> chat -> parser
    sequence: structure, serialization snapshots, invoke output, spy'd
    intermediate values, and tracer run tree shape."""
    # passthrough echoes its input; also lets us assert call routing via Mock.
    passthrough = mocker.Mock(side_effect=lambda x: x)
    retriever = FakeRetriever()
    prompt = SystemMessagePromptTemplate.from_template('You are a nice assistant.') + """Context:
{documents}

Question:
{question}"""
    chat = FakeListChatModel(responses=['foo, bar'])
    parser = CommaSeparatedListOutputParser()
    # The dict literal is auto-coerced into a RunnableParallel as chain.first.
    chain: Runnable = {'question': RunnablePassthrough[str]() | passthrough,
        'documents': passthrough | retriever, 'just_to_test_lambda':
        passthrough} | prompt | chat | parser
    assert repr(chain) == snapshot
    assert isinstance(chain, RunnableSequence)
    assert isinstance(chain.first, RunnableParallel)
    assert chain.middle == [prompt, chat]
    assert chain.last == parser
    assert dumps(chain, pretty=True) == snapshot
    # Spy on invoke of each stage to inspect the intermediate inputs below.
    prompt_spy = mocker.spy(prompt.__class__, 'invoke')
    chat_spy = mocker.spy(chat.__class__, 'invoke')
    parser_spy = mocker.spy(parser.__class__, 'invoke')
    tracer = FakeTracer()
    assert chain.invoke('What is your name?', dict(callbacks=[tracer])) == ['foo', 'bar']
    # The parallel map fans the single string input out to all three keys.
    assert prompt_spy.call_args.args[1] == {'documents': [Document(page_content
        ='foo'), Document(page_content='bar')], 'question':
        'What is your name?', 'just_to_test_lambda': 'What is your name?'}
    assert chat_spy.call_args.args[1] == ChatPromptValue(messages=[
        SystemMessage(content='You are a nice assistant.'), HumanMessage(
        content=
        """Context:
[Document(page_content='foo'), Document(page_content='bar')]

Question:
What is your name?"""
        )])
    assert parser_spy.call_args.args[1] == AIMessage(content='foo, bar')
    # Exactly one root run; its children are map, prompt, chat, parser.
    assert len([r for r in tracer.runs if r.parent_run_id is None]) == 1
    parent_run = next(r for r in tracer.runs if r.parent_run_id is None)
    assert len(parent_run.child_runs) == 4
    map_run = parent_run.child_runs[0]
    assert map_run.name == 'RunnableParallel<question,documents,just_to_test_lambda>'
    assert len(map_run.child_runs) == 3
null
_chat
if self.service_url is None: res = requests.models.Response() res.status_code = 503 res.reason = 'The IP address or port of the chat service is incorrect.' return res service_url = f'{self.service_url}/v1/chat/completions' payload = {'model': self.model, 'messages': [_convert_message_to_dict(m) for m in messages]} res = requests.post(url=service_url, timeout=self.request_timeout, headers= {'accept': 'application/json', 'Content-Type': 'application/json'}, data=json.dumps(payload)) return res
def _chat(self, messages: List[BaseMessage], **kwargs: Any
    ) ->requests.Response:
    """POST the conversation to the service's chat-completions endpoint.

    When no service URL is configured, fabricate a 503 response instead of
    raising so callers get a uniform ``requests.Response``.
    """
    if self.service_url is None:
        error_response = requests.models.Response()
        error_response.status_code = 503
        error_response.reason = 'The IP address or port of the chat service is incorrect.'
        return error_response
    endpoint = f'{self.service_url}/v1/chat/completions'
    body = json.dumps({'model': self.model, 'messages': [
        _convert_message_to_dict(m) for m in messages]})
    return requests.post(url=endpoint, timeout=self.request_timeout,
        headers={'accept': 'application/json', 'Content-Type':
        'application/json'}, data=body)
null
_identifying_params
"""Get the identifying parameters.""" return {'model_uri': self.model_uri, 'temperature': self.temperature, 'max_tokens': self.max_tokens, 'stop': self.stop, 'max_retries': self. max_retries}
@property
def _identifying_params(self) ->Mapping[str, Any]:
    """Get the identifying parameters."""
    # Each attribute is surfaced under its own name, in this fixed order.
    param_names = ('model_uri', 'temperature', 'max_tokens', 'stop',
        'max_retries')
    return {name: getattr(self, name) for name in param_names}
Get the identifying parameters.
load
"""Load all documents.""" return list(self.lazy_load())
def load(self) ->List[Document]:
    """Load all documents."""
    # Eagerly drain the lazy iterator into a concrete list.
    return [*self.lazy_load()]
Load all documents.
_import_youtube_search
from langchain_community.tools.youtube.search import YouTubeSearchTool return YouTubeSearchTool
def _import_youtube_search() ->Any:
    """Lazily import the YouTube search tool to avoid a hard dependency."""
    from langchain_community.tools.youtube.search import (
        YouTubeSearchTool as imported_cls)
    return imported_cls
null
get_token_ids
if self._model_is_anthropic: return get_token_ids_anthropic(text) else: return super().get_token_ids(text)
def get_token_ids(self, text: str) ->List[int]:
    """Tokenize ``text``, using the Anthropic tokenizer for Anthropic models."""
    if not self._model_is_anthropic:
        # Non-Anthropic models fall back to the base-class tokenizer.
        return super().get_token_ids(text)
    return get_token_ids_anthropic(text)
null
test_no_delete
"""Test indexing without a deletion strategy.""" loader = ToyLoader(documents=[Document(page_content= 'This is a test document.', metadata={'source': '1'}), Document( page_content='This is another document.', metadata={'source': '2'})]) with patch.object(record_manager, 'get_time', return_value=datetime(2021, 1, 2).timestamp()): assert index(loader, record_manager, vector_store, cleanup=None, source_id_key='source') == {'num_added': 2, 'num_deleted': 0, 'num_skipped': 0, 'num_updated': 0} with patch.object(record_manager, 'get_time', return_value=datetime(2021, 1, 2).timestamp()): assert index(loader, record_manager, vector_store, cleanup=None, source_id_key='source') == {'num_added': 0, 'num_deleted': 0, 'num_skipped': 2, 'num_updated': 0} loader = ToyLoader(documents=[Document(page_content='mutated content', metadata={'source': '1'}), Document(page_content= 'This is another document.', metadata={'source': '2'})]) with patch.object(record_manager, 'get_time', return_value=datetime(2021, 1, 2).timestamp()): assert index(loader, record_manager, vector_store, cleanup=None, source_id_key='source') == {'num_added': 1, 'num_deleted': 0, 'num_skipped': 1, 'num_updated': 0}
def test_no_delete(record_manager: SQLRecordManager, vector_store:
    InMemoryVectorStore) ->None:
    """Test indexing without a deletion strategy."""
    loader = ToyLoader(documents=[Document(page_content=
        'This is a test document.', metadata={'source': '1'}), Document(
        page_content='This is another document.', metadata={'source': '2'})])
    # First pass: both documents are new, so both are added.
    with patch.object(record_manager, 'get_time', return_value=datetime(
        2021, 1, 2).timestamp()):
        assert index(loader, record_manager, vector_store, cleanup=None,
            source_id_key='source') == {'num_added': 2, 'num_deleted': 0,
            'num_skipped': 0, 'num_updated': 0}
    # Second pass with identical content: everything is skipped, nothing
    # is deleted because cleanup=None disables deletion entirely.
    with patch.object(record_manager, 'get_time', return_value=datetime(
        2021, 1, 2).timestamp()):
        assert index(loader, record_manager, vector_store, cleanup=None,
            source_id_key='source') == {'num_added': 0, 'num_deleted': 0,
            'num_skipped': 2, 'num_updated': 0}
    # Mutate the first document's content; only that one should be re-added.
    loader = ToyLoader(documents=[Document(page_content='mutated content',
        metadata={'source': '1'}), Document(page_content=
        'This is another document.', metadata={'source': '2'})])
    with patch.object(record_manager, 'get_time', return_value=datetime(
        2021, 1, 2).timestamp()):
        assert index(loader, record_manager, vector_store, cleanup=None,
            source_id_key='source') == {'num_added': 1, 'num_deleted': 0,
            'num_skipped': 1, 'num_updated': 0}
Test indexing without a deletion strategy.
search_tool
"""Returns num_search_results pages per Google search.""" query_clean = self.clean_search_query(query) result = self.search.results(query_clean, num_search_results) return result
def search_tool(self, query: str, num_search_results: int=1) ->List[dict]:
    """Returns num_search_results pages per Google search."""
    # Normalize the query first, then delegate to the search wrapper.
    sanitized_query = self.clean_search_query(query)
    return self.search.results(sanitized_query, num_search_results)
Returns num_search_results pages per Google search.
load
return self._get_resource()
def load(self) ->List[Document]:
    # Delegates entirely to the resource fetcher; per the annotation the
    # helper is expected to return a list of Documents.
    return self._get_resource()
null
test_invalid_payload
mock_response = MagicMock() mock_response.status_code = 400 mock_response.json.return_value = {} mock_post.return_value = mock_response with pytest.raises(ValueError): tool._run('some query')
def test_invalid_payload(mock_post: MagicMock) ->None:
    """A 400 response with an empty JSON body should raise ``ValueError``."""
    bad_response = MagicMock()
    bad_response.status_code = 400
    bad_response.json.return_value = {}
    mock_post.return_value = bad_response
    with pytest.raises(ValueError):
        tool._run('some query')
null
_import_atlas
from langchain_community.vectorstores.atlas import AtlasDB return AtlasDB
def _import_atlas() ->Any:
    """Lazily import the AtlasDB vector store to avoid a hard dependency."""
    from langchain_community.vectorstores.atlas import AtlasDB as imported_cls
    return imported_cls
null
prompt_length
"""Return the prompt length given the documents passed in. This can be used by a caller to determine whether passing in a list of documents would exceed a certain prompt length. This useful when trying to ensure that the size of a prompt remains below a certain context limit. Args: docs: List[Document], a list of documents to use to calculate the total prompt length. Returns: Returns None if the method does not depend on the prompt length, otherwise the length of the prompt in tokens. """ inputs = self._get_inputs(docs, **kwargs) prompt = self.llm_chain.prompt.format(**inputs) return self.llm_chain._get_num_tokens(prompt)
def prompt_length(self, docs: List[Document], **kwargs: Any) ->Optional[int]:
    """Return the prompt length given the documents passed in.

    This can be used by a caller to determine whether passing in a list of
    documents would exceed a certain prompt length. This is useful when
    trying to ensure that the size of a prompt remains below a certain
    context limit.

    Args:
        docs: List[Document], a list of documents to use to calculate the
            total prompt length.

    Returns:
        Returns None if the method does not depend on the prompt length,
        otherwise the length of the prompt in tokens.
    """
    chain_inputs = self._get_inputs(docs, **kwargs)
    rendered_prompt = self.llm_chain.prompt.format(**chain_inputs)
    return self.llm_chain._get_num_tokens(rendered_prompt)
Return the prompt length given the documents passed in. This can be used by a caller to determine whether passing in a list of documents would exceed a certain prompt length. This is useful when trying to ensure that the size of a prompt remains below a certain context limit. Args: docs: List[Document], a list of documents to use to calculate the total prompt length. Returns: Returns None if the method does not depend on the prompt length, otherwise the length of the prompt in tokens.
raise_warning
"""Raise warning that this class is deprecated.""" warnings.warn( 'This Anthropic LLM is deprecated. Please use `from langchain_community.chat_models import ChatAnthropic` instead' ) return values
@root_validator()
def raise_warning(cls, values: Dict) ->Dict:
    """Raise warning that this class is deprecated."""
    # Emitted at model-validation time; the field values pass through
    # unchanged so construction still succeeds.
    warnings.warn(
        'This Anthropic LLM is deprecated. Please use `from langchain_community.chat_models import ChatAnthropic` instead'
        )
    return values
Raise warning that this class is deprecated.
lazy_load_docs
for d in self.lazy_load(query=query): yield self._dict2document(d)
def lazy_load_docs(self, query: str) ->Iterator[Document]:
    """Stream query results, converting each raw record to a ``Document``."""
    yield from map(self._dict2document, self.lazy_load(query=query))
null
get_authorization_request_url
return 'fake_authorization_url'
def get_authorization_request_url(self, *args: Any, **kwargs: Any) ->str:
    """Return a canned authorization URL; all arguments are ignored."""
    fake_url = 'fake_authorization_url'
    return fake_url
null
from_texts
"""Create a SingleStoreDB vectorstore from raw documents. This is a user-friendly interface that: 1. Embeds documents. 2. Creates a new table for the embeddings in SingleStoreDB. 3. Adds the documents to the newly created table. This is intended to be a quick way to get started. Example: .. code-block:: python from langchain_community.vectorstores import SingleStoreDB from langchain_community.embeddings import OpenAIEmbeddings s2 = SingleStoreDB.from_texts( texts, OpenAIEmbeddings(), host="username:password@localhost:3306/database" ) """ instance = cls(embedding, distance_strategy=distance_strategy, table_name= table_name, content_field=content_field, metadata_field=metadata_field, vector_field=vector_field, pool_size=pool_size, max_overflow= max_overflow, timeout=timeout, **kwargs) instance.add_texts(texts, metadatas, embedding.embed_documents(texts), **kwargs ) return instance
@classmethod
def from_texts(cls: Type[SingleStoreDB], texts: List[str], embedding:
    Embeddings, metadatas: Optional[List[dict]]=None, distance_strategy:
    DistanceStrategy=DEFAULT_DISTANCE_STRATEGY, table_name: str=
    'embeddings', content_field: str='content', metadata_field: str=
    'metadata', vector_field: str='vector', pool_size: int=5, max_overflow:
    int=10, timeout: float=30, **kwargs: Any) ->SingleStoreDB:
    """Create a SingleStoreDB vectorstore from raw documents.

    This is a user-friendly interface that:
        1. Embeds documents.
        2. Creates a new table for the embeddings in SingleStoreDB.
        3. Adds the documents to the newly created table.

    This is intended to be a quick way to get started.

    Example:
        .. code-block:: python

            from langchain_community.vectorstores import SingleStoreDB
            from langchain_community.embeddings import OpenAIEmbeddings

            s2 = SingleStoreDB.from_texts(
                texts,
                OpenAIEmbeddings(),
                host="username:password@localhost:3306/database"
            )
    """
    store = cls(embedding, distance_strategy=distance_strategy, table_name
        =table_name, content_field=content_field, metadata_field=
        metadata_field, vector_field=vector_field, pool_size=pool_size,
        max_overflow=max_overflow, timeout=timeout, **kwargs)
    # Embed up front so add_texts does not have to recompute the vectors.
    vectors = embedding.embed_documents(texts)
    store.add_texts(texts, metadatas, vectors, **kwargs)
    return store
Create a SingleStoreDB vectorstore from raw documents. This is a user-friendly interface that: 1. Embeds documents. 2. Creates a new table for the embeddings in SingleStoreDB. 3. Adds the documents to the newly created table. This is intended to be a quick way to get started. Example: .. code-block:: python from langchain_community.vectorstores import SingleStoreDB from langchain_community.embeddings import OpenAIEmbeddings s2 = SingleStoreDB.from_texts( texts, OpenAIEmbeddings(), host="username:password@localhost:3306/database" )
test_json_spec_value
"""Test JsonSpec can return value of a dict at given path.""" spec = JsonSpec(dict_={'foo': 'bar', 'baz': {'test': {'foo': [1, 2, 3]}}}) assert spec.value('data' ) == "{'foo': 'bar', 'baz': {'test': {'foo': [1, 2, 3]}}}" assert spec.value('data["foo"]') == 'bar' assert spec.value('data["baz"]') == "{'test': {'foo': [1, 2, 3]}}" assert spec.value('data["baz"]["test"]') == "{'foo': [1, 2, 3]}" assert spec.value('data["baz"]["test"]["foo"]') == '[1, 2, 3]' assert spec.value("data['foo']") == 'bar' assert spec.value("data['baz']") == "{'test': {'foo': [1, 2, 3]}}" assert spec.value("data['baz']['test']") == "{'foo': [1, 2, 3]}" assert spec.value("data['baz']['test']['foo']") == '[1, 2, 3]'
def test_json_spec_value() ->None:
    """Test JsonSpec can return value of a dict at given path."""
    spec = JsonSpec(dict_={'foo': 'bar', 'baz': {'test': {'foo': [1, 2, 3]}}})
    # Root lookup returns the repr of the whole dict.
    assert spec.value('data'
        ) == "{'foo': 'bar', 'baz': {'test': {'foo': [1, 2, 3]}}}"
    # Nested lookups must work with both double- and single-quoted keys.
    cases = [(['foo'], 'bar'), (['baz'], "{'test': {'foo': [1, 2, 3]}}"), (
        ['baz', 'test'], "{'foo': [1, 2, 3]}"), (['baz', 'test', 'foo'],
        '[1, 2, 3]')]
    for quote in ('"', "'"):
        for keys, expected in cases:
            path = 'data' + ''.join(f'[{quote}{key}{quote}]' for key in keys)
            assert spec.value(path) == expected
Test JsonSpec can return value of a dict at given path.
test_python_repl_no_previous_variables
"""Test that it does not have access to variables created outside the scope.""" foo = 3 repl = PythonREPL() output = repl.run('print(foo)') assert output == 'NameError("name \'foo\' is not defined")'
def test_python_repl_no_previous_variables() ->None:
    """Test that it does not have access to variables created outside the scope."""
    # Defined on purpose: the REPL must NOT see this enclosing local.
    foo = 3
    interpreter = PythonREPL()
    result = interpreter.run('print(foo)')
    assert result == 'NameError("name \'foo\' is not defined")'
Test that it does not have access to variables created outside the scope.
_get_jinja2_variables_from_template
try: from jinja2 import Environment, meta except ImportError: raise ImportError( 'jinja2 not installed, which is needed to use the jinja2_formatter. Please install it with `pip install jinja2`.' ) env = Environment() ast = env.parse(template) variables = meta.find_undeclared_variables(ast) return variables
def _get_jinja2_variables_from_template(template: str) ->Set[str]:
    """Return the undeclared variable names referenced by a jinja2 template."""
    try:
        from jinja2 import Environment, meta
    except ImportError:
        raise ImportError(
            'jinja2 not installed, which is needed to use the jinja2_formatter. Please install it with `pip install jinja2`.'
            )
    parsed_ast = Environment().parse(template)
    return meta.find_undeclared_variables(parsed_ast)
null
_run
"""Use the tool.""" if self.func: new_argument_supported = signature(self.func).parameters.get('callbacks') return self.func(*args, callbacks=run_manager.get_child() if run_manager else None, **kwargs ) if new_argument_supported else self.func(*args, **kwargs) raise NotImplementedError('Tool does not support sync')
def _run(self, *args: Any, run_manager: Optional[CallbackManagerForToolRun]
    =None, **kwargs: Any) ->Any:
    """Use the tool."""
    if not self.func:
        raise NotImplementedError('Tool does not support sync')
    # Only forward callbacks when the wrapped callable declares the parameter.
    accepts_callbacks = signature(self.func).parameters.get('callbacks')
    if accepts_callbacks:
        child_callbacks = run_manager.get_child() if run_manager else None
        return self.func(*args, callbacks=child_callbacks, **kwargs)
    return self.func(*args, **kwargs)
Use the tool.
__init__
self.underlying_store = _UpstashRedisStore(client=client, url=url, token= token, ttl=ttl, namespace=namespace)
def __init__(self, *, client: Any=None, url: Optional[str]=None, token:
    Optional[str]=None, ttl: Optional[int]=None, namespace: Optional[str]=None
    ) ->None:
    """Thin facade over ``_UpstashRedisStore``.

    All keyword arguments are forwarded unchanged to the underlying store;
    presumably either ``client`` or a ``url``/``token`` pair is required —
    confirm against ``_UpstashRedisStore``.
    """
    self.underlying_store = _UpstashRedisStore(client=client, url=url,
        token=token, ttl=ttl, namespace=namespace)
null
test_function_message_dict_to_function_message
content = json.dumps({'result': 'Example #1'}) name = 'test_function' result = convert_dict_to_message({'role': 'function', 'name': name, 'content': content}) assert isinstance(result, FunctionMessage) assert result.name == name assert result.content == content
def test_function_message_dict_to_function_message() ->None:
    """A role=function dict converts into a matching ``FunctionMessage``."""
    payload = json.dumps({'result': 'Example #1'})
    function_name = 'test_function'
    message = convert_dict_to_message({'role': 'function', 'name':
        function_name, 'content': payload})
    assert isinstance(message, FunctionMessage)
    assert message.name == function_name
    assert message.content == payload
null
__bool__
return bool(self.nodes)
def __bool__(self) ->bool:
    # Truthiness tracks whether any nodes are present.
    return bool(self.nodes)
null
get_cards
"""We do not need to test the card-filter since is on Trello Client side.""" return self.cards
def get_cards(self, card_filter: Optional[str]='') ->list:
    """Return the stubbed cards; ``card_filter`` is ignored because filtering
    happens on the Trello client side and needs no coverage here."""
    return self.cards
We do not need to test the card-filter since it is handled on the Trello client side.
_import_ainetwork_value
from langchain_community.tools.ainetwork.value import AINValueOps return AINValueOps
def _import_ainetwork_value() ->Any:
    """Lazily import the AINetwork value tool to avoid a hard dependency."""
    from langchain_community.tools.ainetwork.value import (AINValueOps as
        imported_cls)
    return imported_cls
null
_import_google_palm
from langchain_community.llms.google_palm import GooglePalm return GooglePalm
def _import_google_palm() ->Any:
    """Lazily import the Google PaLM LLM to avoid a hard dependency."""
    from langchain_community.llms.google_palm import GooglePalm as imported_cls
    return imported_cls
null
_import_azure_cognitive_services_AzureCogsImageAnalysisTool
from langchain_community.tools.azure_cognitive_services import AzureCogsImageAnalysisTool return AzureCogsImageAnalysisTool
def _import_azure_cognitive_services_AzureCogsImageAnalysisTool() ->Any:
    """Lazily import the Azure image-analysis tool to avoid a hard dependency."""
    from langchain_community.tools.azure_cognitive_services import (
        AzureCogsImageAnalysisTool as imported_cls)
    return imported_cls
null
test_api_key_masked_when_passed_via_constructor
llm = LLMRailsEmbeddings(api_key='secret-api-key') print(llm.api_key, end='') captured = capsys.readouterr() assert captured.out == '**********'
def test_api_key_masked_when_passed_via_constructor(capsys: CaptureFixture
    ) ->None:
    """The API key must print as a masked secret, never in clear text."""
    embeddings = LLMRailsEmbeddings(api_key='secret-api-key')
    print(embeddings.api_key, end='')
    assert capsys.readouterr().out == '**********'
null
memory_variables
"""Input keys this memory class will load dynamically.""" return []
@property
def memory_variables(self) ->List[str]:
    """Input keys this memory class will load dynamically.

    Always empty here: this memory contributes no prompt variables.
    """
    return []
Input keys this memory class will load dynamically.
on_llm_error
if self.__has_valid_config is False: return try: self.__track_event('llm', 'error', run_id=str(run_id), parent_run_id= str(parent_run_id) if parent_run_id else None, error={'message': str(error), 'stack': traceback.format_exc()}, app_id=self.__app_id) except Exception as e: logger.error(f'[LLMonitor] An error occurred in on_llm_error: {e}')
def on_llm_error(self, error: BaseException, *, run_id: UUID,
    parent_run_id: Union[UUID, None]=None, **kwargs: Any) ->Any:
    """Forward an LLM error event to the tracker, swallowing tracker failures."""
    if self.__has_valid_config is False:
        return
    try:
        parent_id = str(parent_run_id) if parent_run_id else None
        error_payload = {'message': str(error), 'stack': traceback.format_exc()}
        self.__track_event('llm', 'error', run_id=str(run_id),
            parent_run_id=parent_id, error=error_payload, app_id=self.__app_id)
    except Exception as e:
        # Never let tracking problems propagate into the LLM call path.
        logger.error(f'[LLMonitor] An error occurred in on_llm_error: {e}')
null
run
"""Run request to DataForSEO SERP API and parse result async.""" return self._process_response(self._response_json(url))
def run(self, url: str) ->str:
    """Fetch the DataForSEO SERP API response for ``url`` and parse it."""
    raw_response = self._response_json(url)
    return self._process_response(raw_response)
Run a request to the DataForSEO SERP API and parse the result.
parse
raise NotImplementedError( 'This OutputParser can only be called by the `parse_with_prompt` method.')
def parse(self, completion: str) ->T:
    # Standalone parsing is unsupported; the original prompt is required, so
    # callers must go through `parse_with_prompt` instead.
    raise NotImplementedError(
        'This OutputParser can only be called by the `parse_with_prompt` method.'
        )
null
get_allowed_tools
return None
def get_allowed_tools(self) ->Optional[List[str]]:
    # Always None here — no explicit tool allowlist is declared.
    return None
null
get_steps
return self.steps
def get_steps(self) ->List[Tuple[Step, StepResponse]]:
    # Returns the live list (not a copy) of recorded (step, response) pairs.
    return self.steps
null
test_parsers_public_api_correct
"""Test public API of parsers for breaking changes.""" assert set(__all__) == {'BS4HTMLParser', 'DocAIParser', 'GrobidParser', 'LanguageParser', 'OpenAIWhisperParser', 'PyPDFParser', 'PDFMinerParser', 'PyMuPDFParser', 'PyPDFium2Parser', 'PDFPlumberParser'}
def test_parsers_public_api_correct() ->None:
    """Test public API of parsers for breaking changes."""
    expected_public_names = {'BS4HTMLParser', 'DocAIParser',
        'GrobidParser', 'LanguageParser', 'OpenAIWhisperParser',
        'PyPDFParser', 'PDFMinerParser', 'PyMuPDFParser',
        'PyPDFium2Parser', 'PDFPlumberParser'}
    assert set(__all__) == expected_public_names
Test public API of parsers for breaking changes.
_import_tiledb
from langchain_community.vectorstores.tiledb import TileDB return TileDB
def _import_tiledb() ->Any:
    """Lazily import the TileDB vector store to avoid a hard dependency."""
    from langchain_community.vectorstores.tiledb import TileDB as imported_cls
    return imported_cls
null
test_serialize_llmchain_with_non_serializable_arg
llm = OpenAI(model='davinci', temperature=0.5, openai_api_key='hello', client=NotSerializable) prompt = PromptTemplate.from_template('hello {name}!') chain = LLMChain(llm=llm, prompt=prompt) assert dumps(chain, pretty=True) == snapshot
@pytest.mark.requires('openai')
def test_serialize_llmchain_with_non_serializable_arg(snapshot: Any) ->None:
    """Serialization must tolerate a non-serializable ``client`` argument."""
    model = OpenAI(model='davinci', temperature=0.5, openai_api_key='hello',
        client=NotSerializable)
    template = PromptTemplate.from_template('hello {name}!')
    chain = LLMChain(llm=model, prompt=template)
    assert dumps(chain, pretty=True) == snapshot
null
concatenate_rows
""" Combine message information in a readable format ready to be used. Args: message: Message to be concatenated title: Title of the conversation Returns: Concatenated message """ if not message: return '' sender = message['author']['role'] if message['author'] else 'unknown' text = message['content']['parts'][0] date = datetime.datetime.fromtimestamp(message['create_time']).strftime( '%Y-%m-%d %H:%M:%S') return f'{title} - {sender} on {date}: {text}\n\n'
def concatenate_rows(message: dict, title: str) ->str:
    """
    Combine message information in a readable format ready to be used.

    Args:
        message: Message to be concatenated
        title: Title of the conversation

    Returns:
        Concatenated message
    """
    if not message:
        return ''
    author = message['author']
    # A missing/None author is rendered as "unknown".
    sender = author['role'] if author else 'unknown'
    text = message['content']['parts'][0]
    timestamp = datetime.datetime.fromtimestamp(message['create_time'])
    date = timestamp.strftime('%Y-%m-%d %H:%M:%S')
    return f'{title} - {sender} on {date}: {text}\n\n'
Combine message information in a readable format ready to be used. Args: message: Message to be concatenated title: Title of the conversation Returns: Concatenated message
from_spec_dict
"""Get an OpenAPI spec from a dict.""" return cls.parse_obj(spec_dict)
@classmethod
def from_spec_dict(cls, spec_dict: dict) ->OpenAPISpec:
    """Get an OpenAPI spec from a dict.

    The dict is validated/coerced via ``parse_obj`` (pydantic-style).
    """
    return cls.parse_obj(spec_dict)
Get an OpenAPI spec from a dict.
_import_fireworks
from langchain_community.llms.fireworks import Fireworks return Fireworks
def _import_fireworks() ->Any:
    """Lazily import the Fireworks LLM to avoid a hard dependency."""
    from langchain_community.llms.fireworks import Fireworks as imported_cls
    return imported_cls
null
_prepare_output
parsed_result = _parse_string_eval_output(result[self.output_key]) if RUN_KEY in result: parsed_result[RUN_KEY] = result[RUN_KEY] return parsed_result
def _prepare_output(self, result: dict) ->dict:
    """Normalize the evaluator output, carrying run metadata through."""
    prepared = _parse_string_eval_output(result[self.output_key])
    if RUN_KEY in result:
        # Preserve the run info alongside the parsed verdict.
        prepared[RUN_KEY] = result[RUN_KEY]
    return prepared
null
_process_response
raise NotImplementedError
def _process_response(self, response_json: dict) ->str:
    # Abstract hook: subclasses turn the raw API JSON into display text.
    raise NotImplementedError
null
_get_relevant_documents
response = requests.post(self.datastore_url, json={'query': query, **{ 'topK': self.top_k} if self.top_k is not None else {}}, headers={ 'Content-Type': 'application/json', **{'Authorization': f'Bearer {self.api_key}'} if self.api_key is not None else {}}) data = response.json() return [Document(page_content=r['text'], metadata={'source': r['source'], 'score': r['score']}) for r in data['results']]
def _get_relevant_documents(self, query: str, *, run_manager:
    CallbackManagerForRetrieverRun, **kwargs: Any) ->List[Document]:
    """Query the datastore endpoint and wrap each hit as a ``Document``."""
    request_body = {'query': query}
    if self.top_k is not None:
        request_body['topK'] = self.top_k
    request_headers = {'Content-Type': 'application/json'}
    if self.api_key is not None:
        request_headers['Authorization'] = f'Bearer {self.api_key}'
    response = requests.post(self.datastore_url, json=request_body,
        headers=request_headers)
    payload = response.json()
    return [Document(page_content=hit['text'], metadata={'source': hit[
        'source'], 'score': hit['score']}) for hit in payload['results']]
null
_import_pgembedding
from langchain_community.vectorstores.pgembedding import PGEmbedding return PGEmbedding
def _import_pgembedding() ->Any:
    """Lazily import the PGEmbedding vector store to avoid a hard dependency."""
    from langchain_community.vectorstores.pgembedding import (PGEmbedding as
        imported_cls)
    return imported_cls
null
__call__
generation = self.generate([messages], stop=stop, callbacks=callbacks, **kwargs ).generations[0][0] if isinstance(generation, ChatGeneration): return generation.message else: raise ValueError('Unexpected generation type')
def __call__(self, messages: List[BaseMessage], stop: Optional[List[str]]=
    None, callbacks: Callbacks=None, **kwargs: Any) ->BaseMessage:
    """Generate a single response message for one conversation."""
    result = self.generate([messages], stop=stop, callbacks=callbacks,
        **kwargs)
    generation = result.generations[0][0]
    if not isinstance(generation, ChatGeneration):
        raise ValueError('Unexpected generation type')
    return generation.message
null
_default_params
"""Get the identifying parameters.""" return {'verbose': self.verbose, 'top_p': self.top_p, 'temperature': self. temperature, 'penalty_alpha_frequency': self.penalty_alpha_frequency, 'penalty_alpha_presence': self.penalty_alpha_presence, 'CHUNK_LEN': self.CHUNK_LEN, 'max_tokens_per_generation': self.max_tokens_per_generation }
@property
def _default_params(self) ->Dict[str, Any]:
    """Get the identifying parameters."""
    # Surface each attribute under its own name, preserving this order.
    param_names = ('verbose', 'top_p', 'temperature',
        'penalty_alpha_frequency', 'penalty_alpha_presence', 'CHUNK_LEN',
        'max_tokens_per_generation')
    return {name: getattr(self, name) for name in param_names}
Get the identifying parameters.
wrapper
"""Validate exactly one arg in each group is not None.""" counts = [sum(1 for arg in arg_group if kwargs.get(arg) is not None) for arg_group in arg_groups] invalid_groups = [i for i, count in enumerate(counts) if count != 1] if invalid_groups: invalid_group_names = [', '.join(arg_groups[i]) for i in invalid_groups] raise ValueError( f"Exactly one argument in each of the following groups must be defined: {', '.join(invalid_group_names)}" ) return func(*args, **kwargs)
@functools.wraps(func)
def wrapper(*args: Any, **kwargs: Any) ->Any:
    """Validate exactly one arg in each group is not None."""
    bad_group_names = []
    for group in arg_groups:
        provided = sum(kwargs.get(name) is not None for name in group)
        if provided != 1:
            bad_group_names.append(', '.join(group))
    if bad_group_names:
        raise ValueError(
            f"Exactly one argument in each of the following groups must be defined: {', '.join(bad_group_names)}"
            )
    return func(*args, **kwargs)
Validate exactly one arg in each group is not None.
_convert_message_to_dict
from gigachat.models import Messages, MessagesRole if isinstance(message, SystemMessage): return Messages(role=MessagesRole.SYSTEM, content=message.content) elif isinstance(message, HumanMessage): return Messages(role=MessagesRole.USER, content=message.content) elif isinstance(message, AIMessage): return Messages(role=MessagesRole.ASSISTANT, content=message.content) elif isinstance(message, ChatMessage): return Messages(role=MessagesRole(message.role), content=message.content) else: raise TypeError(f'Got unknown type {message}')
def _convert_message_to_dict(message: BaseMessage) ->Any:
    """Translate a LangChain message into a GigaChat ``Messages`` object."""
    from gigachat.models import Messages, MessagesRole
    # Check concrete types first; generic ChatMessage is the fallback.
    if isinstance(message, SystemMessage):
        role = MessagesRole.SYSTEM
    elif isinstance(message, HumanMessage):
        role = MessagesRole.USER
    elif isinstance(message, AIMessage):
        role = MessagesRole.ASSISTANT
    elif isinstance(message, ChatMessage):
        role = MessagesRole(message.role)
    else:
        raise TypeError(f'Got unknown type {message}')
    return Messages(role=role, content=message.content)
null
_convert_message_to_mistral_chat_message
if isinstance(message, ChatMessage): mistral_message = MistralChatMessage(role=message.role, content=message .content) elif isinstance(message, HumanMessage): mistral_message = MistralChatMessage(role='user', content=message.content) elif isinstance(message, AIMessage): mistral_message = MistralChatMessage(role='assistant', content=message. content) elif isinstance(message, SystemMessage): mistral_message = MistralChatMessage(role='system', content=message.content ) else: raise ValueError(f'Got unknown type {message}') return mistral_message
def _convert_message_to_mistral_chat_message(message: BaseMessage
    ) ->MistralChatMessage:
    """Translate a LangChain message into a Mistral chat message."""
    # ChatMessage carries its own role; the concrete types map to fixed roles.
    if isinstance(message, ChatMessage):
        role = message.role
    elif isinstance(message, HumanMessage):
        role = 'user'
    elif isinstance(message, AIMessage):
        role = 'assistant'
    elif isinstance(message, SystemMessage):
        role = 'system'
    else:
        raise ValueError(f'Got unknown type {message}')
    return MistralChatMessage(role=role, content=message.content)
null
test_gooseai_stop_valid
"""Test gooseai stop logic on valid configuration.""" query = 'write an ordered list of five items' first_llm = GooseAI(stop='3', temperature=0) first_output = first_llm(query) second_llm = GooseAI(temperature=0) second_output = second_llm(query, stop=['3']) assert first_output == second_output
def test_gooseai_stop_valid() ->None:
    """Test gooseai stop logic on valid configuration."""
    query = 'write an ordered list of five items'
    # Stop set at construction time vs. at call time must agree.
    output_with_init_stop = GooseAI(stop='3', temperature=0)(query)
    output_with_call_stop = GooseAI(temperature=0)(query, stop=['3'])
    assert output_with_init_stop == output_with_call_stop
Test gooseai stop logic on valid configuration.
on_text
self.on_text_common()
def on_text(self, *args: Any, **kwargs: Any) ->Any:
    """Handle a text event.

    Accepts and ignores any positional/keyword arguments, then delegates
    to ``on_text_common`` — presumably shared bookkeeping for all text
    callbacks (confirm against the handler base class). Implicitly
    returns None despite the ``Any`` return annotation.
    """
    self.on_text_common()
null
test_narrative_chain
"""Test NarrativeChain decomposes a human's narrative into three story elements: - causal model - intervention model - query model """ narrative = ( 'Jan has three times the number of pets as Marcia. Marcia has two more pets than Cindy. If Cindy has ten pets, how many pets does Jan have? ' ) llm = OpenAI(temperature=0, max_tokens=512) narrative_chain = NarrativeChain.from_univariate_prompt(llm) data = narrative_chain(narrative)[Constant.chain_data.value] self.assertEqual(type(data), NarrativeModel) out = narrative_chain(narrative) expected_narrative_out = {'chain_answer': None, 'chain_data': NarrativeModel(story_outcome_question='how many pets does Jan have?', story_hypothetical='If Cindy has ten pets', story_plot= 'Jan has three times the number of pets as Marcia. Marcia has two more pets than Cindy.' ), 'narrative_input': 'Jan has three times the number of pets as Marcia. Marcia has two more pets than Cindy. If Cindy has ten pets, how many pets does Jan have? ' } self.assertDictEqual(out, expected_narrative_out)
def test_narrative_chain(self) ->None:
    """Test NarrativeChain decomposes a human's narrative into three story
    elements:
        - causal model
        - intervention model
        - query model
    """
    narrative_text = (
        'Jan has three times the number of pets as Marcia. Marcia has two more pets than Cindy. If Cindy has ten pets, how many pets does Jan have? '
        )
    llm = OpenAI(temperature=0, max_tokens=512)
    chain = NarrativeChain.from_univariate_prompt(llm)
    # First call: check the parsed intermediate data type.
    parsed = chain(narrative_text)[Constant.chain_data.value]
    self.assertEqual(type(parsed), NarrativeModel)
    # Second call: check the full chain output against the expected dict.
    result = chain(narrative_text)
    expected = {'chain_answer': None, 'chain_data': NarrativeModel(
        story_outcome_question='how many pets does Jan have?',
        story_hypothetical='If Cindy has ten pets', story_plot=
        'Jan has three times the number of pets as Marcia. Marcia has two more pets than Cindy.'
        ), 'narrative_input':
        'Jan has three times the number of pets as Marcia. Marcia has two more pets than Cindy. If Cindy has ten pets, how many pets does Jan have? '
        }
    self.assertDictEqual(result, expected)
Test NarrativeChain decomposes a human's narrative into three story elements: - causal model - intervention model - query model
on_decision
pass
def on_decision(self) ->None:
    """Hook invoked on decision events; deliberately a no-op here."""
    pass
null
_dumps_generations
""" Serialization for generic RETURN_VAL_TYPE, i.e. sequence of `Generation` Args: generations (RETURN_VAL_TYPE): A list of language model generations. Returns: str: a single string representing a list of generations. This function (+ its counterpart `_loads_generations`) rely on the dumps/loads pair with Reviver, so are able to deal with all subclasses of Generation. Each item in the list can be `dumps`ed to a string, then we make the whole list of strings into a json-dumped. """ return json.dumps([dumps(_item) for _item in generations])
def _dumps_generations(generations: RETURN_VAL_TYPE) ->str:
    """Serialize a sequence of ``Generation`` objects into one string.

    Each generation is individually serialized with ``dumps`` (which,
    together with its Reviver-based counterpart ``_loads_generations``,
    handles all ``Generation`` subclasses); the resulting strings are
    then packed into a single JSON list.

    Args:
        generations (RETURN_VAL_TYPE): A list of language model generations.

    Returns:
        str: a single string representing a list of generations.
    """
    serialized_items = [dumps(generation) for generation in generations]
    return json.dumps(serialized_items)
Serialization for generic RETURN_VAL_TYPE, i.e. sequence of `Generation` Args: generations (RETURN_VAL_TYPE): A list of language model generations. Returns: str: a single string representing a list of generations. This function (+ its counterpart `_loads_generations`) rely on the dumps/loads pair with Reviver, so are able to deal with all subclasses of Generation. Each item in the list can be `dumps`ed to a string, then we make the whole list of strings into a json-dumped.
_llm_type
return 'openllm_client' if self._client else 'openllm'
@property
def _llm_type(self) ->str:
    """Return the LLM type identifier.

    The name reflects whether this instance is backed by a remote client
    (``_client`` set) or not.
    """
    if self._client:
        return 'openllm_client'
    return 'openllm'
null
is_lc_serializable
return True
@classmethod
def is_lc_serializable(cls) ->bool:
    """Signal that this class supports LangChain serialization."""
    return True
null
__init__
super().__init__() self.creds = creds self.n = n self.raise_error = raise_error
def __init__(self, creds: Any, n: int=100, raise_error: bool=False) ->None:
    """Initialize the instance with test configuration.

    Args:
        creds: Opaque credentials object, stored as-is.
        n: Count parameter (default 100); stored for later use.
        raise_error: Whether errors should be raised instead of swallowed;
            stored for later use.
    """
    super().__init__()
    # Keep everything on the instance for later inspection.
    self.creds = creds
    self.n = n
    self.raise_error = raise_error
null
validate_llm_output
"""Validate that the combine chain outputs a dictionary.""" output_parser = values['llm_chain'].prompt.output_parser if not isinstance(output_parser, RegexParser): raise ValueError( f'Output parser of llm_chain should be a RegexParser, got {output_parser}' ) output_keys = output_parser.output_keys if values['rank_key'] not in output_keys: raise ValueError( f"Got {values['rank_key']} as key to rank on, but did not find it in the llm_chain output keys ({output_keys})" ) if values['answer_key'] not in output_keys: raise ValueError( f"Got {values['answer_key']} as key to return, but did not find it in the llm_chain output keys ({output_keys})" ) return values
@root_validator()
def validate_llm_output(cls, values: Dict) ->Dict:
    """Validate that the combine chain outputs a dictionary.

    Ensures the llm_chain's prompt uses a RegexParser and that both the
    rank key and the answer key appear among the parser's output keys.

    Raises:
        ValueError: If the parser type or either key is wrong.
    """
    parser = values['llm_chain'].prompt.output_parser
    if not isinstance(parser, RegexParser):
        raise ValueError(
            f'Output parser of llm_chain should be a RegexParser, got {parser}'
            )
    output_keys = parser.output_keys
    # Both configured keys must be producible by the parser.
    for key_name, action in (('rank_key', 'rank on'), ('answer_key', 'return')
        ):
        if values[key_name] not in output_keys:
            raise ValueError(
                f"Got {values[key_name]} as key to {action}, but did not find it in the llm_chain output keys ({output_keys})"
                )
    return values
Validate that the combine chain outputs a dictionary.
_client_params
"""Get the parameters used for the openai client.""" openai_creds: Dict[str, Any] = {'model': self.model_name} if not is_openai_v1(): openai_creds.update({'api_key': self.openai_api_key, 'api_base': self. openai_api_base, 'organization': self.openai_organization}) if self.openai_proxy: import openai openai.proxy = {'http': self.openai_proxy, 'https': self.openai_proxy} return {**self._default_params, **openai_creds}
@property
def _client_params(self) ->Dict[str, Any]:
    """Get the parameters used for the openai client."""
    creds: Dict[str, Any] = {'model': self.model_name}
    if not is_openai_v1():
        # Pre-v1 SDKs take credentials as per-call parameters.
        creds['api_key'] = self.openai_api_key
        creds['api_base'] = self.openai_api_base
        creds['organization'] = self.openai_organization
    if self.openai_proxy:
        import openai
        # NOTE: module-level side effect — proxy applies process-wide.
        openai.proxy = {'http': self.openai_proxy, 'https': self.openai_proxy}
    return {**self._default_params, **creds}
Get the parameters used for the openai client.
_run
self._commands = commands return self.output
def _run(self, commands: List[str]) ->str:
    """Record the requested commands and return the canned output.

    Args:
        commands: Shell commands as given by the caller; stored verbatim
            so tests can assert on what was requested.

    Returns:
        The preset ``self.output`` value.
    """
    self._commands = commands
    return self.output
null
lc_serializable
return True
@property
def lc_serializable(self) ->bool:
    """Flag this object as serializable by LangChain."""
    return True
null
_create_table_if_not_exists
r = self._client.table().get_schema(self._table_name) if r.status_code <= 299: return if r.status_code != 404: raise Exception( f'Error checking if table exists in Xata: {r.status_code} {r}') r = self._client.table().create(self._table_name) if r.status_code > 299: raise Exception(f'Error creating table in Xata: {r.status_code} {r}') r = self._client.table().set_schema(self._table_name, payload={'columns': [ {'name': 'sessionId', 'type': 'string'}, {'name': 'type', 'type': 'string'}, {'name': 'role', 'type': 'string'}, {'name': 'content', 'type': 'text'}, {'name': 'name', 'type': 'string'}, {'name': 'additionalKwargs', 'type': 'json'}]}) if r.status_code > 299: raise Exception(f'Error setting table schema in Xata: {r.status_code} {r}')
def _create_table_if_not_exists(self) ->None:
    """Ensure the Xata table for chat messages exists with the right schema.

    Checks for the table, creating it and setting its column schema when
    the schema lookup returns 404.

    Raises:
        Exception: If any Xata API call returns an unexpected status code.
    """
    schema_resp = self._client.table().get_schema(self._table_name)
    if schema_resp.status_code <= 299:
        # Table already exists; nothing to do.
        return
    if schema_resp.status_code != 404:
        raise Exception(
            f'Error checking if table exists in Xata: {schema_resp.status_code} {schema_resp}'
            )
    create_resp = self._client.table().create(self._table_name)
    if create_resp.status_code > 299:
        raise Exception(
            f'Error creating table in Xata: {create_resp.status_code} {create_resp}'
            )
    columns = [{'name': 'sessionId', 'type': 'string'}, {'name': 'type',
        'type': 'string'}, {'name': 'role', 'type': 'string'}, {'name':
        'content', 'type': 'text'}, {'name': 'name', 'type': 'string'}, {
        'name': 'additionalKwargs', 'type': 'json'}]
    set_schema_resp = self._client.table().set_schema(self._table_name,
        payload={'columns': columns})
    if set_schema_resp.status_code > 299:
        raise Exception(
            f'Error setting table schema in Xata: {set_schema_resp.status_code} {set_schema_resp}'
            )
null
test_rate_limit
llm = QianfanEmbeddingsEndpoint(model='Embedding-V1', init_kwargs={ 'query_per_second': 2}) assert llm.client._client._rate_limiter._sync_limiter._query_per_second == 2 documents = ['foo', 'bar'] output = llm.embed_documents(documents) assert len(output) == 2 assert len(output[0]) == 384 assert len(output[1]) == 384
def test_rate_limit() ->None:
    """Qianfan embeddings should honor the query_per_second init kwarg."""
    embedder = QianfanEmbeddingsEndpoint(model='Embedding-V1', init_kwargs
        ={'query_per_second': 2})
    limiter = embedder.client._client._rate_limiter._sync_limiter
    assert limiter._query_per_second == 2
    texts = ['foo', 'bar']
    vectors = embedder.embed_documents(texts)
    assert len(vectors) == 2
    # Embedding-V1 returns 384-dimensional vectors.
    assert len(vectors[0]) == 384
    assert len(vectors[1]) == 384
null
similarity_search_by_vector_returning_embeddings
match_documents_params = self.match_args(query, filter) query_builder = self._client.rpc(self.query_name, match_documents_params) if postgrest_filter: query_builder.params = query_builder.params.set('and', f'({postgrest_filter})') query_builder.params = query_builder.params.set('limit', k) res = query_builder.execute() match_result = [(Document(metadata=search.get('metadata', {}), page_content =search.get('content', '')), search.get('similarity', 0.0), np. fromstring(search.get('embedding', '').strip('[]'), np.float32, sep=',' )) for search in res.data if search.get('content')] return match_result
def similarity_search_by_vector_returning_embeddings(self, query: List[
    float], k: int, filter: Optional[Dict[str, Any]]=None, postgrest_filter:
    Optional[str]=None) ->List[Tuple[Document, float, np.ndarray[np.
    float32, Any]]]:
    """Run the match RPC and return (document, similarity, embedding) triples.

    Rows without content are skipped; at most ``k`` rows are requested.
    """
    params = self.match_args(query, filter)
    builder = self._client.rpc(self.query_name, params)
    if postgrest_filter:
        # Extra PostgREST condition ANDed onto the RPC call.
        builder.params = builder.params.set('and', f'({postgrest_filter})')
    builder.params = builder.params.set('limit', k)
    response = builder.execute()
    results: List[Tuple[Document, float, np.ndarray[np.float32, Any]]] = []
    for row in response.data:
        if not row.get('content'):
            continue
        doc = Document(metadata=row.get('metadata', {}), page_content=row
            .get('content', ''))
        # Embedding arrives as a '[x,y,...]' string; parse to float32.
        vector = np.fromstring(row.get('embedding', '').strip('[]'), np.
            float32, sep=',')
        results.append((doc, row.get('similarity', 0.0), vector))
    return results
null
messages
"""Retrieve the messages from Momento. Raises: SdkException: Momento service or network error Exception: Unexpected response Returns: list[BaseMessage]: List of cached messages """ from momento.responses import CacheListFetch fetch_response = self.cache_client.list_fetch(self.cache_name, self.key) if isinstance(fetch_response, CacheListFetch.Hit): items = [json.loads(m) for m in fetch_response.value_list_string] return messages_from_dict(items) elif isinstance(fetch_response, CacheListFetch.Miss): return [] elif isinstance(fetch_response, CacheListFetch.Error): raise fetch_response.inner_exception else: raise Exception(f'Unexpected response: {fetch_response}')
@property
def messages(self) ->list[BaseMessage]:
    """Retrieve the messages from Momento.

    Raises:
        SdkException: Momento service or network error
        Exception: Unexpected response

    Returns:
        list[BaseMessage]: List of cached messages
    """
    from momento.responses import CacheListFetch
    response = self.cache_client.list_fetch(self.cache_name, self.key)
    if isinstance(response, CacheListFetch.Miss):
        # Nothing cached yet for this session.
        return []
    if isinstance(response, CacheListFetch.Hit):
        return messages_from_dict([json.loads(raw) for raw in response.
            value_list_string])
    if isinstance(response, CacheListFetch.Error):
        raise response.inner_exception
    raise Exception(f'Unexpected response: {response}')
Retrieve the messages from Momento. Raises: SdkException: Momento service or network error Exception: Unexpected response Returns: list[BaseMessage]: List of cached messages
init_weaviate
""" cd tests/integration_tests/vectorstores/docker-compose docker compose -f weaviate.yml up """ from docarray import BaseDoc from docarray.index import WeaviateDocumentIndex class WeaviateDoc(BaseDoc): title: str title_embedding: NdArray[32] = Field(is_embedding=True) other_emb: NdArray[32] year: int embeddings = FakeEmbeddings(size=32) dbconfig = WeaviateDocumentIndex.DBConfig(host='http://localhost:8080') weaviate_db = WeaviateDocumentIndex[WeaviateDoc](db_config=dbconfig, index_name='docarray_retriever') weaviate_db.index([WeaviateDoc(title=f'My document {i}', title_embedding=np .array(embeddings.embed_query(f'fake emb {i}')), other_emb=np.array( embeddings.embed_query(f'other fake emb {i}')), year=i) for i in range( 100)]) filter_query = {'path': ['year'], 'operator': 'LessThanEqual', 'valueInt': '90' } yield weaviate_db, filter_query, embeddings weaviate_db._client.schema.delete_all()
@pytest.fixture
def init_weaviate() ->Generator[Tuple[WeaviateDocumentIndex, Dict[str,
    Any], FakeEmbeddings], None, None]:
    """Yield a Weaviate-backed DocArray index seeded with 100 fake documents,
    plus a matching filter query and the embeddings object.

    Requires a local Weaviate instance:
        cd tests/integration_tests/vectorstores/docker-compose
        docker compose -f weaviate.yml up
    """
    from docarray import BaseDoc
    from docarray.index import WeaviateDocumentIndex


    class WeaviateDoc(BaseDoc):
        # Schema for indexed docs; only title_embedding is the search vector.
        title: str
        title_embedding: NdArray[32] = Field(is_embedding=True)
        other_emb: NdArray[32]
        year: int
    embeddings = FakeEmbeddings(size=32)
    dbconfig = WeaviateDocumentIndex.DBConfig(host='http://localhost:8080')
    weaviate_db = WeaviateDocumentIndex[WeaviateDoc](db_config=dbconfig,
        index_name='docarray_retriever')
    # Index 100 synthetic documents with deterministic fake embeddings.
    weaviate_db.index([WeaviateDoc(title=f'My document {i}',
        title_embedding=np.array(embeddings.embed_query(f'fake emb {i}')),
        other_emb=np.array(embeddings.embed_query(f'other fake emb {i}')),
        year=i) for i in range(100)])
    # Weaviate "where" filter matching documents with year <= 90.
    filter_query = {'path': ['year'], 'operator': 'LessThanEqual',
        'valueInt': '90'}
    yield weaviate_db, filter_query, embeddings
    # Teardown: drop all schema classes so state doesn't leak between tests.
    weaviate_db._client.schema.delete_all()
cd tests/integration_tests/vectorstores/docker-compose docker compose -f weaviate.yml up
raw_docs
return [{'_id': '1', 'address': {'building': '1', 'room': '1'}}, {'_id': '2', 'address': {'building': '2', 'room': '2'}}]
@pytest.fixture
def raw_docs() ->List[Dict]:
    """Two sample documents with nested address fields."""
    return [{'_id': str(i), 'address': {'building': str(i), 'room': str(i
        )}} for i in (1, 2)]
null
__init__
super().__init__(persist_path) self.bson = guard_import('bson')
def __init__(self, persist_path: str) ->None:
    """Create the serializer, importing ``bson`` lazily.

    Args:
        persist_path: Filesystem path forwarded to the base serializer.

    ``guard_import`` presumably raises a helpful error if the bson
    package is not installed — confirm against its definition.
    """
    super().__init__(persist_path)
    self.bson = guard_import('bson')
null
__post_init__
self._conn = self.connect() self.create_hnsw_extension() self.create_tables_if_not_exists() self.create_collection()
def __post_init__(self) ->None:
    """Finish dataclass initialization by provisioning the database.

    Opens the connection, then runs the create_* setup steps in order.
    """
    self._conn = self.connect()
    # Extension first — the tables presumably use hnsw indexes (confirm).
    self.create_hnsw_extension()
    self.create_tables_if_not_exists()
    self.create_collection()
null