method_name
stringlengths
1
78
method_body
stringlengths
3
9.66k
full_code
stringlengths
31
10.7k
docstring
stringlengths
4
4.74k
def on_text(self, *args: Any, **kwargs: Any) -> Any:
    """Forward any text event to the shared common handler.

    All positional/keyword arguments are accepted and ignored; the
    common handler takes no arguments.
    """
    self.on_text_common()
def get_output_schema(self, config: Optional[RunnableConfig] = None) -> Type[BaseModel]:
    """Return the chain's output schema.

    When ``return_intermediate_steps`` is enabled the schema gains an
    ``intermediate_steps`` field alongside the usual output key;
    otherwise the parent implementation is used unchanged.
    """
    if not self.return_intermediate_steps:
        return super().get_output_schema(config)
    fields = {
        self.output_key: (str, None),
        'intermediate_steps': (List[str], None),
    }
    return create_model('MapReduceDocumentsOutput', **fields)
def eval_response(callback: BaseFakeCallbackHandler, i: int) -> None:
    """Assert exactly one error was recorded and its response matches step *i*.

    At step 0 no generations exist yet; afterwards the recorded text must
    equal the first *i* characters of the module-level ``message``.
    """
    assert callback.errors == 1
    assert len(callback.errors_args) == 1
    llm_result: LLMResult = callback.errors_args[0]['kwargs']['response']
    if i:
        assert llm_result.generations[0][0].text == message[:i]
    else:
        assert llm_result.generations == []
def add_texts(self, texts: Iterable[str], metadatas: Optional[List[dict]] = None, ids: Optional[List[str]] = None, **kwargs: Any) -> List[str]:
    """Embed each text and insert it into the vectorstore.

    Args:
        texts: Iterable of strings to add to the vectorstore.
        metadatas: Optional list of metadatas associated with the texts.
        ids: Optional list of unique IDs.

    Returns:
        List of ids from adding the texts into the vectorstore.
    """
    vectors = self.embedding.embed_documents(list(texts))
    return self.__add(texts, vectors, metadatas=metadatas, ids=ids, **kwargs)
def _run(self, file_path: str, run_manager: Optional[CallbackManagerForToolRun] = None) -> str:
    """Delete ``file_path`` (relative to the tool's root) and report the outcome.

    Returns a human-readable status string in every case; errors are
    reported as strings rather than raised.
    """
    try:
        resolved = self.get_relative_path(file_path)
    except FileValidationError:
        return INVALID_PATH_TEMPLATE.format(arg_name='file_path', value=file_path)
    if not resolved.exists():
        return f'Error: no such file or directory: {file_path}'
    try:
        os.remove(resolved)
    except Exception as e:
        return 'Error: ' + str(e)
    return f'File deleted successfully: {file_path}.'
def __init__(self, message: str = 'The prompt is unsafe and cannot be processed'):
    """Store the human-readable message and pass it to the base exception."""
    self.message = message
    super().__init__(message)
def first_node(self) -> Optional[Node]:
    """Find the single node that is not a target of any edge.

    If there is no such node, or there are multiple, return None.
    When drawing the graph this node would be the origin.
    """
    targets = {edge.target for edge in self.edges}
    origins = [node for node in self.nodes.values() if node.id not in targets]
    return origins[0] if len(origins) == 1 else None
def clear(self) -> None:
    """Delete every row from the table backing this store.

    The connection's context manager scopes the statement in a
    transaction (commit on success, rollback on error).
    """
    statement = f"""
        DELETE FROM {self.full_table_name}
    """
    with self.conn:
        self.conn.execute(statement)
def test_chain(kv_dataset_name: str, eval_project_name: str, client: Client) -> None:
    """Exercise run_on_dataset failure modes, then a passing run, for an LLMChain."""
    llm = ChatOpenAI(temperature=0)
    chain = LLMChain.from_string(llm, 'The answer to the {question} is: ')

    # QA/criteria evaluators without a reference key must be rejected.
    eval_config = RunEvalConfig(evaluators=[EvaluatorType.QA, EvaluatorType.CRITERIA])
    with pytest.raises(ValueError, match='Must specify reference_key'):
        run_on_dataset(
            dataset_name=kv_dataset_name,
            llm_or_chain_factory=lambda: chain,
            evaluation=eval_config,
            client=client,
        )

    # With a reference key, mismatched example inputs still fail.
    eval_config = RunEvalConfig(
        evaluators=[EvaluatorType.QA, EvaluatorType.CRITERIA],
        reference_key='some_output',
    )
    with pytest.raises(InputFormatError, match='Example inputs do not match chain input keys'):
        run_on_dataset(
            dataset_name=kv_dataset_name,
            llm_or_chain_factory=lambda: chain,
            evaluation=eval_config,
            client=client,
        )

    # A mapper producing the wrong key still fails input validation.
    def input_mapper(d: dict) -> dict:
        return {'input': d['some_input']}

    with pytest.raises(InputFormatError, match=" match the chain's expected input keys."):
        run_on_dataset(
            dataset_name=kv_dataset_name,
            llm_or_chain_factory=lambda: input_mapper | chain,
            client=client,
            evaluation=eval_config,
        )

    # Correctly mapped inputs succeed end-to-end.
    def right_input_mapper(d: dict) -> dict:
        return {'question': d['some_input']}

    run_on_dataset(
        dataset_name=kv_dataset_name,
        llm_or_chain_factory=lambda: right_input_mapper | chain,
        client=client,
        evaluation=eval_config,
        project_name=eval_project_name,
        tags=['shouldpass'],
    )
    _check_all_feedback_passed(eval_project_name, client)
def test_cohere_embedding_query() -> None:
    """Test cohere embeddings."""
    text = 'foo bar'
    result = CohereEmbeddings().embed_query(text)
    # Cohere's default embedding dimensionality.
    assert len(result) == 2048
@root_validator(pre=True)
def set_api_url(cls, values: Dict[str, Any]) -> Dict[str, Any]:
    """Derive ``api_url`` from host/cluster fields when it is not supplied."""
    if 'api_url' not in values:
        host = values['host']
        cluster_id = values['cluster_id']
        port = values['cluster_driver_port']
        values['api_url'] = f'https://{host}/driver-proxy-api/o/0/{cluster_id}/{port}'
    return values
@root_validator(allow_reuse=True)
def validate_environment(cls, values: Dict) -> Dict:
    """Validate that api key and python package exists in environment."""
    api_url = get_from_dict_or_env(values, 'infinity_api_url', 'INFINITY_API_URL')
    values['infinity_api_url'] = api_url
    values['client'] = TinyAsyncOpenAIInfinityEmbeddingClient(host=api_url)
    return values
@property
def _identifying_params(self) -> Mapping[str, Any]:
    """Identifying parameters: ``model_name`` plus the parent's params.

    Parent entries are merged last, so a parent-provided ``model_name``
    (if any) takes precedence, matching the original merge order.
    """
    inherited = super()._identifying_params
    return {'model_name': self.model_name, **inherited}
def test_timescalevector_retriever_search_threshold() -> None:
    """Test using retriever for searching with threshold."""
    texts = ['foo', 'bar', 'baz']
    metadatas = [{'page': str(i)} for i in range(len(texts))]
    store = TimescaleVector.from_texts(
        texts=texts,
        collection_name='test_collection',
        embedding=FakeEmbeddingsWithAdaDimension(),
        metadatas=metadatas,
        service_url=SERVICE_URL,
        pre_delete_collection=True,
    )
    retriever = store.as_retriever(
        search_type='similarity_score_threshold',
        search_kwargs={'k': 3, 'score_threshold': 0.999},
    )
    docs = retriever.get_relevant_documents('summer')
    # Only the two documents above the score threshold are returned.
    assert docs == [
        Document(page_content='foo', metadata={'page': '0'}),
        Document(page_content='bar', metadata={'page': '1'}),
    ]
def similarity_search(self, query: str, k: int = 4, **kwargs: Any) -> List[Document]:
    """Return docs most similar to query.

    Args:
        query: Text to look up documents similar to.
        k: Number of Documents to return. Defaults to 4.

    Returns:
        List of Documents most similar to the query.

    Raises:
        ValueError: if a matched id has no Document in the docstore.
    """
    query_embedding = self.embedding.embed_query(query)
    matches = self.index.search(np.array(query_embedding), k)
    results: List[Document] = []
    for doc_id in matches.keys:
        doc = self.docstore.search(str(doc_id))
        if not isinstance(doc, Document):
            raise ValueError(f'Could not find document for id {doc_id}, got {doc}')
        results.append(doc)
    return results
def test_epsilla_add_texts() -> None:
    """Texts added to Epsilla are retrievable via similarity search."""
    from pyepsilla import vectordb

    db = Epsilla(vectordb.Client(), FakeEmbeddings())
    db.add_texts(fake_texts)
    hits = db.similarity_search(query='foo', k=1)
    assert 'foo' in [doc.page_content for doc in hits]
def test_render_text_description_and_args(tools: List[BaseTool]) -> None:
    """The rendering lists each tool's signature, description, and args schema."""
    rendered = render_text_description_and_args(tools)
    expected = """search: search(query: str) -> str - Lookup things online., args: {'query': {'title': 'Query', 'type': 'string'}}
calculator: calculator(expression: str) -> str - Do math., args: {'expression': {'title': 'Expression', 'type': 'string'}}"""
    assert rendered == expected
def _convert_delta_to_message_chunk(_obj: MistralDeltaMessage, default_class: Type[BaseMessageChunk]) -> BaseMessageChunk:
    """Map a Mistral streaming delta onto the matching message-chunk class.

    The explicit role wins; otherwise the caller-supplied default class
    decides. A non-empty role with no known mapping yields a generic
    ChatMessageChunk carrying that role.
    """
    role = getattr(_obj, 'role')
    content = getattr(_obj, 'content', '')
    if role == 'user' or default_class == HumanMessageChunk:
        return HumanMessageChunk(content=content)
    if role == 'assistant' or default_class == AIMessageChunk:
        return AIMessageChunk(content=content)
    if role == 'system' or default_class == SystemMessageChunk:
        return SystemMessageChunk(content=content)
    if role or default_class == ChatMessageChunk:
        return ChatMessageChunk(content=content, role=role)
    return default_class(content=content)
@property
def OutputType(self) -> Type[Output]:
    """The custom output type when one is set, else the bound runnable's."""
    if self.custom_output_type is not None:
        return cast(Type[Output], self.custom_output_type)
    return self.bound.OutputType
@pytest.mark.requires('faiss')
def test_faiss_with_metadatas() -> None:
    """Test end to end construction and search."""
    texts = ['foo', 'bar', 'baz']
    metadatas = [{'page': i} for i in range(len(texts))]
    docsearch = FAISS.from_texts(texts, FakeEmbeddings(), metadatas=metadatas)
    # Mirror the expected docstore keyed by the index's id mapping.
    expected_docstore = InMemoryDocstore({
        docsearch.index_to_docstore_id[i]: Document(page_content=text, metadata={'page': i})
        for i, text in enumerate(texts)
    })
    assert docsearch.docstore.__dict__ == expected_docstore.__dict__
    output = docsearch.similarity_search('foo', k=1)
    assert output == [Document(page_content='foo', metadata={'page': 0})]
def on_tool_start(self, serialized: Dict[str, Any], input_str: str, *, run_id: UUID, parent_run_id: Union[UUID, None] = None, tags: Union[List[str], None] = None, metadata: Union[Dict[str, Any], None] = None, **kwargs: Any) -> None:
    """Record a tool-start event; tracking failures are logged, never raised."""
    if self.__has_valid_config is False:
        return
    try:
        user_id = _get_user_id(metadata)
        user_props = _get_user_props(metadata)
        self.__track_event(
            'tool',
            'start',
            user_id=user_id,
            run_id=str(run_id),
            parent_run_id=str(parent_run_id) if parent_run_id else None,
            name=serialized.get('name'),
            input=input_str,
            tags=tags,
            metadata=metadata,
            user_props=user_props,
            app_id=self.__app_id,
        )
    except Exception as e:
        logger.error(f'[LLMonitor] An error occurred in on_tool_start: {e}')
@staticmethod
def get_md5(input_string: str) -> str:
    """Return the hex MD5 digest of ``input_string`` (default UTF-8 encoding)."""
    digest = hashlib.md5(input_string.encode())
    return digest.hexdigest()
def delete(self, prompt: str, llm_string: str) -> None:
    """Evict the cached entry for this prompt/llm pair, if one exists."""
    hashed_llm = _hash(llm_string)
    hashed_prompt = _hash(prompt)
    return self.kv_cache.delete(llm_string=hashed_llm, prompt=hashed_prompt)
def build_resource_service(credentials: Optional[Credentials] = None, service_name: str = 'gmail', service_version: str = 'v1') -> Resource:
    """Build a Gmail service.

    Falls back to ``get_gmail_credentials()`` when no credentials are given.
    """
    creds = credentials or get_gmail_credentials()
    builder = import_googleapiclient_resource_builder()
    return builder(service_name, service_version, credentials=creds)
def run_query(query):
    """Execute ``query`` against the database and return its result.

    NOTE(review): ``db`` is presumably bound at module scope — confirm
    against the surrounding file.
    """
    return db.run(query)
def __init__(self, conf: Any, bucket: str, key: str):
    """Initialize with COS config, bucket and key name.

    :param conf(CosConfig): COS config.
    :param bucket(str): COS bucket.
    :param key(str): COS file key.
    """
    self.conf = conf
    self.bucket = bucket
    self.key = key
def order_by(self) -> str:
    """SQL sort direction for this metric: ascending only for Euclidean distance."""
    return 'ASC' if self.value == 'EUCLIDEAN_DIST' else 'DESC'
def _call(self, inputs: Dict[str, Any], run_manager: Optional[CallbackManagerForChainRun] = None) -> Dict[str, str]:
    """Generate Cypher statement, use it to look up in db and answer question."""
    manager = run_manager or CallbackManagerForChainRun.get_noop_manager()
    callbacks = manager.get_child()
    question = inputs[self.input_key]

    generated_cypher = self.cypher_generation_chain.run(
        {'question': question, 'schema': self.graph.get_schema}, callbacks=callbacks
    )
    manager.on_text('Generated Cypher:', end='\n', verbose=self.verbose)
    manager.on_text(generated_cypher, color='green', end='\n', verbose=self.verbose)

    context = self.graph.query(generated_cypher)
    manager.on_text('Full Context:', end='\n', verbose=self.verbose)
    manager.on_text(str(context), color='green', end='\n', verbose=self.verbose)

    result = self.qa_chain({'question': question, 'context': context}, callbacks=callbacks)
    return {self.output_key: result[self.qa_chain.output_key]}
def test_hologres() -> None:
    """Test end to end construction and search."""
    texts = ['foo', 'bar', 'baz']
    store = Hologres.from_texts(
        texts=texts,
        table_name='test_table',
        embedding=FakeEmbeddingsWithAdaDimension(),
        connection_string=CONNECTION_STRING,
        pre_delete_table=True,
    )
    assert store.similarity_search('foo', k=1) == [Document(page_content='foo')]
def test_nltk_text_splitting_args() -> None:
    """A chunk overlap larger than the chunk size must be rejected."""
    with pytest.raises(ValueError):
        NLTKTextSplitter(chunk_size=2, chunk_overlap=4)
def __FunctionDef_helper(self, t, fill_suffix):
    """Emit source for a function-definition AST node *t*.

    ``fill_suffix`` carries the definition keyword ('def'/'async def').
    """
    self.write('\n')
    for decorator in t.decorator_list:
        self.fill('@')
        self.dispatch(decorator)
    self.fill(fill_suffix + ' ' + t.name + '(')
    self.dispatch(t.args)
    self.write(')')
    if t.returns:
        self.write(' -> ')
        self.dispatch(t.returns)
    self.enter()
    self.dispatch(t.body)
    self.leave()
@root_validator(pre=True)
def validate_environment(cls, values: Dict) -> Dict:
    """Validate that api key and python package exists in environment."""
    try:
        import cohere
    except ImportError:
        raise ImportError(
            'Could not import cohere python package. Please install it with `pip install cohere`.'
        )
    cohere_api_key = get_from_dict_or_env(values, 'cohere_api_key', 'COHERE_API_KEY')
    client_name = values.get('user_agent', 'langchain')
    values['client'] = cohere.Client(cohere_api_key, client_name=client_name)
    return values
def add_texts(self, texts: Iterable[str], metadatas: Optional[List[dict]] = None, **kwargs: Any) -> List[str]:
    """Run more texts through the embeddings and add to the vectorstore.

    Args:
        texts (Iterable[str]): Strings to embed and store.
        metadatas (Optional[List[dict]]): Per-text metadata; the raw text
            is always stored under ``self.text_field``.
        kwargs (Any): Supports ``ids`` (List[str]) — explicit item ids;
            uuids are generated when the key is absent.

    Returns:
        List[str]: Ids of the items written to the index.
    """
    from momento.requests.vector_index import Item
    from momento.responses.vector_index import UpsertItemBatch

    texts = list(texts)
    if not texts:
        return []

    # Attach each text to its metadata under the configured field.
    if metadatas is None:
        metadatas = [{self.text_field: text} for text in texts]
    else:
        for metadata, text in zip(metadatas, texts):
            metadata[self.text_field] = text

    try:
        embeddings = self._embedding.embed_documents(texts)
    except NotImplementedError:
        # Fall back to per-query embedding when batch embedding is unsupported.
        embeddings = [self._embedding.embed_query(x) for x in texts]

    if self._ensure_index_exists:
        self._create_index_if_not_exists(len(embeddings[0]))

    # Presence of the 'ids' key (not its truthiness) selects explicit ids,
    # matching the original semantics.
    if 'ids' in kwargs:
        ids = kwargs['ids']
        if len(ids) != len(embeddings):
            raise ValueError('Number of ids must match number of texts')
    else:
        ids = [str(uuid4()) for _ in range(len(embeddings))]

    batch_size = 128
    for start in range(0, len(embeddings), batch_size):
        end = min(start + batch_size, len(embeddings))
        items = [
            Item(id=item_id, vector=vector, metadata=metadata)
            for item_id, vector, metadata in zip(
                ids[start:end], embeddings[start:end], metadatas[start:end]
            )
        ]
        response = self._client.upsert_item_batch(self.index_name, items)
        if isinstance(response, UpsertItemBatch.Success):
            continue
        if isinstance(response, UpsertItemBatch.Error):
            raise response.inner_exception
        raise Exception(f'Unexpected response: {response}')
    return ids
def _do_retrieval(self, low_confidence_spans: List[str], _run_manager: CallbackManagerForChainRun, user_input: str, response: str, initial_response: str) -> Tuple[str, bool]:
    """Generate a follow-up question per uncertain span, then regenerate."""
    question_gen_inputs = [
        {
            'user_input': user_input,
            'current_response': initial_response,
            'uncertain_span': span,
        }
        for span in low_confidence_spans
    ]
    outputs = self.question_generator_chain.apply(
        question_gen_inputs, callbacks=_run_manager.get_child()
    )
    output_key = self.question_generator_chain.output_keys[0]
    questions = [output[output_key] for output in outputs]
    _run_manager.on_text(f'Generated Questions: {questions}', color='yellow', end='\n')
    return self._do_generation(questions, user_input, response, _run_manager)
def load(self) -> List[Document]:
    """Eagerly load every document by draining ``lazy_load``."""
    return list(self.lazy_load())
similarity_search
"""Perform a similarity search with MyScale Args: query (str): query string k (int, optional): Top K neighbors to retrieve. Defaults to 4. where_str (Optional[str], optional): where condition string. Defaults to None. NOTE: Please do not let end-user to fill this and always be aware of SQL injection. When dealing with metadatas, remember to use `{self.metadata_column}.attribute` instead of `attribute` alone. The default name for it is `metadata`. Returns: List[Document]: List of Documents """ return self.similarity_search_by_vector(self._embeddings.embed_query(query), k, where_str, **kwargs)
def similarity_search(self, query: str, k: int=4, where_str: Optional[str]=
    None, **kwargs: Any) ->List[Document]:
    """Search MyScale for documents most similar to ``query``.

    The query text is embedded and the search is delegated to
    :meth:`similarity_search_by_vector`.

    Args:
        query: Text to search for.
        k: Number of neighbors to return. Defaults to 4.
        where_str: Optional SQL ``WHERE`` condition (without the keyword).
            Never let end users fill this in -- it is interpolated verbatim
            and is an SQL-injection risk. Metadata attributes must be
            addressed as ``{self.metadata_column}.attribute`` (the default
            column name is ``metadata``).

    Returns:
        The ``k`` most similar documents.
    """
    query_vector = self._embeddings.embed_query(query)
    return self.similarity_search_by_vector(query_vector, k, where_str,
        **kwargs)
Perform a similarity search with MyScale Args: query (str): query string k (int, optional): Top K neighbors to retrieve. Defaults to 4. where_str (Optional[str], optional): where condition string. Defaults to None. NOTE: Please do not let end-user to fill this and always be aware of SQL injection. When dealing with metadatas, remember to use `{self.metadata_column}.attribute` instead of `attribute` alone. The default name for it is `metadata`. Returns: List[Document]: List of Documents
transform_documents
"""Filter down documents.""" stateful_documents = get_stateful_documents(documents) embedded_documents = _get_embeddings_from_stateful_docs(self.embeddings, stateful_documents) included_idxs = _filter_similar_embeddings(embedded_documents, self. similarity_fn, self.similarity_threshold) return [stateful_documents[i] for i in sorted(included_idxs)]
def transform_documents(self, documents: Sequence[Document], **kwargs: Any
    ) ->Sequence[Document]:
    """Filter documents by embedding similarity.

    Embeds the (stateful) documents, keeps the indices selected by
    ``_filter_similar_embeddings``, and returns the survivors in their
    original order.
    """
    docs_with_state = get_stateful_documents(documents)
    vectors = _get_embeddings_from_stateful_docs(self.embeddings,
        docs_with_state)
    kept_indices = _filter_similar_embeddings(vectors, self.similarity_fn,
        self.similarity_threshold)
    return [docs_with_state[idx] for idx in sorted(kept_indices)]
Filter down documents.
test_trajectory_eval_chain_no_tools
llm = _FakeTrajectoryChatModel(queries={'a': """Trajectory good Score: 5""", 'b': """Trajectory not good Score: 1"""}, sequential_responses=True) chain = TrajectoryEvalChain.from_llm(llm=llm) res = chain.evaluate_agent_trajectory(input='What is your favorite food?', agent_trajectory=intermediate_steps, prediction='I like pie.') assert res['score'] == 1.0 res = chain.evaluate_agent_trajectory(input='What is your favorite food?', agent_trajectory=intermediate_steps, prediction='I like pie.', reference='Paris') assert res['score'] == 0.0
def test_trajectory_eval_chain_no_tools(intermediate_steps: List[Tuple[
    AgentAction, str]]) ->None:
    """Trajectory evaluation works without tools, with and without reference."""
    llm = _FakeTrajectoryChatModel(queries={'a': 'Trajectory good\nScore: 5',
        'b': 'Trajectory not good\nScore: 1'}, sequential_responses=True)
    chain = TrajectoryEvalChain.from_llm(llm=llm)
    common_kwargs = {'input': 'What is your favorite food?',
        'agent_trajectory': intermediate_steps, 'prediction': 'I like pie.'}
    first = chain.evaluate_agent_trajectory(**common_kwargs)
    assert first['score'] == 1.0
    second = chain.evaluate_agent_trajectory(reference='Paris', **common_kwargs)
    assert second['score'] == 0.0
null
on_llm_start
if self._current_thought is None: self._current_thought = LLMThought(parent_container=self. _parent_container, expanded=self._expand_new_thoughts, collapse_on_complete=self._collapse_completed_thoughts, labeler= self._thought_labeler) self._current_thought.on_llm_start(serialized, prompts)
def on_llm_start(self, serialized: Dict[str, Any], prompts: List[str], **
    kwargs: Any) ->None:
    """Open a new LLM thought if none is active, then forward the event to it."""
    if self._current_thought is None:
        new_thought = LLMThought(parent_container=self._parent_container,
            expanded=self._expand_new_thoughts, collapse_on_complete=self.
            _collapse_completed_thoughts, labeler=self._thought_labeler)
        self._current_thought = new_thought
    self._current_thought.on_llm_start(serialized, prompts)
null
_import_openapi_utils_openapi_utils
from langchain_community.tools.openapi.utils.openapi_utils import OpenAPISpec return OpenAPISpec
def _import_openapi_utils_openapi_utils() ->Any:
    """Lazy import hook: resolve ``OpenAPISpec`` on first use."""
    from langchain_community.tools.openapi.utils import openapi_utils
    return openapi_utils.OpenAPISpec
null
add_texts
"""Add list of text along with embeddings to the vector store Args: texts (Iterable[str]): collection of text to add to the database Returns: List of ids for the newly inserted documents """ async def _add_texts(texts: Iterable[str], metadatas: Optional[List[dict]]= None, **kwargs: Any) ->List[str]: await self.initialize() return await self.aadd_texts(texts, metadatas, **kwargs) return asyncio.run(_add_texts(texts, metadatas, **kwargs))
def add_texts(self, texts: Iterable[str], metadatas: Optional[List[dict]]=
    None, **kwargs: Any) ->List[str]:
    """Embed ``texts`` and insert them into the vector store.

    Args:
        texts (Iterable[str]): collection of text to add to the database
        metadatas: Optional metadata dicts, one per text.

    Returns:
        List of ids for the newly inserted documents
    """

    async def _insert() ->List[str]:
        await self.initialize()
        return await self.aadd_texts(texts, metadatas, **kwargs)

    # NOTE: asyncio.run cannot be called from inside a running event loop.
    return asyncio.run(_insert())
Add list of text along with embeddings to the vector store Args: texts (Iterable[str]): collection of text to add to the database Returns: List of ids for the newly inserted documents
test_with_config_callbacks
result = RunnableLambda(lambda x: x).with_config({'callbacks': []}) assert isinstance(result, RunnableBinding)
def test_with_config_callbacks() ->None:
    """Binding a callbacks config wraps the runnable in a RunnableBinding."""
    bound = RunnableLambda(lambda value: value).with_config({'callbacks': []})
    assert isinstance(bound, RunnableBinding)
null
test_max_marginal_relevance_inner_product
texts = ['foo', 'foo', 'fou', 'foy'] vectorstore = AzureCosmosDBVectorSearch.from_texts(texts, azure_openai_embeddings, collection=collection, index_name=INDEX_NAME) vectorstore.create_index(num_lists, dimensions, CosmosDBSimilarityType.IP) sleep(2) query = 'foo' output = vectorstore.max_marginal_relevance_search(query, k=10, lambda_mult=0.1 ) assert len(output) == len(texts) assert output[0].page_content == 'foo' assert output[1].page_content != 'foo' vectorstore.delete_index()
def test_max_marginal_relevance_inner_product(self, azure_openai_embeddings:
    OpenAIEmbeddings, collection: Any) ->None:
    """MMR search under the inner-product metric returns diverse results."""
    corpus = ['foo', 'foo', 'fou', 'foy']
    store = AzureCosmosDBVectorSearch.from_texts(corpus,
        azure_openai_embeddings, collection=collection, index_name=INDEX_NAME)
    store.create_index(num_lists, dimensions, CosmosDBSimilarityType.IP)
    # Give the index a moment to become queryable.
    sleep(2)
    results = store.max_marginal_relevance_search('foo', k=10, lambda_mult=0.1)
    assert len(results) == len(corpus)
    assert results[0].page_content == 'foo'
    # With a low lambda, the second hit should be a diverse (non-'foo') doc.
    assert results[1].page_content != 'foo'
    store.delete_index()
null
_build_query_sql
q_emb_str = ','.join(map(str, q_emb)) if where_str: where_str = f'WHERE {where_str}' else: where_str = '' q_str = f""" SELECT {self.config.column_map['document']}, {self.config.column_map['metadata']}, cosine_similarity_norm(array<float>[{q_emb_str}], {self.config.column_map['embedding']}) as dist FROM {self.config.database}.{self.config.table} {where_str} ORDER BY dist {self.dist_order} LIMIT {topk} """ debug_output(q_str) return q_str
def _build_query_sql(self, q_emb: List[float], topk: int, where_str:
    Optional[str]=None) ->str:
    """Compose the vector-search SQL statement.

    Args:
        q_emb: Query embedding to compare against stored embeddings.
        topk: Maximum number of rows to return.
        where_str: Optional SQL condition (without the ``WHERE`` keyword).
            Interpolated verbatim -- callers must not pass untrusted input
            (SQL-injection risk).

    Returns:
        The full SELECT statement ordered by cosine-similarity distance.
    """
    q_emb_str = ','.join(map(str, q_emb))
    if where_str:
        where_str = f'WHERE {where_str}'
    else:
        where_str = ''
    q_str = f"""
        SELECT {self.config.column_map['document']}, 
            {self.config.column_map['metadata']}, cosine_similarity_norm(array<float>[{q_emb_str}], 
                {self.config.column_map['embedding']}) as dist
        FROM {self.config.database}.{self.config.table}
        {where_str}
        ORDER BY dist {self.dist_order}
        LIMIT {topk}
        """
    debug_output(q_str)
    return q_str
null
similarity_search_with_score_by_vector
with Session(self._conn) as session: collection = self.get_collection(session) set_enable_seqscan_stmt = sqlalchemy.text('SET enable_seqscan = off') session.execute(set_enable_seqscan_stmt) if not collection: raise ValueError('Collection not found') filter_by = EmbeddingStore.collection_id == collection.uuid if filter is not None: filter_clauses = [] for key, value in filter.items(): IN = 'in' if isinstance(value, dict) and IN in map(str.lower, value): value_case_insensitive = {k.lower(): v for k, v in value. items()} filter_by_metadata = EmbeddingStore.cmetadata[key].astext.in_( value_case_insensitive[IN]) filter_clauses.append(filter_by_metadata) elif isinstance(value, dict) and 'substring' in map(str.lower, value): filter_by_metadata = EmbeddingStore.cmetadata[key ].astext.ilike(f"%{value['substring']}%") filter_clauses.append(filter_by_metadata) else: filter_by_metadata = EmbeddingStore.cmetadata[key ].astext == str(value) filter_clauses.append(filter_by_metadata) filter_by = sqlalchemy.and_(filter_by, *filter_clauses) results: List[QueryResult] = session.query(EmbeddingStore, func.abs( EmbeddingStore.embedding.op('<->')(embedding)).label('distance') ).filter(filter_by).order_by(func.abs(EmbeddingStore.embedding.op( '<->')(embedding)).asc()).limit(k).all() docs = [(Document(page_content=result.EmbeddingStore.document, metadata= result.EmbeddingStore.cmetadata), result.distance if self. embedding_function is not None else 0.0) for result in results] return docs
def similarity_search_with_score_by_vector(self, embedding: List[float], k:
    int=4, filter: Optional[dict]=None) ->List[Tuple[Document, float]]:
    """Return up to ``k`` documents closest to ``embedding`` with distances.

    Distance is ``abs(stored <-> embedding)``, sorted ascending. ``filter``
    matches metadata fields: a value of ``{"in": [...]}`` performs a
    membership test, ``{"substring": s}`` a case-insensitive substring
    match, and any other value an exact (stringified) equality check.
    The reported score is 0.0 when no embedding function is configured.
    """
    with Session(self._conn) as session:
        collection = self.get_collection(session)
        # Force index usage by disabling sequential scans for this session.
        set_enable_seqscan_stmt = sqlalchemy.text('SET enable_seqscan = off')
        session.execute(set_enable_seqscan_stmt)
        if not collection:
            raise ValueError('Collection not found')
        filter_by = EmbeddingStore.collection_id == collection.uuid
        if filter is not None:
            filter_clauses = []
            for key, value in filter.items():
                IN = 'in'
                if isinstance(value, dict) and IN in map(str.lower, value):
                    # {"in": [...]}: membership test (operator key is
                    # matched case-insensitively).
                    value_case_insensitive = {k.lower(): v for k, v in
                        value.items()}
                    filter_by_metadata = EmbeddingStore.cmetadata[key
                        ].astext.in_(value_case_insensitive[IN])
                    filter_clauses.append(filter_by_metadata)
                elif isinstance(value, dict) and 'substring' in map(str.
                    lower, value):
                    # {"substring": s}: case-insensitive LIKE match.
                    filter_by_metadata = EmbeddingStore.cmetadata[key
                        ].astext.ilike(f"%{value['substring']}%")
                    filter_clauses.append(filter_by_metadata)
                else:
                    # Plain value: exact equality on the stringified value.
                    filter_by_metadata = EmbeddingStore.cmetadata[key
                        ].astext == str(value)
                    filter_clauses.append(filter_by_metadata)
            filter_by = sqlalchemy.and_(filter_by, *filter_clauses)
        results: List[QueryResult] = session.query(EmbeddingStore, func.abs
            (EmbeddingStore.embedding.op('<->')(embedding)).label('distance')
            ).filter(filter_by).order_by(func.abs(EmbeddingStore.embedding.
            op('<->')(embedding)).asc()).limit(k).all()
    docs = [(Document(page_content=result.EmbeddingStore.document, metadata
        =result.EmbeddingStore.cmetadata), result.distance if self.
        embedding_function is not None else 0.0) for result in results]
    return docs
null
test_non_hub_path
"""Test that a non-hub path returns None.""" path = 'chains/some_path' loader = Mock() valid_suffixes = {'suffix'} result = try_load_from_hub(path, loader, 'chains', valid_suffixes) assert result is None loader.assert_not_called()
def test_non_hub_path() ->None:
    """A path outside the hub returns None without invoking the loader."""
    loader = Mock()
    result = try_load_from_hub('chains/some_path', loader, 'chains',
        {'suffix'})
    assert result is None
    loader.assert_not_called()
Test that a non-hub path returns None.
_patch_config_list
return [self._patch_config(c, rm, retry_state) for c, rm in zip(config, run_manager)]
def _patch_config_list(self, config: List[RunnableConfig], run_manager:
    List['T'], retry_state: RetryCallState) ->List[RunnableConfig]:
    """Patch each config with its positionally paired run manager."""
    patched = []
    for single_config, manager in zip(config, run_manager):
        patched.append(self._patch_config(single_config, manager, retry_state))
    return patched
null
_select_relevance_score_fn
""" The 'correct' relevance function may differ depending on a few things, including: - the distance / similarity metric used by the VectorStore - the scale of your embeddings (OpenAI's are unit normed. Many others are not!) - embedding dimensionality - etc. """ if self.override_relevance_score_fn is not None: return self.override_relevance_score_fn if self._distance_strategy == DistanceStrategy.COSINE: return self._cosine_relevance_score_fn elif self._distance_strategy == DistanceStrategy.EUCLIDEAN_DISTANCE: return self._euclidean_relevance_score_fn elif self._distance_strategy == DistanceStrategy.MAX_INNER_PRODUCT: return self._max_inner_product_relevance_score_fn else: raise ValueError( f'No supported normalization function for distance_strategy of {self._distance_strategy}.Consider providing relevance_score_fn to TimescaleVector constructor.' )
def _select_relevance_score_fn(self) ->Callable[[float], float]: """ The 'correct' relevance function may differ depending on a few things, including: - the distance / similarity metric used by the VectorStore - the scale of your embeddings (OpenAI's are unit normed. Many others are not!) - embedding dimensionality - etc. """ if self.override_relevance_score_fn is not None: return self.override_relevance_score_fn if self._distance_strategy == DistanceStrategy.COSINE: return self._cosine_relevance_score_fn elif self._distance_strategy == DistanceStrategy.EUCLIDEAN_DISTANCE: return self._euclidean_relevance_score_fn elif self._distance_strategy == DistanceStrategy.MAX_INNER_PRODUCT: return self._max_inner_product_relevance_score_fn else: raise ValueError( f'No supported normalization function for distance_strategy of {self._distance_strategy}.Consider providing relevance_score_fn to TimescaleVector constructor.' )
The 'correct' relevance function may differ depending on a few things, including: - the distance / similarity metric used by the VectorStore - the scale of your embeddings (OpenAI's are unit normed. Many others are not!) - embedding dimensionality - etc.
modelname_to_contextsize
"""Calculate the maximum number of tokens possible to generate for a model. Args: modelname: The modelname we want to know the context size for. Returns: The maximum context size Example: .. code-block:: python max_tokens = openai.modelname_to_contextsize("gpt-3.5-turbo-instruct") """ model_token_mapping = {'gpt-4': 8192, 'gpt-4-0314': 8192, 'gpt-4-0613': 8192, 'gpt-4-32k': 32768, 'gpt-4-32k-0314': 32768, 'gpt-4-32k-0613': 32768, 'gpt-3.5-turbo': 4096, 'gpt-3.5-turbo-0301': 4096, 'gpt-3.5-turbo-0613': 4096, 'gpt-3.5-turbo-16k': 16385, 'gpt-3.5-turbo-16k-0613': 16385, 'gpt-3.5-turbo-instruct': 4096, 'text-ada-001': 2049, 'ada': 2049, 'text-babbage-001': 2040, 'babbage': 2049, 'text-curie-001': 2049, 'curie': 2049, 'davinci': 2049, 'text-davinci-003': 4097, 'text-davinci-002': 4097, 'code-davinci-002': 8001, 'code-davinci-001': 8001, 'code-cushman-002': 2048, 'code-cushman-001': 2048} if 'ft-' in modelname: modelname = modelname.split(':')[0] context_size = model_token_mapping.get(modelname, None) if context_size is None: raise ValueError( f'Unknown model: {modelname}. Please provide a valid OpenAI model name.Known models are: ' + ', '.join(model_token_mapping.keys())) return context_size
@staticmethod
def modelname_to_contextsize(modelname: str) ->int:
    """Calculate the maximum number of tokens possible to generate for a model.

    Args:
        modelname: The modelname we want to know the context size for.

    Returns:
        The maximum context size

    Raises:
        ValueError: If ``modelname`` is not a known OpenAI model.

    Example:
        .. code-block:: python

            max_tokens = openai.modelname_to_contextsize("gpt-3.5-turbo-instruct")
    """
    # Context-window sizes per model, per OpenAI's published limits.
    # Fixed: 'text-babbage-001' was 2040 (typo) -- all GPT-3 base models
    # have a 2049-token context window.
    model_token_mapping = {'gpt-4': 8192, 'gpt-4-0314': 8192, 'gpt-4-0613':
        8192, 'gpt-4-32k': 32768, 'gpt-4-32k-0314': 32768,
        'gpt-4-32k-0613': 32768, 'gpt-3.5-turbo': 4096,
        'gpt-3.5-turbo-0301': 4096, 'gpt-3.5-turbo-0613': 4096,
        'gpt-3.5-turbo-16k': 16385, 'gpt-3.5-turbo-16k-0613': 16385,
        'gpt-3.5-turbo-instruct': 4096, 'text-ada-001': 2049, 'ada': 2049,
        'text-babbage-001': 2049, 'babbage': 2049, 'text-curie-001': 2049,
        'curie': 2049, 'davinci': 2049, 'text-davinci-003': 4097,
        'text-davinci-002': 4097, 'code-davinci-002': 8001,
        'code-davinci-001': 8001, 'code-cushman-002': 2048,
        'code-cushman-001': 2048}
    # Fine-tuned models look like "<base>:ft-<org>:..."; map to the base.
    if 'ft-' in modelname:
        modelname = modelname.split(':')[0]
    context_size = model_token_mapping.get(modelname, None)
    if context_size is None:
        # Fixed: missing space after the period in the original message.
        raise ValueError(
            f'Unknown model: {modelname}. Please provide a valid OpenAI model name. Known models are: '
             + ', '.join(model_token_mapping.keys()))
    return context_size
Calculate the maximum number of tokens possible to generate for a model. Args: modelname: The modelname we want to know the context size for. Returns: The maximum context size Example: .. code-block:: python max_tokens = openai.modelname_to_contextsize("gpt-3.5-turbo-instruct")
_on_retriever_error
"""Process the Retriever Run upon error.""" self._submit(self._update_run_single, _copy(run))
def _on_retriever_error(self, run: Run) ->None:
    """Handle an errored Retriever run by submitting an update for a snapshot."""
    run_snapshot = _copy(run)
    self._submit(self._update_run_single, run_snapshot)
Process the Retriever Run upon error.
get_role
"""Get the role of the message. Args: message: The message. Returns: The role of the message. Raises: ValueError: If the message is of an unknown type. """ if isinstance(message, ChatMessage) or isinstance(message, HumanMessage): return 'User' elif isinstance(message, AIMessage): return 'Chatbot' elif isinstance(message, SystemMessage): return 'System' else: raise ValueError(f'Got unknown type {message}')
def get_role(message: BaseMessage) ->str:
    """Get the role of the message.

    Args:
        message: The message.

    Returns:
        The role of the message.

    Raises:
        ValueError: If the message is of an unknown type.
    """
    if isinstance(message, (ChatMessage, HumanMessage)):
        return 'User'
    if isinstance(message, AIMessage):
        return 'Chatbot'
    if isinstance(message, SystemMessage):
        return 'System'
    raise ValueError(f'Got unknown type {message}')
Get the role of the message. Args: message: The message. Returns: The role of the message. Raises: ValueError: If the message is of an unknown type.
_get_properties_from_parameters
"""Get the properties of the operation.""" properties = [] for param in parameters: if APIProperty.is_supported_location(param.param_in): properties.append(APIProperty.from_parameter(param, spec)) elif param.required: raise ValueError(INVALID_LOCATION_TEMPL.format(location=param. param_in, name=param.name)) else: logger.warning(INVALID_LOCATION_TEMPL.format(location=param. param_in, name=param.name) + ' Ignoring optional parameter') pass return properties
@staticmethod
def _get_properties_from_parameters(parameters: List[Parameter], spec:
    OpenAPISpec) ->List[APIProperty]:
    """Collect API properties for every supported-location parameter.

    Raises:
        ValueError: If a required parameter sits in an unsupported location.
    """
    properties = []
    for param in parameters:
        if APIProperty.is_supported_location(param.param_in):
            properties.append(APIProperty.from_parameter(param, spec))
            continue
        message = INVALID_LOCATION_TEMPL.format(location=param.param_in,
            name=param.name)
        if param.required:
            raise ValueError(message)
        # Unsupported but optional: warn and drop the parameter.
        logger.warning(message + ' Ignoring optional parameter')
    return properties
Get the properties of the operation.
_init
if embeddings is not None: self._create_collection(embeddings, metadatas) self._extract_fields() self._create_index() self._create_search_params() self._load()
def _init(self, embeddings: Optional[list]=None, metadatas: Optional[list[
    dict]]=None) ->None:
    # Set up the collection and make it queryable. The collection is only
    # created when seed embeddings are provided; otherwise it is assumed to
    # exist already. NOTE(review): the call order below (fields -> index ->
    # search params -> load) is presumably significant -- confirm before
    # reordering.
    if embeddings is not None:
        self._create_collection(embeddings, metadatas)
    self._extract_fields()
    self._create_index()
    self._create_search_params()
    self._load()
null
test_visit_structured_query_filter_and
query = 'What is the capital of France?' op = Operation(operator=Operator.AND, arguments=[Comparison(comparator= Comparator.EQ, attribute='foo', value=2), Comparison(comparator= Comparator.EQ, attribute='bar', value='baz')]) structured_query = StructuredQuery(query=query, filter=op, limit=None) expected = query, {'filter': [{'bool': {'must': [{'term': {'metadata.foo': 2}}, {'term': {'metadata.bar.keyword': 'baz'}}]}}]} actual = DEFAULT_TRANSLATOR.visit_structured_query(structured_query) assert expected == actual
def test_visit_structured_query_filter_and() ->None:
    """AND of two EQ comparisons translates to an ES bool/must filter."""
    query = 'What is the capital of France?'
    and_filter = Operation(operator=Operator.AND, arguments=[Comparison(
        comparator=Comparator.EQ, attribute='foo', value=2), Comparison(
        comparator=Comparator.EQ, attribute='bar', value='baz')])
    structured = StructuredQuery(query=query, filter=and_filter, limit=None)
    expected_kwargs = {'filter': [{'bool': {'must': [{'term': {
        'metadata.foo': 2}}, {'term': {'metadata.bar.keyword': 'baz'}}]}}]}
    actual = DEFAULT_TRANSLATOR.visit_structured_query(structured)
    assert actual == (query, expected_kwargs)
null
from_texts
if collection is None: raise ValueError("Must provide 'collection' named parameter.") vectorstore = cls(collection, embedding, **kwargs) vectorstore.add_texts(texts, metadatas=metadatas) return vectorstore
@classmethod
def from_texts(cls, texts: List[str], embedding: Embeddings, metadatas:
    Optional[List[dict]]=None, collection: Optional[Collection[
    CosmosDBDocumentType]]=None, **kwargs: Any) ->AzureCosmosDBVectorSearch:
    """Build a vector store from raw texts.

    Raises:
        ValueError: If ``collection`` is not supplied.
    """
    if collection is None:
        raise ValueError("Must provide 'collection' named parameter.")
    store = cls(collection, embedding, **kwargs)
    store.add_texts(texts, metadatas=metadatas)
    return store
null
write_to_gml
import networkx as nx nx.write_gml(self._graph, path)
def write_to_gml(self, path: str) ->None:
    """Serialize the underlying graph to ``path`` in GML format."""
    from networkx import write_gml
    write_gml(self._graph, path)
null
find_attributes
values = {} for [key_index, value_index] in zip(*((iter(attributes),) * 2)): if value_index < 0: continue key = strings[key_index] value = strings[value_index] if key in keys: values[key] = value keys.remove(key) if not keys: return values return values
def find_attributes(attributes: Dict[int, Any], keys: List[str]) ->Dict[str,
    str]:
    """Resolve requested attribute names to their string values.

    Iterates ``attributes`` pairwise as (key_index, value_index) pairs and
    looks both indices up in a ``strings`` table, collecting values for the
    names listed in ``keys``. Returns early once all requested keys are
    found.

    NOTE(review): ``strings`` is not a parameter or local -- it must come
    from enclosing/module scope; confirm it is defined where this function
    lives.
    NOTE(review): ``keys`` is mutated (found entries are removed), so a
    caller-shared list will be emptied as a side effect.
    NOTE(review): the annotation says ``Dict[int, Any]`` but the loop
    consumes ``attributes`` as a flat alternating sequence of its keys --
    verify the intended input shape.
    """
    values = {}
    for [key_index, value_index] in zip(*((iter(attributes),) * 2)):
        # A negative value index marks a missing value; skip the pair.
        if value_index < 0:
            continue
        key = strings[key_index]
        value = strings[value_index]
        if key in keys:
            values[key] = value
            keys.remove(key)
            # Stop early once every requested key has been found.
            if not keys:
                return values
    return values
null
mlqaen_example_to_document
return Document(page_content=decode_to_str(example['context']), metadata={ 'id': decode_to_str(example['id']), 'title': decode_to_str(example[ 'title']), 'question': decode_to_str(example['question']), 'answer': decode_to_str(example['answers']['text'][0])})
def mlqaen_example_to_document(example: dict) ->Document:
    """Convert one MLQA-en dataset record into a ``Document``.

    The context becomes the page content; id, title, question and the first
    answer text are carried as metadata.
    """
    metadata = {'id': decode_to_str(example['id']), 'title': decode_to_str(
        example['title']), 'question': decode_to_str(example['question']),
        'answer': decode_to_str(example['answers']['text'][0])}
    return Document(page_content=decode_to_str(example['context']),
        metadata=metadata)
null
lc_secrets
return {'google_api_key': 'GOOGLE_API_KEY'}
@property
def lc_secrets(self) ->Dict[str, str]:
    """Mapping of the ``google_api_key`` field to its environment variable."""
    secrets = {'google_api_key': 'GOOGLE_API_KEY'}
    return secrets
null
get_lc_namespace
"""Get the namespace of the langchain object.""" return ['langchain', 'schema', 'output_parser']
@classmethod
def get_lc_namespace(cls) ->List[str]:
    """Get the namespace of the langchain object."""
    namespace = ['langchain', 'schema', 'output_parser']
    return namespace
Get the namespace of the langchain object.
_import_file_management_WriteFileTool
from langchain_community.tools.file_management import WriteFileTool return WriteFileTool
def _import_file_management_WriteFileTool() ->Any:
    """Lazy import hook: resolve ``WriteFileTool`` on first use."""
    from langchain_community.tools import file_management
    return file_management.WriteFileTool
null
_select_relevance_score_fn
if self._relevance_score_fn == 'euclidean': return self._euclidean_relevance_score_fn elif self._relevance_score_fn == 'dotProduct': return self._max_inner_product_relevance_score_fn elif self._relevance_score_fn == 'cosine': return self._cosine_relevance_score_fn else: raise NotImplementedError( f'No relevance score function for ${self._relevance_score_fn}')
def _select_relevance_score_fn(self) ->Callable[[float], float]: if self._relevance_score_fn == 'euclidean': return self._euclidean_relevance_score_fn elif self._relevance_score_fn == 'dotProduct': return self._max_inner_product_relevance_score_fn elif self._relevance_score_fn == 'cosine': return self._cosine_relevance_score_fn else: raise NotImplementedError( f'No relevance score function for ${self._relevance_score_fn}')
null
test_requests_put_tool
tool = RequestsPutTool(requests_wrapper=mock_requests_wrapper) input_text = '{"url": "https://example.com", "data": {"key": "value"}}' assert tool.run(input_text) == "put {'key': 'value'}" assert asyncio.run(tool.arun(input_text)) == "aput {'key': 'value'}"
def test_requests_put_tool(mock_requests_wrapper: TextRequestsWrapper) ->None:
    """The PUT tool forwards url/data correctly, both sync and async."""
    tool = RequestsPutTool(requests_wrapper=mock_requests_wrapper)
    payload = '{"url": "https://example.com", "data": {"key": "value"}}'
    assert tool.run(payload) == "put {'key': 'value'}"
    assert asyncio.run(tool.arun(payload)) == "aput {'key': 'value'}"
null
test_annoy
"""Test end to end construction and search.""" texts = ['foo', 'bar', 'baz'] docsearch = Annoy.from_texts(texts, FakeEmbeddings()) index_to_id = docsearch.index_to_docstore_id expected_docstore = InMemoryDocstore({index_to_id[0]: Document(page_content ='foo'), index_to_id[1]: Document(page_content='bar'), index_to_id[2]: Document(page_content='baz')}) assert docsearch.docstore.__dict__ == expected_docstore.__dict__ output = docsearch.similarity_search('foo', k=1) assert output == [Document(page_content='foo')]
def test_annoy() ->None:
    """Test end to end construction and search."""
    corpus = ['foo', 'bar', 'baz']
    store = Annoy.from_texts(corpus, FakeEmbeddings())
    idx_map = store.index_to_docstore_id
    expected_docstore = InMemoryDocstore({idx_map[i]: Document(
        page_content=text) for i, text in enumerate(corpus)})
    assert store.docstore.__dict__ == expected_docstore.__dict__
    hits = store.similarity_search('foo', k=1)
    assert hits == [Document(page_content='foo')]
Test end to end construction and search.
_complete_current_thought
"""Complete the current thought, optionally assigning it a new label. Add it to our _completed_thoughts list. """ thought = self._require_current_thought() thought.complete(final_label) self._completed_thoughts.append(thought) self._current_thought = None
def _complete_current_thought(self, final_label: Optional[str]=None) ->None: """Complete the current thought, optionally assigning it a new label. Add it to our _completed_thoughts list. """ thought = self._require_current_thought() thought.complete(final_label) self._completed_thoughts.append(thought) self._current_thought = None
Complete the current thought, optionally assigning it a new label. Add it to our _completed_thoughts list.
test_non_presigned_loading_fail
mocker.register_uri(requests_mock.ANY, requests_mock.ANY, status_code=200) loader = LakeFSLoader(self.lakefs_access_key, self.lakefs_secret_key, self. endpoint) loader.set_repo(self.repo) loader.set_ref(self.ref) loader.set_path(self.path) with pytest.raises(ValueError): loader.load()
@requests_mock.Mocker()
@pytest.mark.usefixtures('mock_lakefs_client_no_presign_not_local')
def test_non_presigned_loading_fail(self, mocker: Mocker) ->None:
    """Loading without presigned URLs from a non-local server must fail."""
    mocker.register_uri(requests_mock.ANY, requests_mock.ANY, status_code=200)
    lakefs_loader = LakeFSLoader(self.lakefs_access_key,
        self.lakefs_secret_key, self.endpoint)
    lakefs_loader.set_repo(self.repo)
    lakefs_loader.set_ref(self.ref)
    lakefs_loader.set_path(self.path)
    with pytest.raises(ValueError):
        lakefs_loader.load()
null
_run
"""Get the schema for a specific table.""" return ', '.join(self.db.get_usable_table_names())
def _run(self, tool_input: str='', run_manager: Optional[
    CallbackManagerForToolRun]=None) ->str:
    """Return a comma-separated list of the database's usable table names.

    ``tool_input`` is accepted for interface compatibility but ignored.
    """
    return ', '.join(self.db.get_usable_table_names())
Return a comma-separated list of the usable table names in the database.
add_last_line_print
"""Add print statement to the last line if it's missing. Sometimes, the LLM-generated code doesn't have `print(variable_name)`, instead the LLM tries to print the variable only by writing `variable_name` (as you would in REPL, for example). This methods checks the AST of the generated Python code and adds the print statement to the last line if it's missing. """ tree = ast.parse(code) node = tree.body[-1] if isinstance(node, ast.Expr) and isinstance(node.value, ast.Call): if isinstance(node.value.func, ast.Name) and node.value.func.id == 'print': return _unparse(tree) if isinstance(node, ast.Expr): tree.body[-1] = ast.Expr(value=ast.Call(func=ast.Name(id='print', ctx= ast.Load()), args=[node.value], keywords=[])) return _unparse(tree)
def add_last_line_print(code: str) ->str:
    """Wrap the final expression of ``code`` in ``print(...)`` if needed.

    LLM-generated code often ends with a bare expression (REPL style,
    e.g. ``variable_name``) instead of ``print(variable_name)``. This
    inspects the AST and, when the last statement is an expression that is
    not already a ``print`` call, rewraps it in one. Non-expression last
    statements (assignments, loops, ...) are left untouched.
    """
    tree = ast.parse(code)
    last_stmt = tree.body[-1]
    if isinstance(last_stmt, ast.Expr):
        value = last_stmt.value
        already_printing = (isinstance(value, ast.Call) and isinstance(
            value.func, ast.Name) and value.func.id == 'print')
        if not already_printing:
            print_call = ast.Call(func=ast.Name(id='print', ctx=ast.Load()),
                args=[value], keywords=[])
            tree.body[-1] = ast.Expr(value=print_call)
    return _unparse(tree)
Add print statement to the last line if it's missing. Sometimes, the LLM-generated code doesn't have `print(variable_name)`, instead the LLM tries to print the variable only by writing `variable_name` (as you would in REPL, for example). This methods checks the AST of the generated Python code and adds the print statement to the last line if it's missing.
_import_max_compute
from langchain_community.utilities.max_compute import MaxComputeAPIWrapper return MaxComputeAPIWrapper
def _import_max_compute() ->Any:
    """Lazy import hook: resolve ``MaxComputeAPIWrapper`` on first use."""
    from langchain_community.utilities import max_compute
    return max_compute.MaxComputeAPIWrapper
null
embed_documents
"""Embed documents using a YandexGPT embeddings models. Args: texts: The list of texts to embed. Returns: List of embeddings, one for each text. """ return _embed_with_retry(self, texts=texts)
def embed_documents(self, texts: List[str]) ->List[List[float]]:
    """Embed a batch of documents with the YandexGPT embeddings model.

    Args:
        texts: The list of texts to embed.

    Returns:
        One embedding vector per input text.
    """
    embeddings = _embed_with_retry(self, texts=texts)
    return embeddings
Embed documents using a YandexGPT embeddings models. Args: texts: The list of texts to embed. Returns: List of embeddings, one for each text.
check_input_key
"""Check that if memories are of type BaseChatMemory that input keys exist.""" for val in value: if isinstance(val, BaseChatMemory): if val.input_key is None: warnings.warn( f'When using CombinedMemory, input keys should be so the input is known. Was not set on {val}' ) return value
@validator('memories')
def check_input_key(cls, value: List[BaseMemory]) ->List[BaseMemory]:
    """Warn about BaseChatMemory entries that have no ``input_key`` set.

    Without an input key a combined memory cannot tell which chain input a
    sub-memory should record. The list is returned unchanged.
    """
    for val in value:
        # Only chat-style memories carry an input_key worth checking.
        if isinstance(val, BaseChatMemory) and val.input_key is None:
            # Fixed: the original message was missing the word "set"
            # ("input keys should be so the input is known").
            warnings.warn(
                'When using CombinedMemory, input keys should be set so the'
                f' input is known. Was not set on {val}')
    return value
Check that if memories are of type BaseChatMemory that input keys exist.
test_elasticsearch_embedding_query
"""Test Elasticsearch embedding query.""" document = 'foo bar' embedding = ElasticsearchEmbeddings.from_credentials(model_id) output = embedding.embed_query(document) assert len(output) == 768
def test_elasticsearch_embedding_query(model_id: str) ->None:
    """Embedding a single query yields a 768-dimensional vector."""
    text = 'foo bar'
    embedder = ElasticsearchEmbeddings.from_credentials(model_id)
    vector = embedder.embed_query(text)
    assert len(vector) == 768
Test Elasticsearch embedding query.
_import_petals
from langchain_community.llms.petals import Petals return Petals
def _import_petals() ->Any:
    """Lazily import and return the ``Petals`` LLM class."""
    from langchain_community.llms.petals import Petals

    return Petals
null
get_input_schema
map_input_schema = self.mapper.get_input_schema(config) if not map_input_schema.__custom_root_type__: return map_input_schema return super().get_input_schema(config)
def get_input_schema(self, config: Optional[RunnableConfig]=None) ->Type[
    BaseModel]:
    """Return the input schema, preferring the mapper's concrete model.

    Falls back to the parent implementation when the mapper only exposes
    a custom-root-type model.
    """
    schema = self.mapper.get_input_schema(config)
    if schema.__custom_root_type__:
        return super().get_input_schema(config)
    return schema
null
_create_chat_result
generations = [] for res in response['choices']: message = _convert_dict_to_message(res['message']) gen = ChatGeneration(message=message) generations.append(gen) llm_output = {'token_usage': response['usage']} return ChatResult(generations=generations, llm_output=llm_output)
def _create_chat_result(self, response: Mapping[str, Any]) ->ChatResult:
    """Convert a raw provider response mapping into a ChatResult.

    Each entry of ``response['choices']`` becomes one ChatGeneration;
    ``response['usage']`` is surfaced as ``token_usage`` in llm_output.
    """
    generations = [ChatGeneration(message=_convert_dict_to_message(choice[
        'message'])) for choice in response['choices']]
    return ChatResult(generations=generations, llm_output={'token_usage':
        response['usage']})
null
test_two_thoghts
memory = ToTDFSMemory([Thought(text='a', validity=ThoughtValidity. VALID_INTERMEDIATE), Thought(text='b', validity=ThoughtValidity. VALID_INTERMEDIATE)]) self.assertEqual(self.controller(memory), ('a', 'b'))
def test_two_thoghts(self) ->None:
    """Two valid intermediate thoughts should be surfaced in order."""
    thoughts = [Thought(text='a', validity=ThoughtValidity.
        VALID_INTERMEDIATE), Thought(text='b', validity=ThoughtValidity.
        VALID_INTERMEDIATE)]
    memory = ToTDFSMemory(thoughts)
    self.assertEqual(self.controller(memory), ('a', 'b'))
null
setUp
self.builtins_import = builtins.__import__
def setUp(self) ->None:
    # Keep a reference to the real built-in __import__ so tests that patch
    # importing can restore it afterwards (presumably in tearDown — confirm).
    self.builtins_import = builtins.__import__
null
test_eval_chain_requires_references
"""Test loading evaluators.""" fake_llm = FakeLLM(queries={'text': """The meaning of life CORRECT"""}, sequential_responses=True) evaluators = load_evaluators(evaluator_types, llm=fake_llm) for evaluator in evaluators: if not isinstance(evaluator, (StringEvaluator, PairwiseStringEvaluator)): raise ValueError('Evaluator is not a [pairwise]string evaluator') assert evaluator.requires_reference
@pytest.mark.parametrize('evaluator_types', [
    [EvaluatorType.LABELED_CRITERIA],
    [EvaluatorType.LABELED_PAIRWISE_STRING],
    [EvaluatorType.LABELED_SCORE_STRING],
    [EvaluatorType.QA],
    [EvaluatorType.CONTEXT_QA],
    [EvaluatorType.COT_QA],
    [EvaluatorType.COT_QA, EvaluatorType.LABELED_CRITERIA],
    [EvaluatorType.COT_QA, EvaluatorType.LABELED_CRITERIA, EvaluatorType.
        LABELED_PAIRWISE_STRING],
    [EvaluatorType.JSON_EQUALITY],
    [EvaluatorType.EXACT_MATCH, EvaluatorType.REGEX_MATCH]])
def test_eval_chain_requires_references(evaluator_types: List[EvaluatorType]
    ) ->None:
    """Test loading evaluators."""
    fake_llm = FakeLLM(queries={'text': 'The meaning of life\nCORRECT'},
        sequential_responses=True)
    for loaded in load_evaluators(evaluator_types, llm=fake_llm):
        if not isinstance(loaded, (StringEvaluator, PairwiseStringEvaluator)):
            raise ValueError('Evaluator is not a [pairwise]string evaluator')
        assert loaded.requires_reference
Test loading evaluators.
_import_ollama
from langchain_community.llms.ollama import Ollama return Ollama
def _import_ollama() ->Any:
    """Lazily import and return the ``Ollama`` LLM class."""
    from langchain_community.llms.ollama import Ollama

    return Ollama
null
test_too_many_chunks
documents = [f'text-{i}' for i in range(20)] embedding = ErnieEmbeddings(chunk_size=20) with pytest.raises(ValueError): embedding.embed_documents(documents)
def test_too_many_chunks() ->None:
    """Embedding 20 documents with chunk_size=20 must raise ValueError."""
    embedding = ErnieEmbeddings(chunk_size=20)
    docs = [f'text-{idx}' for idx in range(20)]
    with pytest.raises(ValueError):
        embedding.embed_documents(docs)
null
parse
raise NotImplementedError()
def parse(self, text: str) ->Any:
    """Parse ``text``; not implemented here — subclasses must override."""
    raise NotImplementedError()
null
__init__
super().__init__(runnables={key: coerce_to_runnable(r) for key, r in runnables.items()})
def __init__(self, runnables: Mapping[str, Union[Runnable[Any, Output],
    Callable[[Any], Output]]]) ->None:
    """Coerce every mapped value to a Runnable, then delegate to the parent."""
    coerced = {name: coerce_to_runnable(value) for name, value in runnables
        .items()}
    super().__init__(runnables=coerced)
null
test_representation_of_runnables
"""Test representation of runnables.""" runnable = RunnableLambda(lambda x: x * 2) assert repr(runnable) == 'RunnableLambda(lambda x: x * 2)' def f(x: int) ->int: """Return 2.""" return 2 assert repr(RunnableLambda(func=f)) == 'RunnableLambda(f)' async def af(x: int) ->int: """Return 2.""" return 2 assert repr(RunnableLambda(func=f, afunc=af)) == 'RunnableLambda(f)' assert repr(RunnableLambda(lambda x: x + 2) | {'a': RunnableLambda(lambda x: x * 2), 'b': RunnableLambda(lambda x: x * 3)} ) == """RunnableLambda(...) | { a: RunnableLambda(...), b: RunnableLambda(...) }""", 'repr where code string contains multiple lambdas gives up'
@pytest.mark.skipif(sys.version_info < (3, 9), reason=
    'Requires python version >= 3.9 to run.')
def test_representation_of_runnables() ->None:
    """Test representation of runnables."""
    # A single lambda: repr shows the lambda's source text.
    runnable = RunnableLambda(lambda x: x * 2)
    assert repr(runnable) == 'RunnableLambda(lambda x: x * 2)'

    def f(x: int) ->int:
        """Return 2."""
        return 2
    # A named function: repr shows the function name.
    assert repr(RunnableLambda(func=f)) == 'RunnableLambda(f)'

    async def af(x: int) ->int:
        """Return 2."""
        return 2
    # With both sync and async funcs, repr still shows the sync name.
    assert repr(RunnableLambda(func=f, afunc=af)) == 'RunnableLambda(f)'
    # Multiple lambdas in one source span: repr falls back to "(...)".
    assert repr(RunnableLambda(lambda x: x + 2) | {'a': RunnableLambda(lambda
        x: x * 2), 'b': RunnableLambda(lambda x: x * 3)}
        ) == """RunnableLambda(...)
| {
    a: RunnableLambda(...),
    b: RunnableLambda(...)
  }""", 'repr where code string contains multiple lambdas gives up'
Test representation of runnables.
delete
""" Delete records in jaguardb by a list of zero-ids Args: pod (str): name of a Pod ids (List[str]): a list of zid as string Returns: Do not return anything """ podstore = self._pod + '.' + self._store for zid in zids: q = 'delete from ' + podstore + " where zid='" + zid + "'" self.run(q)
def delete(self, zids: List[str], **kwargs: Any) ->None:
    """
    Delete records in jaguardb by a list of zero-ids

    Args:
        pod (str): name of a Pod
        ids (List[str]): a list of zid as string
    Returns:
        Do not return anything
    """
    store = self._pod + '.' + self._store
    for zid in zids:
        # NOTE(review): zid is interpolated directly into the query string;
        # confirm upstream that ids cannot contain quote characters.
        self.run(f"delete from {store} where zid='{zid}'")
Delete records in jaguardb by a list of zero-ids Args: pod (str): name of a Pod ids (List[str]): a list of zid as string Returns: Do not return anything
test_person_with_invalid_kwargs
person = Person(secret='hello') with pytest.raises(TypeError): dumps(person, invalid_kwarg='hello')
def test_person_with_invalid_kwargs() ->None:
    """Serializing with an unknown keyword argument must raise TypeError."""
    subject = Person(secret='hello')
    with pytest.raises(TypeError):
        dumps(subject, invalid_kwarg='hello')
null
test_gooseai_call_fairseq
"""Test valid call to gooseai with fairseq model.""" llm = GooseAI(model_name='fairseq-1-3b', max_tokens=10) output = llm('Say foo:') assert isinstance(output, str)
def test_gooseai_call_fairseq() ->None:
    """Test valid call to gooseai with fairseq model."""
    llm = GooseAI(model_name='fairseq-1-3b', max_tokens=10)
    result = llm('Say foo:')
    assert isinstance(result, str)
Test valid call to gooseai with fairseq model.
test_required_dependencies
"""A test that checks if a new non-optional dependency is being introduced. If this test is triggered, it means that a contributor is trying to introduce a new required dependency. This should be avoided in most situations. """ dependencies = poetry_conf['dependencies'] is_required = {package_name: (isinstance(requirements, str) or not requirements.get('optional', False)) for package_name, requirements in dependencies.items()} required_dependencies = [package_name for package_name, required in is_required.items() if required] assert sorted(required_dependencies) == sorted(['PyYAML', 'SQLAlchemy', 'aiohttp', 'dataclasses-json', 'langchain-core', 'langsmith', 'numpy', 'python', 'requests', 'tenacity']) unrequired_dependencies = [package_name for package_name, required in is_required.items() if not required] in_extras = [dep for group in poetry_conf['extras'].values() for dep in group] assert set(unrequired_dependencies) == set(in_extras)
def test_required_dependencies(poetry_conf: Mapping[str, Any]) ->None:
    """A test that checks if a new non-optional dependency is being introduced.

    If this test is triggered, it means that a contributor is trying to
    introduce a new required dependency. This should be avoided in most
    situations.
    """
    dependencies = poetry_conf['dependencies']
    required = []
    optional = []
    for name, spec in dependencies.items():
        # A plain version string, or a table without optional=true, is required.
        if isinstance(spec, str) or not spec.get('optional', False):
            required.append(name)
        else:
            optional.append(name)
    assert sorted(required) == sorted(['PyYAML', 'SQLAlchemy', 'aiohttp',
        'dataclasses-json', 'langchain-core', 'langsmith', 'numpy',
        'python', 'requests', 'tenacity'])
    # Every optional dependency must be declared in some extras group.
    in_extras = [dep for group in poetry_conf['extras'].values() for dep in
        group]
    assert set(optional) == set(in_extras)
A test that checks if a new non-optional dependency is being introduced. If this test is triggered, it means that a contributor is trying to introduce a new required dependency. This should be avoided in most situations.
test_md_header_text_splitter_fenced_code_block_interleaved
"""Test markdown splitter by header: Interleaved fenced code block.""" markdown_document = f"""# This is a Header {fence} foo # Not a header {other_fence} # Not a header {fence}""" headers_to_split_on = [('#', 'Header 1'), ('##', 'Header 2')] markdown_splitter = MarkdownHeaderTextSplitter(headers_to_split_on= headers_to_split_on) output = markdown_splitter.split_text(markdown_document) expected_output = [Document(page_content= f"""{fence} foo # Not a header {other_fence} # Not a header {fence}""", metadata={'Header 1': 'This is a Header'})] assert output == expected_output
@pytest.mark.parametrize(['fence', 'other_fence'], [('```', '~~~'), ('~~~',
    '```')])
def test_md_header_text_splitter_fenced_code_block_interleaved(fence: str,
    other_fence: str) ->None:
    """Test markdown splitter by header: Interleaved fenced code block."""
    # '# Not a header' lines sit inside the fenced block, so the splitter
    # must not treat them as headers; the other fence style appears inside.
    markdown_document = f"""# This is a Header

{fence}
foo
# Not a header
{other_fence}
# Not a header
{fence}"""
    headers_to_split_on = [('#', 'Header 1'), ('##', 'Header 2')]
    markdown_splitter = MarkdownHeaderTextSplitter(headers_to_split_on=
        headers_to_split_on)
    output = markdown_splitter.split_text(markdown_document)
    # Expect one chunk: the fenced block verbatim, tagged with the real header.
    expected_output = [Document(page_content=
        f"""{fence}
foo
# Not a header
{other_fence}
# Not a header
{fence}""", metadata={'Header 1': 'This is a Header'})]
    assert output == expected_output
Test markdown splitter by header: Interleaved fenced code block.
deprecated_method
"""original doc""" return 'This is a deprecated method.'
# Sample method carrying the @deprecated marker (since 2.0.0, removal 3.0.0);
# the undecorated body simply returns a fixed string.
@deprecated(since='2.0.0', removal='3.0.0')
def deprecated_method(self) ->str:
    """original doc"""
    return 'This is a deprecated method.'
original doc
from_components
"""Takes the object creation out of the constructor. Args: project_id: The GCP project id. region: The default location making the API calls. It must have the same location as the GCS bucket and must be regional. gcs_bucket_name: The location where the vectors will be stored in order for the index to be created. index_id: The id of the created index. endpoint_id: The id of the created endpoint. credentials_path: (Optional) The path of the Google credentials on the local file system. embedding: The :class:`Embeddings` that will be used for embedding the texts. kwargs: Additional keyword arguments to pass to MatchingEngine.__init__(). Returns: A configured MatchingEngine with the texts added to the index. """ gcs_bucket_name = cls._validate_gcs_bucket(gcs_bucket_name) credentials = cls._create_credentials_from_file(credentials_path) index = cls._create_index_by_id(index_id, project_id, region, credentials) endpoint = cls._create_endpoint_by_id(endpoint_id, project_id, region, credentials) gcs_client = cls._get_gcs_client(credentials, project_id) cls._init_aiplatform(project_id, region, gcs_bucket_name, credentials) return cls(project_id=project_id, index=index, endpoint=endpoint, embedding =embedding or cls._get_default_embeddings(), gcs_client=gcs_client, credentials=credentials, gcs_bucket_name=gcs_bucket_name, **kwargs)
@classmethod
def from_components(cls: Type['MatchingEngine'], project_id: str, region:
    str, gcs_bucket_name: str, index_id: str, endpoint_id: str,
    credentials_path: Optional[str]=None, embedding: Optional[Embeddings]=
    None, **kwargs: Any) ->'MatchingEngine':
    """Alternate constructor that wires up a MatchingEngine from ids.

    Args:
        project_id: The GCP project id.
        region: Default location for the API calls; must match the GCS
            bucket's location and be regional.
        gcs_bucket_name: Bucket where the vectors are stored for index
            creation.
        credentials_path: (Optional) path of the Google credentials on the
            local file system.
        index_id: Id of the created index.
        endpoint_id: Id of the created endpoint.
        embedding: The :class:`Embeddings` used to embed the texts; a
            default is used when None.
        kwargs: Extra keyword arguments forwarded to
            ``MatchingEngine.__init__()``.

    Returns:
        A configured MatchingEngine.
    """
    bucket = cls._validate_gcs_bucket(gcs_bucket_name)
    creds = cls._create_credentials_from_file(credentials_path)
    index = cls._create_index_by_id(index_id, project_id, region, creds)
    endpoint = cls._create_endpoint_by_id(endpoint_id, project_id, region,
        creds)
    storage_client = cls._get_gcs_client(creds, project_id)
    cls._init_aiplatform(project_id, region, bucket, creds)
    embedder = embedding or cls._get_default_embeddings()
    return cls(project_id=project_id, index=index, endpoint=endpoint,
        embedding=embedder, gcs_client=storage_client, credentials=creds,
        gcs_bucket_name=bucket, **kwargs)
Takes the object creation out of the constructor. Args: project_id: The GCP project id. region: The default location making the API calls. It must have the same location as the GCS bucket and must be regional. gcs_bucket_name: The location where the vectors will be stored in order for the index to be created. index_id: The id of the created index. endpoint_id: The id of the created endpoint. credentials_path: (Optional) The path of the Google credentials on the local file system. embedding: The :class:`Embeddings` that will be used for embedding the texts. kwargs: Additional keyword arguments to pass to MatchingEngine.__init__(). Returns: A configured MatchingEngine with the texts added to the index.
embed_documents
"""Compute doc embeddings using a HuggingFace transformer model. Args: texts: The list of texts to embed. Returns: List of embeddings, one for each text. """ texts = [t.replace('\n', ' ') for t in texts] embeddings = self.client.encode(texts, **self.encode_kwargs) return embeddings.tolist()
def embed_documents(self, texts: List[str]) ->List[List[float]]:
    """Compute doc embeddings using a HuggingFace transformer model.

    Args:
        texts: The list of texts to embed.

    Returns:
        List of embeddings, one for each text.
    """
    # Newlines are flattened to spaces before encoding.
    cleaned = [text.replace('\n', ' ') for text in texts]
    return self.client.encode(cleaned, **self.encode_kwargs).tolist()
Compute doc embeddings using a HuggingFace transformer model. Args: texts: The list of texts to embed. Returns: List of embeddings, one for each text.
main
"""Main function""" args = get_args() global_imports = {} for file in find_files(args.docs_dir): print(f'Adding links for imports in {file}') file_imports = replace_imports(file) if file_imports: relative_path = os.path.relpath(file, _DOCS_DIR).replace('.mdx', '' ).replace('.md', '') doc_url = f'https://python.langchain.com/docs/{relative_path}' for import_info in file_imports: doc_title = import_info['title'] class_name = import_info['imported'] if class_name not in global_imports: global_imports[class_name] = {} global_imports[class_name][doc_title] = doc_url _JSON_PATH.parent.mkdir(parents=True, exist_ok=True) with _JSON_PATH.open('w') as f: json.dump(global_imports, f)
def main() ->None:
    """Scan the docs tree for imports and write a class -> {title: url}
    index as JSON to ``_JSON_PATH``.
    """
    args = get_args()
    global_imports = {}
    for file in find_files(args.docs_dir):
        print(f'Adding links for imports in {file}')
        file_imports = replace_imports(file)
        if not file_imports:
            continue
        # Strip the docs root and markdown extension to build the page URL.
        relative_path = os.path.relpath(file, _DOCS_DIR).replace('.mdx', ''
            ).replace('.md', '')
        doc_url = f'https://python.langchain.com/docs/{relative_path}'
        for import_info in file_imports:
            doc_title = import_info['title']
            class_name = import_info['imported']
            # setdefault replaces the manual "if not in dict" dance.
            global_imports.setdefault(class_name, {})[doc_title] = doc_url
    _JSON_PATH.parent.mkdir(parents=True, exist_ok=True)
    with _JSON_PATH.open('w') as f:
        json.dump(global_imports, f)
Main function
from_steam_api_wrapper
operations: List[dict] = [{'mode': 'get_games_details', 'name': 'Get Games Details', 'description': STEAM_GET_GAMES_DETAILS}, {'mode': 'get_recommended_games', 'name': 'Get Recommended Games', 'description': STEAM_GET_RECOMMENDED_GAMES}] tools = [SteamWebAPIQueryRun(name=action['name'], description=action[ 'description'], mode=action['mode'], api_wrapper=steam_api_wrapper) for action in operations] return cls(tools=tools)
@classmethod
def from_steam_api_wrapper(cls, steam_api_wrapper: SteamWebAPIWrapper
    ) ->'SteamToolkit':
    """Build a SteamToolkit whose tools all share one API wrapper."""
    specs = [{'mode': 'get_games_details', 'name': 'Get Games Details',
        'description': STEAM_GET_GAMES_DETAILS}, {'mode':
        'get_recommended_games', 'name': 'Get Recommended Games',
        'description': STEAM_GET_RECOMMENDED_GAMES}]
    tools = []
    for spec in specs:
        tools.append(SteamWebAPIQueryRun(name=spec['name'], description=
            spec['description'], mode=spec['mode'], api_wrapper=
            steam_api_wrapper))
    return cls(tools=tools)
null
__init__
"""Initialize with chains to experiment with. Args: chains: list of chains to experiment with. """ for chain in chains: if not isinstance(chain, Chain): raise ValueError( 'ModelLaboratory should now be initialized with Chains. If you want to initialize with LLMs, use the `from_llms` method instead (`ModelLaboratory.from_llms(...)`)' ) if len(chain.input_keys) != 1: raise ValueError( f'Currently only support chains with one input variable, got {chain.input_keys}' ) if len(chain.output_keys) != 1: raise ValueError( f'Currently only support chains with one output variable, got {chain.output_keys}' ) if names is not None: if len(names) != len(chains): raise ValueError('Length of chains does not match length of names.') self.chains = chains chain_range = [str(i) for i in range(len(self.chains))] self.chain_colors = get_color_mapping(chain_range) self.names = names
def __init__(self, chains: Sequence[Chain], names: Optional[List[str]]=None):
    """Initialize with chains to experiment with.

    Args:
        chains: list of chains to experiment with.
    """
    for chain in chains:
        # Guard: only Chain instances with exactly one input and one output
        # variable are supported.
        if not isinstance(chain, Chain):
            raise ValueError(
                'ModelLaboratory should now be initialized with Chains. If you want to initialize with LLMs, use the `from_llms` method instead (`ModelLaboratory.from_llms(...)`)'
                )
        if len(chain.input_keys) != 1:
            raise ValueError(
                f'Currently only support chains with one input variable, got {chain.input_keys}'
                )
        if len(chain.output_keys) != 1:
            raise ValueError(
                f'Currently only support chains with one output variable, got {chain.output_keys}'
                )
    if names is not None and len(names) != len(chains):
        raise ValueError('Length of chains does not match length of names.')
    self.chains = chains
    self.chain_colors = get_color_mapping([str(i) for i in range(len(
        chains))])
    self.names = names
Initialize with chains to experiment with. Args: chains: list of chains to experiment with.
output_keys
"""Return the singular output key. :meta private: """ if not self.return_intermediate_steps: return [self.output_key] else: return [self.output_key, INTERMEDIATE_STEPS_KEY]
@property
def output_keys(self) ->List[str]:
    """Return the singular output key.

    :meta private:
    """
    keys = [self.output_key]
    if self.return_intermediate_steps:
        keys.append(INTERMEDIATE_STEPS_KEY)
    return keys
Return the singular output key. :meta private:
load
"""Revive a LangChain class from a JSON object. Use this if you already have a parsed JSON object, eg. from `json.load` or `orjson.loads`. Args: obj: The object to load. secrets_map: A map of secrets to load. valid_namespaces: A list of additional namespaces (modules) to allow to be deserialized. Returns: Revived LangChain objects. """ reviver = Reviver(secrets_map, valid_namespaces) def _load(obj: Any) ->Any: if isinstance(obj, dict): loaded_obj = {k: _load(v) for k, v in obj.items()} return reviver(loaded_obj) if isinstance(obj, list): return [_load(o) for o in obj] return obj return _load(obj)
def load(obj: Any, *, secrets_map: Optional[Dict[str, str]]=None,
    valid_namespaces: Optional[List[str]]=None) ->Any:
    """Revive a LangChain class from a JSON object.

    Use this if you already have a parsed JSON object, e.g. from
    ``json.load`` or ``orjson.loads``.

    Args:
        obj: The object to load.
        secrets_map: A map of secrets to load.
        valid_namespaces: Additional namespaces (modules) allowed during
            deserialization.

    Returns:
        Revived LangChain objects.
    """
    reviver = Reviver(secrets_map, valid_namespaces)

    def _revive(node: Any) ->Any:
        # Dicts are revived bottom-up: children first, then the dict itself.
        if isinstance(node, dict):
            return reviver({key: _revive(child) for key, child in node.
                items()})
        if isinstance(node, list):
            return [_revive(item) for item in node]
        return node
    return _revive(obj)
Revive a LangChain class from a JSON object. Use this if you already have a parsed JSON object, eg. from `json.load` or `orjson.loads`. Args: obj: The object to load. secrets_map: A map of secrets to load. valid_namespaces: A list of additional namespaces (modules) to allow to be deserialized. Returns: Revived LangChain objects.
create
tiledb_vs, tiledb = dependable_tiledb_import() with tiledb.scope_ctx(ctx_or_config=config): try: tiledb.group_create(index_uri) except tiledb.TileDBError as err: raise err group = tiledb.Group(index_uri, 'w') vector_index_uri = get_vector_index_uri(group.uri) docs_uri = get_documents_array_uri(group.uri) if index_type == 'FLAT': tiledb_vs.flat_index.create(uri=vector_index_uri, dimensions= dimensions, vector_type=vector_type, config=config) elif index_type == 'IVF_FLAT': tiledb_vs.ivf_flat_index.create(uri=vector_index_uri, dimensions= dimensions, vector_type=vector_type, config=config) group.add(vector_index_uri, name=VECTOR_INDEX_NAME) dim = tiledb.Dim(name='id', domain=(0, MAX_UINT64 - 1), dtype=np.dtype( np.uint64)) dom = tiledb.Domain(dim) text_attr = tiledb.Attr(name='text', dtype=np.dtype('U1'), var=True) attrs = [text_attr] if metadatas: metadata_attr = tiledb.Attr(name='metadata', dtype=np.uint8, var=True) attrs.append(metadata_attr) schema = tiledb.ArraySchema(domain=dom, sparse=True, allows_duplicates= False, attrs=attrs) tiledb.Array.create(docs_uri, schema) group.add(docs_uri, name=DOCUMENTS_ARRAY_NAME) group.close()
@classmethod
def create(cls, index_uri: str, index_type: str, dimensions: int,
    vector_type: np.dtype, *, metadatas: bool=True, config: Optional[
    Mapping[str, Any]]=None) ->None:
    """Create a TileDB group at ``index_uri`` holding a vector index plus a
    documents array.

    Args:
        index_uri: URI at which the TileDB group is created.
        index_type: 'FLAT' or 'IVF_FLAT' — selects the vector index flavor.
        dimensions: Dimensionality of the stored vectors.
        vector_type: numpy dtype of the vector components.
        metadatas: When True, also add a variable-length ``metadata``
            attribute to the documents array.
        config: Optional TileDB configuration mapping.
    """
    tiledb_vs, tiledb = dependable_tiledb_import()
    with tiledb.scope_ctx(ctx_or_config=config):
        try:
            tiledb.group_create(index_uri)
        except tiledb.TileDBError as err:
            # Surface group-creation failures to the caller unchanged.
            raise err
        group = tiledb.Group(index_uri, 'w')
        vector_index_uri = get_vector_index_uri(group.uri)
        docs_uri = get_documents_array_uri(group.uri)
        if index_type == 'FLAT':
            tiledb_vs.flat_index.create(uri=vector_index_uri, dimensions=
                dimensions, vector_type=vector_type, config=config)
        elif index_type == 'IVF_FLAT':
            tiledb_vs.ivf_flat_index.create(uri=vector_index_uri,
                dimensions=dimensions, vector_type=vector_type, config=config)
        group.add(vector_index_uri, name=VECTOR_INDEX_NAME)
        # Documents array: sparse, keyed by a uint64 'id' over the full
        # uint64 domain, with no duplicate ids allowed.
        dim = tiledb.Dim(name='id', domain=(0, MAX_UINT64 - 1), dtype=np.dtype(
            np.uint64))
        dom = tiledb.Domain(dim)
        # Variable-length unicode attribute holding the raw document text.
        text_attr = tiledb.Attr(name='text', dtype=np.dtype('U1'), var=True)
        attrs = [text_attr]
        if metadatas:
            # Metadata stored as variable-length uint8 (bytes); presumably
            # serialized by the writer — confirm against the caller.
            metadata_attr = tiledb.Attr(name='metadata', dtype=np.uint8,
                var=True)
            attrs.append(metadata_attr)
        schema = tiledb.ArraySchema(domain=dom, sparse=True,
            allows_duplicates=False, attrs=attrs)
        tiledb.Array.create(docs_uri, schema)
        group.add(docs_uri, name=DOCUMENTS_ARRAY_NAME)
        group.close()
null
pytest_addoption
"""Add custom command line options to pytest.""" parser.addoption('--only-extended', action='store_true', help= 'Only run extended tests. Does not allow skipping any extended tests.') parser.addoption('--only-core', action='store_true', help= 'Only run core tests. Never runs any extended tests.')
def pytest_addoption(parser: Parser) ->None:
    """Add custom command line options to pytest."""
    options = [('--only-extended',
        'Only run extended tests. Does not allow skipping any extended tests.'
        ), ('--only-core',
        'Only run core tests. Never runs any extended tests.')]
    for flag, help_text in options:
        parser.addoption(flag, action='store_true', help=help_text)
Add custom command line options to pytest.
on_llm_error
"""Do nothing when LLM outputs an error.""" pass
def on_llm_error(self, error: BaseException, **kwargs: Any) ->None:
    """No-op handler: deliberately ignore LLM errors."""
    return None
Do nothing when LLM outputs an error.
embed_documents
"""Embed a list of strings. Vertex AI currently sets a max batch size of 5 strings. Args: texts: List[str] The list of strings to embed. batch_size: [int] The batch size of embeddings to send to the model Returns: List of embeddings, one for each text. """ task_type = self.task_type or 'retrieval_document' return self._embed(texts, task_type=task_type)
def embed_documents(self, texts: List[str], batch_size: int=5) ->List[List[
    float]]:
    """Embed a list of strings.
    Vertex AI currently
    sets a max batch size of 5 strings.

    Args:
        texts: List[str] The list of strings to embed.
        batch_size: [int] The batch size of embeddings to send to the model

    Returns:
        List of embeddings, one for each text.
    """
    # NOTE(review): batch_size is accepted but never forwarded to _embed —
    # confirm whether _embed batches internally or this parameter is dead.
    task_type = self.task_type or 'retrieval_document'
    return self._embed(texts, task_type=task_type)
Embed a list of strings. Vertex AI currently sets a max batch size of 5 strings. Args: texts: List[str] The list of strings to embed. batch_size: [int] The batch size of embeddings to send to the model Returns: List of embeddings, one for each text.