method_name
stringlengths
1
78
method_body
stringlengths
3
9.66k
full_code
stringlengths
31
10.7k
docstring
stringlengths
4
4.74k
from_llm_and_tools
"""Construct an agent from an LLM and tools.""" cls._validate_tools(tools) prompt = cls.create_prompt(tools, prefix=prefix, suffix=suffix, format_instructions=format_instructions, input_variables=input_variables) llm_chain = LLMChain(llm=llm, prompt=prompt, callback_manager=callback_manager) tool_names = [tool.name for tool in tools] _output_parser = output_parser or cls._get_default_output_parser() return cls(llm_chain=llm_chain, allowed_tools=tool_names, output_parser= _output_parser, **kwargs)
@classmethod
def from_llm_and_tools(cls, llm: BaseLanguageModel, tools: Sequence[BaseTool],
                       callback_manager: Optional[BaseCallbackManager]=None,
                       output_parser: Optional[AgentOutputParser]=None,
                       prefix: str=PREFIX, suffix: str=SUFFIX,
                       format_instructions: str=FORMAT_INSTRUCTIONS,
                       input_variables: Optional[List[str]]=None,
                       **kwargs: Any) -> Agent:
    """Construct an agent from an LLM and tools."""
    cls._validate_tools(tools)
    names = [tool.name for tool in tools]
    chain = LLMChain(
        llm=llm,
        prompt=cls.create_prompt(
            tools,
            prefix=prefix,
            suffix=suffix,
            format_instructions=format_instructions,
            input_variables=input_variables,
        ),
        callback_manager=callback_manager,
    )
    # Fall back to the class default parser when none is supplied.
    parser = output_parser or cls._get_default_output_parser()
    return cls(llm_chain=chain, allowed_tools=names, output_parser=parser,
               **kwargs)
Construct an agent from an LLM and tools.
__init__
"""Initialize the spacy text splitter.""" super().__init__(**kwargs) self._tokenizer = _make_spacy_pipeline_for_splitting(pipeline, max_length= max_length) self._separator = separator
def __init__(self, separator: str='\n\n', pipeline: str='en_core_web_sm',
             max_length: int=1000000, **kwargs: Any) -> None:
    """Initialize the spacy text splitter."""
    super().__init__(**kwargs)
    # The two assignments are independent; store the separator first.
    self._separator = separator
    self._tokenizer = _make_spacy_pipeline_for_splitting(
        pipeline, max_length=max_length)
Initialize the spacy text splitter.
test_create_lc_store
"""Test that a docstore is created from a base store.""" docstore = create_lc_store(file_store) docstore.mset([('key1', Document(page_content='hello', metadata={'key': 'value'}))]) fetched_doc = cast(Document, docstore.mget(['key1'])[0]) assert fetched_doc.page_content == 'hello' assert fetched_doc.metadata == {'key': 'value'}
def test_create_lc_store(file_store: LocalFileStore) -> None:
    """Test that a docstore is created from a base store."""
    docstore = create_lc_store(file_store)
    doc = Document(page_content='hello', metadata={'key': 'value'})
    docstore.mset([('key1', doc)])
    fetched = cast(Document, docstore.mget(['key1'])[0])
    assert fetched.page_content == 'hello'
    assert fetched.metadata == {'key': 'value'}
Test that a docstore is created from a base store.
_request
res = requests.request(method, url, headers=self.headers, json=query_dict, timeout=self.request_timeout_sec) res.raise_for_status() return res.json()
def _request(self, url: str, method: str='GET',
             query_dict: Optional[Dict[str, Any]]=None) -> Any:
    """Issue an HTTP request and return the decoded JSON response body.

    Args:
        url: Target URL.
        method: HTTP method, defaults to 'GET'.
        query_dict: JSON payload to send; defaults to an empty mapping.

    Raises:
        requests.HTTPError: If the response has an error status code.
    """
    # The original default `query_dict={}` is a mutable default shared
    # across all calls; use a None sentinel instead.
    if query_dict is None:
        query_dict = {}
    res = requests.request(method, url, headers=self.headers,
                           json=query_dict, timeout=self.request_timeout_sec)
    res.raise_for_status()
    return res.json()
null
_llm_type
"""Return type of chat model.""" return 'volc-engine-maas-chat'
@property
def _llm_type(self) -> str:
    """Return type of chat model."""
    # Static identifier string for this chat-model implementation.
    return 'volc-engine-maas-chat'
Return type of chat model.
_has_env_vars
return all(['ASTRA_DB_APPLICATION_TOKEN' in os.environ, 'ASTRA_DB_API_ENDPOINT' in os.environ])
def _has_env_vars() -> bool:
    """Return True iff both Astra DB credential env vars are set."""
    # A generator short-circuits and avoids building an intermediate list.
    required = ('ASTRA_DB_APPLICATION_TOKEN', 'ASTRA_DB_API_ENDPOINT')
    return all(var in os.environ for var in required)
null
has_mul_sub_str
""" Check if a string contains multiple substrings. Args: s: string to check. *args: substrings to check. Returns: True if all substrings are in the string, False otherwise. """ for a in args: if a not in s: return False return True
def has_mul_sub_str(s: str, *args: Any) -> bool:
    """
    Check if a string contains multiple substrings.

    Args:
        s: string to check.
        *args: substrings to check.

    Returns:
        True if all substrings are in the string, False otherwise.
    """
    # all() with a generator short-circuits exactly like the manual loop.
    return all(sub in s for sub in args)
Check if a string contains multiple substrings. Args: s: string to check. *args: substrings to check. Returns: True if all substrings are in the string, False otherwise.
intermediate_steps
return [(AgentAction(tool='Foo', tool_input='Bar', log= 'Star date 2021-06-13: Foo received input: Bar'), 'Baz')]
@pytest.fixture
def intermediate_steps() -> List[Tuple[AgentAction, str]]:
    """Fixture supplying a single (AgentAction, observation) pair."""
    action = AgentAction(
        tool='Foo',
        tool_input='Bar',
        log='Star date 2021-06-13: Foo received input: Bar',
    )
    return [(action, 'Baz')]
null
test_beam_call
"""Test valid call to Beam.""" llm = Beam(model_name='gpt2', name='langchain-gpt2', cpu=8, memory='32Gi', gpu='A10G', python_version='python3.8', python_packages=[ 'diffusers[torch]>=0.10', 'transformers', 'torch', 'pillow', 'accelerate', 'safetensors', 'xformers'], max_length='5') llm._deploy() output = llm._call('Your prompt goes here') assert isinstance(output, str)
def test_beam_call() -> None:
    """Test valid call to Beam."""
    packages = ['diffusers[torch]>=0.10', 'transformers', 'torch', 'pillow',
                'accelerate', 'safetensors', 'xformers']
    llm = Beam(model_name='gpt2', name='langchain-gpt2', cpu=8,
               memory='32Gi', gpu='A10G', python_version='python3.8',
               python_packages=packages, max_length='5')
    llm._deploy()
    assert isinstance(llm._call('Your prompt goes here'), str)
Test valid call to Beam.
test_from_texts_cosine_distance
texts = ['Dogs are tough.', 'Cats have fluff.', 'What is a sandwich?', 'That fence is purple.'] vectorstore = AzureCosmosDBVectorSearch.from_texts(texts, azure_openai_embeddings, collection=collection, index_name=INDEX_NAME) vectorstore.create_index(num_lists, dimensions, similarity_algorithm) sleep(2) output = vectorstore.similarity_search('Sandwich', k=1) assert output[0].page_content == 'What is a sandwich?' vectorstore.delete_index()
def test_from_texts_cosine_distance(self, azure_openai_embeddings:
        OpenAIEmbeddings, collection: Any) -> None:
    """Index sample texts, run a similarity search, then drop the index."""
    texts = ['Dogs are tough.', 'Cats have fluff.', 'What is a sandwich?',
             'That fence is purple.']
    store = AzureCosmosDBVectorSearch.from_texts(
        texts, azure_openai_embeddings, collection=collection,
        index_name=INDEX_NAME)
    store.create_index(num_lists, dimensions, similarity_algorithm)
    sleep(2)  # give the freshly created index time to become queryable
    hits = store.similarity_search('Sandwich', k=1)
    assert hits[0].page_content == 'What is a sandwich?'
    store.delete_index()
null
__init__
self.test_name = test_name self.envvars = envvars self.expected_project_name = expected_project_name
def __init__(self, test_name: str, envvars: Dict[str, str],
             expected_project_name: str):
    """Store one parametrized test-case definition on the instance."""
    self.test_name = test_name
    self.expected_project_name = expected_project_name
    self.envvars = envvars
null
mock_create_project
proj = mock.MagicMock() proj.id = '123' return proj
def mock_create_project(*args: Any, **kwargs: Any) -> Any:
    """Return a stub project object whose id is fixed to '123'."""
    project = mock.MagicMock()
    project.id = '123'
    return project
null
_get_folder_path
"""Get the folder path to save the Documents in. Args: soup: BeautifulSoup4 soup object. Returns: Folder path. """ course_name = soup.find('span', {'id': 'crumb_1'}) if course_name is None: raise ValueError('No course name found.') course_name = course_name.text.strip() course_name_clean = unquote(course_name).replace(' ', '_').replace('/', '_' ).replace(':', '_').replace(',', '_').replace('?', '_').replace("'", '_' ).replace('!', '_').replace('"', '_') folder_path = Path('.') / course_name_clean return str(folder_path)
def _get_folder_path(self, soup: Any) -> str:
    """Get the folder path to save the Documents in.

    Args:
        soup: BeautifulSoup4 soup object.

    Returns:
        Folder path.

    Raises:
        ValueError: If no course-name breadcrumb is found in the page.
    """
    course_name = soup.find('span', {'id': 'crumb_1'})
    if course_name is None:
        raise ValueError('No course name found.')
    course_name = unquote(course_name.text.strip())
    # One str.translate pass replaces the chain of eight .replace() calls;
    # every listed character maps to '_' exactly as before.
    sanitized = course_name.translate(
        str.maketrans({c: '_' for c in ' /:,?\'!"'}))
    return str(Path('.') / sanitized)
Get the folder path to save the Documents in. Args: soup: BeautifulSoup4 soup object. Returns: Folder path.
_import_edenai_EdenAiTextModerationTool
from langchain_community.tools.edenai import EdenAiTextModerationTool return EdenAiTextModerationTool
def _import_edenai_EdenAiTextModerationTool() -> Any:
    """Lazily import and return the EdenAiTextModerationTool class."""
    # Deferred import keeps langchain_community optional at module load.
    from langchain_community.tools.edenai import EdenAiTextModerationTool

    return EdenAiTextModerationTool
null
__init__
"""Initialize with AwaDB client. If table_name is not specified, a random table name of `_DEFAULT_TABLE_NAME + last segment of uuid` would be created automatically. Args: table_name: Name of the table created, default _DEFAULT_TABLE_NAME. embedding: Optional Embeddings initially set. log_and_data_dir: Optional the root directory of log and data. client: Optional AwaDB client. kwargs: Any possible extend parameters in the future. Returns: None. """ try: import awadb except ImportError: raise ImportError( 'Could not import awadb python package. Please install it with `pip install awadb`.' ) if client is not None: self.awadb_client = client elif log_and_data_dir is not None: self.awadb_client = awadb.Client(log_and_data_dir) else: self.awadb_client = awadb.Client() if table_name == self._DEFAULT_TABLE_NAME: table_name += '_' table_name += str(uuid.uuid4()).split('-')[-1] self.awadb_client.Create(table_name) self.table2embeddings: dict[str, Embeddings] = {} if embedding is not None: self.table2embeddings[table_name] = embedding self.using_table_name = table_name
def __init__(self, table_name: str=_DEFAULT_TABLE_NAME,
             embedding: Optional[Embeddings]=None,
             log_and_data_dir: Optional[str]=None,
             client: Optional[awadb.Client]=None, **kwargs: Any) -> None:
    """Initialize with AwaDB client.
    If table_name is not specified, a random table name of
    `_DEFAULT_TABLE_NAME + last segment of uuid` would be created
    automatically.

    Args:
        table_name: Name of the table created, default _DEFAULT_TABLE_NAME.
        embedding: Optional Embeddings initially set.
        log_and_data_dir: Optional the root directory of log and data.
        client: Optional AwaDB client.
        kwargs: Any possible extend parameters in the future.

    Returns:
        None.
    """
    try:
        import awadb
    except ImportError:
        raise ImportError(
            'Could not import awadb python package. Please install it with `pip install awadb`.'
            )
    # Prefer an explicitly supplied client; otherwise construct one,
    # optionally rooted at the given log/data directory.
    if client is not None:
        self.awadb_client = client
    elif log_and_data_dir is not None:
        self.awadb_client = awadb.Client(log_and_data_dir)
    else:
        self.awadb_client = awadb.Client()
    # The default table name gets a random uuid suffix so repeated
    # default-named constructions do not collide.
    if table_name == self._DEFAULT_TABLE_NAME:
        table_name += '_'
        table_name += str(uuid.uuid4()).split('-')[-1]
    self.awadb_client.Create(table_name)
    # Map of table name -> embedding function used for that table.
    self.table2embeddings: dict[str, Embeddings] = {}
    if embedding is not None:
        self.table2embeddings[table_name] = embedding
    self.using_table_name = table_name
Initialize with AwaDB client. If table_name is not specified, a random table name of `_DEFAULT_TABLE_NAME + last segment of uuid` would be created automatically. Args: table_name: Name of the table created, default _DEFAULT_TABLE_NAME. embedding: Optional Embeddings initially set. log_and_data_dir: Optional the root directory of log and data. client: Optional AwaDB client. kwargs: Any possible extend parameters in the future. Returns: None.
on_agent_action
"""Run on agent action.""" self.step += 1 self.tool_starts += 1 self.starts += 1 resp: Dict[str, Any] = {} resp.update({'action': 'on_agent_action', 'tool': action.tool, 'tool_input': action.tool_input, 'log': action.log}) resp.update(self.get_custom_callback_meta()) self.deck.append(self.markdown_renderer().to_html('### Agent Action')) self.deck.append(self.table_renderer().to_html(self.pandas.DataFrame([resp] )) + '\n')
def on_agent_action(self, action: AgentAction, **kwargs: Any) -> Any:
    """Run on agent action."""
    self.step += 1
    self.tool_starts += 1
    self.starts += 1
    resp: Dict[str, Any] = {
        'action': 'on_agent_action',
        'tool': action.tool,
        'tool_input': action.tool_input,
        'log': action.log,
    }
    resp.update(self.get_custom_callback_meta())
    # Render a heading plus a one-row table of the action details.
    self.deck.append(self.markdown_renderer().to_html('### Agent Action'))
    frame = self.pandas.DataFrame([resp])
    self.deck.append(self.table_renderer().to_html(frame) + '\n')
Run on agent action.
get_format_instructions
return self.parser.get_format_instructions()
def get_format_instructions(self) -> str:
    # Pure delegation to the wrapped parser's format instructions.
    return self.parser.get_format_instructions()
null
test_csv_loader_load_single_column_file
file_path = self._get_csv_file_path('test_one_col.csv') expected_docs = [Document(page_content='column1: value1', metadata={ 'source': file_path, 'row': 0}), Document(page_content= 'column1: value2', metadata={'source': file_path, 'row': 1}), Document( page_content='column1: value3', metadata={'source': file_path, 'row': 2})] loader = CSVLoader(file_path=file_path) result = loader.load() assert result == expected_docs
def test_csv_loader_load_single_column_file(self) -> None:
    """Each row of a one-column CSV loads as its own Document."""
    file_path = self._get_csv_file_path('test_one_col.csv')
    expected_docs = [
        Document(page_content=f'column1: value{i + 1}',
                 metadata={'source': file_path, 'row': i})
        for i in range(3)
    ]
    loader = CSVLoader(file_path=file_path)
    assert loader.load() == expected_docs
null
test_add_texts_with_given_uuids
texts = ['foo', 'bar', 'baz'] embedding = FakeEmbeddings() uuids = [uuid.uuid5(uuid.NAMESPACE_DNS, text) for text in texts] docsearch = Weaviate.from_texts(texts, embedding=embedding, weaviate_url= weaviate_url, uuids=uuids) docsearch.add_texts(['foo'], uuids=[uuids[0]]) output = docsearch.similarity_search_by_vector(embedding.embed_query('foo'), k=2) assert output[0] == Document(page_content='foo') assert output[1] != Document(page_content='foo')
def test_add_texts_with_given_uuids(self, weaviate_url: str) -> None:
    """Add texts under caller-supplied UUIDs and check search results."""
    texts = ['foo', 'bar', 'baz']
    embedding = FakeEmbeddings()
    uuids = [uuid.uuid5(uuid.NAMESPACE_DNS, t) for t in texts]
    docsearch = Weaviate.from_texts(texts, embedding=embedding,
                                    weaviate_url=weaviate_url, uuids=uuids)
    # Re-use the first UUID when adding 'foo' again.
    docsearch.add_texts(['foo'], uuids=[uuids[0]])
    query_vec = embedding.embed_query('foo')
    output = docsearch.similarity_search_by_vector(query_vec, k=2)
    assert output[0] == Document(page_content='foo')
    assert output[1] != Document(page_content='foo')
null
parse
"""Parse into a plan."""
@abstractmethod
def parse(self, text: str) -> Plan:
    """Parse into a plan.

    Args:
        text: The text to parse.

    Returns:
        The Plan extracted from the text.
    """
Parse into a plan.
test_add_embeddings
""" Test add_embeddings, which accepts pre-built embeddings instead of using inference for the texts. This allows you to separate the embeddings text and the page_content for better proximity between user's question and embedded text. For example, your embedding text can be a question, whereas page_content is the answer. """ embeddings = ConsistentFakeEmbeddings() text_input = ['foo1', 'foo2', 'foo3'] metadatas = [{'page': i} for i in range(len(text_input))] """In real use case, embedding_input can be questions for each text""" embedding_input = ['foo2', 'foo3', 'foo1'] embedding_vectors = embeddings.embed_documents(embedding_input) docsearch = OpenSearchVectorSearch.from_texts(['filler'], embeddings, opensearch_url=DEFAULT_OPENSEARCH_URL) docsearch.add_embeddings(list(zip(text_input, embedding_vectors)), metadatas) output = docsearch.similarity_search('foo1', k=1) assert output == [Document(page_content='foo3', metadata={'page': 2})]
def test_add_embeddings() -> None:
    """
    Test add_embeddings, which accepts pre-built embeddings instead of using
    inference for the texts. This allows you to separate the embeddings text
    and the page_content for better proximity between user's question and
    embedded text. For example, your embedding text can be a question, whereas
    page_content is the answer.
    """
    embeddings = ConsistentFakeEmbeddings()
    text_input = ['foo1', 'foo2', 'foo3']
    metadatas = [{'page': i} for i in range(len(text_input))]
    # In a real use case, embedding_input can be questions for each text.
    # (Was a bare string statement, i.e. a no-op expression; a comment is
    # the correct construct here.)
    embedding_input = ['foo2', 'foo3', 'foo1']
    embedding_vectors = embeddings.embed_documents(embedding_input)
    docsearch = OpenSearchVectorSearch.from_texts(
        ['filler'], embeddings, opensearch_url=DEFAULT_OPENSEARCH_URL)
    docsearch.add_embeddings(list(zip(text_input, embedding_vectors)),
                             metadatas)
    output = docsearch.similarity_search('foo1', k=1)
    assert output == [Document(page_content='foo3', metadata={'page': 2})]
Test add_embeddings, which accepts pre-built embeddings instead of using inference for the texts. This allows you to separate the embeddings text and the page_content for better proximity between user's question and embedded text. For example, your embedding text can be a question, whereas page_content is the answer.
validate_environment
"""Validate that api key and python package exists in environment.""" values['openai_api_key'] = get_from_dict_or_env(values, 'openai_api_key', 'OPENAI_API_KEY') values['openai_organization'] = values['openai_organization'] or os.getenv( 'OPENAI_ORG_ID') or os.getenv('OPENAI_ORGANIZATION') or None values['openai_api_base'] = values['openai_api_base'] or os.getenv( 'OPENAI_API_BASE') values['openai_proxy'] = get_from_dict_or_env(values, 'openai_proxy', 'OPENAI_PROXY', default='') try: import openai except ImportError: raise ImportError( 'Could not import openai python package. Please install it with `pip install openai`.' ) if is_openai_v1(): client_params = {'api_key': values['openai_api_key'], 'organization': values['openai_organization'], 'base_url': values['openai_api_base' ], 'timeout': values['request_timeout'], 'max_retries': values[ 'max_retries'], 'default_headers': values['default_headers'], 'default_query': values['default_query'], 'http_client': values[ 'http_client']} if not values.get('client'): values['client'] = openai.OpenAI(**client_params).images if not values.get('async_client'): values['async_client'] = openai.AsyncOpenAI(**client_params).images elif not values.get('client'): values['client'] = openai.Image else: pass return values
@root_validator()
def validate_environment(cls, values: Dict) -> Dict:
    """Validate that api key and python package exists in environment."""
    values['openai_api_key'] = get_from_dict_or_env(values,
        'openai_api_key', 'OPENAI_API_KEY')
    # Organization and base URL are optional; fall back to env vars.
    values['openai_organization'] = values['openai_organization'] or os.getenv(
        'OPENAI_ORG_ID') or os.getenv('OPENAI_ORGANIZATION') or None
    values['openai_api_base'] = values['openai_api_base'] or os.getenv(
        'OPENAI_API_BASE')
    values['openai_proxy'] = get_from_dict_or_env(values, 'openai_proxy',
        'OPENAI_PROXY', default='')
    try:
        import openai
    except ImportError:
        raise ImportError(
            'Could not import openai python package. Please install it with `pip install openai`.'
            )
    if is_openai_v1():
        # openai>=1.0: build explicit sync/async clients scoped to the
        # images resource; only fill in clients the caller didn't supply.
        client_params = {'api_key': values['openai_api_key'],
            'organization': values['openai_organization'], 'base_url':
            values['openai_api_base'], 'timeout': values[
            'request_timeout'], 'max_retries': values['max_retries'],
            'default_headers': values['default_headers'], 'default_query':
            values['default_query'], 'http_client': values['http_client']}
        if not values.get('client'):
            values['client'] = openai.OpenAI(**client_params).images
        if not values.get('async_client'):
            values['async_client'] = openai.AsyncOpenAI(**client_params).images
    elif not values.get('client'):
        # Legacy (<1.0) SDK exposes a module-level Image resource.
        values['client'] = openai.Image
    else:
        pass
    return values
Validate that api key and python package exists in environment.
getInternalTx
url = ( f'https://api.etherscan.io/api?module=account&action=txlistinternal&address={self.account_address}&startblock={self.start_block}&endblock={self.end_block}&page={self.page}&offset={self.offset}&sort={self.sort}&apikey={self.api_key}' ) try: response = requests.get(url) response.raise_for_status() except requests.exceptions.RequestException as e: print('Error occurred while making the request:', e) items = response.json()['result'] result = [] if len(items) == 0: return [Document(page_content='')] for item in items: content = str(item) metadata = {'from': item['from'], 'tx_hash': item['hash'], 'to': item['to'] } result.append(Document(page_content=content, metadata=metadata)) return result
def getInternalTx(self) -> List[Document]:
    """Fetch internal transactions for the account from the Etherscan API.

    Returns:
        One Document per transaction, or a single empty Document when the
        API reports no results.

    Raises:
        requests.exceptions.RequestException: If the HTTP request fails.
    """
    url = (
        f'https://api.etherscan.io/api?module=account&action=txlistinternal&address={self.account_address}&startblock={self.start_block}&endblock={self.end_block}&page={self.page}&offset={self.offset}&sort={self.sort}&apikey={self.api_key}'
        )
    try:
        response = requests.get(url)
        response.raise_for_status()
    except requests.exceptions.RequestException as e:
        print('Error occurred while making the request:', e)
        # The original fell through and dereferenced a failed/unbound
        # `response`, masking the real error with a NameError; re-raise.
        raise
    items = response.json()['result']
    if not items:
        return [Document(page_content='')]
    result = []
    for item in items:
        metadata = {'from': item['from'], 'tx_hash': item['hash'], 'to':
            item['to']}
        result.append(Document(page_content=str(item), metadata=metadata))
    return result
null
add_texts
"""Run more texts through the embeddings and add to the vectorstore. Args: texts: Iterable of strings to add to the vectorstore. metadatas: Optional list of metadatas associated with the texts. doc_metadata: optional metadata for the document This function indexes all the input text strings in the Vectara corpus as a single Vectara document, where each input text is considered a "section" and the metadata are associated with each section. if 'doc_metadata' is provided, it is associated with the Vectara document. Returns: document ID of the document added """ doc_hash = md5() for t in texts: doc_hash.update(t.encode()) doc_id = doc_hash.hexdigest() if metadatas is None: metadatas = [{} for _ in texts] if doc_metadata: doc_metadata['source'] = 'langchain' else: doc_metadata = {'source': 'langchain'} use_core_api = kwargs.get('use_core_api', False) section_key = 'parts' if use_core_api else 'section' doc = {'document_id': doc_id, 'metadataJson': json.dumps(doc_metadata), section_key: [{'text': text, 'metadataJson': json.dumps(md)} for text, md in zip(texts, metadatas)]} success_str = self._index_doc(doc, use_core_api=use_core_api) if success_str == 'E_ALREADY_EXISTS': self._delete_doc(doc_id) self._index_doc(doc) elif success_str == 'E_NO_PERMISSIONS': print( """No permissions to add document to Vectara. Check your corpus ID, customer ID and API key""" ) return [doc_id]
def add_texts(self, texts: Iterable[str], metadatas: Optional[List[dict]]=
        None, doc_metadata: Optional[dict]=None, **kwargs: Any) -> List[str]:
    """Run more texts through the embeddings and add to the vectorstore.

    Args:
        texts: Iterable of strings to add to the vectorstore.
        metadatas: Optional list of metadatas associated with the texts.
        doc_metadata: optional metadata for the document

    This function indexes all the input text strings in the Vectara corpus as a
    single Vectara document, where each input text is considered a "section" and the
    metadata are associated with each section.
    if 'doc_metadata' is provided, it is associated with the Vectara document.

    Returns:
        document ID of the document added
    """
    # Document id is a content hash, so identical input texts map to the
    # same Vectara document.
    doc_hash = md5()
    for t in texts:
        doc_hash.update(t.encode())
    doc_id = doc_hash.hexdigest()
    if metadatas is None:
        metadatas = [{} for _ in texts]
    if doc_metadata:
        doc_metadata['source'] = 'langchain'
    else:
        doc_metadata = {'source': 'langchain'}
    use_core_api = kwargs.get('use_core_api', False)
    section_key = 'parts' if use_core_api else 'section'
    doc = {'document_id': doc_id, 'metadataJson': json.dumps(doc_metadata),
        section_key: [{'text': text, 'metadataJson': json.dumps(md)} for
        text, md in zip(texts, metadatas)]}
    success_str = self._index_doc(doc, use_core_api=use_core_api)
    if success_str == 'E_ALREADY_EXISTS':
        self._delete_doc(doc_id)
        # Retry with the SAME API flavor as the first attempt; the
        # original retry dropped use_core_api, silently switching the
        # payload interpretation for core-API callers.
        self._index_doc(doc, use_core_api=use_core_api)
    elif success_str == 'E_NO_PERMISSIONS':
        print(
            """No permissions to add document to Vectara. Check your corpus ID, customer ID and API key"""
            )
    return [doc_id]
Run more texts through the embeddings and add to the vectorstore. Args: texts: Iterable of strings to add to the vectorstore. metadatas: Optional list of metadatas associated with the texts. doc_metadata: optional metadata for the document This function indexes all the input text strings in the Vectara corpus as a single Vectara document, where each input text is considered a "section" and the metadata are associated with each section. if 'doc_metadata' is provided, it is associated with the Vectara document. Returns: document ID of the document added
test_load
mocker.patch('requests.get', return_value=mocker.MagicMock(json=lambda : [], links=None)) loader = GitHubIssuesLoader(repo='repo', access_token='access_token') documents = loader.load() assert documents == []
def test_load(mocker: MockerFixture) -> None:
    """An empty issues response yields an empty document list."""
    fake_response = mocker.MagicMock(json=lambda : [], links=None)
    mocker.patch('requests.get', return_value=fake_response)
    loader = GitHubIssuesLoader(repo='repo', access_token='access_token')
    assert loader.load() == []
null
_llm_type
"""Return type of llm.""" return 'mosaic'
@property
def _llm_type(self) -> str:
    """Return type of llm."""
    # Static identifier string for this LLM implementation.
    return 'mosaic'
Return type of llm.
try_neq_default
"""Try to determine if a value is different from the default. Args: value: The value. key: The key. model: The model. Returns: Whether the value is different from the default. """ try: return model.__fields__[key].get_default() != value except Exception: return True
def try_neq_default(value: Any, key: str, model: BaseModel) -> bool:
    """Try to determine if a value is different from the default.

    Args:
        value: The value.
        key: The key.
        model: The model.

    Returns:
        Whether the value is different from the default.
    """
    try:
        # Any failure (unknown key, missing __fields__, or a comparison
        # that raises) is treated as "differs from default".
        return model.__fields__[key].get_default() != value
    except Exception:
        return True
Try to determine if a value is different from the default. Args: value: The value. key: The key. model: The model. Returns: Whether the value is different from the default.
get_type
""" Retrieves the simplified schema type for a given original type. Args: type (str): The original schema type to find the simplified type for. Returns: str: The simplified schema type if it exists; otherwise, returns the original type. """ try: return self.schema[type] except KeyError: return type
def get_type(self, type: str) -> str:
    """
    Retrieves the simplified schema type for a given original type.

    Args:
        type (str): The original schema type to find the simplified type for.

    Returns:
        str: The simplified schema type if it exists; otherwise, returns the
            original type.
    """
    # dict.get expresses the lookup-with-fallback in a single call.
    return self.schema.get(type, type)
Retrieves the simplified schema type for a given original type. Args: type (str): The original schema type to find the simplified type for. Returns: str: The simplified schema type if it exists; otherwise, returns the original type.
test_quip_loader_load_data_by_folder_id
mock_quip.get_folder.side_effect = [self._get_mock_folder(self. MOCK_FOLDER_IDS[0])] mock_quip.get_thread.side_effect = [self._get_mock_thread(self. MOCK_THREAD_IDS[0]), self._get_mock_thread(self.MOCK_THREAD_IDS[1])] quip_loader = self._get_mock_quip_loader(mock_quip) documents = quip_loader.load(folder_ids=[self.MOCK_FOLDER_IDS[0]]) assert mock_quip.get_folder.call_count == 1 assert mock_quip.get_thread.call_count == 2 assert len(documents) == 2 assert all(isinstance(doc, Document) for doc in documents) assert documents[0].metadata.get('source' ) == f'https://example.quip.com/{self.MOCK_THREAD_IDS[0]}' assert documents[1].metadata.get('source' ) == f'https://example.quip.com/{self.MOCK_THREAD_IDS[1]}'
def test_quip_loader_load_data_by_folder_id(self, mock_quip: MagicMock
        ) -> None:
    """Loading one folder containing two threads yields two Documents."""
    mock_quip.get_folder.side_effect = [
        self._get_mock_folder(self.MOCK_FOLDER_IDS[0])
    ]
    mock_quip.get_thread.side_effect = [
        self._get_mock_thread(self.MOCK_THREAD_IDS[0]),
        self._get_mock_thread(self.MOCK_THREAD_IDS[1]),
    ]
    quip_loader = self._get_mock_quip_loader(mock_quip)
    documents = quip_loader.load(folder_ids=[self.MOCK_FOLDER_IDS[0]])
    assert mock_quip.get_folder.call_count == 1
    assert mock_quip.get_thread.call_count == 2
    assert len(documents) == 2
    assert all(isinstance(doc, Document) for doc in documents)
    for doc, thread_id in zip(documents, self.MOCK_THREAD_IDS[:2]):
        assert doc.metadata.get('source'
            ) == f'https://example.quip.com/{thread_id}'
null
_on_chain_start
crumbs = self.get_breadcrumbs(run) run_type = run.run_type.capitalize() self.function_callback( f"{get_colored_text('[chain/start]', color='green')} " + get_bolded_text(f"""[{crumbs}] Entering {run_type} run with input: """) + f"{try_json_stringify(run.inputs, '[inputs]')}")
def _on_chain_start(self, run: Run) -> None:
    # Emit a colored "[chain/start]" banner followed by the run's
    # breadcrumb trail and its JSON-stringified inputs.
    crumbs = self.get_breadcrumbs(run)
    run_type = run.run_type.capitalize()
    self.function_callback(
        f"{get_colored_text('[chain/start]', color='green')} " +
        get_bolded_text(f"""[{crumbs}] Entering {run_type} run with input: """
        ) + f"{try_json_stringify(run.inputs, '[inputs]')}")
null
embed_query
"""Compute query embeddings using Cloudflare Workers AI. Args: text: The text to embed. Returns: Embeddings for the text. """ text = text.replace('\n', ' ') if self.strip_new_lines else text response = requests.post( f'{self.api_base_url}/{self.account_id}/ai/run/{self.model_name}', headers=self.headers, json={'text': [text]}) return response.json()['result']['data'][0]
def embed_query(self, text: str) -> List[float]:
    """Compute query embeddings using Cloudflare Workers AI.

    Args:
        text: The text to embed.

    Returns:
        Embeddings for the text.
    """
    if self.strip_new_lines:
        text = text.replace('\n', ' ')
    endpoint = f'{self.api_base_url}/{self.account_id}/ai/run/{self.model_name}'
    response = requests.post(endpoint, headers=self.headers,
                             json={'text': [text]})
    return response.json()['result']['data'][0]
Compute query embeddings using Cloudflare Workers AI. Args: text: The text to embed. Returns: Embeddings for the text.
__init__
"""Initialize the sentence_transformer.""" super().__init__(project=project, location=location, credentials= credentials, request_parallelism=request_parallelism, max_retries= max_retries, model_name=model_name, **kwargs) self.instance['max_batch_size'] = kwargs.get('max_batch_size', _MAX_BATCH_SIZE) self.instance['batch_size'] = self.instance['max_batch_size'] self.instance['min_batch_size'] = kwargs.get('min_batch_size', _MIN_BATCH_SIZE) self.instance['min_good_batch_size'] = self.instance['min_batch_size'] self.instance['lock'] = threading.Lock() self.instance['batch_size_validated'] = False self.instance['task_executor'] = ThreadPoolExecutor(max_workers= request_parallelism) self.instance['embeddings_task_type_supported' ] = not self.client._endpoint_name.endswith('/textembedding-gecko@001')
def __init__(self, model_name: str='textembedding-gecko-default', project:
        Optional[str]=None, location: str='us-central1',
        request_parallelism: int=5, max_retries: int=6, credentials:
        Optional[Any]=None, **kwargs: Any):
    """Initialize the sentence_transformer."""
    super().__init__(project=project, location=location, credentials=
        credentials, request_parallelism=request_parallelism, max_retries=
        max_retries, model_name=model_name, **kwargs)
    # Batch sizing starts at the maximum; min/min_good bounds and the
    # batch_size_validated flag suggest it is tuned later — confirm in the
    # embedding methods.
    self.instance['max_batch_size'] = kwargs.get('max_batch_size',
        _MAX_BATCH_SIZE)
    self.instance['batch_size'] = self.instance['max_batch_size']
    self.instance['min_batch_size'] = kwargs.get('min_batch_size',
        _MIN_BATCH_SIZE)
    self.instance['min_good_batch_size'] = self.instance['min_batch_size']
    # Lock serializes access to shared instance state across worker threads.
    self.instance['lock'] = threading.Lock()
    self.instance['batch_size_validated'] = False
    self.instance['task_executor'] = ThreadPoolExecutor(max_workers=
        request_parallelism)
    # textembedding-gecko@001 endpoints do not support the embeddings task
    # type parameter.
    self.instance['embeddings_task_type_supported'
        ] = not self.client._endpoint_name.endswith('/textembedding-gecko@001')
Initialize the sentence_transformer.
def _on_run_create(self, run: Run) ->None:
    """Capture the first run seen as the root run and fire the start hook."""
    if self.root_id is None:
        self.root_id = run.id
        on_start = self._arg_on_start
        if on_start is not None:
            call_func_with_variable_args(on_start, run, self.config)
def add_texts(self, texts: Iterable[str], metadatas: Optional[List[dict]]=
    None, ids: Optional[List[str]]=None, **kwargs: Any) ->List[str]:
    """Run more texts through the embeddings and add to the vectorstore.

    Args:
        texts (Iterable[str]): Texts to add to the vectorstore.
        metadatas (Optional[List[dict]], optional): Optional list of metadatas.
        ids (Optional[List[str]], optional): Optional list of IDs.

    Returns:
        List[str]: List of IDs of the added texts.
    """
    # Generate ids when the caller supplies none (uuid1: time/node based).
    if ids is None:
        ids = [str(uuid.uuid1()) for _ in texts]
    embeddings = None
    texts = list(texts)
    if self._embedding_function is not None:
        embeddings = self._embedding_function.embed_documents(texts)
    if metadatas:
        # Pad metadatas with empty dicts so its length matches texts.
        length_diff = len(texts) - len(metadatas)
        if length_diff:
            metadatas = metadatas + [{}] * length_diff
        # The backend rejects empty metadata dicts, so split the batch into
        # indices with and without metadata and upsert them separately.
        empty_ids = []
        non_empty_ids = []
        for idx, m in enumerate(metadatas):
            if m:
                non_empty_ids.append(idx)
            else:
                empty_ids.append(idx)
        if non_empty_ids:
            metadatas = [metadatas[idx] for idx in non_empty_ids]
            texts_with_metadatas = [texts[idx] for idx in non_empty_ids]
            embeddings_with_metadatas = [embeddings[idx] for idx in
                non_empty_ids] if embeddings else None
            ids_with_metadata = [ids[idx] for idx in non_empty_ids]
            try:
                self._collection.upsert(metadatas=metadatas, embeddings=
                    embeddings_with_metadatas, documents=
                    texts_with_metadatas, ids=ids_with_metadata)
            except ValueError as e:
                # Surface a targeted hint when complex metadata values are
                # rejected; re-raise anything else untouched.
                if 'Expected metadata value to be' in str(e):
                    msg = (
                        'Try filtering complex metadata from the document using langchain_community.vectorstores.utils.filter_complex_metadata.'
                        )
                    raise ValueError(e.args[0] + '\n\n' + msg)
                else:
                    raise e
        if empty_ids:
            texts_without_metadatas = [texts[j] for j in empty_ids]
            embeddings_without_metadatas = [embeddings[j] for j in empty_ids
                ] if embeddings else None
            ids_without_metadatas = [ids[j] for j in empty_ids]
            self._collection.upsert(embeddings=embeddings_without_metadatas,
                documents=texts_without_metadatas, ids=ids_without_metadatas)
    else:
        # No metadata at all: one straightforward upsert.
        self._collection.upsert(embeddings=embeddings, documents=texts,
            ids=ids)
    return ids
def raise_error(key: str, value: Any) ->None:
    """Raise a ValueError describing an unsupported metadata value type."""
    type_name = type(value).__name__
    message = (
        f"Metadata value for key '{key}' must be a string, int, " +
        f'float, or list of strings. Got {type_name}')
    raise ValueError(message)
@classmethod
def from_texts(cls: Type[PGEmbedding], texts: List[str], embedding:
    Embeddings, metadatas: Optional[List[dict]]=None, collection_name: str=
    _LANGCHAIN_DEFAULT_COLLECTION_NAME, ids: Optional[List[str]]=None,
    pre_delete_collection: bool=False, **kwargs: Any) ->PGEmbedding:
    """Create a PGEmbedding store by embedding *texts* eagerly."""
    vectors = embedding.embed_documents(list(texts))
    return cls._initialize_from_embeddings(texts, vectors, embedding,
        metadatas=metadatas, ids=ids, collection_name=collection_name,
        pre_delete_collection=pre_delete_collection, **kwargs)
def clear(self) ->None:
    """Remove every stored message from the in-memory history."""
    self.messages = list()
def get_sqlite_cache() ->SQLAlchemyCache:
    """Build a cache backed by an in-memory SQLite engine."""
    engine = create_engine('sqlite://')
    return SQLAlchemyCache(engine=engine)
@property
def _llm_type(self) ->str:
    """Identifier string for this fake chat model."""
    model_type = 'fake-chat-model'
    return model_type
def test_marqo_with_metadatas(client: Marqo) ->None:
    """Test end to end construction and search."""
    docs = ['foo', 'bar', 'baz']
    doc_metadatas = [{'page': i} for i in range(len(docs))]
    store = Marqo.from_texts(texts=docs, metadatas=doc_metadatas,
        index_name=INDEX_NAME, url=DEFAULT_MARQO_URL, api_key=
        DEFAULT_MARQO_API_KEY, verbose=False)
    hits = store.similarity_search('foo', k=1)
    assert hits == [Document(page_content='foo', metadata={'page': 0})]
def ast_parse(raw_filter: str) ->Optional[FilterDirective]:
    """Parse *raw_filter* into a normalized filter directive.

    The raw string is run through the lark grammar parser, then the resulting
    directive tree is post-processed so only the allowed comparators,
    operators and attributes (captured from the enclosing scope) remain.

    Args:
        raw_filter: The raw filter expression to parse.

    Returns:
        The fixed-up filter directive, or None when the input parses to none.
    """
    # BUG FIX: lark parsers expose ``parse``; ``parse_folder`` does not exist
    # and would raise AttributeError on first use.
    parsed = cast(Optional[FilterDirective], get_parser().parse(raw_filter))
    fixed = fix_filter_directive(parsed, allowed_comparators=
        allowed_comparators, allowed_operators=allowed_operators,
        allowed_attributes=allowed_attributes)
    return fixed
@property
def _type(self) ->str:
    """Return the type key."""
    type_key = 'regex_parser'
    return type_key
@root_validator()
def validate_environment(cls, values: Dict) ->Dict:
    """Validate that api key and python package exists in environment.

    Resolves OpenAI settings from ``values`` or environment variables,
    configures the module-level ``openai`` client (pre-1.0 style), and stores
    the ``ChatCompletion`` class in ``values['client']``.
    """
    # Only the API key is required; the other settings default to ''.
    openai_api_key = get_from_dict_or_env(values, 'openai_api_key',
        'OPENAI_API_KEY')
    openai_api_base = get_from_dict_or_env(values, 'openai_api_base',
        'OPENAI_API_BASE', default='')
    openai_proxy = get_from_dict_or_env(values, 'openai_proxy',
        'OPENAI_PROXY', default='')
    openai_organization = get_from_dict_or_env(values,
        'openai_organization', 'OPENAI_ORGANIZATION', default='')
    try:
        import openai
        # Mutates module-level state on the pre-1.0 openai package; empty
        # optional settings are deliberately left untouched.
        openai.api_key = openai_api_key
        if openai_api_base:
            openai.api_base = openai_api_base
        if openai_organization:
            openai.organization = openai_organization
        if openai_proxy:
            openai.proxy = {'http': openai_proxy, 'https': openai_proxy}
    except ImportError:
        raise ImportError(
            'Could not import openai python package. Please install it with `pip install openai`.'
            )
    try:
        # Missing attribute indicates an openai release without chat support.
        values['client'] = openai.ChatCompletion
    except AttributeError:
        raise ValueError(
            '`openai` has no `ChatCompletion` attribute, this is likely due to an old version of the openai package. Try upgrading it with `pip install --upgrade openai`.'
            )
    # This initialization path is deprecated; warn but keep working.
    warnings.warn(
        'You are trying to use a chat model. This way of initializing it is no longer supported. Instead, please use: `from langchain_community.chat_models import ChatOpenAI`'
        )
    return values
def _get_relevant_documents(self, query: str, *, run_manager:
    CallbackManagerForRetrieverRun, **kwargs: Any) ->List[Document]:
    """Delegate retrieval to the wrapped retriever with a child callback manager."""
    child_manager = run_manager.get_child()
    return self.retriever.get_relevant_documents(query, run_manager=
        child_manager, **kwargs)
def map(self, run: Run) ->Dict[str, str]:
    """Extract the input/prediction pair from a finished run."""
    outputs = run.outputs
    if not outputs:
        raise ValueError(f'Run {run.id} has no outputs to evaluate.')
    return {'input': run.inputs['input'], 'prediction': outputs['output']}
def _import_requests_tool_BaseRequestsTool() ->Any:
    """Lazily import and return the BaseRequestsTool class."""
    from langchain_community.tools.requests import tool as requests_tool
    return requests_tool.BaseRequestsTool
@root_validator
def check_voice_models_key_is_provider_name(cls, values: dict) ->dict:
    """Ensure every voice_models key names a configured provider."""
    providers = values.get('providers', [])
    for provider_name in values.get('voice_models', {}):
        if provider_name not in providers:
            raise ValueError(
                'voice_model should be formatted like this {<provider_name>: <its_voice_model>}'
                )
    return values
@property
def input_keys(self) ->List[str]:
    """Expect input key.

    :meta private:
    """
    keys = [self.input_key]
    return keys
def _create_chat_result(self, response: Mapping[str, Any]) ->ChatResult:
    """Convert a raw chat-completion response into a ChatResult."""
    first_choice = response['choices'][0]
    message = _convert_dict_to_message(first_choice.get('message'))
    llm_output = {'token_usage': response['usage'], 'model': self.model}
    return ChatResult(generations=[ChatGeneration(message=message)],
        llm_output=llm_output)
def __init__(self, chat_log: pd.DataFrame, user_id_col: str='ID'):
    """Initialize with a Pandas DataFrame containing chat logs.

    Args:
        chat_log: Pandas DataFrame containing chat logs.
        user_id_col: Name of the column containing the user ID.
            Defaults to "ID".
    """
    is_frame = isinstance(chat_log, pd.DataFrame)
    if not is_frame:
        raise ValueError(
            f'Expected chat_log to be a pd.DataFrame, got {type(chat_log)}')
    self.chat_log = chat_log
    self.user_id_col = user_id_col
def _get_embeddings_from_stateful_docs(embeddings: Embeddings, documents:
    Sequence[_DocumentWithState]) ->List[List[float]]:
    """Return embeddings for *documents*, reusing any cached in document state."""
    if documents and 'embedded_doc' in documents[0].state:
        # Cached: pull the previously computed vectors out of doc state.
        vectors = [doc.state['embedded_doc'] for doc in documents]
    else:
        vectors = embeddings.embed_documents([doc.page_content for doc in
            documents])
    # (Re-)store each vector on its document for subsequent calls.
    for doc, vector in zip(documents, vectors):
        doc.state['embedded_doc'] = vector
    return vectors
@pytest.mark.requires('presidio_analyzer', 'presidio_anonymizer', 'faker')
def test_non_faker_values() ->None:
    """Test anonymizing multiple items in a sentence without faker values"""
    from langchain_experimental.data_anonymizer import PresidioAnonymizer
    sample = (
        'My name is John Smith. Your name is Adam Smith. Her name is Jane Smith.Our names are: John Smith, Adam Smith, Jane Smith.'
        )
    expected = (
        'My name is <PERSON>. Your name is <PERSON_2>. Her name is <PERSON_3>.Our names are: <PERSON>, <PERSON_2>, <PERSON_3>.'
        )
    anonymizer = PresidioAnonymizer(add_default_faker_operators=False)
    assert anonymizer.anonymize(sample) == expected
@root_validator(pre=True)
def get_default_document_variable_name(cls, values: Dict) ->Dict:
    """Get default document variable name, if not provided."""
    llm_chain_variables = values['llm_chain'].prompt.input_variables
    if 'document_variable_name' not in values:
        # Only an unambiguous single prompt variable can serve as default.
        if len(llm_chain_variables) == 1:
            values['document_variable_name'] = llm_chain_variables[0]
        else:
            raise ValueError(
                'document_variable_name must be provided if there are multiple llm_chain input_variables'
                )
    elif values['document_variable_name'] not in llm_chain_variables:
        raise ValueError(
            f"document_variable_name {values['document_variable_name']} was not found in llm_chain input_variables: {llm_chain_variables}"
            )
    return values
@property
def _type(self) ->str:
    """Type key identifying this prompt as a chat prompt."""
    prompt_type = 'chat'
    return prompt_type
def on_llm_end_common(self) ->None:
    """Bump both the LLM-end counter and the overall end counter."""
    self.llm_ends = self.llm_ends + 1
    self.ends = self.ends + 1
def keys(self, text: str) ->str:
    """Return the keys of the dict at the given path.

    Args:
        text: Python representation of the path to the dict (e.g.
            data["key1"][0]["key2"]).
    """
    try:
        path_parts = _parse_input(text)
        current = self.dict_
        for part in path_parts:
            if part:
                current = current[part]
        if not isinstance(current, dict):
            raise ValueError(
                f'Value at path `{text}` is not a dict, get the value directly.'
                )
        return str(list(current.keys()))
    except Exception as e:
        # Tool contract: report any failure as text instead of raising.
        return repr(e)
def test_custom_prefixes() ->None:
    """Custom observation/thought prefixes are honored when formatting logs."""
    steps = [(AgentAction(tool='Tool1', tool_input='input1', log='Log1'),
        'Observation1')]
    expected = 'Log1\nCustom Observation: Observation1\nCustom Thought: '
    assert format_log_to_str(steps, 'Custom Observation: ', 'Custom Thought: '
        ) == expected
def _import_databricks_chat() ->Any:
    """Lazily import and return the ChatDatabricks class."""
    from langchain_community.chat_models import databricks as databricks_module
    return databricks_module.ChatDatabricks
def test_chat_generation_chunk() ->None:
    """Adding ChatGenerationChunks merges content and generation_info."""
    left = ChatGenerationChunk(message=HumanMessageChunk(content='Hello, '))
    right = ChatGenerationChunk(message=HumanMessageChunk(content='world!'))
    merged = ChatGenerationChunk(message=HumanMessageChunk(content=
        'Hello, world!'))
    assert left + right == merged, 'ChatGenerationChunk + ChatGenerationChunk should be a ChatGenerationChunk'
    right_info = ChatGenerationChunk(message=HumanMessageChunk(content=
        'world!'), generation_info={'foo': 'bar'})
    merged_info = ChatGenerationChunk(message=HumanMessageChunk(content=
        'Hello, world!'), generation_info={'foo': 'bar'})
    assert left + right_info == merged_info, 'GenerationChunk + GenerationChunk should be a GenerationChunk with merged generation_info'
    bang = ChatGenerationChunk(message=HumanMessageChunk(content='!'),
        generation_info={'baz': 'foo'})
    merged_all = ChatGenerationChunk(message=HumanMessageChunk(content=
        'Hello, world!!'), generation_info={'foo': 'bar', 'baz': 'foo'})
    assert left + right_info + bang == merged_all, 'GenerationChunk + GenerationChunk should be a GenerationChunk with merged generation_info'
def _import_sqlitevss() ->Any:
    """Lazily import and return the SQLiteVSS vector store class."""
    from langchain_community.vectorstores import sqlitevss as sqlitevss_module
    return sqlitevss_module.SQLiteVSS
def max_marginal_relevance_search(self, query: str, k: int=4, fetch_k: int=
    20, lambda_mult: float=0.5, **kwargs: Any) ->List[Document]:
    """Return docs selected using the maximal marginal relevance.

    Maximal marginal relevance optimizes for similarity to query AND diversity
    among selected documents.

    Args:
        query: Text to look up documents similar to.
        k: Number of Documents to return. Defaults to 4.
        fetch_k: Number of Documents to fetch to pass to MMR algorithm.
        lambda_mult: Number between 0 and 1 that determines the degree
            of diversity among the results with 0 corresponding
            to maximum diversity and 1 to minimum diversity.
            Defaults to 0.5.

    Returns:
        List of Documents selected by maximal marginal relevance.
    """
    query_embedding = self._embedding.embed_query(query)
    return self.max_marginal_relevance_search_by_vector(query_embedding, k,
        fetch_k, lambda_mult, **kwargs)
def _import_google_trends() ->Any:
    """Lazily import and return the GoogleTrendsAPIWrapper class."""
    from langchain_community.utilities import google_trends as trends_module
    return trends_module.GoogleTrendsAPIWrapper
def __init__(self, index: VectorSearchIndex, *, embedding: Optional[
    Embeddings]=None, text_column: Optional[str]=None, columns: Optional[
    List[str]]=None):
    """Wrap a Databricks Vector Search index as a vector store.

    Args:
        index: The ``VectorSearchIndex`` to wrap.
        embedding: Embedding model; required unless the index uses
            Databricks-managed embeddings.
        text_column: Column holding the document text; required when the
            embeddings are not Databricks-managed.
        columns: Extra columns to return with search results.

    Raises:
        ImportError: If databricks-vectorsearch is not installed.
        TypeError: If ``index`` is not a ``VectorSearchIndex``.
        ValueError: On column/schema/dimension mismatches.
    """
    try:
        from databricks.vector_search.client import VectorSearchIndex
    except ImportError as e:
        raise ImportError(
            'Could not import databricks-vectorsearch python package. Please install it with `pip install databricks-vectorsearch`.'
            ) from e
    self.index = index
    if not isinstance(index, VectorSearchIndex):
        raise TypeError('index must be of type VectorSearchIndex.')
    # Cache the index description plus the specs that describe how the
    # index is synced/accessed; helper predicates below read these.
    index_details = self.index.describe()
    self.primary_key = index_details['primary_key']
    self.index_type = index_details.get('index_type')
    self._delta_sync_index_spec = index_details.get('delta_sync_index_spec',
        dict())
    self._direct_access_index_spec = index_details.get(
        'direct_access_index_spec', dict())
    if self._is_databricks_managed_embeddings():
        # The index embeds its own source column; a caller-supplied
        # text_column, if any, must agree with it.
        index_source_column = self._embedding_source_column_name()
        if text_column is not None and text_column != index_source_column:
            raise ValueError(
                f"text_column '{text_column}' does not match with the source column of the index: '{index_source_column}'."
                )
        self.text_column = index_source_column
    else:
        self._require_arg(text_column, 'text_column')
        self.text_column = text_column
    # Always include the primary key and text column in result columns.
    self.columns = columns or []
    if self.primary_key not in self.columns:
        self.columns.append(self.primary_key)
    if self.text_column and self.text_column not in self.columns:
        self.columns.append(self.text_column)
    if self._is_direct_access_index():
        # Direct-access indexes expose a schema; validate requested columns.
        index_schema = self._index_schema()
        if index_schema:
            for col in self.columns:
                if col not in index_schema:
                    raise ValueError(
                        f"column '{col}' is not in the index's schema.")
    if not self._is_databricks_managed_embeddings():
        # A caller-supplied embedding model is required and, when the index
        # reports a dimension, must produce vectors of that size.
        self._require_arg(embedding, 'embedding')
        self._embedding = embedding
        index_embedding_dimension = self._embedding_vector_column_dimension()
        if index_embedding_dimension is not None:
            inferred_embedding_dimension = self._infer_embedding_dimension()
            if inferred_embedding_dimension != index_embedding_dimension:
                raise ValueError(
                    f"embedding model's dimension '{inferred_embedding_dimension}' does not match with the index's dimension '{index_embedding_dimension}'."
                    )
    else:
        if embedding is not None:
            logger.warning(
                'embedding model is not used in delta-sync index with Databricks-managed embeddings.'
                )
        self._embedding = None
def test_delete_keys(manager: SQLRecordManager) ->None:
    """Test deleting keys from the database."""
    all_keys = ['key1', 'key2', 'key3']
    manager.update(all_keys)
    manager.delete_keys(['key1', 'key2'])
    assert manager.list_keys() == ['key3']
def delete(self, ids: Optional[List[str]]=None, **kwargs: Any) ->Optional[bool
    ]:
    """Delete the documents with the given ids, one at a time.

    Raises:
        ValueError: If no ids are provided.
    """
    if ids is None:
        raise ValueError('No document ids provided to delete.')
    for doc_id in ids:
        self.delete_document_by_id(doc_id)
    return True
@pytest.fixture
def deeplake_datastore() ->DeepLake:
    """Fixture building a small DeepLake store over three embedded texts."""
    sample_texts = ['foo', 'bar', 'baz']
    sample_metadatas = [{'page': str(i)} for i in range(len(sample_texts))]
    return DeepLake.from_texts(dataset_path='./test_path', texts=
        sample_texts, metadatas=sample_metadatas, embedding_function=
        FakeEmbeddings(), overwrite=True)
_log_visualizations
if not (self.visualizations and self.nlp): return spacy = import_spacy() prompts = session_df['prompts'].tolist() outputs = session_df['text'].tolist() for idx, (prompt, output) in enumerate(zip(prompts, outputs)): doc = self.nlp(output) sentence_spans = list(doc.sents) for visualization in self.visualizations: try: html = spacy.displacy.render(sentence_spans, style= visualization, options={'compact': True}, jupyter=False, page=True) self.experiment.log_asset_data(html, name= f'langchain-viz-{visualization}-{idx}.html', metadata={ 'prompt': prompt}, step=idx) except Exception as e: self.comet_ml.LOGGER.warning(e, exc_info=True, extra={ 'show_traceback': True}) return
def _log_visualizations(self, session_df: Any) ->None:
    """Render spaCy visualizations of the LLM outputs and upload them to Comet.

    For each prompt/output pair in ``session_df``, runs the output through the
    configured spaCy pipeline and logs one HTML asset per visualization style.
    No-op unless both ``self.visualizations`` and ``self.nlp`` are configured.
    """
    if not (self.visualizations and self.nlp):
        return
    spacy = import_spacy()
    # assumes session_df has 'prompts' and 'text' columns — TODO confirm caller
    prompts = session_df['prompts'].tolist()
    outputs = session_df['text'].tolist()
    for idx, (prompt, output) in enumerate(zip(prompts, outputs)):
        doc = self.nlp(output)
        sentence_spans = list(doc.sents)
        for visualization in self.visualizations:
            try:
                html = spacy.displacy.render(sentence_spans, style=
                    visualization, options={'compact': True}, jupyter=False,
                    page=True)
                # One asset per (style, row); the prompt rides along as metadata.
                self.experiment.log_asset_data(html, name=
                    f'langchain-viz-{visualization}-{idx}.html', metadata={
                    'prompt': prompt}, step=idx)
            except Exception as e:
                # Best-effort logging: a failed render must not break the run.
                self.comet_ml.LOGGER.warning(e, exc_info=True, extra={
                    'show_traceback': True})
    return
null
_completion_with_retry
if embeddings_type and self.instance['embeddings_task_type_supported']: from vertexai.language_models import TextEmbeddingInput requests = [TextEmbeddingInput(text=t, task_type=embeddings_type) for t in texts_to_process] else: requests = texts_to_process embeddings = self.client.get_embeddings(requests) return [embs.values for embs in embeddings]
@retry_decorator
def _completion_with_retry(texts_to_process: List[str]) ->Any:
    """Call the Vertex AI embedding endpoint with retry handling.

    ``retry_decorator``, ``embeddings_type`` and ``self`` are captured from
    the enclosing scope (this is a nested helper).
    """
    # Task-typed inputs are only valid when the model supports them;
    # otherwise fall back to passing the raw strings.
    if embeddings_type and self.instance['embeddings_task_type_supported']:
        from vertexai.language_models import TextEmbeddingInput
        requests = [TextEmbeddingInput(text=t, task_type=embeddings_type) for
            t in texts_to_process]
    else:
        requests = texts_to_process
    embeddings = self.client.get_embeddings(requests)
    # Unwrap the raw float vectors from the SDK response objects.
    return [embs.values for embs in embeddings]
null
output_keys
"""Return output keys. :meta private: """ return self.output_variables
@property
def output_keys(self) ->List[str]:
    """Return output keys.

    :meta private:
    """
    # The chain's outputs are exactly the configured output variables.
    return self.output_variables
Return output keys. :meta private:
_get_input_output
if self.input_key is None: prompt_input_key = get_prompt_input_key(inputs, self.memory_variables) else: prompt_input_key = self.input_key if self.output_key is None: if len(outputs) != 1: raise ValueError(f'One output key expected, got {outputs.keys()}') output_key = list(outputs.keys())[0] else: output_key = self.output_key return inputs[prompt_input_key], outputs[output_key]
def _get_input_output(self, inputs: Dict[str, Any], outputs: Dict[str, str] ) ->Tuple[str, str]: if self.input_key is None: prompt_input_key = get_prompt_input_key(inputs, self.memory_variables) else: prompt_input_key = self.input_key if self.output_key is None: if len(outputs) != 1: raise ValueError(f'One output key expected, got {outputs.keys()}') output_key = list(outputs.keys())[0] else: output_key = self.output_key return inputs[prompt_input_key], outputs[output_key]
null
_chain_type
return 'nat_bot_chain'
@property
def _chain_type(self) ->str:
    # Identifier used when (de)serializing this chain.
    return 'nat_bot_chain'
null
add_operators
"""Add operators to the anonymizer Args: operators: Operators to add to the anonymizer. """ self.operators.update(operators)
def add_operators(self, operators: Dict[str, OperatorConfig]) -> None:
    """Add operators to the anonymizer.

    Args:
        operators: Mapping of entity type to the operator handling it;
            entries for already-registered types are overwritten.
    """
    for entity_type, config in operators.items():
        self.operators[entity_type] = config
Add operators to the anonymizer Args: operators: Operators to add to the anonymizer.
process_index_results
"""Turns annoy results into a list of documents and scores. Args: idxs: List of indices of the documents in the index. dists: List of distances of the documents in the index. Returns: List of Documents and scores. """ docs = [] for idx, dist in zip(idxs, dists): _id = self.index_to_docstore_id[idx] doc = self.docstore.search(_id) if not isinstance(doc, Document): raise ValueError(f'Could not find document for id {_id}, got {doc}') docs.append((doc, dist)) return docs
def process_index_results(self, idxs: List[int], dists: List[float]) -> List[Tuple[Document, float]]:
    """Turn Annoy results into (document, score) pairs.

    Args:
        idxs: Indices of the matched documents in the index.
        dists: Distances corresponding to each index.

    Returns:
        List of Documents paired with their distances.

    Raises:
        ValueError: If an index maps to something that is not a Document.
    """
    results = []
    for position, score in zip(idxs, dists):
        _id = self.index_to_docstore_id[position]
        doc = self.docstore.search(_id)
        if not isinstance(doc, Document):
            raise ValueError(f'Could not find document for id {_id}, got {doc}')
        results.append((doc, score))
    return results
Turns annoy results into a list of documents and scores. Args: idxs: List of indices of the documents in the index. dists: List of distances of the documents in the index. Returns: List of Documents and scores.
_IfExp
self.write('(') self.dispatch(t.body) self.write(' if ') self.dispatch(t.test) self.write(' else ') self.dispatch(t.orelse) self.write(')')
def _IfExp(self, t): self.write('(') self.dispatch(t.body) self.write(' if ') self.dispatch(t.test) self.write(' else ') self.dispatch(t.orelse) self.write(')')
null
format_messages
"""Format kwargs into a list of messages."""
@abstractmethod
def format_messages(self, **kwargs: Any) ->List[BaseMessage]:
    """Format kwargs into a list of messages.

    Args:
        **kwargs: Values to substitute into the message templates.

    Returns:
        The fully formatted chat messages.
    """
Format kwargs into a list of messages.
_build_sugiyama_layout
try: from grandalf.graphs import Edge, Graph, Vertex from grandalf.layouts import SugiyamaLayout from grandalf.routing import EdgeViewer, route_with_lines except ImportError: print('Install grandalf to draw graphs. `pip install grandalf`') raise vertices_ = {id: Vertex(f' {data} ') for id, data in vertices.items()} edges_ = [Edge(vertices_[s], vertices_[e]) for s, e in edges] vertices_list = vertices_.values() graph = Graph(vertices_list, edges_) for vertex in vertices_list: vertex.view = VertexViewer(vertex.data) minw = min(v.view.w for v in vertices_list) for edge in edges_: edge.view = EdgeViewer() sug = SugiyamaLayout(graph.C[0]) graph = graph.C[0] roots = list(filter(lambda x: len(x.e_in()) == 0, graph.sV)) sug.init_all(roots=roots, optimize=True) sug.yspace = VertexViewer.HEIGHT sug.xspace = minw sug.route_edge = route_with_lines sug.draw() return sug
def _build_sugiyama_layout(vertices: Mapping[str, str], edges: Sequence[Tuple[str, str]]) ->Any:
    """Lay out a graph with grandalf's Sugiyama algorithm for text rendering.

    Args:
        vertices: Mapping of vertex id to its display label.
        edges: (source id, target id) pairs referencing ids in ``vertices``.

    Returns:
        The drawn grandalf ``SugiyamaLayout`` with view coordinates assigned.

    Raises:
        ImportError: If ``grandalf`` is not installed.
    """
    try:
        from grandalf.graphs import Edge, Graph, Vertex
        from grandalf.layouts import SugiyamaLayout
        from grandalf.routing import EdgeViewer, route_with_lines
    except ImportError:
        print('Install grandalf to draw graphs. `pip install grandalf`')
        raise
    # Pad labels with spaces so adjacent boxes do not touch when rendered.
    vertices_ = {id: Vertex(f' {data} ') for id, data in vertices.items()}
    edges_ = [Edge(vertices_[s], vertices_[e]) for s, e in edges]
    vertices_list = vertices_.values()
    graph = Graph(vertices_list, edges_)
    # grandalf requires every vertex/edge to carry a "view" with dimensions.
    for vertex in vertices_list:
        vertex.view = VertexViewer(vertex.data)
    minw = min(v.view.w for v in vertices_list)
    for edge in edges_:
        edge.view = EdgeViewer()
    # NOTE(review): only the first connected component (graph.C[0]) is laid
    # out; vertices in other components appear to be ignored — confirm intended.
    sug = SugiyamaLayout(graph.C[0])
    graph = graph.C[0]
    # Roots are the vertices with no incoming edges.
    roots = list(filter(lambda x: len(x.e_in()) == 0, graph.sV))
    sug.init_all(roots=roots, optimize=True)
    # Spacing is derived from the box sizes computed above.
    sug.yspace = VertexViewer.HEIGHT
    sug.xspace = minw
    sug.route_edge = route_with_lines
    sug.draw()
    return sug
null
test_chat_google_raises_with_invalid_temperature
pytest.importorskip('google.generativeai') with pytest.raises(ValueError) as e: ChatGooglePalm(google_api_key='fake', temperature=2.0) assert 'must be in the range' in str(e)
def test_chat_google_raises_with_invalid_temperature() -> None:
    """ChatGooglePalm must reject temperatures outside the allowed range."""
    pytest.importorskip('google.generativeai')
    with pytest.raises(ValueError) as e:
        ChatGooglePalm(google_api_key='fake', temperature=2.0)
    # Assert on the raised exception itself: str(e) formats the ExceptionInfo
    # (file/line repr), not the error message, so the original check could
    # pass or fail for the wrong reason.
    assert 'must be in the range' in str(e.value)
null
lc_attributes
attributes: Dict[str, Any] = {} if self.openai_organization: attributes['openai_organization'] = self.openai_organization if self.openai_api_base: attributes['openai_api_base'] = self.openai_api_base if self.openai_proxy: attributes['openai_proxy'] = self.openai_proxy return attributes
@property
def lc_attributes(self) -> Dict[str, Any]:
    """Serializable attributes: only the optional OpenAI settings that are set."""
    candidates = {
        'openai_organization': self.openai_organization,
        'openai_api_base': self.openai_api_base,
        'openai_proxy': self.openai_proxy,
    }
    # Keep only truthy values, preserving the declaration order above.
    return {name: value for name, value in candidates.items() if value}
null
_import_pgvector
from langchain_community.vectorstores.pgvector import PGVector return PGVector
def _import_pgvector() ->Any:
    """Lazily import PGVector so langchain_community stays an optional dependency."""
    from langchain_community.vectorstores.pgvector import PGVector
    return PGVector
null
_parse_note_xml
"""Parse Evernote xml.""" try: from lxml import etree except ImportError as e: logger.error( 'Could not import `lxml`. Although it is not a required package to use Langchain, using the EverNote loader requires `lxml`. Please install `lxml` via `pip install lxml` and try again.' ) raise e context = etree.iterparse(xml_file, encoding='utf-8', strip_cdata=False, huge_tree=True, recover=True) for action, elem in context: if elem.tag == 'note': yield EverNoteLoader._parse_note(elem)
@staticmethod
def _parse_note_xml(xml_file: str) ->Iterator[Dict[str, Any]]:
    """Parse Evernote xml.

    Streams the export file with lxml's incremental parser and yields one
    parsed dict per ``note`` element.

    Args:
        xml_file: Path to the Evernote export (.enex) file.

    Raises:
        ImportError: If ``lxml`` is not installed.
    """
    try:
        from lxml import etree
    except ImportError as e:
        logger.error(
            'Could not import `lxml`. Although it is not a required package to use Langchain, using the EverNote loader requires `lxml`. Please install `lxml` via `pip install lxml` and try again.'
            )
        raise e
    # strip_cdata=False keeps note content intact; recover=True tolerates
    # malformed markup; huge_tree=True lifts lxml's default size limits.
    context = etree.iterparse(xml_file, encoding='utf-8', strip_cdata=False,
        huge_tree=True, recover=True)
    for action, elem in context:
        if elem.tag == 'note':
            yield EverNoteLoader._parse_note(elem)
Parse Evernote xml.
get_message
return self.message
def get_message(self) ->str:
    """Return the stored message text."""
    return self.message
null
validate_prompt
prompt: BasePromptTemplate = values['prompt'] if 'agent_scratchpad' not in prompt.input_variables: raise ValueError( f'`agent_scratchpad` should be one of the variables in the prompt, got {prompt.input_variables}' ) return values
@root_validator
def validate_prompt(cls, values: dict) -> dict:
    """Ensure the prompt template declares the ``agent_scratchpad`` variable."""
    prompt: BasePromptTemplate = values['prompt']
    if 'agent_scratchpad' in prompt.input_variables:
        return values
    raise ValueError(
        f'`agent_scratchpad` should be one of the variables in the prompt, got {prompt.input_variables}'
        )
null
create_outputs
"""Create outputs from response.""" result = [{self.output_key: self.output_parser.parse_result(generation), 'full_generation': generation} for generation in llm_result.generations] if self.return_final_only: result = [{self.output_key: r[self.output_key]} for r in result] return result
def create_outputs(self, llm_result: LLMResult) -> List[Dict[str, Any]]:
    """Create outputs from response."""
    parsed = []
    for generation in llm_result.generations:
        parsed.append({
            self.output_key: self.output_parser.parse_result(generation),
            'full_generation': generation,
        })
    # When only the final answer is wanted, strip the raw generations.
    if self.return_final_only:
        parsed = [{self.output_key: item[self.output_key]} for item in parsed]
    return parsed
Create outputs from response.
on_chain_end
"""Run when chain ends running.""" self.step += 1 self.chain_ends += 1 self.ends += 1 resp = self._init_resp() resp.update({'action': 'on_chain_end', 'outputs': outputs.get('output', outputs.get('text'))}) resp.update(self.get_custom_callback_meta()) self.on_chain_end_records.append(resp) self.action_records.append(resp) if self.stream_logs: self.logger.report_text(resp)
def on_chain_end(self, outputs: Dict[str, Any], **kwargs: Any) -> None:
    """Run when chain ends running."""
    # Bump the global step counter plus the chain/end specific tallies.
    self.step += 1
    self.chain_ends += 1
    self.ends += 1
    record = self._init_resp()
    chain_output = outputs.get('output', outputs.get('text'))
    record.update({'action': 'on_chain_end', 'outputs': chain_output})
    record.update(self.get_custom_callback_meta())
    self.on_chain_end_records.append(record)
    self.action_records.append(record)
    if self.stream_logs:
        self.logger.report_text(record)
Run when chain ends running.
__repr_args__
return []
def __repr_args__(self) ->Any:
    # Suppress all fields from the repr output.
    return []
null
_get_tools_requests_put
return RequestsPutTool(requests_wrapper=TextRequestsWrapper())
def _get_tools_requests_put() ->BaseTool:
    """Build a RequestsPutTool backed by a plain TextRequestsWrapper."""
    return RequestsPutTool(requests_wrapper=TextRequestsWrapper())
null
_generate
"""Run the LLM on the given prompts."""
@abstractmethod
def _generate(self, prompts: List[str], stop: Optional[List[str]]=None,
    run_manager: Optional[CallbackManagerForLLMRun]=None, **kwargs: Any
    ) ->LLMResult:
    """Run the LLM on the given prompts.

    Args:
        prompts: Prompts to complete.
        stop: Optional stop sequences to cut generation at.
        run_manager: Callback manager for this run.
        **kwargs: Extra model-specific arguments.

    Returns:
        The generations for each prompt as an ``LLMResult``.
    """
Run the LLM on the given prompts.
output_keys
return [self.output_key]
@property
def output_keys(self) ->List[str]:
    # This chain produces exactly one output, under the configured key.
    return [self.output_key]
null
clear
"""Remove all messages from the store"""
@abstractmethod
def clear(self) ->None:
    """Remove all messages from the store."""
Remove all messages from the store
_is_relevant_code
"""Check if a line is part of the procedure division or a relevant section.""" if 'PROCEDURE DIVISION' in line.upper(): return True return False
def _is_relevant_code(self, line: str) ->bool: """Check if a line is part of the procedure division or a relevant section.""" if 'PROCEDURE DIVISION' in line.upper(): return True return False
Check if a line is part of the procedure division or a relevant section.
test_sparql_select
""" Test for generating and executing simple SPARQL SELECT query. """ berners_lee_card = 'http://www.w3.org/People/Berners-Lee/card' graph = RdfGraph(source_file=berners_lee_card, standard='rdf') chain = GraphSparqlQAChain.from_llm(OpenAI(temperature=0), graph=graph) output = chain.run("What is Tim Berners-Lee's work homepage?") expected_output = ( ' The work homepage of Tim Berners-Lee is http://www.w3.org/People/Berners-Lee/.' ) assert output == expected_output
def test_sparql_select() -> None:
    """
    Test for generating and executing simple SPARQL SELECT query.
    """
    source_url = 'http://www.w3.org/People/Berners-Lee/card'
    graph = RdfGraph(source_file=source_url, standard='rdf')
    chain = GraphSparqlQAChain.from_llm(OpenAI(temperature=0), graph=graph)
    answer = chain.run("What is Tim Berners-Lee's work homepage?")
    assert answer == (
        ' The work homepage of Tim Berners-Lee is http://www.w3.org/People/Berners-Lee/.'
        )
Test for generating and executing simple SPARQL SELECT query.
test_google_vertex_ai_multiturnsearch_get_relevant_documents
"""Test the get_relevant_documents() method.""" retriever = GoogleVertexAIMultiTurnSearchRetriever() documents = retriever.get_relevant_documents("What are Alphabet's Other Bets?") assert len(documents) > 0 for doc in documents: assert isinstance(doc, Document) assert doc.page_content assert doc.metadata['id'] assert doc.metadata['source']
@pytest.mark.requires('google.api_core')
def test_google_vertex_ai_multiturnsearch_get_relevant_documents() -> None:
    """Test the get_relevant_documents() method."""
    retriever = GoogleVertexAIMultiTurnSearchRetriever()
    results = retriever.get_relevant_documents(
        "What are Alphabet's Other Bets?")
    assert len(results) > 0
    # Every hit must be a populated Document with id/source metadata.
    for result in results:
        assert isinstance(result, Document)
        assert result.page_content
        assert result.metadata['id']
        assert result.metadata['source']
Test the get_relevant_documents() method.
load
"""Load given path as pages.""" return list(self.lazy_load())
def load(self) -> List[Document]:
    """Load given path as pages."""
    # Materialize the lazy iterator eagerly.
    pages = list(self.lazy_load())
    return pages
Load given path as pages.
_import_file_management_ListDirectoryTool
from langchain_community.tools.file_management import ListDirectoryTool return ListDirectoryTool
def _import_file_management_ListDirectoryTool() ->Any:
    """Lazily import ListDirectoryTool so langchain_community stays optional."""
    from langchain_community.tools.file_management import ListDirectoryTool
    return ListDirectoryTool
null
chain
...
@overload
def chain(func: Callable[[Input], Coroutine[Any, Any, Output]]) ->Runnable[
    Input, Output]:
    # Overload: decorating an async function yields a Runnable[Input, Output].
    ...
null
is_api_accessible
try: response = requests.get(url) return response.status_code == 200 except Exception: return False
def is_api_accessible(url: str, timeout: float = 5.0) -> bool:
    """Return True if a GET request to ``url`` answers with HTTP 200.

    Args:
        url: Endpoint to probe.
        timeout: Seconds to wait for the response. Defaults to 5.0 so the
            probe can no longer hang indefinitely (requests has no default
            timeout), while staying backward-compatible for existing callers.
    """
    try:
        response = requests.get(url, timeout=timeout)
        return response.status_code == 200
    except Exception:
        # Any failure (DNS, refused connection, timeout) means "not accessible".
        return False
null
_import_azure_cognitive_services_AzureCogsText2SpeechTool
from langchain_community.tools.azure_cognitive_services import AzureCogsText2SpeechTool return AzureCogsText2SpeechTool
def _import_azure_cognitive_services_AzureCogsText2SpeechTool() ->Any:
    """Lazily import AzureCogsText2SpeechTool so langchain_community stays optional."""
    from langchain_community.tools.azure_cognitive_services import AzureCogsText2SpeechTool
    return AzureCogsText2SpeechTool
null
_get_embedding_dimension
return len(self._get_embedding(text='This is a sample sentence.'))
def _get_embedding_dimension(self) ->int: return len(self._get_embedding(text='This is a sample sentence.'))
null
test_meilisearch_with_metadatas_with_scores
"""Test end to end construction and scored search.""" texts = ['foo', 'bar', 'baz'] metadatas = [{'page': str(i)} for i in range(len(texts))] docsearch = Meilisearch.from_texts(texts=texts, embedding=FakeEmbeddings(), url=TEST_MEILI_HTTP_ADDR, api_key=TEST_MEILI_MASTER_KEY, index_name= INDEX_NAME, metadatas=metadatas) self._wait_last_task() output = docsearch.similarity_search_with_score('foo', k=1) assert output == [(Document(page_content='foo', metadata={'page': '0'}), 9.0)]
def test_meilisearch_with_metadatas_with_scores(self) -> None:
    """Test end to end construction and scored search."""
    corpus = ['foo', 'bar', 'baz']
    page_tags = [{'page': str(position)} for position in range(len(corpus))]
    store = Meilisearch.from_texts(texts=corpus, embedding=FakeEmbeddings(),
        url=TEST_MEILI_HTTP_ADDR, api_key=TEST_MEILI_MASTER_KEY,
        index_name=INDEX_NAME, metadatas=page_tags)
    self._wait_last_task()
    hits = store.similarity_search_with_score('foo', k=1)
    assert hits == [(Document(page_content='foo', metadata={'page': '0'}), 9.0)]
Test end to end construction and scored search.
_call
"""Generate Cypher statement, use it to look up in db and answer question.""" _run_manager = run_manager or CallbackManagerForChainRun.get_noop_manager() callbacks = _run_manager.get_child() question = inputs[self.input_key] intermediate_steps: List = [] generated_cypher = self.cypher_generation_chain.run({'question': question, 'schema': self.graph.schema}, callbacks=callbacks) generated_cypher = extract_cypher(generated_cypher) _run_manager.on_text('Generated Cypher:', end='\n', verbose=self.verbose) _run_manager.on_text(generated_cypher, color='green', end='\n', verbose= self.verbose) intermediate_steps.append({'query': generated_cypher}) context = self.graph.query(generated_cypher)[:self.top_k] if self.return_direct: final_result = context else: _run_manager.on_text('Full Context:', end='\n', verbose=self.verbose) _run_manager.on_text(str(context), color='green', end='\n', verbose= self.verbose) intermediate_steps.append({'context': context}) result = self.qa_chain({'question': question, 'context': context}, callbacks=callbacks) final_result = result[self.qa_chain.output_key] chain_result: Dict[str, Any] = {self.output_key: final_result} if self.return_intermediate_steps: chain_result[INTERMEDIATE_STEPS_KEY] = intermediate_steps return chain_result
def _call(self, inputs: Dict[str, Any], run_manager: Optional[
    CallbackManagerForChainRun]=None) ->Dict[str, Any]:
    """Generate Cypher statement, use it to look up in db and answer question.

    Args:
        inputs: Chain inputs; must contain the question under ``self.input_key``.
        run_manager: Optional callback manager for this run.

    Returns:
        Dict with the answer under ``self.output_key`` and, when
        ``return_intermediate_steps`` is set, the generated query and query
        context under ``INTERMEDIATE_STEPS_KEY``.
    """
    _run_manager = run_manager or CallbackManagerForChainRun.get_noop_manager()
    callbacks = _run_manager.get_child()
    question = inputs[self.input_key]
    intermediate_steps: List = []
    # Step 1: have the LLM translate the question into a Cypher query.
    generated_cypher = self.cypher_generation_chain.run({'question':
        question, 'schema': self.graph.schema}, callbacks=callbacks)
    # extract_cypher presumably strips surrounding markup from the LLM
    # output — confirm in the helper.
    generated_cypher = extract_cypher(generated_cypher)
    _run_manager.on_text('Generated Cypher:', end='\n', verbose=self.verbose)
    _run_manager.on_text(generated_cypher, color='green', end='\n', verbose
        =self.verbose)
    intermediate_steps.append({'query': generated_cypher})
    # Step 2: run the query, keeping at most top_k rows as context.
    context = self.graph.query(generated_cypher)[:self.top_k]
    if self.return_direct:
        # Skip the QA step and hand back the raw query results.
        final_result = context
    else:
        _run_manager.on_text('Full Context:', end='\n', verbose=self.verbose)
        _run_manager.on_text(str(context), color='green', end='\n', verbose
            =self.verbose)
        intermediate_steps.append({'context': context})
        # Step 3: let the QA chain phrase an answer from the retrieved context.
        result = self.qa_chain({'question': question, 'context': context},
            callbacks=callbacks)
        final_result = result[self.qa_chain.output_key]
    chain_result: Dict[str, Any] = {self.output_key: final_result}
    if self.return_intermediate_steps:
        chain_result[INTERMEDIATE_STEPS_KEY] = intermediate_steps
    return chain_result
Generate Cypher statement, use it to look up in db and answer question.