method_name
stringlengths
1
78
method_body
stringlengths
3
9.66k
full_code
stringlengths
31
10.7k
docstring
stringlengths
4
4.74k
from_llm_and_tools
"""Construct an agent from an LLM and tools.""" cls._validate_tools(tools) prompt = cls.create_prompt(tools, prefix=prefix, suffix=suffix, format_instructions=format_instructions, input_variables=input_variables) llm_chain = LLMChain(llm=llm, prompt=prompt, callback_manager=callback_manager) tool_names = [tool.name...
@classmethod def from_llm_and_tools(cls, llm: BaseLanguageModel, tools: Sequence[ BaseTool], callback_manager: Optional[BaseCallbackManager]=None, output_parser: Optional[AgentOutputParser]=None, prefix: str=PREFIX, suffix: str=SUFFIX, format_instructions: str=FORMAT_INSTRUCTIONS, input_variables: Optio...
Construct an agent from an LLM and tools.
__init__
"""Initialize the spacy text splitter.""" super().__init__(**kwargs) self._tokenizer = _make_spacy_pipeline_for_splitting(pipeline, max_length= max_length) self._separator = separator
def __init__(self, separator: str='\n\n', pipeline: str='en_core_web_sm', max_length: int=1000000, **kwargs: Any) ->None: """Initialize the spacy text splitter.""" super().__init__(**kwargs) self._tokenizer = _make_spacy_pipeline_for_splitting(pipeline, max_length=max_length) self._separator...
Initialize the spacy text splitter.
test_create_lc_store
"""Test that a docstore is created from a base store.""" docstore = create_lc_store(file_store) docstore.mset([('key1', Document(page_content='hello', metadata={'key': 'value'}))]) fetched_doc = cast(Document, docstore.mget(['key1'])[0]) assert fetched_doc.page_content == 'hello' assert fetched_doc.metadata == {'ke...
def test_create_lc_store(file_store: LocalFileStore) ->None: """Test that a docstore is created from a base store.""" docstore = create_lc_store(file_store) docstore.mset([('key1', Document(page_content='hello', metadata={'key': 'value'}))]) fetched_doc = cast(Document, docstore.mget(['key1'])[0...
Test that a docstore is created from a base store.
_request
res = requests.request(method, url, headers=self.headers, json=query_dict, timeout=self.request_timeout_sec) res.raise_for_status() return res.json()
def _request(self, url: str, method: str='GET', query_dict: Dict[str, Any]={} ) ->Any: res = requests.request(method, url, headers=self.headers, json= query_dict, timeout=self.request_timeout_sec) res.raise_for_status() return res.json()
null
_llm_type
"""Return type of chat model.""" return 'volc-engine-maas-chat'
@property def _llm_type(self) ->str: """Return type of chat model.""" return 'volc-engine-maas-chat'
Return type of chat model.
_has_env_vars
return all(['ASTRA_DB_APPLICATION_TOKEN' in os.environ, 'ASTRA_DB_API_ENDPOINT' in os.environ])
def _has_env_vars() ->bool: return all(['ASTRA_DB_APPLICATION_TOKEN' in os.environ, 'ASTRA_DB_API_ENDPOINT' in os.environ])
null
has_mul_sub_str
""" Check if a string contains multiple substrings. Args: s: string to check. *args: substrings to check. Returns: True if all substrings are in the string, False otherwise. """ for a in args: if a not in s: return False return True
def has_mul_sub_str(s: str, *args: Any) ->bool: """ Check if a string contains multiple substrings. Args: s: string to check. *args: substrings to check. Returns: True if all substrings are in the string, False otherwise. """ for a in args: if a not in s: ...
Check if a string contains multiple substrings. Args: s: string to check. *args: substrings to check. Returns: True if all substrings are in the string, False otherwise.
intermediate_steps
return [(AgentAction(tool='Foo', tool_input='Bar', log= 'Star date 2021-06-13: Foo received input: Bar'), 'Baz')]
@pytest.fixture def intermediate_steps() ->List[Tuple[AgentAction, str]]: return [(AgentAction(tool='Foo', tool_input='Bar', log= 'Star date 2021-06-13: Foo received input: Bar'), 'Baz')]
null
test_beam_call
"""Test valid call to Beam.""" llm = Beam(model_name='gpt2', name='langchain-gpt2', cpu=8, memory='32Gi', gpu='A10G', python_version='python3.8', python_packages=[ 'diffusers[torch]>=0.10', 'transformers', 'torch', 'pillow', 'accelerate', 'safetensors', 'xformers'], max_length='5') llm._deploy() output = ll...
def test_beam_call() ->None: """Test valid call to Beam.""" llm = Beam(model_name='gpt2', name='langchain-gpt2', cpu=8, memory= '32Gi', gpu='A10G', python_version='python3.8', python_packages=[ 'diffusers[torch]>=0.10', 'transformers', 'torch', 'pillow', 'accelerate', 'safetensors', 'xfo...
Test valid call to Beam.
test_from_texts_cosine_distance
texts = ['Dogs are tough.', 'Cats have fluff.', 'What is a sandwich?', 'That fence is purple.'] vectorstore = AzureCosmosDBVectorSearch.from_texts(texts, azure_openai_embeddings, collection=collection, index_name=INDEX_NAME) vectorstore.create_index(num_lists, dimensions, similarity_algorithm) sleep(2) output =...
def test_from_texts_cosine_distance(self, azure_openai_embeddings: OpenAIEmbeddings, collection: Any) ->None: texts = ['Dogs are tough.', 'Cats have fluff.', 'What is a sandwich?', 'That fence is purple.'] vectorstore = AzureCosmosDBVectorSearch.from_texts(texts, azure_openai_embeddings, col...
null
__init__
self.test_name = test_name self.envvars = envvars self.expected_project_name = expected_project_name
def __init__(self, test_name: str, envvars: Dict[str, str], expected_project_name: str): self.test_name = test_name self.envvars = envvars self.expected_project_name = expected_project_name
null
mock_create_project
proj = mock.MagicMock() proj.id = '123' return proj
def mock_create_project(*args: Any, **kwargs: Any) ->Any: proj = mock.MagicMock() proj.id = '123' return proj
null
_get_folder_path
"""Get the folder path to save the Documents in. Args: soup: BeautifulSoup4 soup object. Returns: Folder path. """ course_name = soup.find('span', {'id': 'crumb_1'}) if course_name is None: raise ValueError('No course name found.') course_name = course_name.text.str...
def _get_folder_path(self, soup: Any) ->str: """Get the folder path to save the Documents in. Args: soup: BeautifulSoup4 soup object. Returns: Folder path. """ course_name = soup.find('span', {'id': 'crumb_1'}) if course_name is None: raise ValueErro...
Get the folder path to save the Documents in. Args: soup: BeautifulSoup4 soup object. Returns: Folder path.
_import_edenai_EdenAiTextModerationTool
from langchain_community.tools.edenai import EdenAiTextModerationTool return EdenAiTextModerationTool
def _import_edenai_EdenAiTextModerationTool() ->Any: from langchain_community.tools.edenai import EdenAiTextModerationTool return EdenAiTextModerationTool
null
__init__
"""Initialize with AwaDB client. If table_name is not specified, a random table name of `_DEFAULT_TABLE_NAME + last segment of uuid` would be created automatically. Args: table_name: Name of the table created, default _DEFAULT_TABLE_NAME. embedding: Opti...
def __init__(self, table_name: str=_DEFAULT_TABLE_NAME, embedding: Optional [Embeddings]=None, log_and_data_dir: Optional[str]=None, client: Optional[awadb.Client]=None, **kwargs: Any) ->None: """Initialize with AwaDB client. If table_name is not specified, a random table name of `_DEF...
Initialize with AwaDB client. If table_name is not specified, a random table name of `_DEFAULT_TABLE_NAME + last segment of uuid` would be created automatically. Args: table_name: Name of the table created, default _DEFAULT_TABLE_NAME. embedding: Optional Embeddings initially set. log_and_data_dir...
on_agent_action
"""Run on agent action.""" self.step += 1 self.tool_starts += 1 self.starts += 1 resp: Dict[str, Any] = {} resp.update({'action': 'on_agent_action', 'tool': action.tool, 'tool_input': action.tool_input, 'log': action.log}) resp.update(self.get_custom_callback_meta()) self.deck.append(self.markdown_renderer().to_htm...
def on_agent_action(self, action: AgentAction, **kwargs: Any) ->Any: """Run on agent action.""" self.step += 1 self.tool_starts += 1 self.starts += 1 resp: Dict[str, Any] = {} resp.update({'action': 'on_agent_action', 'tool': action.tool, 'tool_input': action.tool_input, 'log': action.lo...
Run on agent action.
get_format_instructions
return self.parser.get_format_instructions()
def get_format_instructions(self) ->str: return self.parser.get_format_instructions()
null
test_csv_loader_load_single_column_file
file_path = self._get_csv_file_path('test_one_col.csv') expected_docs = [Document(page_content='column1: value1', metadata={ 'source': file_path, 'row': 0}), Document(page_content= 'column1: value2', metadata={'source': file_path, 'row': 1}), Document( page_content='column1: value3', metadata={'source': fil...
def test_csv_loader_load_single_column_file(self) ->None: file_path = self._get_csv_file_path('test_one_col.csv') expected_docs = [Document(page_content='column1: value1', metadata={ 'source': file_path, 'row': 0}), Document(page_content= 'column1: value2', metadata={'source': file_path, 'row': ...
null
test_add_texts_with_given_uuids
texts = ['foo', 'bar', 'baz'] embedding = FakeEmbeddings() uuids = [uuid.uuid5(uuid.NAMESPACE_DNS, text) for text in texts] docsearch = Weaviate.from_texts(texts, embedding=embedding, weaviate_url= weaviate_url, uuids=uuids) docsearch.add_texts(['foo'], uuids=[uuids[0]]) output = docsearch.similarity_search_by_vect...
def test_add_texts_with_given_uuids(self, weaviate_url: str) ->None: texts = ['foo', 'bar', 'baz'] embedding = FakeEmbeddings() uuids = [uuid.uuid5(uuid.NAMESPACE_DNS, text) for text in texts] docsearch = Weaviate.from_texts(texts, embedding=embedding, weaviate_url=weaviate_url, uuids=uuids) ...
null
parse
"""Parse into a plan."""
@abstractmethod def parse(self, text: str) ->Plan: """Parse into a plan."""
Parse into a plan.
test_add_embeddings
""" Test add_embeddings, which accepts pre-built embeddings instead of using inference for the texts. This allows you to separate the embeddings text and the page_content for better proximity between user's question and embedded text. For example, your embedding text can be a question, whereas pag...
def test_add_embeddings() ->None: """ Test add_embeddings, which accepts pre-built embeddings instead of using inference for the texts. This allows you to separate the embeddings text and the page_content for better proximity between user's question and embedded text. For example, your embeddi...
Test add_embeddings, which accepts pre-built embeddings instead of using inference for the texts. This allows you to separate the embeddings text and the page_content for better proximity between user's question and embedded text. For example, your embedding text can be a question, whereas page_content is the answer...
validate_environment
"""Validate that api key and python package exists in environment.""" values['openai_api_key'] = get_from_dict_or_env(values, 'openai_api_key', 'OPENAI_API_KEY') values['openai_organization'] = values['openai_organization'] or os.getenv( 'OPENAI_ORG_ID') or os.getenv('OPENAI_ORGANIZATION') or None values['opena...
@root_validator() def validate_environment(cls, values: Dict) ->Dict: """Validate that api key and python package exists in environment.""" values['openai_api_key'] = get_from_dict_or_env(values, 'openai_api_key', 'OPENAI_API_KEY') values['openai_organization'] = values['openai_organization'] or os....
Validate that api key and python package exists in environment.
getInternalTx
url = ( f'https://api.etherscan.io/api?module=account&action=txlistinternal&address={self.account_address}&startblock={self.start_block}&endblock={self.end_block}&page={self.page}&offset={self.offset}&sort={self.sort}&apikey={self.api_key}' ) try: response = requests.get(url) response.raise_for_status()...
def getInternalTx(self) ->List[Document]: url = ( f'https://api.etherscan.io/api?module=account&action=txlistinternal&address={self.account_address}&startblock={self.start_block}&endblock={self.end_block}&page={self.page}&offset={self.offset}&sort={self.sort}&apikey={self.api_key}' ) try: ...
null
add_texts
"""Run more texts through the embeddings and add to the vectorstore. Args: texts: Iterable of strings to add to the vectorstore. metadatas: Optional list of metadatas associated with the texts. doc_metadata: optional metadata for the document This function indexes a...
def add_texts(self, texts: Iterable[str], metadatas: Optional[List[dict]]= None, doc_metadata: Optional[dict]=None, **kwargs: Any) ->List[str]: """Run more texts through the embeddings and add to the vectorstore. Args: texts: Iterable of strings to add to the vectorstore. metada...
Run more texts through the embeddings and add to the vectorstore. Args: texts: Iterable of strings to add to the vectorstore. metadatas: Optional list of metadatas associated with the texts. doc_metadata: optional metadata for the document This function indexes all the input text strings in the Vectara co...
test_load
mocker.patch('requests.get', return_value=mocker.MagicMock(json=lambda : [], links=None)) loader = GitHubIssuesLoader(repo='repo', access_token='access_token') documents = loader.load() assert documents == []
def test_load(mocker: MockerFixture) ->None: mocker.patch('requests.get', return_value=mocker.MagicMock(json=lambda : [], links=None)) loader = GitHubIssuesLoader(repo='repo', access_token='access_token') documents = loader.load() assert documents == []
null
_llm_type
"""Return type of llm.""" return 'mosaic'
@property def _llm_type(self) ->str: """Return type of llm.""" return 'mosaic'
Return type of llm.
try_neq_default
"""Try to determine if a value is different from the default. Args: value: The value. key: The key. model: The model. Returns: Whether the value is different from the default. """ try: return model.__fields__[key].get_default() != value except Exception: return True
def try_neq_default(value: Any, key: str, model: BaseModel) ->bool: """Try to determine if a value is different from the default. Args: value: The value. key: The key. model: The model. Returns: Whether the value is different from the default. """ try: retur...
Try to determine if a value is different from the default. Args: value: The value. key: The key. model: The model. Returns: Whether the value is different from the default.
get_type
""" Retrieves the simplified schema type for a given original type. Args: type (str): The original schema type to find the simplified type for. Returns: str: The simplified schema type if it exists; otherwise, returns the original type. """ try:...
def get_type(self, type: str) ->str: """ Retrieves the simplified schema type for a given original type. Args: type (str): The original schema type to find the simplified type for. Returns: str: The simplified schema type if it exists; otherwise, re...
Retrieves the simplified schema type for a given original type. Args: type (str): The original schema type to find the simplified type for. Returns: str: The simplified schema type if it exists; otherwise, returns the original type.
test_quip_loader_load_data_by_folder_id
mock_quip.get_folder.side_effect = [self._get_mock_folder(self. MOCK_FOLDER_IDS[0])] mock_quip.get_thread.side_effect = [self._get_mock_thread(self. MOCK_THREAD_IDS[0]), self._get_mock_thread(self.MOCK_THREAD_IDS[1])] quip_loader = self._get_mock_quip_loader(mock_quip) documents = quip_loader.load(folder_ids=[s...
def test_quip_loader_load_data_by_folder_id(self, mock_quip: MagicMock) ->None: mock_quip.get_folder.side_effect = [self._get_mock_folder(self. MOCK_FOLDER_IDS[0])] mock_quip.get_thread.side_effect = [self._get_mock_thread(self. MOCK_THREAD_IDS[0]), self._get_mock_thread(self.MOCK_THREAD_IDS[1])...
null
_on_chain_start
crumbs = self.get_breadcrumbs(run) run_type = run.run_type.capitalize() self.function_callback( f"{get_colored_text('[chain/start]', color='green')} " + get_bolded_text(f"""[{crumbs}] Entering {run_type} run with input: """) + f"{try_json_stringify(run.inputs, '[inputs]')}")
def _on_chain_start(self, run: Run) ->None: crumbs = self.get_breadcrumbs(run) run_type = run.run_type.capitalize() self.function_callback( f"{get_colored_text('[chain/start]', color='green')} " + get_bolded_text( f"""[{crumbs}] Entering {run_type} run with input: """) + f"{t...
null
embed_query
"""Compute query embeddings using Cloudflare Workers AI. Args: text: The text to embed. Returns: Embeddings for the text. """ text = text.replace('\n', ' ') if self.strip_new_lines else text response = requests.post( f'{self.api_base_url}/{self.account_id}/ai/run/{s...
def embed_query(self, text: str) ->List[float]: """Compute query embeddings using Cloudflare Workers AI. Args: text: The text to embed. Returns: Embeddings for the text. """ text = text.replace('\n', ' ') if self.strip_new_lines else text response = requests...
Compute query embeddings using Cloudflare Workers AI. Args: text: The text to embed. Returns: Embeddings for the text.
__init__
"""Initialize the sentence_transformer.""" super().__init__(project=project, location=location, credentials= credentials, request_parallelism=request_parallelism, max_retries= max_retries, model_name=model_name, **kwargs) self.instance['max_batch_size'] = kwargs.get('max_batch_size', _MAX_BATCH_SIZE) self.insta...
def __init__(self, model_name: str='textembedding-gecko-default', project: Optional[str]=None, location: str='us-central1', request_parallelism: int=5, max_retries: int=6, credentials: Optional[Any]=None, **kwargs: Any): """Initialize the sentence_transformer.""" super().__init__(project=project, locati...
Initialize the sentence_transformer.
_on_run_create
if self.root_id is not None: return self.root_id = run.id if self._arg_on_start is not None: call_func_with_variable_args(self._arg_on_start, run, self.config)
def _on_run_create(self, run: Run) ->None: if self.root_id is not None: return self.root_id = run.id if self._arg_on_start is not None: call_func_with_variable_args(self._arg_on_start, run, self.config)
null
add_texts
"""Run more texts through the embeddings and add to the vectorstore. Args: texts (Iterable[str]): Texts to add to the vectorstore. metadatas (Optional[List[dict]], optional): Optional list of metadatas. ids (Optional[List[str]], optional): Optional list of IDs. Retu...
def add_texts(self, texts: Iterable[str], metadatas: Optional[List[dict]]= None, ids: Optional[List[str]]=None, **kwargs: Any) ->List[str]: """Run more texts through the embeddings and add to the vectorstore. Args: texts (Iterable[str]): Texts to add to the vectorstore. metadata...
Run more texts through the embeddings and add to the vectorstore. Args: texts (Iterable[str]): Texts to add to the vectorstore. metadatas (Optional[List[dict]], optional): Optional list of metadatas. ids (Optional[List[str]], optional): Optional list of IDs. Returns: List[str]: List of IDs of the adde...
raise_error
raise ValueError(f"Metadata value for key '{key}' must be a string, int, " + f'float, or list of strings. Got {type(value).__name__}')
def raise_error(key: str, value: Any) ->None: raise ValueError( f"Metadata value for key '{key}' must be a string, int, " + f'float, or list of strings. Got {type(value).__name__}')
null
from_texts
embeddings = embedding.embed_documents(list(texts)) return cls._initialize_from_embeddings(texts, embeddings, embedding, metadatas=metadatas, ids=ids, collection_name=collection_name, pre_delete_collection=pre_delete_collection, **kwargs)
@classmethod def from_texts(cls: Type[PGEmbedding], texts: List[str], embedding: Embeddings, metadatas: Optional[List[dict]]=None, collection_name: str= _LANGCHAIN_DEFAULT_COLLECTION_NAME, ids: Optional[List[str]]=None, pre_delete_collection: bool=False, **kwargs: Any) ->PGEmbedding: embeddings = embedd...
null
clear
self.messages = []
def clear(self) ->None: self.messages = []
null
get_sqlite_cache
return SQLAlchemyCache(engine=create_engine('sqlite://'))
def get_sqlite_cache() ->SQLAlchemyCache: return SQLAlchemyCache(engine=create_engine('sqlite://'))
null
_llm_type
return 'fake-chat-model'
@property def _llm_type(self) ->str: return 'fake-chat-model'
null
test_marqo_with_metadatas
"""Test end to end construction and search.""" texts = ['foo', 'bar', 'baz'] metadatas = [{'page': i} for i in range(len(texts))] marqo_search = Marqo.from_texts(texts=texts, metadatas=metadatas, index_name=INDEX_NAME, url=DEFAULT_MARQO_URL, api_key= DEFAULT_MARQO_API_KEY, verbose=False) results = marqo_search....
def test_marqo_with_metadatas(client: Marqo) ->None: """Test end to end construction and search.""" texts = ['foo', 'bar', 'baz'] metadatas = [{'page': i} for i in range(len(texts))] marqo_search = Marqo.from_texts(texts=texts, metadatas=metadatas, index_name=INDEX_NAME, url=DEFAULT_MARQO_URL, a...
Test end to end construction and search.
ast_parse
filter = cast(Optional[FilterDirective], get_parser().parse_folder(raw_filter)) fixed = fix_filter_directive(filter, allowed_comparators= allowed_comparators, allowed_operators=allowed_operators, allowed_attributes=allowed_attributes) return fixed
def ast_parse(raw_filter: str) ->Optional[FilterDirective]: filter = cast(Optional[FilterDirective], get_parser().parse_folder( raw_filter)) fixed = fix_filter_directive(filter, allowed_comparators= allowed_comparators, allowed_operators=allowed_operators, allowed_attributes=allowed_attr...
null
_type
"""Return the type key.""" return 'regex_parser'
@property def _type(self) ->str: """Return the type key.""" return 'regex_parser'
Return the type key.
validate_environment
"""Validate that api key and python package exists in environment.""" openai_api_key = get_from_dict_or_env(values, 'openai_api_key', 'OPENAI_API_KEY') openai_api_base = get_from_dict_or_env(values, 'openai_api_base', 'OPENAI_API_BASE', default='') openai_proxy = get_from_dict_or_env(values, 'openai_proxy', 'OP...
@root_validator() def validate_environment(cls, values: Dict) ->Dict: """Validate that api key and python package exists in environment.""" openai_api_key = get_from_dict_or_env(values, 'openai_api_key', 'OPENAI_API_KEY') openai_api_base = get_from_dict_or_env(values, 'openai_api_base', 'OPE...
Validate that api key and python package exists in environment.
_get_relevant_documents
return self.retriever.get_relevant_documents(query, run_manager=run_manager .get_child(), **kwargs)
def _get_relevant_documents(self, query: str, *, run_manager: CallbackManagerForRetrieverRun, **kwargs: Any) ->List[Document]: return self.retriever.get_relevant_documents(query, run_manager= run_manager.get_child(), **kwargs)
null
map
if not run.outputs: raise ValueError(f'Run {run.id} has no outputs to evaluate.') return {'input': run.inputs['input'], 'prediction': run.outputs['output']}
def map(self, run: Run) ->Dict[str, str]: if not run.outputs: raise ValueError(f'Run {run.id} has no outputs to evaluate.') return {'input': run.inputs['input'], 'prediction': run.outputs['output']}
null
_import_requests_tool_BaseRequestsTool
from langchain_community.tools.requests.tool import BaseRequestsTool return BaseRequestsTool
def _import_requests_tool_BaseRequestsTool() ->Any: from langchain_community.tools.requests.tool import BaseRequestsTool return BaseRequestsTool
null
check_voice_models_key_is_provider_name
for key in values.get('voice_models', {}).keys(): if key not in values.get('providers', []): raise ValueError( 'voice_model should be formatted like this {<provider_name>: <its_voice_model>}' ) return values
@root_validator def check_voice_models_key_is_provider_name(cls, values: dict) ->dict: for key in values.get('voice_models', {}).keys(): if key not in values.get('providers', []): raise ValueError( 'voice_model should be formatted like this {<provider_name>: <its_voice_model>}' ...
null
input_keys
"""Expect input key. :meta private: """ return [self.input_key]
@property def input_keys(self) ->List[str]: """Expect input key. :meta private: """ return [self.input_key]
Expect input key. :meta private:
_create_chat_result
message = _convert_dict_to_message(response['choices'][0].get('message')) generations = [ChatGeneration(message=message)] token_usage = response['usage'] llm_output = {'token_usage': token_usage, 'model': self.model} return ChatResult(generations=generations, llm_output=llm_output)
def _create_chat_result(self, response: Mapping[str, Any]) ->ChatResult: message = _convert_dict_to_message(response['choices'][0].get('message')) generations = [ChatGeneration(message=message)] token_usage = response['usage'] llm_output = {'token_usage': token_usage, 'model': self.model} return Cha...
null
__init__
"""Initialize with a Pandas DataFrame containing chat logs. Args: chat_log: Pandas DataFrame containing chat logs. user_id_col: Name of the column containing the user ID. Defaults to "ID". """ if not isinstance(chat_log, pd.DataFrame): raise ValueError( f'Expected ch...
def __init__(self, chat_log: pd.DataFrame, user_id_col: str='ID'): """Initialize with a Pandas DataFrame containing chat logs. Args: chat_log: Pandas DataFrame containing chat logs. user_id_col: Name of the column containing the user ID. Defaults to "ID". """ if not isin...
Initialize with a Pandas DataFrame containing chat logs. Args: chat_log: Pandas DataFrame containing chat logs. user_id_col: Name of the column containing the user ID. Defaults to "ID".
_get_embeddings_from_stateful_docs
if len(documents) and 'embedded_doc' in documents[0].state: embedded_documents = [doc.state['embedded_doc'] for doc in documents] else: embedded_documents = embeddings.embed_documents([d.page_content for d in documents]) for doc, embedding in zip(documents, embedded_documents): doc.state['em...
def _get_embeddings_from_stateful_docs(embeddings: Embeddings, documents: Sequence[_DocumentWithState]) ->List[List[float]]: if len(documents) and 'embedded_doc' in documents[0].state: embedded_documents = [doc.state['embedded_doc'] for doc in documents] else: embedded_documents = embeddings...
null
test_non_faker_values
"""Test anonymizing multiple items in a sentence without faker values""" from langchain_experimental.data_anonymizer import PresidioAnonymizer text = ( 'My name is John Smith. Your name is Adam Smith. Her name is Jane Smith.Our names are: John Smith, Adam Smith, Jane Smith.' ) expected_result = ( 'My name i...
@pytest.mark.requires('presidio_analyzer', 'presidio_anonymizer', 'faker') def test_non_faker_values() ->None: """Test anonymizing multiple items in a sentence without faker values""" from langchain_experimental.data_anonymizer import PresidioAnonymizer text = ( 'My name is John Smith. Your name is ...
Test anonymizing multiple items in a sentence without faker values
get_default_document_variable_name
"""Get default document variable name, if not provided.""" if 'document_variable_name' not in values: llm_chain_variables = values['llm_chain'].prompt.input_variables if len(llm_chain_variables) == 1: values['document_variable_name'] = llm_chain_variables[0] else: raise ValueError( ...
@root_validator(pre=True) def get_default_document_variable_name(cls, values: Dict) ->Dict: """Get default document variable name, if not provided.""" if 'document_variable_name' not in values: llm_chain_variables = values['llm_chain'].prompt.input_variables if len(llm_chain_variables) == 1: ...
Get default document variable name, if not provided.
_type
return 'chat'
@property def _type(self) ->str: return 'chat'
null
on_llm_end_common
self.llm_ends += 1 self.ends += 1
def on_llm_end_common(self) ->None: self.llm_ends += 1 self.ends += 1
null
keys
"""Return the keys of the dict at the given path. Args: text: Python representation of the path to the dict (e.g. data["key1"][0]["key2"]). """ try: items = _parse_input(text) val = self.dict_ for i in items: if i: val = val[i] if not isinstance(val, dict...
def keys(self, text: str) ->str: """Return the keys of the dict at the given path. Args: text: Python representation of the path to the dict (e.g. data["key1"][0]["key2"]). """ try: items = _parse_input(text) val = self.dict_ for i in items: if i:...
Return the keys of the dict at the given path. Args: text: Python representation of the path to the dict (e.g. data["key1"][0]["key2"]).
test_custom_prefixes
intermediate_steps = [(AgentAction(tool='Tool1', tool_input='input1', log= 'Log1'), 'Observation1')] observation_prefix = 'Custom Observation: ' llm_prefix = 'Custom Thought: ' expected_result = """Log1 Custom Observation: Observation1 Custom Thought: """ assert format_log_to_str(intermediate_steps, observation_pre...
def test_custom_prefixes() ->None: intermediate_steps = [(AgentAction(tool='Tool1', tool_input='input1', log='Log1'), 'Observation1')] observation_prefix = 'Custom Observation: ' llm_prefix = 'Custom Thought: ' expected_result = ( 'Log1\nCustom Observation: Observation1\nCustom Thought: ...
null
_import_databricks_chat
from langchain_community.chat_models.databricks import ChatDatabricks return ChatDatabricks
def _import_databricks_chat() ->Any: from langchain_community.chat_models.databricks import ChatDatabricks return ChatDatabricks
null
test_chat_generation_chunk
assert ChatGenerationChunk(message=HumanMessageChunk(content='Hello, ') ) + ChatGenerationChunk(message=HumanMessageChunk(content='world!') ) == ChatGenerationChunk(message=HumanMessageChunk(content='Hello, world!') ), 'ChatGenerationChunk + ChatGenerationChunk should be a ChatGenerationChunk' assert ChatGe...
def test_chat_generation_chunk() ->None: assert ChatGenerationChunk(message=HumanMessageChunk(content='Hello, ') ) + ChatGenerationChunk(message=HumanMessageChunk(content='world!') ) == ChatGenerationChunk(message=HumanMessageChunk(content= 'Hello, world!') ), 'ChatGenerationChunk + ...
null
_import_sqlitevss
from langchain_community.vectorstores.sqlitevss import SQLiteVSS return SQLiteVSS
def _import_sqlitevss() ->Any: from langchain_community.vectorstores.sqlitevss import SQLiteVSS return SQLiteVSS
null
max_marginal_relevance_search
"""Return docs selected using the maximal marginal relevance. Maximal marginal relevance optimizes for similarity to query AND diversity among selected documents. Args: query: Text to look up documents similar to. k: Number of Documents to return. Defaults to 4. ...
def max_marginal_relevance_search(self, query: str, k: int=4, fetch_k: int= 20, lambda_mult: float=0.5, **kwargs: Any) ->List[Document]: """Return docs selected using the maximal marginal relevance. Maximal marginal relevance optimizes for similarity to query AND diversity among selected docume...
Return docs selected using the maximal marginal relevance. Maximal marginal relevance optimizes for similarity to query AND diversity among selected documents. Args: query: Text to look up documents similar to. k: Number of Documents to return. Defaults to 4. fetch_k: Number of Documents to fetch to pass ...
_import_google_trends
from langchain_community.utilities.google_trends import GoogleTrendsAPIWrapper return GoogleTrendsAPIWrapper
def _import_google_trends() ->Any: from langchain_community.utilities.google_trends import GoogleTrendsAPIWrapper return GoogleTrendsAPIWrapper
null
__init__
try: from databricks.vector_search.client import VectorSearchIndex except ImportError as e: raise ImportError( 'Could not import databricks-vectorsearch python package. Please install it with `pip install databricks-vectorsearch`.' ) from e self.index = index if not isinstance(index, VectorSearc...
def __init__(self, index: VectorSearchIndex, *, embedding: Optional[ Embeddings]=None, text_column: Optional[str]=None, columns: Optional[ List[str]]=None): try: from databricks.vector_search.client import VectorSearchIndex except ImportError as e: raise ImportError( 'Could n...
null
test_delete_keys
"""Test deleting keys from the database.""" keys = ['key1', 'key2', 'key3'] manager.update(keys) keys_to_delete = ['key1', 'key2'] manager.delete_keys(keys_to_delete) remaining_keys = manager.list_keys() assert remaining_keys == ['key3']
def test_delete_keys(manager: SQLRecordManager) ->None: """Test deleting keys from the database.""" keys = ['key1', 'key2', 'key3'] manager.update(keys) keys_to_delete = ['key1', 'key2'] manager.delete_keys(keys_to_delete) remaining_keys = manager.list_keys() assert remaining_keys == ['key3'...
Test deleting keys from the database.
delete
if ids is None: raise ValueError('No document ids provided to delete.') for document_id in ids: self.delete_document_by_id(document_id) return True
def delete(self, ids: Optional[List[str]] = None, **kwargs: Any) -> Optional[bool]:
    """Delete documents from the store by their ids.

    Args:
        ids: Ids of the documents to delete.
        **kwargs: Unused; accepted for interface compatibility.

    Returns:
        True once all requested documents have been removed.

    Raises:
        ValueError: If ``ids`` is None.
    """
    if ids is None:
        raise ValueError('No document ids provided to delete.')
    # Remove each document individually through the backend helper.
    for doc_id in ids:
        self.delete_document_by_id(doc_id)
    return True
null
deeplake_datastore
texts = ['foo', 'bar', 'baz'] metadatas = [{'page': str(i)} for i in range(len(texts))] docsearch = DeepLake.from_texts(dataset_path='./test_path', texts=texts, metadatas=metadatas, embedding_function=FakeEmbeddings(), overwrite=True) return docsearch
@pytest.fixture def deeplake_datastore() ->DeepLake: texts = ['foo', 'bar', 'baz'] metadatas = [{'page': str(i)} for i in range(len(texts))] docsearch = DeepLake.from_texts(dataset_path='./test_path', texts=texts, metadatas=metadatas, embedding_function=FakeEmbeddings(), overwrite =True) ...
null
_log_visualizations
if not (self.visualizations and self.nlp): return spacy = import_spacy() prompts = session_df['prompts'].tolist() outputs = session_df['text'].tolist() for idx, (prompt, output) in enumerate(zip(prompts, outputs)): doc = self.nlp(output) sentence_spans = list(doc.sents) for visualization in self.visuali...
def _log_visualizations(self, session_df: Any) ->None: if not (self.visualizations and self.nlp): return spacy = import_spacy() prompts = session_df['prompts'].tolist() outputs = session_df['text'].tolist() for idx, (prompt, output) in enumerate(zip(prompts, outputs)): doc = self.nlp...
null
_completion_with_retry
if embeddings_type and self.instance['embeddings_task_type_supported']: from vertexai.language_models import TextEmbeddingInput requests = [TextEmbeddingInput(text=t, task_type=embeddings_type) for t in texts_to_process] else: requests = texts_to_process embeddings = self.client.get_embeddings(reque...
@retry_decorator def _completion_with_retry(texts_to_process: List[str]) ->Any: if embeddings_type and self.instance['embeddings_task_type_supported']: from vertexai.language_models import TextEmbeddingInput requests = [TextEmbeddingInput(text=t, task_type=embeddings_type) for t in texts...
null
output_keys
"""Return output keys. :meta private: """ return self.output_variables
@property
def output_keys(self) -> List[str]:
    """Keys this chain produces; mirrors ``output_variables``.

    :meta private:
    """
    return self.output_variables
Return output keys. :meta private:
_get_input_output
if self.input_key is None: prompt_input_key = get_prompt_input_key(inputs, self.memory_variables) else: prompt_input_key = self.input_key if self.output_key is None: if len(outputs) != 1: raise ValueError(f'One output key expected, got {outputs.keys()}') output_key = list(outputs.keys())[0] else...
def _get_input_output(self, inputs: Dict[str, Any], outputs: Dict[str, str] ) ->Tuple[str, str]: if self.input_key is None: prompt_input_key = get_prompt_input_key(inputs, self.memory_variables) else: prompt_input_key = self.input_key if self.output_key is None: if len(outputs) !...
null
_chain_type
return 'nat_bot_chain'
@property
def _chain_type(self) -> str:
    """Identifier used when serializing this chain."""
    return 'nat_bot_chain'
null
add_operators
"""Add operators to the anonymizer Args: operators: Operators to add to the anonymizer. """ self.operators.update(operators)
def add_operators(self, operators: Dict[str, OperatorConfig]) -> None:
    """Register additional anonymization operators.

    Args:
        operators: Mapping of entity type to operator configuration;
            an entry whose key already exists replaces the previous
            operator, matching ``dict.update`` semantics.
    """
    for entity, config in operators.items():
        self.operators[entity] = config
Add operators to the anonymizer Args: operators: Operators to add to the anonymizer.
process_index_results
"""Turns annoy results into a list of documents and scores. Args: idxs: List of indices of the documents in the index. dists: List of distances of the documents in the index. Returns: List of Documents and scores. """ docs = [] for idx, dist in zip(idxs, dist...
def process_index_results(self, idxs: List[int], dists: List[float]) ->List[ Tuple[Document, float]]: """Turns annoy results into a list of documents and scores. Args: idxs: List of indices of the documents in the index. dists: List of distances of the documents in the index. ...
Turns annoy results into a list of documents and scores. Args: idxs: List of indices of the documents in the index. dists: List of distances of the documents in the index. Returns: List of Documents and scores.
_IfExp
self.write('(') self.dispatch(t.body) self.write(' if ') self.dispatch(t.test) self.write(' else ') self.dispatch(t.orelse) self.write(')')
def _IfExp(self, t):
    """Emit a parenthesized conditional: ``(body if test else orelse)``."""
    write, dispatch = self.write, self.dispatch
    write('(')
    dispatch(t.body)
    write(' if ')
    dispatch(t.test)
    write(' else ')
    dispatch(t.orelse)
    write(')')
null
format_messages
"""Format kwargs into a list of messages."""
@abstractmethod
def format_messages(self, **kwargs: Any) -> List[BaseMessage]:
    """Format the supplied keyword arguments into chat messages.

    Args:
        **kwargs: Values to substitute into the prompt.

    Returns:
        The resulting list of messages.
    """
Format kwargs into a list of messages.
_build_sugiyama_layout
try: from grandalf.graphs import Edge, Graph, Vertex from grandalf.layouts import SugiyamaLayout from grandalf.routing import EdgeViewer, route_with_lines except ImportError: print('Install grandalf to draw graphs. `pip install grandalf`') raise vertices_ = {id: Vertex(f' {data} ') for id, data in v...
def _build_sugiyama_layout(vertices: Mapping[str, str], edges: Sequence[ Tuple[str, str]]) ->Any: try: from grandalf.graphs import Edge, Graph, Vertex from grandalf.layouts import SugiyamaLayout from grandalf.routing import EdgeViewer, route_with_lines except ImportError: pri...
null
test_chat_google_raises_with_invalid_temperature
pytest.importorskip('google.generativeai') with pytest.raises(ValueError) as e: ChatGooglePalm(google_api_key='fake', temperature=2.0) assert 'must be in the range' in str(e)
def test_chat_google_raises_with_invalid_temperature() -> None:
    """An out-of-range temperature must be rejected at construction time."""
    pytest.importorskip('google.generativeai')
    with pytest.raises(ValueError) as excinfo:
        ChatGooglePalm(google_api_key='fake', temperature=2.0)
    assert 'must be in the range' in str(excinfo)
null
lc_attributes
attributes: Dict[str, Any] = {} if self.openai_organization: attributes['openai_organization'] = self.openai_organization if self.openai_api_base: attributes['openai_api_base'] = self.openai_api_base if self.openai_proxy: attributes['openai_proxy'] = self.openai_proxy return attributes
@property def lc_attributes(self) ->Dict[str, Any]: attributes: Dict[str, Any] = {} if self.openai_organization: attributes['openai_organization'] = self.openai_organization if self.openai_api_base: attributes['openai_api_base'] = self.openai_api_base if self.openai_proxy: attrib...
null
_import_pgvector
from langchain_community.vectorstores.pgvector import PGVector return PGVector
def _import_pgvector() -> Any:
    """Lazily import and return the ``PGVector`` vector store class."""
    from langchain_community.vectorstores import pgvector

    return pgvector.PGVector
null
_parse_note_xml
"""Parse Evernote xml.""" try: from lxml import etree except ImportError as e: logger.error( 'Could not import `lxml`. Although it is not a required package to use Langchain, using the EverNote loader requires `lxml`. Please install `lxml` via `pip install lxml` and try again.' ) raise e con...
@staticmethod def _parse_note_xml(xml_file: str) ->Iterator[Dict[str, Any]]: """Parse Evernote xml.""" try: from lxml import etree except ImportError as e: logger.error( 'Could not import `lxml`. Although it is not a required package to use Langchain, using the EverNote loader re...
Parse Evernote xml.
get_message
return self.message
def get_message(self) -> str:
    """Return the message text carried by this object."""
    return self.message
null
validate_prompt
prompt: BasePromptTemplate = values['prompt'] if 'agent_scratchpad' not in prompt.input_variables: raise ValueError( f'`agent_scratchpad` should be one of the variables in the prompt, got {prompt.input_variables}' ) return values
@root_validator def validate_prompt(cls, values: dict) ->dict: prompt: BasePromptTemplate = values['prompt'] if 'agent_scratchpad' not in prompt.input_variables: raise ValueError( f'`agent_scratchpad` should be one of the variables in the prompt, got {prompt.input_variables}' ) ...
null
create_outputs
"""Create outputs from response.""" result = [{self.output_key: self.output_parser.parse_result(generation), 'full_generation': generation} for generation in llm_result.generations] if self.return_final_only: result = [{self.output_key: r[self.output_key]} for r in result] return result
def create_outputs(self, llm_result: LLMResult) ->List[Dict[str, Any]]: """Create outputs from response.""" result = [{self.output_key: self.output_parser.parse_result(generation), 'full_generation': generation} for generation in llm_result.generations ] if self.return_final_only: re...
Create outputs from response.
on_chain_end
"""Run when chain ends running.""" self.step += 1 self.chain_ends += 1 self.ends += 1 resp = self._init_resp() resp.update({'action': 'on_chain_end', 'outputs': outputs.get('output', outputs.get('text'))}) resp.update(self.get_custom_callback_meta()) self.on_chain_end_records.append(resp) self.action_records.append...
def on_chain_end(self, outputs: Dict[str, Any], **kwargs: Any) ->None: """Run when chain ends running.""" self.step += 1 self.chain_ends += 1 self.ends += 1 resp = self._init_resp() resp.update({'action': 'on_chain_end', 'outputs': outputs.get('output', outputs.get('text'))}) resp.up...
Run when chain ends running.
__repr_args__
return []
def __repr_args__(self) -> Any:
    """Hide all fields from ``repr`` by reporting an empty argument list."""
    return []
null
_get_tools_requests_put
return RequestsPutTool(requests_wrapper=TextRequestsWrapper())
def _get_tools_requests_put() -> BaseTool:
    """Build the PUT-request tool backed by a plain text requests wrapper."""
    wrapper = TextRequestsWrapper()
    return RequestsPutTool(requests_wrapper=wrapper)
null
_generate
"""Run the LLM on the given prompts."""
@abstractmethod
def _generate(self, prompts: List[str], stop: Optional[List[str]]=None,
        run_manager: Optional[CallbackManagerForLLMRun]=None, **kwargs: Any
        ) -> LLMResult:
    """Run the LLM on the given prompts.

    Args:
        prompts: The prompts to generate completions for.
        stop: Optional stop sequences that truncate generation.
        run_manager: Optional callback manager for this run.
        **kwargs: Additional provider-specific parameters.

    Returns:
        The generations produced for each prompt.
    """
Run the LLM on the given prompts.
output_keys
return [self.output_key]
@property
def output_keys(self) -> List[str]:
    """The single key under which this chain returns its output.

    :meta private:
    """
    return [self.output_key]
null
clear
"""Remove all messages from the store"""
@abstractmethod
def clear(self) -> None:
    """Delete every message currently held in the store."""
Remove all messages from the store
_is_relevant_code
"""Check if a line is part of the procedure division or a relevant section.""" if 'PROCEDURE DIVISION' in line.upper(): return True return False
def _is_relevant_code(self, line: str) -> bool:
    """Return True when the line contains the ``PROCEDURE DIVISION`` header."""
    # Case-insensitive match; COBOL source may be upper- or lower-cased.
    return 'PROCEDURE DIVISION' in line.upper()
Check if a line is part of the procedure division or a relevant section.
test_sparql_select
""" Test for generating and executing simple SPARQL SELECT query. """ berners_lee_card = 'http://www.w3.org/People/Berners-Lee/card' graph = RdfGraph(source_file=berners_lee_card, standard='rdf') chain = GraphSparqlQAChain.from_llm(OpenAI(temperature=0), graph=graph) output = chain.run("What is Tim Berners-Lee'...
def test_sparql_select() ->None: """ Test for generating and executing simple SPARQL SELECT query. """ berners_lee_card = 'http://www.w3.org/People/Berners-Lee/card' graph = RdfGraph(source_file=berners_lee_card, standard='rdf') chain = GraphSparqlQAChain.from_llm(OpenAI(temperature=0), graph=gr...
Test for generating and executing simple SPARQL SELECT query.
test_google_vertex_ai_multiturnsearch_get_relevant_documents
"""Test the get_relevant_documents() method.""" retriever = GoogleVertexAIMultiTurnSearchRetriever() documents = retriever.get_relevant_documents("What are Alphabet's Other Bets?") assert len(documents) > 0 for doc in documents: assert isinstance(doc, Document) assert doc.page_content assert doc.metadata['i...
@pytest.mark.requires('google.api_core') def test_google_vertex_ai_multiturnsearch_get_relevant_documents() ->None: """Test the get_relevant_documents() method.""" retriever = GoogleVertexAIMultiTurnSearchRetriever() documents = retriever.get_relevant_documents( "What are Alphabet's Other Bets?") ...
Test the get_relevant_documents() method.
load
"""Load given path as pages.""" return list(self.lazy_load())
def load(self) -> List[Document]:
    """Load given path as pages, materializing the lazy iterator."""
    return [*self.lazy_load()]
Load given path as pages.
_import_file_management_ListDirectoryTool
from langchain_community.tools.file_management import ListDirectoryTool return ListDirectoryTool
def _import_file_management_ListDirectoryTool() -> Any:
    """Lazily import and return the ``ListDirectoryTool`` class."""
    from langchain_community.tools import file_management

    return file_management.ListDirectoryTool
null
chain
...
@overload
def chain(func: Callable[[Input], Coroutine[Any, Any, Output]]) -> Runnable[
    Input, Output]:
    # Overload stub: an async (coroutine-returning) function is wrapped
    # into a Runnable awaiting its coroutine. Never executed at runtime.
    ...
null
is_api_accessible
try: response = requests.get(url) return response.status_code == 200 except Exception: return False
def is_api_accessible(url: str) -> bool:
    """Return True if a GET request to ``url`` answers with HTTP 200.

    Args:
        url: Endpoint to probe.

    Returns:
        True when the endpoint responds 200 OK; False on any error or
        non-200 status.
    """
    try:
        # Bound the probe with a timeout so the check cannot hang forever
        # on an unresponsive host (the original call had no timeout).
        response = requests.get(url, timeout=10)
    except Exception:
        # Best-effort probe: any failure (DNS, refused, timeout) means
        # "not accessible" rather than an error for the caller.
        return False
    return response.status_code == 200
null
_import_azure_cognitive_services_AzureCogsText2SpeechTool
from langchain_community.tools.azure_cognitive_services import AzureCogsText2SpeechTool return AzureCogsText2SpeechTool
def _import_azure_cognitive_services_AzureCogsText2SpeechTool() -> Any:
    """Lazily import and return the ``AzureCogsText2SpeechTool`` class."""
    from langchain_community.tools import azure_cognitive_services

    return azure_cognitive_services.AzureCogsText2SpeechTool
null
_get_embedding_dimension
return len(self._get_embedding(text='This is a sample sentence.'))
def _get_embedding_dimension(self) -> int:
    """Probe the embedding model once to learn its output dimensionality."""
    sample = self._get_embedding(text='This is a sample sentence.')
    return len(sample)
null
test_meilisearch_with_metadatas_with_scores
"""Test end to end construction and scored search.""" texts = ['foo', 'bar', 'baz'] metadatas = [{'page': str(i)} for i in range(len(texts))] docsearch = Meilisearch.from_texts(texts=texts, embedding=FakeEmbeddings(), url=TEST_MEILI_HTTP_ADDR, api_key=TEST_MEILI_MASTER_KEY, index_name= INDEX_NAME, metadatas=met...
def test_meilisearch_with_metadatas_with_scores(self) ->None: """Test end to end construction and scored search.""" texts = ['foo', 'bar', 'baz'] metadatas = [{'page': str(i)} for i in range(len(texts))] docsearch = Meilisearch.from_texts(texts=texts, embedding= FakeEmbeddings(), url=TEST_MEILI_...
Test end to end construction and scored search.
_call
"""Generate Cypher statement, use it to look up in db and answer question.""" _run_manager = run_manager or CallbackManagerForChainRun.get_noop_manager() callbacks = _run_manager.get_child() question = inputs[self.input_key] intermediate_steps: List = [] generated_cypher = self.cypher_generation_chain.run({'question': ...
def _call(self, inputs: Dict[str, Any], run_manager: Optional[ CallbackManagerForChainRun]=None) ->Dict[str, Any]: """Generate Cypher statement, use it to look up in db and answer question.""" _run_manager = run_manager or CallbackManagerForChainRun.get_noop_manager() callbacks = _run_manager.get_child(...
Generate Cypher statement, use it to look up in db and answer question.