method_name
stringlengths
1
78
method_body
stringlengths
3
9.66k
full_code
stringlengths
31
10.7k
docstring
stringlengths
4
4.74k
_construct_doc
"""Construct the contents of the reference.rst file for the given package. Args: package_namespace: The package top level namespace members_by_namespace: The members of the package, dict organized by top level module contains a list of classes and functions ...
def _construct_doc(package_namespace: str, members_by_namespace: Dict[str, ModuleMembers], package_version: str) ->str: """Construct the contents of the reference.rst file for the given package. Args: package_namespace: The package top level namespace members_by_namespace: The members of th...
Construct the contents of the reference.rst file for the given package. Args: package_namespace: The package top level namespace members_by_namespace: The members of the package, dict organized by top level module contains a list of classes and functions insi...
test_chat_hunyuan_with_temperature
chat = ChatHunyuan(temperature=0.6) message = HumanMessage(content='Hello') response = chat([message]) assert isinstance(response, AIMessage) assert isinstance(response.content, str)
def test_chat_hunyuan_with_temperature() ->None: chat = ChatHunyuan(temperature=0.6) message = HumanMessage(content='Hello') response = chat([message]) assert isinstance(response, AIMessage) assert isinstance(response.content, str)
null
__add__
if isinstance(other, FunctionMessageChunk): if self.name != other.name: raise ValueError( 'Cannot concatenate FunctionMessageChunks with different names.') return self.__class__(name=self.name, content=merge_content(self. content, other.content), additional_kwargs=self._merge_kwargs_...
def __add__(self, other: Any) ->BaseMessageChunk: if isinstance(other, FunctionMessageChunk): if self.name != other.name: raise ValueError( 'Cannot concatenate FunctionMessageChunks with different names.' ) return self.__class__(name=self.name, content=mer...
null
_get_embedding
return list(np.random.normal(size=self.size))
def _get_embedding(self) ->List[float]: return list(np.random.normal(size=self.size))
null
compress_documents
"""Transform a list of documents.""" for _transformer in self.transformers: if isinstance(_transformer, BaseDocumentCompressor): accepts_callbacks = signature(_transformer.compress_documents ).parameters.get('callbacks') is not None if accepts_callbacks: documents = _transfor...
def compress_documents(self, documents: Sequence[Document], query: str, callbacks: Optional[Callbacks]=None) ->Sequence[Document]: """Transform a list of documents.""" for _transformer in self.transformers: if isinstance(_transformer, BaseDocumentCompressor): accepts_callbacks = signatur...
Transform a list of documents.
_import_bing_search_tool_BingSearchRun
from langchain_community.tools.bing_search.tool import BingSearchRun return BingSearchRun
def _import_bing_search_tool_BingSearchRun() ->Any: from langchain_community.tools.bing_search.tool import BingSearchRun return BingSearchRun
null
test_mget
"""Test mget method.""" store = RedisStore(client=redis_client, ttl=None) keys = ['key1', 'key2'] redis_client.mset({'key1': b'value1', 'key2': b'value2'}) result = store.mget(keys) assert result == [b'value1', b'value2']
def test_mget(redis_client: Redis) ->None: """Test mget method.""" store = RedisStore(client=redis_client, ttl=None) keys = ['key1', 'key2'] redis_client.mset({'key1': b'value1', 'key2': b'value2'}) result = store.mget(keys) assert result == [b'value1', b'value2']
Test mget method.
run
"""Run query through Google Trends with Serpapi""" serpapi_api_key = cast(SecretStr, self.serp_api_key) params = {'engine': 'google_trends', 'api_key': serpapi_api_key. get_secret_value(), 'q': query} total_results = [] client = self.serp_search_engine(params) total_results = client.get_dict()['interest_over_time']...
def run(self, query: str) ->str: """Run query through Google Trends with Serpapi""" serpapi_api_key = cast(SecretStr, self.serp_api_key) params = {'engine': 'google_trends', 'api_key': serpapi_api_key. get_secret_value(), 'q': query} total_results = [] client = self.serp_search_engine(params...
Run query through Google Trends with Serpapi
parse_result
"""Parse a list of candidate model Generations into a specific format. Args: result: A list of Generations to be parsed. The Generations are assumed to be different candidate outputs for a single model input. Returns: Structured output. """
@abstractmethod def parse_result(self, result: List[Generation], *, partial: bool=False) ->T: """Parse a list of candidate model Generations into a specific format. Args: result: A list of Generations to be parsed. The Generations are assumed to be different candidate outputs fo...
Parse a list of candidate model Generations into a specific format. Args: result: A list of Generations to be parsed. The Generations are assumed to be different candidate outputs for a single model input. Returns: Structured output.
date_to_range_filter
constructor_args = {key: kwargs[key] for key in ['start_date', 'end_date', 'time_delta', 'start_inclusive', 'end_inclusive'] if key in kwargs} if not constructor_args or len(constructor_args) == 0: return None try: from timescale_vector import client except ImportError: raise ImportError( 'Could...
def date_to_range_filter(self, **kwargs: Any) ->Any: constructor_args = {key: kwargs[key] for key in ['start_date', 'end_date', 'time_delta', 'start_inclusive', 'end_inclusive'] if key in kwargs} if not constructor_args or len(constructor_args) == 0: return None try: from ti...
null
ZillizRetreiver
"""Deprecated ZillizRetreiver. Please use ZillizRetriever ('i' before 'e') instead. Args: *args: **kwargs: Returns: ZillizRetriever """ warnings.warn( "ZillizRetreiver will be deprecated in the future. Please use ZillizRetriever ('i' before 'e') instead." , Deprecation...
def ZillizRetreiver(*args: Any, **kwargs: Any) ->ZillizRetriever: """Deprecated ZillizRetreiver. Please use ZillizRetriever ('i' before 'e') instead. Args: *args: **kwargs: Returns: ZillizRetriever """ warnings.warn( "ZillizRetreiver will be deprecated in the f...
Deprecated ZillizRetreiver. Please use ZillizRetriever ('i' before 'e') instead. Args: *args: **kwargs: Returns: ZillizRetriever
wrap
async def wrapped_f(*args: Any, **kwargs: Any) ->Callable: async for _ in async_retrying: return await func(*args, **kwargs) raise AssertionError('this is unreachable') return wrapped_f
def wrap(func: Callable) ->Callable: async def wrapped_f(*args: Any, **kwargs: Any) ->Callable: async for _ in async_retrying: return await func(*args, **kwargs) raise AssertionError('this is unreachable') return wrapped_f
null
test__convert_dict_to_message_human
message_dict = {'role': 'user', 'content': 'foo'} result = _convert_dict_to_message(message_dict) expected_output = HumanMessage(content='foo') assert result == expected_output
def test__convert_dict_to_message_human() ->None: message_dict = {'role': 'user', 'content': 'foo'} result = _convert_dict_to_message(message_dict) expected_output = HumanMessage(content='foo') assert result == expected_output
null
embed_documents
"""Embed a list of documents using GPT4All. Args: texts: The list of texts to embed. Returns: List of embeddings, one for each text. """ embeddings = [self.client.embed(text) for text in texts] return [list(map(float, e)) for e in embeddings]
def embed_documents(self, texts: List[str]) ->List[List[float]]: """Embed a list of documents using GPT4All. Args: texts: The list of texts to embed. Returns: List of embeddings, one for each text. """ embeddings = [self.client.embed(text) for text in texts] ...
Embed a list of documents using GPT4All. Args: texts: The list of texts to embed. Returns: List of embeddings, one for each text.
test_load_valid_bool_content
file_path = '/workspaces/langchain/test.json' expected_docs = [Document(page_content='False', metadata={'source': file_path, 'seq_num': 1}), Document(page_content='True', metadata={ 'source': file_path, 'seq_num': 2})] mocker.patch('builtins.open', mocker.mock_open()) mocker.patch('pathlib.Path.read_text', retu...
def test_load_valid_bool_content(mocker: MockerFixture) ->None: file_path = '/workspaces/langchain/test.json' expected_docs = [Document(page_content='False', metadata={'source': file_path, 'seq_num': 1}), Document(page_content='True', metadata={ 'source': file_path, 'seq_num': 2})] mocker.pa...
null
__gt__
"""Create a Numeric greater than filter expression. Args: other (Union[int, float]): The value to filter on. Example: >>> from langchain_community.vectorstores.redis import RedisNum >>> filter = RedisNum("age") > 18 """ self._set_value(other, self.SUPPORTED_...
def __gt__(self, other: Union[int, float]) ->'RedisFilterExpression': """Create a Numeric greater than filter expression. Args: other (Union[int, float]): The value to filter on. Example: >>> from langchain_community.vectorstores.redis import RedisNum >>> filter...
Create a Numeric greater than filter expression. Args: other (Union[int, float]): The value to filter on. Example: >>> from langchain_community.vectorstores.redis import RedisNum >>> filter = RedisNum("age") > 18
test_all_imports
assert set(__all__) == set(EXPECTED_ALL)
def test_all_imports() ->None: assert set(__all__) == set(EXPECTED_ALL)
null
_import_openweathermap
from langchain_community.utilities.openweathermap import OpenWeatherMapAPIWrapper return OpenWeatherMapAPIWrapper
def _import_openweathermap() ->Any: from langchain_community.utilities.openweathermap import OpenWeatherMapAPIWrapper return OpenWeatherMapAPIWrapper
null
_format_func
self._validate_func(func) map_dict = {Operator.AND: 'And', Operator.OR: 'Or', Comparator.EQ: 'Equal', Comparator.NE: 'NotEqual', Comparator.GTE: 'GreaterThanEqual', Comparator.LTE: 'LessThanEqual', Comparator.LT: 'LessThan', Comparator. GT: 'GreaterThan'} return map_dict[func]
def _format_func(self, func: Union[Operator, Comparator]) ->str: self._validate_func(func) map_dict = {Operator.AND: 'And', Operator.OR: 'Or', Comparator.EQ: 'Equal', Comparator.NE: 'NotEqual', Comparator.GTE: 'GreaterThanEqual', Comparator.LTE: 'LessThanEqual', Comparator.LT: 'LessThan'...
null
from_data
members = [Member.from_data(member_data) for member_data in data['members']] return cls(id=data['id'], name=data['name'], members=members)
@classmethod def from_data(cls, data: Dict) ->'Team': members = [Member.from_data(member_data) for member_data in data['members'] ] return cls(id=data['id'], name=data['name'], members=members)
null
__init__
super().__init__(inputs=inputs, selected=selected) self.to_select_from = to_select_from self.based_on = based_on
def __init__(self, inputs: Dict[str, Any], to_select_from: Dict[str, Any], based_on: Dict[str, Any], selected: Optional[PickBestSelected]=None): super().__init__(inputs=inputs, selected=selected) self.to_select_from = to_select_from self.based_on = based_on
null
embed_query
"""Call out to Jina's embedding endpoint. Args: text: The text to embed. Returns: Embeddings for the text. """ return self._embed([text])[0]
def embed_query(self, text: str) ->List[float]: """Call out to Jina's embedding endpoint. Args: text: The text to embed. Returns: Embeddings for the text. """ return self._embed([text])[0]
Call out to Jina's embedding endpoint. Args: text: The text to embed. Returns: Embeddings for the text.
_llm_type
return 'giga-chat-model'
@property def _llm_type(self) ->str: return 'giga-chat-model'
null
test_sequential_usage_multiple_inputs
"""Test sequential on multiple input chains.""" chain_1 = FakeChain(input_variables=['foo', 'test'], output_variables=['bar']) chain_2 = FakeChain(input_variables=['bar', 'foo'], output_variables=['baz']) chain = SequentialChain(chains=[chain_1, chain_2], input_variables=['foo', 'test']) output = chain({'foo': '123...
def test_sequential_usage_multiple_inputs() ->None: """Test sequential on multiple input chains.""" chain_1 = FakeChain(input_variables=['foo', 'test'], output_variables=[ 'bar']) chain_2 = FakeChain(input_variables=['bar', 'foo'], output_variables=[ 'baz']) chain = SequentialChain(chain...
Test sequential on multiple input chains.
on_tool_end
"""Run when tool ends running.""" self.metrics['step'] += 1 self.metrics['tool_ends'] += 1 self.metrics['ends'] += 1 tool_ends = self.metrics['tool_ends'] resp: Dict[str, Any] = {} resp.update({'action': 'on_tool_end', 'output': output}) resp.update(self.metrics) self.mlflg.metrics(self.metrics, step=self.metrics['step...
def on_tool_end(self, output: str, **kwargs: Any) ->None: """Run when tool ends running.""" self.metrics['step'] += 1 self.metrics['tool_ends'] += 1 self.metrics['ends'] += 1 tool_ends = self.metrics['tool_ends'] resp: Dict[str, Any] = {} resp.update({'action': 'on_tool_end', 'output': outpu...
Run when tool ends running.
test_load_returns_full_set_of_metadata
"""Test that returns several docs""" api_client = PubMedAPIWrapper(load_max_docs=1, load_all_available_meta=True) docs = api_client.load_docs('ChatGPT') assert len(docs) == 3 for doc in docs: assert doc.metadata assert set(doc.metadata).issuperset({'Copyright Information', 'Published', 'Title', 'uid'})
def test_load_returns_full_set_of_metadata() ->None: """Test that returns several docs""" api_client = PubMedAPIWrapper(load_max_docs=1, load_all_available_meta=True ) docs = api_client.load_docs('ChatGPT') assert len(docs) == 3 for doc in docs: assert doc.metadata assert set...
Test that returns several docs
logging_enabled
return bool(self.path)
def logging_enabled(self) ->bool: return bool(self.path)
null
_type
return 'api_responder'
@property def _type(self) ->str: return 'api_responder'
null
test_fireworks_model_param
"""Tests model parameters for Fireworks""" llm = Fireworks(model='foo') assert llm.model == 'foo'
@pytest.mark.scheduled def test_fireworks_model_param() ->None: """Tests model parameters for Fireworks""" llm = Fireworks(model='foo') assert llm.model == 'foo'
Tests model parameters for Fireworks
box
"""Create a box on ASCII canvas. Args: x0 (int): x coordinate of the box corner. y0 (int): y coordinate of the box corner. width (int): box width. height (int): box height. """ assert width > 1 assert height > 1 width -= 1 height -= 1 for x in range(x0, x...
def box(self, x0: int, y0: int, width: int, height: int) ->None: """Create a box on ASCII canvas. Args: x0 (int): x coordinate of the box corner. y0 (int): y coordinate of the box corner. width (int): box width. height (int): box height. """ asser...
Create a box on ASCII canvas. Args: x0 (int): x coordinate of the box corner. y0 (int): y coordinate of the box corner. width (int): box width. height (int): box height.
similarity_search_with_score_by_vector
"""Return docs most similar to query. Args: embedding: Embedding vector to look up documents similar to. k: Number of Documents to return. Defaults to 4. filter (Optional[Dict[str, Any]]): Filter by metadata. Defaults to None. fetch_k: (Optional[int]) Number of D...
def similarity_search_with_score_by_vector(self, embedding: List[float], k: int=4, filter: Optional[Dict[str, Any]]=None, fetch_k: int=20, **kwargs: Any) ->List[Tuple[Document, float]]: """Return docs most similar to query. Args: embedding: Embedding vector to look up documents similar ...
Return docs most similar to query. Args: embedding: Embedding vector to look up documents similar to. k: Number of Documents to return. Defaults to 4. filter (Optional[Dict[str, Any]]): Filter by metadata. Defaults to None. fetch_k: (Optional[int]) Number of Documents to fetch before filtering. ...
from_texts
""" Args: skip_strict_exist_check: Deprecated. This is not used basically. """ vald = cls(embedding=embedding, host=host, port=port, grpc_options= grpc_options, grpc_use_secure=grpc_use_secure, grpc_credentials= grpc_credentials, **kwargs) vald.add_texts(texts=texts, metadatas=metada...
@classmethod def from_texts(cls: Type[Vald], texts: List[str], embedding: Embeddings, metadatas: Optional[List[dict]]=None, host: str='localhost', port: int= 8080, grpc_options: Tuple=(('grpc.keepalive_time_ms', 1000 * 10), ( 'grpc.keepalive_timeout_ms', 1000 * 10)), grpc_use_secure: bool=False, grpc_cr...
Args: skip_strict_exist_check: Deprecated. This is not used basically.
_run
"""Use the tool.""" return str(self.api_wrapper.results(query, self.num_results))
def _run(self, query: str, run_manager: Optional[CallbackManagerForToolRun] =None) ->str: """Use the tool.""" return str(self.api_wrapper.results(query, self.num_results))
Use the tool.
test_draw
""" Test CPAL chain can draw its resulting DAG. """ import os narrative_input = ( 'Jan has three times the number of pets as Marcia.Marcia has two more pets than Cindy.If Marcia has ten pets, how many pets does Jan have?' ) llm = OpenAI(temperature=0, max_tokens=512) cpal_chain = CPALChain.from_...
@pytest.mark.skip(reason='requires manual install of debian and py packages') def test_draw(self) ->None: """ Test CPAL chain can draw its resulting DAG. """ import os narrative_input = ( 'Jan has three times the number of pets as Marcia.Marcia has two more pets than Cindy.If Marcia ...
Test CPAL chain can draw its resulting DAG.
_log_message_for_verbose
if self.run_manager: self.run_manager.on_text(message)
def _log_message_for_verbose(self, message: str) ->None: if self.run_manager: self.run_manager.on_text(message)
null
_identifying_params
return {**{'endpoint': self.endpoint, 'model': self.model}, **super(). _identifying_params}
@property def _identifying_params(self) ->Dict[str, Any]: return {**{'endpoint': self.endpoint, 'model': self.model}, **super(). _identifying_params}
null
get_num_tokens
"""Get the number of tokens present in the text. Useful for checking if an input will fit in a model's context window. Args: text: The string input to tokenize. Returns: The integer number of tokens in the text. """ return len(self.get_token_ids(text))
def get_num_tokens(self, text: str) ->int: """Get the number of tokens present in the text. Useful for checking if an input will fit in a model's context window. Args: text: The string input to tokenize. Returns: The integer number of tokens in the text. ""...
Get the number of tokens present in the text. Useful for checking if an input will fit in a model's context window. Args: text: The string input to tokenize. Returns: The integer number of tokens in the text.
__enter__
pass
def __enter__(self) ->None: pass
null
exact_match_string_evaluator
"""Create an ExactMatchStringEvaluator with default configuration.""" return ExactMatchStringEvaluator()
@pytest.fixture def exact_match_string_evaluator() ->ExactMatchStringEvaluator: """Create an ExactMatchStringEvaluator with default configuration.""" return ExactMatchStringEvaluator()
Create an ExactMatchStringEvaluator with default configuration.
test_similarity_search_empty_result
index = mock_index(index_details) index.similarity_search.return_value = {'manifest': {'column_count': 3, 'columns': [{'name': DEFAULT_PRIMARY_KEY}, {'name': DEFAULT_TEXT_COLUMN }, {'name': 'score'}]}, 'result': {'row_count': 0, 'data_array': []}, 'next_page_token': ''} vectorsearch = default_databricks_vec...
@pytest.mark.requires('databricks', 'databricks.vector_search') @pytest.mark.parametrize('index_details', ALL_INDEXES) def test_similarity_search_empty_result(index_details: dict) ->None: index = mock_index(index_details) index.similarity_search.return_value = {'manifest': {'column_count': 3, 'columns':...
null
_import_llm_rails
from langchain_community.vectorstores.llm_rails import LLMRails return LLMRails
def _import_llm_rails() ->Any: from langchain_community.vectorstores.llm_rails import LLMRails return LLMRails
null
_get_mock_authenticated_user
return {'shared_folder_ids': self.MOCK_FOLDER_IDS, 'id': 'Test'}
def _get_mock_authenticated_user(self) ->Dict: return {'shared_folder_ids': self.MOCK_FOLDER_IDS, 'id': 'Test'}
null
_signature
input_str = secret_key.get_secret_value() + json.dumps(payload) + str(timestamp ) md5 = hashlib.md5() md5.update(input_str.encode('utf-8')) return md5.hexdigest()
def _signature(secret_key: SecretStr, payload: Dict[str, Any], timestamp: int ) ->str: input_str = secret_key.get_secret_value() + json.dumps(payload) + str( timestamp) md5 = hashlib.md5() md5.update(input_str.encode('utf-8')) return md5.hexdigest()
null
get_num_tokens_from_messages
"""Calculate num tokens with tiktoken package. Official documentation: https://github.com/openai/openai-cookbook/blob/ main/examples/How_to_format_inputs_to_ChatGPT_models.ipynb""" if sys.version_info[1] <= 7: return super().get_num_tokens_from_messages(messages) model, encoding = self._get_encodin...
def get_num_tokens_from_messages(self, messages: list[BaseMessage]) ->int: """Calculate num tokens with tiktoken package. Official documentation: https://github.com/openai/openai-cookbook/blob/ main/examples/How_to_format_inputs_to_ChatGPT_models.ipynb""" if sys.version_info[1] <= 7: re...
Calculate num tokens with tiktoken package. Official documentation: https://github.com/openai/openai-cookbook/blob/ main/examples/How_to_format_inputs_to_ChatGPT_models.ipynb
__init__
"""Take a raw result from Searx and make it into a dict like object.""" json_data = json.loads(data) super().__init__(json_data) self.__dict__ = self
def __init__(self, data: str): """Take a raw result from Searx and make it into a dict like object.""" json_data = json.loads(data) super().__init__(json_data) self.__dict__ = self
Take a raw result from Searx and make it into a dict like object.
get_verbose
"""Get the value of the `verbose` global setting.""" try: import langchain with warnings.catch_warnings(): warnings.filterwarnings('ignore', message= '.*Importing verbose from langchain root module is no longer supported' ) old_verbose = langchain.verbose except ImportErr...
def get_verbose() ->bool: """Get the value of the `verbose` global setting.""" try: import langchain with warnings.catch_warnings(): warnings.filterwarnings('ignore', message= '.*Importing verbose from langchain root module is no longer supported' ) ...
Get the value of the `verbose` global setting.
get_input_schema
return create_model(self.get_name('Input'), __root__=(List[self.bound. get_input_schema(config)], None), __config__=_SchemaConfig)
def get_input_schema(self, config: Optional[RunnableConfig]=None) ->Type[ BaseModel]: return create_model(self.get_name('Input'), __root__=(List[self.bound. get_input_schema(config)], None), __config__=_SchemaConfig)
null
test_anthropic_incorrect_field
with pytest.warns(match='not default parameter'): llm = ChatAnthropic(foo='bar') assert llm.model_kwargs == {'foo': 'bar'}
@pytest.mark.requires('anthropic') def test_anthropic_incorrect_field() ->None: with pytest.warns(match='not default parameter'): llm = ChatAnthropic(foo='bar') assert llm.model_kwargs == {'foo': 'bar'}
null
is_lc_serializable
return False
@classmethod def is_lc_serializable(cls) ->bool: return False
null
_import_sql_database_tool_QuerySQLDataBaseTool
from langchain_community.tools.sql_database.tool import QuerySQLDataBaseTool return QuerySQLDataBaseTool
def _import_sql_database_tool_QuerySQLDataBaseTool() ->Any: from langchain_community.tools.sql_database.tool import QuerySQLDataBaseTool return QuerySQLDataBaseTool
null
_run
"""Use the tool.""" return self.api_wrapper.run(query)
def _run(self, query: str, run_manager: Optional[CallbackManagerForToolRun] =None) ->str: """Use the tool.""" return self.api_wrapper.run(query)
Use the tool.
test_cosine_similarity_top_k_and_score_threshold
expected_idxs = [(0, 0), (2, 2), (1, 2), (0, 2)] expected_scores = [1.0, 0.93419873, 0.87038828, 0.83743579] actual_idxs, actual_scores = cosine_similarity_top_k(X, Y, score_threshold=0.8) assert actual_idxs == expected_idxs assert np.allclose(expected_scores, actual_scores)
def test_cosine_similarity_top_k_and_score_threshold(X: List[List[float]], Y: List[List[float]]) ->None: expected_idxs = [(0, 0), (2, 2), (1, 2), (0, 2)] expected_scores = [1.0, 0.93419873, 0.87038828, 0.83743579] actual_idxs, actual_scores = cosine_similarity_top_k(X, Y, score_threshold=0.8) ...
null
get_models
"""List available models""" backend = AviaryBackend.from_env() request_url = backend.backend_url + '-/routes' response = requests.get(request_url, headers=backend.header, timeout=TIMEOUT) try: result = response.json() except requests.JSONDecodeError as e: raise RuntimeError( f'Error decoding JSON from {...
def get_models() ->List[str]: """List available models""" backend = AviaryBackend.from_env() request_url = backend.backend_url + '-/routes' response = requests.get(request_url, headers=backend.header, timeout= TIMEOUT) try: result = response.json() except requests.JSONDecodeError...
List available models
test_from_document
"""Test from document class method.""" document = Document(page_content='Lorem ipsum dolor sit amet', metadata={ 'key': 'value'}) hashed_document = _HashedDocument.from_document(document) assert hashed_document.hash_ == 'fd1dc827-051b-537d-a1fe-1fa043e8b276' assert hashed_document.uid == hashed_document.hash_
def test_from_document() ->None: """Test from document class method.""" document = Document(page_content='Lorem ipsum dolor sit amet', metadata ={'key': 'value'}) hashed_document = _HashedDocument.from_document(document) assert hashed_document.hash_ == 'fd1dc827-051b-537d-a1fe-1fa043e8b276' ...
Test from document class method.
test_qdrant_from_texts_stores_duplicated_texts
"""Test end to end Qdrant.from_texts stores duplicated texts separately.""" from qdrant_client import QdrantClient collection_name = uuid.uuid4().hex with tempfile.TemporaryDirectory() as tmpdir: vec_store = Qdrant.from_texts(['abc', 'abc'], ConsistentFakeEmbeddings( ), collection_name=collection_name, path...
def test_qdrant_from_texts_stores_duplicated_texts() ->None: """Test end to end Qdrant.from_texts stores duplicated texts separately.""" from qdrant_client import QdrantClient collection_name = uuid.uuid4().hex with tempfile.TemporaryDirectory() as tmpdir: vec_store = Qdrant.from_texts(['abc', '...
Test end to end Qdrant.from_texts stores duplicated texts separately.
test_read_schema_dict_input
"""Test read_schema with dict input.""" index_schema = {'text': [{'name': 'content'}], 'tag': [{'name': 'tag'}], 'vector': [{'name': 'content_vector', 'dims': 100, 'algorithm': 'FLAT'}]} output = read_schema(index_schema=index_schema) assert output == index_schema
def test_read_schema_dict_input() ->None: """Test read_schema with dict input.""" index_schema = {'text': [{'name': 'content'}], 'tag': [{'name': 'tag'}], 'vector': [{'name': 'content_vector', 'dims': 100, 'algorithm': 'FLAT'}]} output = read_schema(index_schema=index_schema) assert outp...
Test read_schema with dict input.
test_load
mocker.patch('assemblyai.Transcriber.transcribe', return_value=mocker. MagicMock(text='Test transcription text', json_response={'id': '1'}, error=None)) loader = AssemblyAIAudioTranscriptLoader(file_path='./testfile.mp3', api_key='api_key') docs = loader.load() assert len(docs) == 1 assert docs[0].page_cont...
@pytest.mark.requires('assemblyai') def test_load(mocker: MockerFixture) ->None: mocker.patch('assemblyai.Transcriber.transcribe', return_value=mocker. MagicMock(text='Test transcription text', json_response={'id': '1'}, error=None)) loader = AssemblyAIAudioTranscriptLoader(file_path='./testfile...
null
similarity_search_by_vector
"""Return docs most similar to embedding vector. Args: embedding: Embedding to look up documents similar to. k: Number of Documents to return. Defaults to 4. Returns: List of Documents most similar to the query vector. """ query_doc = self.doc_cls(embedding=...
def similarity_search_by_vector(self, embedding: List[float], k: int=4, ** kwargs: Any) ->List[Document]: """Return docs most similar to embedding vector. Args: embedding: Embedding to look up documents similar to. k: Number of Documents to return. Defaults to 4. Return...
Return docs most similar to embedding vector. Args: embedding: Embedding to look up documents similar to. k: Number of Documents to return. Defaults to 4. Returns: List of Documents most similar to the query vector.
_set_initial_conditions
for entity_setting in self.intervention.entity_settings: for entity in self.causal_operations.entities: if entity.name == entity_setting.name: entity.value = entity_setting.value
def _set_initial_conditions(self) ->None: for entity_setting in self.intervention.entity_settings: for entity in self.causal_operations.entities: if entity.name == entity_setting.name: entity.value = entity_setting.value
null
llm
return _get_llm(max_tokens=10)
@pytest.mark.scheduled @pytest.fixture def llm() ->AzureChatOpenAI: return _get_llm(max_tokens=10)
null
validate_environment
"""Validate that api key and python package exists in environment.""" google_api_key = get_from_dict_or_env(values, 'google_api_key', 'GOOGLE_API_KEY') values['google_api_key'] = google_api_key google_cse_id = get_from_dict_or_env(values, 'google_cse_id', 'GOOGLE_CSE_ID') values['google_cse_id'] = google_cse_id try...
@root_validator() def validate_environment(cls, values: Dict) ->Dict: """Validate that api key and python package exists in environment.""" google_api_key = get_from_dict_or_env(values, 'google_api_key', 'GOOGLE_API_KEY') values['google_api_key'] = google_api_key google_cse_id = get_from_dict_or...
Validate that api key and python package exists in environment.
_generate
res = self._chat(messages, **kwargs) if res.status_code != 200: raise ValueError(f'Error code: {res.status_code}, reason: {res.reason}') response = res.json() return self._create_chat_result(response)
def _generate(self, messages: List[BaseMessage], stop: Optional[List[str]]= None, run_manager: Optional[CallbackManagerForLLMRun]=None, **kwargs: Any ) ->ChatResult: res = self._chat(messages, **kwargs) if res.status_code != 200: raise ValueError(f'Error code: {res.status_code}, reason: {res.rea...
null
load
"""Get important HN webpage information. HN webpage components are: - title - content - source url, - time of post - author of the post - number of comments - rank of the post """ soup_info = self.scrape() if 'item' in ...
def load(self) ->List[Document]: """Get important HN webpage information. HN webpage components are: - title - content - source url, - time of post - author of the post - number of comments - rank of the post """ ...
Get important HN webpage information. HN webpage components are: - title - content - source url, - time of post - author of the post - number of comments - rank of the post
test_missing_normalize_score_fn
"""Test doesn't perform similarity search without a valid distance strategy.""" texts = ['foo', 'bar', 'baz'] faiss_instance = FAISS.from_texts(texts, FakeEmbeddings(), distance_strategy='fake') with pytest.raises(ValueError): faiss_instance.similarity_search_with_relevance_scores('foo', k=2)
@pytest.mark.requires('faiss') def test_missing_normalize_score_fn() ->None: """Test doesn't perform similarity search without a valid distance strategy.""" texts = ['foo', 'bar', 'baz'] faiss_instance = FAISS.from_texts(texts, FakeEmbeddings(), distance_strategy='fake') with pytest.raises(Value...
Test doesn't perform similarity search without a valid distance strategy.
_flatten_dict
""" Generator that yields flattened items from a nested dictionary for a flat dict. Parameters: nested_dict (dict): The nested dictionary to flatten. parent_key (str): The prefix to prepend to the keys of the flattened dict. sep (str): The separator to use between the parent key and the...
def _flatten_dict(nested_dict: Dict[str, Any], parent_key: str='', sep: str='_' ) ->Iterable[Tuple[str, Any]]: """ Generator that yields flattened items from a nested dictionary for a flat dict. Parameters: nested_dict (dict): The nested dictionary to flatten. parent_key (str): The pref...
Generator that yields flattened items from a nested dictionary for a flat dict. Parameters: nested_dict (dict): The nested dictionary to flatten. parent_key (str): The prefix to prepend to the keys of the flattened dict. sep (str): The separator to use between the parent key and the key of the flat...
from_open_api_endpoint_chain
"""Convert an endpoint chain to an API endpoint tool.""" expanded_name = ( f"{api_title.replace(' ', '_')}.{chain.api_operation.operation_id}") description = ( f"I'm an AI from {api_title}. Instruct what you want, and I'll assist via an API with description: {chain.api_operation.description}" ) return cls(n...
@classmethod def from_open_api_endpoint_chain(cls, chain: OpenAPIEndpointChain, api_title: str) ->'NLATool': """Convert an endpoint chain to an API endpoint tool.""" expanded_name = ( f"{api_title.replace(' ', '_')}.{chain.api_operation.operation_id}") description = ( f"I'm an AI from {a...
Convert an endpoint chain to an API endpoint tool.
_require_arg
"""Raise ValueError if the required arg with name `arg_name` is None.""" if not arg: raise ValueError(f'`{arg_name}` is required for this index.')
@staticmethod def _require_arg(arg: Any, arg_name: str) ->None: """Raise ValueError if the required arg with name `arg_name` is None.""" if not arg: raise ValueError(f'`{arg_name}` is required for this index.')
Raise ValueError if the required arg with name `arg_name` is None.
_cosine_distance
"""Compute the cosine distance between two vectors. Args: a (np.ndarray): The first vector. b (np.ndarray): The second vector. Returns: np.ndarray: The cosine distance. """ return 1.0 - cosine_similarity(a, b)
@staticmethod def _cosine_distance(a: np.ndarray, b: np.ndarray) ->np.ndarray: """Compute the cosine distance between two vectors. Args: a (np.ndarray): The first vector. b (np.ndarray): The second vector. Returns: np.ndarray: The cosine distance. """ ...
Compute the cosine distance between two vectors. Args: a (np.ndarray): The first vector. b (np.ndarray): The second vector. Returns: np.ndarray: The cosine distance.
run
"""Run command with own globals/locals and returns anything printed. Timeout after the specified number of seconds.""" warn_once() queue: multiprocessing.Queue = multiprocessing.Queue() if timeout is not None: p = multiprocessing.Process(target=self.worker, args=(command, self. globals, self.locals,...
def run(self, command: str, timeout: Optional[int]=None) ->str: """Run command with own globals/locals and returns anything printed. Timeout after the specified number of seconds.""" warn_once() queue: multiprocessing.Queue = multiprocessing.Queue() if timeout is not None: p = multiproce...
Run command with own globals/locals and returns anything printed. Timeout after the specified number of seconds.
__init__
""" Initialize the graph transformer with various options. Args: diffbot_api_key (str): The API key for Diffbot's NLP services. fact_confidence_threshold (float): Minimum confidence level for facts to be included. include_qualifiers (b...
def __init__(self, diffbot_api_key: Optional[str]=None, fact_confidence_threshold: float=0.7, include_qualifiers: bool=True, include_evidence: bool=True, simplified_schema: bool=True) ->None: """ Initialize the graph transformer with various options. Args: diffbot_api_key (str):...
Initialize the graph transformer with various options. Args: diffbot_api_key (str): The API key for Diffbot's NLP services. fact_confidence_threshold (float): Minimum confidence level for facts to be included. include_qualifiers (bool): Whether to include qualifiers in the relations...
validate_search_type
"""Validate search type.""" search_type = values['search_type'] if search_type not in cls.allowed_search_types: raise ValueError( f'search_type of {search_type} not allowed. Valid values are: {cls.allowed_search_types}' ) if search_type == 'similarity_score_threshold': score_threshold = values['...
@root_validator() def validate_search_type(cls, values: Dict) ->Dict: """Validate search type.""" search_type = values['search_type'] if search_type not in cls.allowed_search_types: raise ValueError( f'search_type of {search_type} not allowed. Valid values are: {cls.allowed_search_types}...
Validate search type.
get_issue
""" Fetches a specific issue and its first 10 comments Parameters: issue_number(int): The number for the gitlab issue Returns: dict: A dictionary containing the issue's title, body, and comments as a string """ issue = self.gitlab_repo_instance.issues....
def get_issue(self, issue_number: int) ->Dict[str, Any]: """ Fetches a specific issue and its first 10 comments Parameters: issue_number(int): The number for the gitlab issue Returns: dict: A dictionary containing the issue's title, body, and comments as a...
Fetches a specific issue and its first 10 comments Parameters: issue_number(int): The number for the gitlab issue Returns: dict: A dictionary containing the issue's title, body, and comments as a string
input_keys
"""Input keys for the chain.""" return ['user_input']
@property
def input_keys(self) ->List[str]:
    """Names of the keys this chain expects in its input mapping."""
    return ['user_input']
Input keys for the chain.
test_cypher_save_load
"""Test saving and loading.""" FILE_PATH = 'cypher.yaml' url = os.environ.get('NEO4J_URI') username = os.environ.get('NEO4J_USERNAME') password = os.environ.get('NEO4J_PASSWORD') assert url is not None assert username is not None assert password is not None graph = Neo4jGraph(url=url, username=username, password=passwo...
def test_cypher_save_load() ->None: """Test saving and loading.""" FILE_PATH = 'cypher.yaml' url = os.environ.get('NEO4J_URI') username = os.environ.get('NEO4J_USERNAME') password = os.environ.get('NEO4J_PASSWORD') assert url is not None assert username is not None assert password is not...
Test saving and loading.
_get_clean_text
"""Returns cleaned text with newlines preserved and irrelevant elements removed.""" elements_to_skip = ['script', 'noscript', 'canvas', 'meta', 'svg', 'map', 'area', 'audio', 'source', 'track', 'video', 'embed', 'object', 'param', 'picture', 'iframe', 'frame', 'frameset', 'noframes', 'applet', 'form', 'butt...
def _get_clean_text(element: Tag) ->str: """Returns cleaned text with newlines preserved and irrelevant elements removed.""" elements_to_skip = ['script', 'noscript', 'canvas', 'meta', 'svg', 'map', 'area', 'audio', 'source', 'track', 'video', 'embed', 'object', 'param', 'picture', 'iframe', 'fr...
Returns cleaned text with newlines preserved and irrelevant elements removed.
_get_node_properties
node_properties_query = """ MATCH (a:`{n_label}`) RETURN properties(a) AS props LIMIT 100 """ node_properties = [] for label in n_labels: q = node_properties_query.format(n_label=label) data = {'label': label, 'properties': self.query(q)['results']} s = set({}) for p in d...
def _get_node_properties(self, n_labels: List[str], types: Dict) ->List: node_properties_query = """ MATCH (a:`{n_label}`) RETURN properties(a) AS props LIMIT 100 """ node_properties = [] for label in n_labels: q = node_properties_query.format(n_label=label) d...
null
_wrap_prompt
if not self.HUMAN_PROMPT or not self.AI_PROMPT: raise NameError('Please ensure the anthropic package is loaded') if prompt.startswith(self.HUMAN_PROMPT): return prompt corrected_prompt, n_subs = re.subn('^\\n*Human:', self.HUMAN_PROMPT, prompt) if n_subs == 1: return corrected_prompt return f'{self.HUMAN_PR...
def _wrap_prompt(self, prompt: str) ->str: if not self.HUMAN_PROMPT or not self.AI_PROMPT: raise NameError('Please ensure the anthropic package is loaded') if prompt.startswith(self.HUMAN_PROMPT): return prompt corrected_prompt, n_subs = re.subn('^\\n*Human:', self.HUMAN_PROMPT, prompt ...
null
_send_pipeline_to_device
"""Send a pipeline to a device on the cluster.""" if isinstance(pipeline, str): with open(pipeline, 'rb') as f: pipeline = pickle.load(f) if importlib.util.find_spec('torch') is not None: import torch cuda_device_count = torch.cuda.device_count() if device < -1 or device >= cuda_device_count: ...
def _send_pipeline_to_device(pipeline: Any, device: int) ->Any: """Send a pipeline to a device on the cluster.""" if isinstance(pipeline, str): with open(pipeline, 'rb') as f: pipeline = pickle.load(f) if importlib.util.find_spec('torch') is not None: import torch cuda_de...
Send a pipeline to a device on the cluster.
test_json_schema_evaluator_invalid_prediction
@pytest.mark.requires('jsonschema')
def test_json_schema_evaluator_invalid_prediction(json_schema_evaluator:
    JsonSchemaEvaluator) ->None:
    """A type mismatch (string age vs integer schema) fails with reasoning."""
    pred = '{"name": "John", "age": "30"}'
    schema = {'type': 'object', 'properties': {'name': {'type': 'string'},
        'age': {'type': 'integer'}}}
    outcome = json_schema_evaluator._evaluate_strings(prediction=pred,
        reference=schema)
    assert outcome['score'] is False
    assert 'reasoning' in outcome
null
test_tracing_sequential
from langchain.agents import AgentType, initialize_agent, load_tools os.environ['LANGCHAIN_WANDB_TRACING'] = 'true' os.environ['WANDB_PROJECT'] = 'langchain-tracing' for q in questions[:3]: llm = OpenAI(temperature=0) tools = load_tools(['llm-math', 'serpapi'], llm=llm) agent = initialize_agent(tools, llm, ...
def test_tracing_sequential() ->None: from langchain.agents import AgentType, initialize_agent, load_tools os.environ['LANGCHAIN_WANDB_TRACING'] = 'true' os.environ['WANDB_PROJECT'] = 'langchain-tracing' for q in questions[:3]: llm = OpenAI(temperature=0) tools = load_tools(['llm-math', ...
null
_call
""" Displays the prompt to the user and returns their input as a response. Args: prompt (str): The prompt to be displayed to the user. stop (Optional[List[str]]): A list of stop strings. run_manager (Optional[CallbackManagerForLLMRun]): Currently not used. R...
def _call(self, prompt: str, stop: Optional[List[str]]=None, run_manager: Optional[CallbackManagerForLLMRun]=None, **kwargs: Any) ->str: """ Displays the prompt to the user and returns their input as a response. Args: prompt (str): The prompt to be displayed to the user. ...
Displays the prompt to the user and returns their input as a response. Args: prompt (str): The prompt to be displayed to the user. stop (Optional[List[str]]): A list of stop strings. run_manager (Optional[CallbackManagerForLLMRun]): Currently not used. Returns: str: The user's input as a response.
get_lc_namespace
"""Get the namespace of the langchain object.""" return ['langchain', 'chat_models', 'mistralai']
@classmethod
def get_lc_namespace(cls) ->List[str]:
    """Namespace path identifying this object for langchain serialization."""
    return ['langchain', 'chat_models', 'mistralai']
Get the namespace of the langchain object.
get_images_from_pdf
""" Extract images from each page of a PDF document and save as JPEG files. :param pdf_path: A string representing the path to the PDF file. :param img_dump_path: A string representing the path to dummp images. """ pdf = pdfium.PdfDocument(pdf_path) n_pages = len(pdf) for page_number in range(n_pages):...
def get_images_from_pdf(pdf_path, img_dump_path): """ Extract images from each page of a PDF document and save as JPEG files. :param pdf_path: A string representing the path to the PDF file. :param img_dump_path: A string representing the path to dummp images. """ pdf = pdfium.PdfDocument(pdf_p...
Extract images from each page of a PDF document and save as JPEG files. :param pdf_path: A string representing the path to the PDF file. :param img_dump_path: A string representing the path to dump images.
test_url
os.environ['MS_GRAPH_CLIENT_ID'] = 'CLIENT_ID' os.environ['MS_GRAPH_CLIENT_SECRET'] = 'CLIENT_SECRET' loader = OneNoteLoader(notebook_name='test_notebook', section_name= 'test_section', page_title='test_title', access_token='access_token', onenote_api_base_url='https://graph.microsoft.com/v1.0/me/onenote') asse...
def test_url() ->None: os.environ['MS_GRAPH_CLIENT_ID'] = 'CLIENT_ID' os.environ['MS_GRAPH_CLIENT_SECRET'] = 'CLIENT_SECRET' loader = OneNoteLoader(notebook_name='test_notebook', section_name= 'test_section', page_title='test_title', access_token= 'access_token', onenote_api_base_url= ...
null
__init__
"""Initialize the parser. Args: device: device to use. lang_model: whisper model to use, for example "openai/whisper-medium". Defaults to None. forced_decoder_ids: id states for decoder in a multilanguage model. Defaults to None. """ try: ...
def __init__(self, device: str='0', lang_model: Optional[str]=None, forced_decoder_ids: Optional[Tuple[Dict]]=None): """Initialize the parser. Args: device: device to use. lang_model: whisper model to use, for example "openai/whisper-medium". Defaults to None. ...
Initialize the parser. Args: device: device to use. lang_model: whisper model to use, for example "openai/whisper-medium". Defaults to None. forced_decoder_ids: id states for decoder in a multilanguage model. Defaults to None.
validate_inputs
"""Validate that either folder_id or document_ids is set, but not both.""" if values.get('folder_id') and (values.get('document_ids') or values.get( 'file_ids')): raise ValueError( 'Cannot specify both folder_id and document_ids nor folder_id and file_ids' ) if not values.get('folder_id') and no...
@root_validator def validate_inputs(cls, values: Dict[str, Any]) ->Dict[str, Any]: """Validate that either folder_id or document_ids is set, but not both.""" if values.get('folder_id') and (values.get('document_ids') or values. get('file_ids')): raise ValueError( 'Cannot specify both...
Validate that either folder_id or document_ids is set, but not both.
to_json_not_implemented
return to_json_not_implemented(self)
def to_json_not_implemented(self) ->SerializedNotImplemented:
    """Return a serialized "not implemented" placeholder for this object.

    Delegates to the module-level ``to_json_not_implemented`` helper;
    within the method body that bare name resolves to the module-level
    function, not this method, so there is no recursion.
    """
    return to_json_not_implemented(self)
null
validate_environment
values['ernie_api_base'] = get_from_dict_or_env(values, 'ernie_api_base', 'ERNIE_API_BASE', 'https://aip.baidubce.com') values['ernie_client_id'] = get_from_dict_or_env(values, 'ernie_client_id', 'ERNIE_CLIENT_ID') values['ernie_client_secret'] = get_from_dict_or_env(values, 'ernie_client_secret', 'ERNIE_CL...
@root_validator() def validate_environment(cls, values: Dict) ->Dict: values['ernie_api_base'] = get_from_dict_or_env(values, 'ernie_api_base', 'ERNIE_API_BASE', 'https://aip.baidubce.com') values['ernie_client_id'] = get_from_dict_or_env(values, 'ernie_client_id', 'ERNIE_CLIENT_ID') values[...
null
stringify_value
"""Stringify a value. Args: val: The value to stringify. Returns: str: The stringified value. """ if isinstance(val, str): return val elif isinstance(val, dict): return '\n' + stringify_dict(val) elif isinstance(val, list): return '\n'.join(stringify_value(v) for v in val) else...
def stringify_value(val: Any) ->str: """Stringify a value. Args: val: The value to stringify. Returns: str: The stringified value. """ if isinstance(val, str): return val elif isinstance(val, dict): return '\n' + stringify_dict(val) elif isinstance(val, list...
Stringify a value. Args: val: The value to stringify. Returns: str: The stringified value.
_on_run_create
"""Process a run upon creation."""
def _on_run_create(self, run: Run) ->None:
    """Process a run upon creation.

    Default implementation does nothing; presumably a hook for subclasses
    to override — confirm against the class hierarchy.
    """
Process a run upon creation.
reset_callback_meta
"""Reset the callback metadata.""" self.step = 0 self.starts = 0 self.ends = 0 self.errors = 0 self.text_ctr = 0 self.ignore_llm_ = False self.ignore_chain_ = False self.ignore_agent_ = False self.always_verbose_ = False self.chain_starts = 0 self.chain_ends = 0 self.llm_starts = 0 self.llm_ends = 0 self.llm_streams = ...
def reset_callback_meta(self) ->None: """Reset the callback metadata.""" self.step = 0 self.starts = 0 self.ends = 0 self.errors = 0 self.text_ctr = 0 self.ignore_llm_ = False self.ignore_chain_ = False self.ignore_agent_ = False self.always_verbose_ = False self.chain_starts...
Reset the callback metadata.
parse
"""Return AutoGPTAction"""
@abstractmethod
def parse(self, text: str) ->AutoGPTAction:
    """Parse raw model output text into an AutoGPTAction.

    Args:
        text: Raw text produced by the LLM.

    Returns:
        The parsed AutoGPTAction.
    """
Return AutoGPTAction
test_sitemap_block_size_to_small
"""Test sitemap loader.""" with pytest.raises(ValueError, match='Sitemap blocksize should be at least 1'): SitemapLoader('https://api.python.langchain.com/sitemap.xml', blocksize=0)
def test_sitemap_block_size_to_small() ->None:
    """A blocksize below 1 is rejected by the sitemap loader."""
    with pytest.raises(ValueError, match='Sitemap blocksize should be at least 1'):
        SitemapLoader('https://api.python.langchain.com/sitemap.xml',
            blocksize=0)
Test sitemap loader.
get_relevant_documents_with_filter
body = self.body.copy() _filter = f' and {_filter}' if _filter else '' body['yql'] = body['yql'] + _filter body['query'] = query return self._query(body)
def get_relevant_documents_with_filter(self, query: str, *, _filter:
    Optional[str]=None) ->List[Document]:
    """Run the stored query body, optionally narrowed by an extra condition.

    Args:
        query: The user query string.
        _filter: Optional condition appended to the 'yql' entry with ' and '.

    Returns:
        The matching documents.
    """
    request_body = self.body.copy()
    if _filter:
        request_body['yql'] = request_body['yql'] + f' and {_filter}'
    request_body['query'] = query
    return self._query(request_body)
null
test_get_final_answer_new_line
"""Test getting final answer.""" llm_output = """Thought: I can now answer the question Final Answer: 1994""" action, action_input = get_action_and_input(llm_output) assert action == 'Final Answer' assert action_input == '1994'
def test_get_final_answer_new_line() ->None:
    """The final answer is extracted when it appears on its own line."""
    output = 'Thought: I can now answer the question\nFinal Answer:\n1994'
    act, act_input = get_action_and_input(output)
    assert act == 'Final Answer'
    assert act_input == '1994'
Test getting final answer.
_create_function_message
"""Convert agent action and observation into a function message. Args: agent_action: the tool invocation request from the agent observation: the result of the tool invocation Returns: FunctionMessage that corresponds to the original tool invocation """ if not isinstance(observation, ...
def _create_function_message(agent_action: AgentAction, observation: Any ) ->FunctionMessage: """Convert agent action and observation into a function message. Args: agent_action: the tool invocation request from the agent observation: the result of the tool invocation Returns: Fu...
Convert agent action and observation into a function message. Args: agent_action: the tool invocation request from the agent observation: the result of the tool invocation Returns: FunctionMessage that corresponds to the original tool invocation
_get_messages_from_run_dict
if not messages: return [] first_message = messages[0] if 'lc' in first_message: return [load(dumpd(message)) for message in messages] else: return messages_from_dict(messages)
def _get_messages_from_run_dict(messages: List[dict]) ->List[BaseMessage]:
    """Convert stored message dicts to BaseMessage objects.

    Handles both the serialized-object format — detected via an 'lc' key
    on the first entry — and the plain message-dict format.
    """
    if not messages:
        return []
    if 'lc' in messages[0]:
        # Round-trip through dumpd/load to reconstruct serialized objects.
        return [load(dumpd(m)) for m in messages]
    return messages_from_dict(messages)
null
validate_environment
"""Validate that api key exists in environment.""" scenex_api_key = get_from_dict_or_env(values, 'scenex_api_key', 'SCENEX_API_KEY') values['scenex_api_key'] = scenex_api_key return values
@root_validator(pre=True)
def validate_environment(cls, values: Dict) ->Dict:
    """Resolve the SceneX API key from the values dict or the environment."""
    values['scenex_api_key'] = get_from_dict_or_env(values,
        'scenex_api_key', 'SCENEX_API_KEY')
    return values
Validate that api key exists in environment.
test_deanonymize
"""Test deanonymizing a name in a simple sentence""" from langchain_experimental.data_anonymizer import PresidioReversibleAnonymizer text = 'Hello, my name is John Doe.' anonymizer = PresidioReversibleAnonymizer(analyzed_fields=['PERSON']) anonymized_text = anonymizer.anonymize(text) deanonymized_text = anonymizer.dean...
@pytest.mark.requires('presidio_analyzer', 'presidio_anonymizer', 'faker') def test_deanonymize() ->None: """Test deanonymizing a name in a simple sentence""" from langchain_experimental.data_anonymizer import PresidioReversibleAnonymizer text = 'Hello, my name is John Doe.' anonymizer = PresidioReversi...
Test deanonymizing a name in a simple sentence
from_documents
""" Create a BM25Retriever from a list of Documents. Args: documents: A list of Documents to vectorize. bm25_params: Parameters to pass to the BM25 vectorizer. preprocess_func: A function to preprocess each text before vectorization. **kwargs: Any other ar...
@classmethod def from_documents(cls, documents: Iterable[Document], *, bm25_params: Optional[Dict[str, Any]]=None, preprocess_func: Callable[[str], List[ str]]=default_preprocessing_func, **kwargs: Any) ->BM25Retriever: """ Create a BM25Retriever from a list of Documents. Args: d...
Create a BM25Retriever from a list of Documents. Args: documents: A list of Documents to vectorize. bm25_params: Parameters to pass to the BM25 vectorizer. preprocess_func: A function to preprocess each text before vectorization. **kwargs: Any other arguments to pass to the retriever. Returns: A BM...