method_name
stringlengths
1
78
method_body
stringlengths
3
9.66k
full_code
stringlengths
31
10.7k
docstring
stringlengths
4
4.74k
get_output_schema
if self.custom_output_type is not None: return super().get_output_schema(config) return self.bound.get_output_schema(merge_configs(self.config, config))
def get_output_schema(self, config: Optional[RunnableConfig]=None) ->Type[ BaseModel]: if self.custom_output_type is not None: return super().get_output_schema(config) return self.bound.get_output_schema(merge_configs(self.config, config))
null
embed_query
"""Call out to Cohere's embedding endpoint. Args: text: The text to embed. Returns: Embeddings for the text. """ return self.embed([text], input_type='search_query')[0]
def embed_query(self, text: str) ->List[float]: """Call out to Cohere's embedding endpoint. Args: text: The text to embed. Returns: Embeddings for the text. """ return self.embed([text], input_type='search_query')[0]
Call out to Cohere's embedding endpoint. Args: text: The text to embed. Returns: Embeddings for the text.
from_openapi_spec
"""Create an APIOperation from an OpenAPI spec.""" operation = spec.get_operation(path, method) parameters = spec.get_parameters_for_operation(operation) properties = cls._get_properties_from_parameters(parameters, spec) operation_id = OpenAPISpec.get_cleaned_operation_id(operation, path, method) request_body = spec.ge...
@classmethod def from_openapi_spec(cls, spec: OpenAPISpec, path: str, method: str ) ->'APIOperation': """Create an APIOperation from an OpenAPI spec.""" operation = spec.get_operation(path, method) parameters = spec.get_parameters_for_operation(operation) properties = cls._get_properties_from_parame...
Create an APIOperation from an OpenAPI spec.
set_path
self.path = path
def set_path(self, path: str) ->None: self.path = path
null
test_chat_model_with_v2_callbacks
"""Test chat model callbacks fall back to on_llm_start.""" handler = FakeCallbackHandlerWithChatStart() llm = FakeListChatModel(callbacks=[handler], verbose=True, responses=[ 'fake response']) output = llm([HumanMessage(content='foo')]) assert output.content == 'fake response' assert handler.starts == 1 assert hand...
def test_chat_model_with_v2_callbacks() ->None: """Test chat model callbacks fall back to on_llm_start.""" handler = FakeCallbackHandlerWithChatStart() llm = FakeListChatModel(callbacks=[handler], verbose=True, responses=[ 'fake response']) output = llm([HumanMessage(content='foo')]) assert ...
Test chat model callbacks fall back to on_llm_start.
escape_str
return ''.join(f'{self.BS}{c}' if c in self.must_escape else c for c in value)
def escape_str(self, value: str) ->str: return ''.join(f'{self.BS}{c}' if c in self.must_escape else c for c in value)
null
input_keys
"""Return the singular input key. :meta private: """ return [self.input_key]
@property def input_keys(self) ->List[str]: """Return the singular input key. :meta private: """ return [self.input_key]
Return the singular input key. :meta private:
_on_run_update
if run.id != self.root_id: return if run.error is None: if self._arg_on_end is not None: call_func_with_variable_args(self._arg_on_end, run, self.config) elif self._arg_on_error is not None: call_func_with_variable_args(self._arg_on_error, run, self.config)
def _on_run_update(self, run: Run) ->None: if run.id != self.root_id: return if run.error is None: if self._arg_on_end is not None: call_func_with_variable_args(self._arg_on_end, run, self.config) elif self._arg_on_error is not None: call_func_with_variable_args(self._arg...
null
test_dashscope_embedding_documents_multiple
"""Test dashscope embeddings.""" documents = ['foo bar', 'bar foo', 'foo', 'foo0', 'foo1', 'foo2', 'foo3', 'foo4', 'foo5', 'foo6', 'foo7', 'foo8', 'foo9', 'foo10', 'foo11', 'foo12', 'foo13', 'foo14', 'foo15', 'foo16', 'foo17', 'foo18', 'foo19', 'foo20', 'foo21', 'foo22', 'foo23', 'foo24'] embedding = DashSc...
def test_dashscope_embedding_documents_multiple() ->None: """Test dashscope embeddings.""" documents = ['foo bar', 'bar foo', 'foo', 'foo0', 'foo1', 'foo2', 'foo3', 'foo4', 'foo5', 'foo6', 'foo7', 'foo8', 'foo9', 'foo10', 'foo11', 'foo12', 'foo13', 'foo14', 'foo15', 'foo16', 'foo17', 'fo...
Test dashscope embeddings.
__init__
""" Args: api_url: https://platform.quip.com access_token: token of access quip API. Please refer: https://quip.com/dev/automation/documentation/current#section/Authentication/Get-Access-to-Quip's-APIs request_timeout: timeout of request, default 60s. """ ...
def __init__(self, api_url: str, access_token: str, request_timeout: Optional[int]=60): """ Args: api_url: https://platform.quip.com access_token: token of access quip API. Please refer: https://quip.com/dev/automation/documentation/current#section/Authentication/Get-...
Args: api_url: https://platform.quip.com access_token: token of access quip API. Please refer: https://quip.com/dev/automation/documentation/current#section/Authentication/Get-Access-to-Quip's-APIs request_timeout: timeout of request, default 60s.
_process_result
"""Post-process the result from the server.""" message = ModelInferResponse() google.protobuf.json_format.Parse(json.dumps(result), message) infer_result = grpcclient.InferResult(message) np_res = infer_result.as_numpy('text_output') generated_text = '' if np_res is not None: generated_text = ''.join([token.decode(...
@staticmethod def _process_result(result: Dict[str, str]) ->str: """Post-process the result from the server.""" message = ModelInferResponse() google.protobuf.json_format.Parse(json.dumps(result), message) infer_result = grpcclient.InferResult(message) np_res = infer_result.as_numpy('text_output') ...
Post-process the result from the server.
inner_embedding_query
def generate_filter_query() ->str: if search_filter is None: return '' filter_clause = ' AND '.join([create_filter(md_key, md_value) for md_key, md_value in search_filter.items()]) return filter_clause def create_filter(md_key: str, md_value: Any) ->str: md_filter_expr = self.config.fie...
def inner_embedding_query(self, embedding: List[float], search_filter: Optional[Dict[str, Any]]=None, k: int=4) ->Dict[str, Any]: def generate_filter_query() ->str: if search_filter is None: return '' filter_clause = ' AND '.join([create_filter(md_key, md_value) for md_...
null
__init__
"""Initialize a LatexTextSplitter.""" separators = self.get_separators_for_language(Language.LATEX) super().__init__(separators=separators, **kwargs)
def __init__(self, **kwargs: Any) ->None: """Initialize a LatexTextSplitter.""" separators = self.get_separators_for_language(Language.LATEX) super().__init__(separators=separators, **kwargs)
Initialize a LatexTextSplitter.
_import_edenai_EdenAiExplicitImageTool
from langchain_community.tools.edenai import EdenAiExplicitImageTool return EdenAiExplicitImageTool
def _import_edenai_EdenAiExplicitImageTool() ->Any: from langchain_community.tools.edenai import EdenAiExplicitImageTool return EdenAiExplicitImageTool
null
test_anthropic_model_kwargs
llm = ChatAnthropic(model_kwargs={'foo': 'bar'}) assert llm.model_kwargs == {'foo': 'bar'}
@pytest.mark.requires('anthropic') def test_anthropic_model_kwargs() ->None: llm = ChatAnthropic(model_kwargs={'foo': 'bar'}) assert llm.model_kwargs == {'foo': 'bar'}
null
from_llm
"""Initialize from LLM.""" qa_chain = LLMChain(llm=llm, prompt=qa_prompt) aql_generation_chain = LLMChain(llm=llm, prompt=aql_generation_prompt) aql_fix_chain = LLMChain(llm=llm, prompt=aql_fix_prompt) return cls(qa_chain=qa_chain, aql_generation_chain=aql_generation_chain, aql_fix_chain=aql_fix_chain, **kwargs)
@classmethod def from_llm(cls, llm: BaseLanguageModel, *, qa_prompt: BasePromptTemplate= AQL_QA_PROMPT, aql_generation_prompt: BasePromptTemplate= AQL_GENERATION_PROMPT, aql_fix_prompt: BasePromptTemplate= AQL_FIX_PROMPT, **kwargs: Any) ->ArangoGraphQAChain: """Initialize from LLM.""" qa_chain = LLM...
Initialize from LLM.
_persist_run
pass
def _persist_run(self, run: Run) ->None: pass
null
_parse_python_function_docstring
"""Parse the function and argument descriptions from the docstring of a function. Assumes the function docstring follows Google Python style guide. """ docstring = inspect.getdoc(function) if docstring: docstring_blocks = docstring.split('\n\n') descriptors = [] args_block = None past_descripto...
def _parse_python_function_docstring(function: Callable) ->Tuple[str, dict]: """Parse the function and argument descriptions from the docstring of a function. Assumes the function docstring follows Google Python style guide. """ docstring = inspect.getdoc(function) if docstring: docstring_b...
Parse the function and argument descriptions from the docstring of a function. Assumes the function docstring follows Google Python style guide.
get_lc_namespace
"""Get the namespace of the langchain object.""" return ['langchain', 'prompts', 'chat']
@classmethod def get_lc_namespace(cls) ->List[str]: """Get the namespace of the langchain object.""" return ['langchain', 'prompts', 'chat']
Get the namespace of the langchain object.
test_allows_extra_kwargs
"""Test formatting allows extra keyword arguments.""" template = 'This is a {foo} test.' output = formatter.format(template, foo='good', bar='oops') expected_output = 'This is a good test.' assert output == expected_output
def test_allows_extra_kwargs() ->None: """Test formatting allows extra keyword arguments.""" template = 'This is a {foo} test.' output = formatter.format(template, foo='good', bar='oops') expected_output = 'This is a good test.' assert output == expected_output
Test formatting allows extra keyword arguments.
on_tool_error
if parent_run_id is None: self.increment()
def on_tool_error(self, error: BaseException, *, run_id: UUID, parent_run_id: Optional[UUID]=None, **kwargs: Any) ->Any: if parent_run_id is None: self.increment()
null
get_lc_namespace
"""Get the namespace of the langchain object.""" return ['langchain', 'chat_models', 'vertexai']
@classmethod def get_lc_namespace(cls) ->List[str]: """Get the namespace of the langchain object.""" return ['langchain', 'chat_models', 'vertexai']
Get the namespace of the langchain object.
post
...
@abstractmethod def post(self, request: Any, transform_output_fn: Optional[Callable[..., str]]=None) ->Any: ...
null
_refresh_signer
if self.auth.get('signer', None) and hasattr(self.auth['signer'], 'refresh_security_token'): self.auth['signer'].refresh_security_token()
def _refresh_signer(self) ->None: if self.auth.get('signer', None) and hasattr(self.auth['signer'], 'refresh_security_token'): self.auth['signer'].refresh_security_token()
null
on_tool_start
"""Start a trace for a tool run.""" parent_run_id_ = str(parent_run_id) if parent_run_id else None execution_order = self._get_execution_order(parent_run_id_) start_time = datetime.now(timezone.utc) if metadata: kwargs.update({'metadata': metadata}) tool_run = Run(id=run_id, parent_run_id=parent_run_id, serialized=...
def on_tool_start(self, serialized: Dict[str, Any], input_str: str, *, run_id: UUID, tags: Optional[List[str]]=None, parent_run_id: Optional[ UUID]=None, metadata: Optional[Dict[str, Any]]=None, name: Optional[str ]=None, **kwargs: Any) ->Run: """Start a trace for a tool run.""" parent_run_id_ = str...
Start a trace for a tool run.
embed_query
"""Embed a text. Args: text: The text to embed. Returns: Embedding for the text. """ task_type = self.task_type or 'retrieval_query' return self._embed([text], task_type=task_type)[0]
def embed_query(self, text: str) ->List[float]: """Embed a text. Args: text: The text to embed. Returns: Embedding for the text. """ task_type = self.task_type or 'retrieval_query' return self._embed([text], task_type=task_type)[0]
Embed a text. Args: text: The text to embed. Returns: Embedding for the text.
_convert_message_to_dict
if isinstance(message, ChatMessage): message_dict = {'role': message.role, 'content': message.content} elif isinstance(message, HumanMessage): message_dict = {'role': 'user', 'content': message.content} elif isinstance(message, AIMessage): message_dict = {'role': 'assistant', 'content': message.content} ...
def _convert_message_to_dict(message: BaseMessage) ->dict: if isinstance(message, ChatMessage): message_dict = {'role': message.role, 'content': message.content} elif isinstance(message, HumanMessage): message_dict = {'role': 'user', 'content': message.content} elif isinstance(message, AIMes...
null
input_keys
"""Return the input keys. Returns: List of input keys. """ return self._input_keys
@property def input_keys(self) ->List[str]: """Return the input keys. Returns: List of input keys. """ return self._input_keys
Return the input keys. Returns: List of input keys.
_call
"""Return next response""" response = self.responses[self.i] if self.i < len(self.responses) - 1: self.i += 1 else: self.i = 0 return response
def _call(self, prompt: str, stop: Optional[List[str]]=None, run_manager: Optional[CallbackManagerForLLMRun]=None, **kwargs: Any) ->str: """Return next response""" response = self.responses[self.i] if self.i < len(self.responses) - 1: self.i += 1 else: self.i = 0 return response
Return next response
test_dereference_refs_multiple_refs
schema = {'type': 'object', 'properties': {'first_name': {'$ref': '#/$defs/name'}, 'other': {'$ref': '#/$defs/other'}}, '$defs': {'name': {'type': 'string'}, 'other': {'type': 'object', 'properties': {'age': 'int', 'height': 'int'}}}} expected = {'type': 'object', 'properties': {'first_name': {'type': '...
def test_dereference_refs_multiple_refs() ->None: schema = {'type': 'object', 'properties': {'first_name': {'$ref': '#/$defs/name'}, 'other': {'$ref': '#/$defs/other'}}, '$defs': { 'name': {'type': 'string'}, 'other': {'type': 'object', 'properties': {'age': 'int', 'height': 'int'}}}} ex...
null
get_history
window = 3 data = graph.query( """ MATCH (u:User {id:$user_id})-[:HAS_SESSION]->(s:Session {id:$session_id}), (s)-[:LAST_MESSAGE]->(last_message) MATCH p=(last_message)<-[:NEXT*0..""" + str(window) + """]-() WITH p, length(p) AS length ORDER BY length DESC LIMIT 1 ...
def get_history(input: Dict[str, Any]) ->List[Union[HumanMessage, AIMessage]]: window = 3 data = graph.query( """ MATCH (u:User {id:$user_id})-[:HAS_SESSION]->(s:Session {id:$session_id}), (s)-[:LAST_MESSAGE]->(last_message) MATCH p=(last_message)<-[:NEXT*0..""" +...
null
__post_init__
self.header = {'Authorization': self.bearer}
def __post_init__(self) ->None: self.header = {'Authorization': self.bearer}
null
__init__
process_attachments = unstructured_kwargs.get('process_attachments') attachment_partitioner = unstructured_kwargs.get('attachment_partitioner') if process_attachments and attachment_partitioner is None: from unstructured.partition.auto import partition unstructured_kwargs['attachment_partitioner'] = partition s...
def __init__(self, file_path: str, mode: str='single', ** unstructured_kwargs: Any): process_attachments = unstructured_kwargs.get('process_attachments') attachment_partitioner = unstructured_kwargs.get('attachment_partitioner') if process_attachments and attachment_partitioner is None: from uns...
null
embeddings
"""Return the embeddings.""" return self._embedding
@property def embeddings(self) ->Embeddings: """Return the embeddings.""" return self._embedding
Return the embeddings.
set_repo
self.repo = repo
def set_repo(self, repo: str) ->None: self.repo = repo
null
on_chain_start
"""Do nothing when LLM chain starts.""" pass
def on_chain_start(self, serialized: Dict[str, Any], inputs: Dict[str, Any], **kwargs: Any) ->None: """Do nothing when LLM chain starts.""" pass
Do nothing when LLM chain starts.
regex_match_string_evaluator
"""Create a RegexMatchStringEvaluator with default configuration.""" return RegexMatchStringEvaluator()
@pytest.fixture def regex_match_string_evaluator() ->RegexMatchStringEvaluator: """Create a RegexMatchStringEvaluator with default configuration.""" return RegexMatchStringEvaluator()
Create a RegexMatchStringEvaluator with default configuration.
test_unstructured_api_file_loader_multiple_files
"""Test unstructured loader.""" file_paths = [os.path.join(EXAMPLE_DOCS_DIRECTORY, 'layout-parser-paper.pdf'), os.path.join(EXAMPLE_DOCS_DIRECTORY, 'whatsapp_chat.txt')] loader = UnstructuredAPIFileLoader(file_path=file_paths, api_key= 'FAKE_API_KEY', strategy='fast', mode='elements') docs = loader.load() a...
def test_unstructured_api_file_loader_multiple_files() ->None: """Test unstructured loader.""" file_paths = [os.path.join(EXAMPLE_DOCS_DIRECTORY, 'layout-parser-paper.pdf'), os.path.join(EXAMPLE_DOCS_DIRECTORY, 'whatsapp_chat.txt')] loader = UnstructuredAPIFileLoader(file_path=file_paths, ap...
Test unstructured loader.
get_media_metadata_manifest
response = requests.get(IMAGE_AND_VIDEO_LIBRARY_URL + '/asset/' + query) return response.json()
def get_media_metadata_manifest(self, query: str) ->str: response = requests.get(IMAGE_AND_VIDEO_LIBRARY_URL + '/asset/' + query) return response.json()
null
__init__
"""Initialize tool.""" super(Tool, self).__init__(name=name, func=func, description=description, **kwargs)
def __init__(self, name: str, func: Optional[Callable], description: str, **kwargs: Any) ->None: """Initialize tool.""" super(Tool, self).__init__(name=name, func=func, description= description, **kwargs)
Initialize tool.
test_agent_iterator_output_structure
"""Test the output structure of AgentExecutorIterator.""" agent = _get_agent() agent_iter = agent.iter(inputs='when was langchain made') for step in agent_iter: assert isinstance(step, dict) if 'intermediate_step' in step: assert isinstance(step['intermediate_step'], list) elif 'output' in step: ...
def test_agent_iterator_output_structure() ->None: """Test the output structure of AgentExecutorIterator.""" agent = _get_agent() agent_iter = agent.iter(inputs='when was langchain made') for step in agent_iter: assert isinstance(step, dict) if 'intermediate_step' in step: as...
Test the output structure of AgentExecutorIterator.
on_text
"""Do nothing""" pass
def on_text(self, text: str, **kwargs: Any) ->None: """Do nothing""" pass
Do nothing
__init__
"""Initialize with dataset_id. Example: https://dev.socrata.com/foundry/data.sfgov.org/vw6y-z8j6 e.g., city_id = data.sfgov.org e.g., dataset_id = vw6y-z8j6 Args: city_id: The Open City city identifier. dataset_id: The Open City dataset identifier. li...
def __init__(self, city_id: str, dataset_id: str, limit: int): """Initialize with dataset_id. Example: https://dev.socrata.com/foundry/data.sfgov.org/vw6y-z8j6 e.g., city_id = data.sfgov.org e.g., dataset_id = vw6y-z8j6 Args: city_id: The Open City city identifier. ...
Initialize with dataset_id. Example: https://dev.socrata.com/foundry/data.sfgov.org/vw6y-z8j6 e.g., city_id = data.sfgov.org e.g., dataset_id = vw6y-z8j6 Args: city_id: The Open City city identifier. dataset_id: The Open City dataset identifier. limit: The maximum number of documents to load.
test_add_documents
documents = [Document(page_content='hello world', metadata={'a': 1}), Document(page_content='foo bar', metadata={'b': 2}), Document( page_content='baz qux', metadata={'c': 3})] ids = retriever.add_documents(documents) assert retriever.client.count(retriever.collection_name, exact=True).count == 3 documents = [D...
def test_add_documents(retriever: QdrantSparseVectorRetriever) ->None: documents = [Document(page_content='hello world', metadata={'a': 1}), Document(page_content='foo bar', metadata={'b': 2}), Document( page_content='baz qux', metadata={'c': 3})] ids = retriever.add_documents(documents) ass...
null
_default_painless_scripting_query
"""For Painless Scripting Search, this is the default query.""" if not pre_filter: pre_filter = MATCH_ALL_QUERY source = __get_painless_scripting_source(space_type, vector_field=vector_field) return {'size': k, 'query': {'script_score': {'query': pre_filter, 'script': {'source': source, 'params': {'field': vect...
def _default_painless_scripting_query(query_vector: List[float], k: int=4, space_type: str='l2Squared', pre_filter: Optional[Dict]=None, vector_field: str='vector_field') ->Dict: """For Painless Scripting Search, this is the default query.""" if not pre_filter: pre_filter = MATCH_ALL_QUERY s...
For Painless Scripting Search, this is the default query.
on_tool_error
self._container.markdown('**Tool encountered an error...**') self._container.exception(error)
def on_tool_error(self, error: BaseException, **kwargs: Any) ->None: self._container.markdown('**Tool encountered an error...**') self._container.exception(error)
null
test_unexpected_response
mock_response = MagicMock() mock_response.status_code = 200 mock_response.json.return_value = [{'status': 'success'}] mock_post.return_value = mock_response with pytest.raises(RuntimeError): tool._run('some query')
def test_unexpected_response(mock_post: MagicMock) ->None: mock_response = MagicMock() mock_response.status_code = 200 mock_response.json.return_value = [{'status': 'success'}] mock_post.return_value = mock_response with pytest.raises(RuntimeError): tool._run('some query')
null
_chain_type
return 'refine_documents_chain'
@property def _chain_type(self) ->str: return 'refine_documents_chain'
null
test_llm_on_kv_singleio_dataset
llm = OpenAI(temperature=0) eval_config = RunEvalConfig(evaluators=[EvaluatorType.QA, EvaluatorType. CRITERIA]) run_on_dataset(dataset_name=kv_singleio_dataset_name, llm_or_chain_factory= llm, client=client, evaluation=eval_config, project_name= eval_project_name, tags=['shouldpass']) _check_all_feedback_pa...
def test_llm_on_kv_singleio_dataset(kv_singleio_dataset_name: str, eval_project_name: str, client: Client) ->None: llm = OpenAI(temperature=0) eval_config = RunEvalConfig(evaluators=[EvaluatorType.QA, EvaluatorType .CRITERIA]) run_on_dataset(dataset_name=kv_singleio_dataset_name, llm_or_...
null
format_xml
"""Format the intermediate steps as XML. Args: intermediate_steps: The intermediate steps. Returns: The intermediate steps as XML. """ log = '' for action, observation in intermediate_steps: log += ( f'<tool>{action.tool}</tool><tool_input>{action.tool_input}</tool_input><obser...
def format_xml(intermediate_steps: List[Tuple[AgentAction, str]]) ->str: """Format the intermediate steps as XML. Args: intermediate_steps: The intermediate steps. Returns: The intermediate steps as XML. """ log = '' for action, observation in intermediate_steps: log +=...
Format the intermediate steps as XML. Args: intermediate_steps: The intermediate steps. Returns: The intermediate steps as XML.
get_indices_infos
indices = _list_indices(database, include_indices=include_indices, ignore_indices=ignore_indices) mappings = database.indices.get_mapping(index=','.join(indices)) if sample_documents_in_index_info > 0: for k, v in mappings.items(): hits = database.search(index=k, query={'match_all': {}}, size= ...
def get_indices_infos(database, sample_documents_in_index_info=5, include_indices=None, ignore_indices=None) ->str: indices = _list_indices(database, include_indices=include_indices, ignore_indices=ignore_indices) mappings = database.indices.get_mapping(index=','.join(indices)) if sample_documen...
null
FakeParseFromString
def ParseFromString(self: Any, data: str) ->None: self.uuid = 'fake_uuid' return ParseFromString
def FakeParseFromString(**args: Any) ->Any: def ParseFromString(self: Any, data: str) ->None: self.uuid = 'fake_uuid' return ParseFromString
null
props_to_dict
"""Convert properties to a dictionary.""" properties = {} if not props: return properties for p in props: properties[format_property_key(p.key)] = p.value return properties
def props_to_dict(props) ->dict: """Convert properties to a dictionary.""" properties = {} if not props: return properties for p in props: properties[format_property_key(p.key)] = p.value return properties
Convert properties to a dictionary.
wait_for_futures
"""Wait for the given futures to complete.""" wait(self._futures)
def wait_for_futures(self) ->None: """Wait for the given futures to complete.""" wait(self._futures)
Wait for the given futures to complete.
test_embedding_distance_eval_chain
embedding_distance_eval_chain.distance_metric = EmbeddingDistance.COSINE prediction = 'Hi' reference = 'Hello' result = embedding_distance_eval_chain.evaluate_strings(prediction= prediction, reference=reference) assert result['score'] < 1.0
@pytest.mark.requires('scipy') def test_embedding_distance_eval_chain(embedding_distance_eval_chain: EmbeddingDistanceEvalChain) ->None: embedding_distance_eval_chain.distance_metric = EmbeddingDistance.COSINE prediction = 'Hi' reference = 'Hello' result = embedding_distance_eval_chain.evaluate_stri...
null
test_context_eval_chain
"""Test a simple eval chain.""" example = {'query': "What's my name", 'context': 'The name of this person is John Doe'} prediction = {'result': 'John Doe'} fake_qa_eval_chain = chain_cls.from_llm(FakeLLM()) outputs = fake_qa_eval_chain.evaluate([example, example], [prediction, prediction]) assert outputs[0] == ...
@pytest.mark.skipif(sys.platform.startswith('win'), reason= 'Test not supported on Windows') @pytest.mark.parametrize('chain_cls', [ContextQAEvalChain, CotQAEvalChain]) def test_context_eval_chain(chain_cls: Type[ContextQAEvalChain]) ->None: """Test a simple eval chain.""" example = {'query': "What's my nam...
Test a simple eval chain.
__init__
"""Initialize with Pinecone client.""" try: import pinecone except ImportError: raise ImportError( 'Could not import pinecone python package. Please install it with `pip install pinecone-client`.' ) if not isinstance(embedding, Embeddings): warnings.warn( 'Passing in `embedding` as a...
def __init__(self, index: Any, embedding: Union[Embeddings, Callable], text_key: str, namespace: Optional[str]=None, distance_strategy: Optional[DistanceStrategy]=DistanceStrategy.COSINE): """Initialize with Pinecone client.""" try: import pinecone except ImportError: raise ImportErr...
Initialize with Pinecone client.
_identifying_params
"""Get the identifying parameters.""" _model_kwargs = self.model_kwargs or {} return {**{'api_url': self.api_url, 'headers': self.headers}, **{ 'model_kwargs': _model_kwargs}}
@property def _identifying_params(self) ->Mapping[str, Any]: """Get the identifying parameters.""" _model_kwargs = self.model_kwargs or {} return {**{'api_url': self.api_url, 'headers': self.headers}, **{ 'model_kwargs': _model_kwargs}}
Get the identifying parameters.
on_tool_end_common
self.tool_ends += 1 self.ends += 1
def on_tool_end_common(self) ->None: self.tool_ends += 1 self.ends += 1
null
test_max_marginal_relevance_search
"""Test MRR search.""" metadatas = [{'page': i} for i in range(len(texts))] docsearch = DocArrayInMemorySearch.from_texts(texts, FakeEmbeddings(), metadatas=metadatas, metric=metric) output = docsearch.max_marginal_relevance_search('foo', k=2, fetch_k=3) assert output == [Document(page_content='foo', metadata={'pag...
@pytest.mark.parametrize('metric', ['cosine_sim', 'euclidean_dist', 'sqeuclidean_dist']) def test_max_marginal_relevance_search(metric: str, texts: List[str]) ->None: """Test MRR search.""" metadatas = [{'page': i} for i in range(len(texts))] docsearch = DocArrayInMemorySearch.from_texts(texts, FakeEmbe...
Test MRR search.
_run
"""Run the tool.""" query = self.api_resource.users().threads().get(userId='me', id=thread_id) thread_data = query.execute() if not isinstance(thread_data, dict): raise ValueError('The output of the query must be a list.') messages = thread_data['messages'] thread_data['messages'] = [] keys_to_keep = ['id', 'snippe...
def _run(self, thread_id: str, run_manager: Optional[ CallbackManagerForToolRun]=None) ->Dict: """Run the tool.""" query = self.api_resource.users().threads().get(userId='me', id=thread_id) thread_data = query.execute() if not isinstance(thread_data, dict): raise ValueError('The output of th...
Run the tool.
test_chat_fireworks
"""Test ChatFireworks wrapper.""" message = HumanMessage(content='What is the weather in Redwood City, CA today') response = chat([message]) assert isinstance(response, BaseMessage) assert isinstance(response.content, str)
@pytest.mark.scheduled def test_chat_fireworks(chat: ChatFireworks) ->None: """Test ChatFireworks wrapper.""" message = HumanMessage(content= 'What is the weather in Redwood City, CA today') response = chat([message]) assert isinstance(response, BaseMessage) assert isinstance(response.conten...
Test ChatFireworks wrapper.
update
"""Update based on prompt and llm_string.""" items = [self.cache_schema(prompt=prompt, llm=llm_string, response=dumps( gen), idx=i) for i, gen in enumerate(return_val)] with Session(self.engine) as session, session.begin(): for item in items: session.merge(item)
def update(self, prompt: str, llm_string: str, return_val: RETURN_VAL_TYPE ) ->None: """Update based on prompt and llm_string.""" items = [self.cache_schema(prompt=prompt, llm=llm_string, response= dumps(gen), idx=i) for i, gen in enumerate(return_val)] with Session(self.engine) as session, sess...
Update based on prompt and llm_string.
test_nuclia_tool
with mock.patch('nucliadb_protos.writer_pb2.BrokerMessage.ParseFromString', new_callable=FakeParseFromString): with mock.patch('requests.post', new_callable=fakepost): with mock.patch('requests.get', new_callable=fakeget): nua = NucliaUnderstandingAPI(enable_ml=False) uuid = nua....
@mock.patch.dict(os.environ, {'NUCLIA_NUA_KEY': '_a_key_'}) @pytest.mark.requires('nucliadb_protos') def test_nuclia_tool() ->None: with mock.patch('nucliadb_protos.writer_pb2.BrokerMessage.ParseFromString', new_callable=FakeParseFromString): with mock.patch('requests.post', new_callable=fakepost): ...
null
similarity_search
"""Return docs most similar to query.""" embedding = self._embedding.embed_query(query) documents = self.similarity_search_with_score_by_vector(embedding=embedding, k=k) return [doc for doc, _ in documents]
def similarity_search(self, query: str, k: int=4, **kwargs: Any) ->List[ Document]: """Return docs most similar to query.""" embedding = self._embedding.embed_query(query) documents = self.similarity_search_with_score_by_vector(embedding= embedding, k=k) return [doc for doc, _ in documents]
Return docs most similar to query.
from_llm_and_tools
raise NotImplementedError
@classmethod def from_llm_and_tools(cls, llm: BaseLanguageModel, tools: Sequence[ BaseTool], callback_manager: Optional[BaseCallbackManager]=None, ** kwargs: Any) ->BaseSingleActionAgent: raise NotImplementedError
null
_invocation_params
if is_openai_v1(): openai_params = {'model': self.deployment_name} else: openai_params = {'engine': self.deployment_name, 'api_type': self. openai_api_type, 'api_version': self.openai_api_version} return {**openai_params, **super()._invocation_params}
@property def _invocation_params(self) ->Dict[str, Any]: if is_openai_v1(): openai_params = {'model': self.deployment_name} else: openai_params = {'engine': self.deployment_name, 'api_type': self. openai_api_type, 'api_version': self.openai_api_version} return {**openai_params, *...
null
_persist_run
"""Persist a run.""" self.runs.append(self._copy_run(run))
def _persist_run(self, run: Run) ->None: """Persist a run.""" self.runs.append(self._copy_run(run))
Persist a run.
generate
"""Generate synthetic data using the given subject string. Args: subject (str): The subject the synthetic data will be about. runs (int): Number of times to generate the data. extra (str): Extra instructions for steerability in data generation. Returns: ...
def generate(self, subject: str, runs: int, *args: Any, **kwargs: Any) ->List[ str]: """Generate synthetic data using the given subject string. Args: subject (str): The subject the synthetic data will be about. runs (int): Number of times to generate the data. extra ...
Generate synthetic data using the given subject string. Args: subject (str): The subject the synthetic data will be about. runs (int): Number of times to generate the data. extra (str): Extra instructions for steerability in data generation. Returns: List[str]: List of generated synthetic data. Usage...
_create_message_dicts
params = dict(self._client_params) if stop is not None: if 'stop' in params: raise ValueError('`stop` found in both the input and default params.') params['stop'] = stop message_dicts = [_convert_message_to_dict(m) for m in messages] return message_dicts, params
def _create_message_dicts(self, messages: List[BaseMessage], stop: Optional [List[str]]) ->Tuple[List[Dict[str, Any]], Dict[str, Any]]: params = dict(self._client_params) if stop is not None: if 'stop' in params: raise ValueError( '`stop` found in both the input and defau...
null
format_to_openai_tool_messages
"""Convert (AgentAction, tool output) tuples into FunctionMessages. Args: intermediate_steps: Steps the LLM has taken to date, along with observations Returns: list of messages to send to the LLM for the next prediction """ messages = [] for agent_action, observation in intermediate_steps...
def format_to_openai_tool_messages(intermediate_steps: Sequence[Tuple[ AgentAction, str]]) ->List[BaseMessage]: """Convert (AgentAction, tool output) tuples into FunctionMessages. Args: intermediate_steps: Steps the LLM has taken to date, along with observations Returns: list of messag...
Convert (AgentAction, tool output) tuples into FunctionMessages. Args: intermediate_steps: Steps the LLM has taken to date, along with observations Returns: list of messages to send to the LLM for the next prediction
test_openai_stop_error
"""Test openai stop logic on bad configuration.""" llm = OpenAI(stop='3', temperature=0) with pytest.raises(ValueError): llm('write an ordered list of five items', stop=['\n'])
def test_openai_stop_error() ->None:
    """A stop sequence supplied both at init and at call time must raise."""
    model = OpenAI(temperature=0, stop='3')
    with pytest.raises(ValueError):
        model('write an ordered list of five items', stop=['\n'])
Test openai stop logic on bad configuration.
test_max_marginal_relevance_search_with_filter
"""Test end to end construction and MRR search.""" texts = ['foo', 'bar', 'baz'] metadatas = [{'page': i} for i in range(len(texts))] docsearch = Weaviate.from_texts(texts, embedding_openai, metadatas= metadatas, weaviate_url=weaviate_url) where_filter = {'path': ['page'], 'operator': 'Equal', 'valueNumber': 0} sta...
@pytest.mark.vcr(ignore_localhost=True) def test_max_marginal_relevance_search_with_filter(self, weaviate_url: str, embedding_openai: OpenAIEmbeddings) ->None: """Test end to end construction and MRR search.""" texts = ['foo', 'bar', 'baz'] metadatas = [{'page': i} for i in range(len(texts))] docsea...
Test end to end construction and MRR search.
test_document_compressor_pipeline
embeddings = OpenAIEmbeddings() splitter = CharacterTextSplitter(chunk_size=20, chunk_overlap=0, separator='. ' ) redundant_filter = EmbeddingsRedundantFilter(embeddings=embeddings) relevant_filter = EmbeddingsFilter(embeddings=embeddings, similarity_threshold=0.8) pipeline_filter = DocumentCompressorPipeline(t...
def test_document_compressor_pipeline() ->None: embeddings = OpenAIEmbeddings() splitter = CharacterTextSplitter(chunk_size=20, chunk_overlap=0, separator='. ') redundant_filter = EmbeddingsRedundantFilter(embeddings=embeddings) relevant_filter = EmbeddingsFilter(embeddings=embeddings, s...
null
test_json_equality_evaluator_evaluate_lists_permutation_invariant
evaluator = JsonEqualityEvaluator() prediction = '[{"a": 1, "b": 2}, {"a": 2, "b": 3}]' reference = '[{"a": 2, "b": 3}, {"a": 1, "b": 2}]' result = evaluator.evaluate_strings(prediction=prediction, reference=reference) assert result == {'score': True} prediction = '[{"a": 1, "b": 2}, {"a": 2, "b": 3}]' reference = '[{"...
def test_json_equality_evaluator_evaluate_lists_permutation_invariant() ->None: evaluator = JsonEqualityEvaluator() prediction = '[{"a": 1, "b": 2}, {"a": 2, "b": 3}]' reference = '[{"a": 2, "b": 3}, {"a": 1, "b": 2}]' result = evaluator.evaluate_strings(prediction=prediction, reference= referen...
null
_validate_google_libraries_installation
"""Validates that Google libraries that are needed are installed.""" try: from google.cloud import aiplatform, storage from google.oauth2 import service_account except ImportError: raise ImportError( 'You must run `pip install --upgrade google-cloud-aiplatform google-cloud-storage`to use the Matchin...
def _validate_google_libraries_installation(self) ->None: """Validates that Google libraries that are needed are installed.""" try: from google.cloud import aiplatform, storage from google.oauth2 import service_account except ImportError: raise ImportError( 'You must run ...
Validates that Google libraries that are needed are installed.
_import_promptlayer_chat
from langchain_community.llms.promptlayer_openai import PromptLayerOpenAIChat return PromptLayerOpenAIChat
def _import_promptlayer_chat() ->Any:
    """Lazily import and return the ``PromptLayerOpenAIChat`` class.

    The import is deferred to call time so the community package is only
    required when this LLM is actually instantiated.
    """
    from langchain_community.llms.promptlayer_openai import (
        PromptLayerOpenAIChat,
    )
    return PromptLayerOpenAIChat
null
new
""" Creates a new integration package. Should be run from libs/partners """ if not Path.cwd().name == 'partners' or not Path.cwd().parent.name == 'libs': typer.echo( 'This command should be run from the `libs/partners` directory in the langchain-ai/langchain monorepo. Continuing is NOT recommen...
@integration_cli.command() def new(name: Annotated[str, typer.Option(help= 'The name of the integration to create (e.g. `my-integration`)', prompt =True)], name_class: Annotated[Optional[str], typer.Option(help= 'The name of the integration in PascalCase. e.g. `MyIntegration`. This is used to name classes l...
Creates a new integration package. Should be run from libs/partners
test_clearing_conversation_memory
"""Test clearing the conversation memory.""" good_inputs = {'foo': 'bar', 'baz': 'foo'} good_outputs = {'bar': 'foo'} memory.save_context(good_inputs, good_outputs) memory.clear() assert memory.load_memory_variables({}) == {'baz': ''}
@pytest.mark.parametrize('memory', [ConversationBufferMemory(memory_key= 'baz'), ConversationSummaryMemory(llm=FakeLLM(), memory_key='baz'), ConversationBufferWindowMemory(memory_key='baz')]) def test_clearing_conversation_memory(memory: BaseMemory) ->None: """Test clearing the conversation memory.""" g...
Test clearing the conversation memory.
_call
"""Call to Modal endpoint.""" params = self.model_kwargs or {} params = {**params, **kwargs} response = requests.post(url=self.endpoint_url, headers={'Content-Type': 'application/json'}, json={'prompt': prompt, **params}) try: if prompt in response.json()['prompt']: response_json = response.json() excep...
def _call(self, prompt: str, stop: Optional[List[str]]=None, run_manager: Optional[CallbackManagerForLLMRun]=None, **kwargs: Any) ->str: """Call to Modal endpoint.""" params = self.model_kwargs or {} params = {**params, **kwargs} response = requests.post(url=self.endpoint_url, headers={'Content-Type...
Call to Modal endpoint.
create_structured_output_chain
"""[Legacy] Create an LLMChain that uses an Ernie function to get a structured output. Args: output_schema: Either a dictionary or pydantic.BaseModel class. If a dictionary is passed in, it's assumed to already be a valid JsonSchema. For best results, pydantic.BaseModels should have...
def create_structured_output_chain(output_schema: Union[Dict[str, Any], Type[BaseModel]], llm: BaseLanguageModel, prompt: BasePromptTemplate, *, output_key: str='function', output_parser: Optional[BaseLLMOutputParser ]=None, **kwargs: Any) ->LLMChain: """[Legacy] Create an LLMChain that uses an Ernie fu...
[Legacy] Create an LLMChain that uses an Ernie function to get a structured output. Args: output_schema: Either a dictionary or pydantic.BaseModel class. If a dictionary is passed in, it's assumed to already be a valid JsonSchema. For best results, pydantic.BaseModels should have docstrings describ...
from_huggingface_tokenizer
"""Text splitter that uses HuggingFace tokenizer to count length.""" try: from transformers import PreTrainedTokenizerBase if not isinstance(tokenizer, PreTrainedTokenizerBase): raise ValueError( 'Tokenizer received was not an instance of PreTrainedTokenizerBase' ) def _hugg...
@classmethod def from_huggingface_tokenizer(cls, tokenizer: Any, **kwargs: Any ) ->TextSplitter: """Text splitter that uses HuggingFace tokenizer to count length.""" try: from transformers import PreTrainedTokenizerBase if not isinstance(tokenizer, PreTrainedTokenizerBase): raise...
Text splitter that uses HuggingFace tokenizer to count length.
__init__
self.byte_iterator = iter(stream) self.buffer = io.BytesIO() self.read_pos = 0
def __init__(self, stream: Any) ->None:
    """Wrap *stream* so its byte chunks can be buffered and read sequentially.

    Attributes set:
        read_pos: offset of the next unread byte in ``buffer``.
        buffer: in-memory store for bytes already pulled from the iterator.
        byte_iterator: iterator over the underlying chunk stream.
    """
    self.read_pos = 0
    self.buffer = io.BytesIO()
    self.byte_iterator = iter(stream)
null
validate_environment
try: import openlm values['client'] = openlm.Completion except ImportError: raise ImportError( 'Could not import openlm python package. Please install it with `pip install openlm`.' ) if values['streaming']: raise ValueError('Streaming not supported with openlm') return values
@root_validator() def validate_environment(cls, values: Dict) ->Dict: try: import openlm values['client'] = openlm.Completion except ImportError: raise ImportError( 'Could not import openlm python package. Please install it with `pip install openlm`.' ) if val...
null
on_llm_error
"""Set the error flag.""" self.error = 1
def on_llm_error(self, error: BaseException, **kwargs: Any) ->None:
    """Record that an LLM call failed by raising the handler's error flag.

    The flag is an int (1), not the exception itself; the error details
    are discarded here.
    """
    self.error = 1
Set the error flag.
add_one
"""Add one.""" return x + 1
def add_one(x: int) ->int:
    """Return *x* incremented by one."""
    incremented = x + 1
    return incremented
Add one.
_agent_type
"""Return Identifier of an agent type.""" return AgentType.SELF_ASK_WITH_SEARCH
@property
def _agent_type(self) ->str:
    """Return the identifier of this agent type.

    Returns:
        ``AgentType.SELF_ASK_WITH_SEARCH`` — declared to return ``str``;
        presumably the enum is a str subclass, verify against AgentType's
        definition.
    """
    return AgentType.SELF_ASK_WITH_SEARCH
Return Identifier of an agent type.
add_texts
"""Run more texts through the embeddings and add to the vectorstore. Args: texts: Iterable of strings to add to the vectorstore. metadatas: Optional list of metadatas associated with the texts. kwargs: vectorstore specific parameters Returns: List of ids...
def add_texts(self, texts: Iterable[str], metadatas: Optional[List[dict]]= None, ids: Optional[List[str]]=None, **kwargs: Any) ->List[str]: """Run more texts through the embeddings and add to the vectorstore. Args: texts: Iterable of strings to add to the vectorstore. metadatas:...
Run more texts through the embeddings and add to the vectorstore. Args: texts: Iterable of strings to add to the vectorstore. metadatas: Optional list of metadatas associated with the texts. kwargs: vectorstore specific parameters Returns: List of ids from adding the texts into the vectorstore.
on_llm_error
"""Run when LLM errors.""" self.metrics['step'] += 1 self.metrics['errors'] += 1
def on_llm_error(self, error: BaseException, **kwargs: Any) ->None:
    """Run when the LLM errors: advance the step counter and count the error."""
    for counter in ('step', 'errors'):
        self.metrics[counter] += 1
Run when LLM errors.
test_convert_message_to_mistral_chat_message
result = _convert_message_to_mistral_chat_message(message) assert result == expected
@pytest.mark.parametrize(('message', 'expected'), [(SystemMessage(content= 'Hello'), MistralChatMessage(role='system', content='Hello')), ( HumanMessage(content='Hello'), MistralChatMessage(role='user', content= 'Hello')), (AIMessage(content='Hello'), MistralChatMessage(role= 'assistant', content='Hello...
null
_make_graph
self._networkx_wrapper = NetworkxEntityGraph() for entity in self.causal_operations.entities: for parent_name in entity.depends_on: self._networkx_wrapper._graph.add_edge(parent_name, entity.name, relation=entity.code) self.causal_operations.entities = [entity for entity in self. causal_oper...
def _make_graph(self) ->None: self._networkx_wrapper = NetworkxEntityGraph() for entity in self.causal_operations.entities: for parent_name in entity.depends_on: self._networkx_wrapper._graph.add_edge(parent_name, entity.name, relation=entity.code) self.causal_operations....
null
on_agent_action
"""Do nothing when agent takes a specific action."""
def on_agent_action(self, action: AgentAction, **kwargs: Any) ->Any:
    """No-op callback invoked when the agent takes an action.

    Returns None implicitly; subclasses may override to react to *action*.
    """
Do nothing when agent takes a specific action.
add_user_message
"""Convenience method for adding a human message string to the store. Args: message: The string contents of a human message. metadata: Optional metadata to attach to the message. """ self.add_message(HumanMessage(content=message), metadata=metadata)
def add_user_message(self, message: str, metadata: Optional[Dict[str, Any]] =None) ->None: """Convenience method for adding a human message string to the store. Args: message: The string contents of a human message. metadata: Optional metadata to attach to the message. "...
Convenience method for adding a human message string to the store. Args: message: The string contents of a human message. metadata: Optional metadata to attach to the message.
test_elasticsearch_delete_ids
"""Test delete methods from vector store.""" texts = ['foo', 'bar', 'baz', 'gni'] metadatas = [{'page': i} for i in range(len(texts))] docsearch = ElasticsearchStore(embedding=ConsistentFakeEmbeddings(), ** elasticsearch_connection, index_name=index_name) ids = docsearch.add_texts(texts, metadatas) output = docsear...
def test_elasticsearch_delete_ids(self, elasticsearch_connection: dict, index_name: str) ->None: """Test delete methods from vector store.""" texts = ['foo', 'bar', 'baz', 'gni'] metadatas = [{'page': i} for i in range(len(texts))] docsearch = ElasticsearchStore(embedding=ConsistentFakeEmbeddings(),...
Test delete methods from vector store.
__init__
self.plan = plan self.tasks = [] self.id_task_map = {} self.status = 'pending' for step in self.plan.steps: task = Task(step.task, step.id, step.dep, step.args, step.tool) self.tasks.append(task) self.id_task_map[step.id] = task
def __init__(self, plan: Plan):
    """Build the task list and the id→task lookup table from *plan*'s steps.

    Each step becomes a ``Task``; ``tasks`` preserves step order and
    ``id_task_map`` lets dependent steps be resolved by id. Overall
    status starts as ``'pending'``.
    """
    self.plan = plan
    self.status = 'pending'
    self.tasks = []
    self.id_task_map = {}
    for step in plan.steps:
        new_task = Task(step.task, step.id, step.dep, step.args, step.tool)
        self.tasks.append(new_task)
        self.id_task_map[step.id] = new_task
null
env_var_is_set
"""Check if an environment variable is set. Args: env_var (str): The name of the environment variable. Returns: bool: True if the environment variable is set, False otherwise. """ return env_var in os.environ and os.environ[env_var] not in ('', '0', 'false', 'False')
def env_var_is_set(env_var: str) ->bool: """Check if an environment variable is set. Args: env_var (str): The name of the environment variable. Returns: bool: True if the environment variable is set, False otherwise. """ return env_var in os.environ and os.environ[env_var] not in (...
Check if an environment variable is set. Args: env_var (str): The name of the environment variable. Returns: bool: True if the environment variable is set, False otherwise.
_euclidean_relevance_score_fn
"""Return a similarity score on a scale [0, 1].""" return 1.0 - distance / math.sqrt(2)
@staticmethod def _euclidean_relevance_score_fn(distance: float) ->float: """Return a similarity score on a scale [0, 1].""" return 1.0 - distance / math.sqrt(2)
Return a similarity score on a scale [0, 1].
_generate_rest_batches
from qdrant_client.http import models as rest texts_iterator = iter(texts) metadatas_iterator = iter(metadatas or []) ids_iterator = iter(ids or [uuid.uuid4().hex for _ in iter(texts)]) while (batch_texts := list(islice(texts_iterator, batch_size))): batch_metadatas = list(islice(metadatas_iterator, batch_size)) or...
def _generate_rest_batches(self, texts: Iterable[str], metadatas: Optional[ List[dict]]=None, ids: Optional[Sequence[str]]=None, batch_size: int=64 ) ->Generator[Tuple[List[str], List[rest.PointStruct]], None, None]: from qdrant_client.http import models as rest texts_iterator = iter(texts) metadata...
null
__init__
"""Initialize Couchbase document loader. Args: connection_string (str): The connection string to the Couchbase cluster. db_username (str): The username to connect to the Couchbase cluster. db_password (str): The password to connect to the Couchbase cluster. query...
def __init__(self, connection_string: str, db_username: str, db_password: str, query: str, *, page_content_fields: Optional[List[str]]=None, metadata_fields: Optional[List[str]]=None) ->None: """Initialize Couchbase document loader. Args: connection_string (str): The connection string t...
Initialize Couchbase document loader. Args: connection_string (str): The connection string to the Couchbase cluster. db_username (str): The username to connect to the Couchbase cluster. db_password (str): The password to connect to the Couchbase cluster. query (str): The SQL++ query to execute. pag...
to_document
"""Return a Document object.""" return Document(page_content=self.page_content, metadata=self.metadata)
def to_document(self) ->Document:
    """Build a ``Document`` from this record's content and metadata."""
    return Document(
        page_content=self.page_content,
        metadata=self.metadata,
    )
Return a Document object.