method_name
stringlengths
1
78
method_body
stringlengths
3
9.66k
full_code
stringlengths
31
10.7k
docstring
stringlengths
4
4.74k
get_output_schema
if self.custom_output_type is not None: return super().get_output_schema(config) return self.bound.get_output_schema(merge_configs(self.config, config))
def get_output_schema(self, config: Optional[RunnableConfig]=None) ->Type[ BaseModel]: if self.custom_output_type is not None: return super().get_output_schema(config) return self.bound.get_output_schema(merge_configs(self.config, config))
null
embed_query
"""Call out to Cohere's embedding endpoint. Args: text: The text to embed. Returns: Embeddings for the text. """ return self.embed([text], input_type='search_query')[0]
def embed_query(self, text: str) ->List[float]: """Call out to Cohere's embedding endpoint. Args: text: The text to embed. Returns: Embeddings for the text. """ return self.embed([text], input_type='search_query')[0]
Call out to Cohere's embedding endpoint. Args: text: The text to embed. Returns: Embeddings for the text.
from_openapi_spec
"""Create an APIOperation from an OpenAPI spec.""" operation = spec.get_operation(path, method) parameters = spec.get_parameters_for_operation(operation) properties = cls._get_properties_from_parameters(parameters, spec) operation_id = OpenAPISpec.get_cleaned_operation_id(operation, path, method) request_body = spec.get_request_body_for_operation(operation) api_request_body = APIRequestBody.from_request_body(request_body, spec ) if request_body is not None else None description = operation.description or operation.summary if not description and spec.paths is not None: description = spec.paths[path].description or spec.paths[path].summary return cls(operation_id=operation_id, description=description or '', base_url=spec.base_url, path=path, method=method, properties=properties, request_body=api_request_body)
@classmethod def from_openapi_spec(cls, spec: OpenAPISpec, path: str, method: str ) ->'APIOperation': """Create an APIOperation from an OpenAPI spec.""" operation = spec.get_operation(path, method) parameters = spec.get_parameters_for_operation(operation) properties = cls._get_properties_from_parameters(parameters, spec) operation_id = OpenAPISpec.get_cleaned_operation_id(operation, path, method ) request_body = spec.get_request_body_for_operation(operation) api_request_body = APIRequestBody.from_request_body(request_body, spec ) if request_body is not None else None description = operation.description or operation.summary if not description and spec.paths is not None: description = spec.paths[path].description or spec.paths[path].summary return cls(operation_id=operation_id, description=description or '', base_url=spec.base_url, path=path, method=method, properties= properties, request_body=api_request_body)
Create an APIOperation from an OpenAPI spec.
set_path
self.path = path
def set_path(self, path: str) ->None: self.path = path
null
test_chat_model_with_v2_callbacks
"""Test chat model callbacks fall back to on_llm_start.""" handler = FakeCallbackHandlerWithChatStart() llm = FakeListChatModel(callbacks=[handler], verbose=True, responses=[ 'fake response']) output = llm([HumanMessage(content='foo')]) assert output.content == 'fake response' assert handler.starts == 1 assert handler.ends == 1 assert handler.errors == 0 assert handler.llm_starts == 0 assert handler.llm_ends == 1 assert handler.chat_model_starts == 1
def test_chat_model_with_v2_callbacks() ->None: """Test chat model callbacks fall back to on_llm_start.""" handler = FakeCallbackHandlerWithChatStart() llm = FakeListChatModel(callbacks=[handler], verbose=True, responses=[ 'fake response']) output = llm([HumanMessage(content='foo')]) assert output.content == 'fake response' assert handler.starts == 1 assert handler.ends == 1 assert handler.errors == 0 assert handler.llm_starts == 0 assert handler.llm_ends == 1 assert handler.chat_model_starts == 1
Test chat model callbacks fall back to on_llm_start.
escape_str
return ''.join(f'{self.BS}{c}' if c in self.must_escape else c for c in value)
def escape_str(self, value: str) ->str: return ''.join(f'{self.BS}{c}' if c in self.must_escape else c for c in value)
null
input_keys
"""Return the singular input key. :meta private: """ return [self.input_key]
@property def input_keys(self) ->List[str]: """Return the singular input key. :meta private: """ return [self.input_key]
Return the singular input key. :meta private:
_on_run_update
if run.id != self.root_id: return if run.error is None: if self._arg_on_end is not None: call_func_with_variable_args(self._arg_on_end, run, self.config) elif self._arg_on_error is not None: call_func_with_variable_args(self._arg_on_error, run, self.config)
def _on_run_update(self, run: Run) ->None: if run.id != self.root_id: return if run.error is None: if self._arg_on_end is not None: call_func_with_variable_args(self._arg_on_end, run, self.config) elif self._arg_on_error is not None: call_func_with_variable_args(self._arg_on_error, run, self.config)
null
test_dashscope_embedding_documents_multiple
"""Test dashscope embeddings.""" documents = ['foo bar', 'bar foo', 'foo', 'foo0', 'foo1', 'foo2', 'foo3', 'foo4', 'foo5', 'foo6', 'foo7', 'foo8', 'foo9', 'foo10', 'foo11', 'foo12', 'foo13', 'foo14', 'foo15', 'foo16', 'foo17', 'foo18', 'foo19', 'foo20', 'foo21', 'foo22', 'foo23', 'foo24'] embedding = DashScopeEmbeddings(model='text-embedding-v1') output = embedding.embed_documents(documents) assert len(output) == 28 assert len(output[0]) == 1536 assert len(output[1]) == 1536 assert len(output[2]) == 1536
def test_dashscope_embedding_documents_multiple() ->None: """Test dashscope embeddings.""" documents = ['foo bar', 'bar foo', 'foo', 'foo0', 'foo1', 'foo2', 'foo3', 'foo4', 'foo5', 'foo6', 'foo7', 'foo8', 'foo9', 'foo10', 'foo11', 'foo12', 'foo13', 'foo14', 'foo15', 'foo16', 'foo17', 'foo18', 'foo19', 'foo20', 'foo21', 'foo22', 'foo23', 'foo24'] embedding = DashScopeEmbeddings(model='text-embedding-v1') output = embedding.embed_documents(documents) assert len(output) == 28 assert len(output[0]) == 1536 assert len(output[1]) == 1536 assert len(output[2]) == 1536
Test dashscope embeddings.
__init__
""" Args: api_url: https://platform.quip.com access_token: token of access quip API. Please refer: https://quip.com/dev/automation/documentation/current#section/Authentication/Get-Access-to-Quip's-APIs request_timeout: timeout of request, default 60s. """ try: from quip_api.quip import QuipClient except ImportError: raise ImportError( '`quip_api` package not found, please run `pip install quip_api`') self.quip_client = QuipClient(access_token=access_token, base_url=api_url, request_timeout=request_timeout)
def __init__(self, api_url: str, access_token: str, request_timeout: Optional[int]=60): """ Args: api_url: https://platform.quip.com access_token: token of access quip API. Please refer: https://quip.com/dev/automation/documentation/current#section/Authentication/Get-Access-to-Quip's-APIs request_timeout: timeout of request, default 60s. """ try: from quip_api.quip import QuipClient except ImportError: raise ImportError( '`quip_api` package not found, please run `pip install quip_api`') self.quip_client = QuipClient(access_token=access_token, base_url= api_url, request_timeout=request_timeout)
Args: api_url: https://platform.quip.com access_token: token of access quip API. Please refer: https://quip.com/dev/automation/documentation/current#section/Authentication/Get-Access-to-Quip's-APIs request_timeout: timeout of request, default 60s.
_process_result
"""Post-process the result from the server.""" message = ModelInferResponse() google.protobuf.json_format.Parse(json.dumps(result), message) infer_result = grpcclient.InferResult(message) np_res = infer_result.as_numpy('text_output') generated_text = '' if np_res is not None: generated_text = ''.join([token.decode() for token in np_res]) return generated_text
@staticmethod def _process_result(result: Dict[str, str]) ->str: """Post-process the result from the server.""" message = ModelInferResponse() google.protobuf.json_format.Parse(json.dumps(result), message) infer_result = grpcclient.InferResult(message) np_res = infer_result.as_numpy('text_output') generated_text = '' if np_res is not None: generated_text = ''.join([token.decode() for token in np_res]) return generated_text
Post-process the result from the server.
inner_embedding_query
def generate_filter_query() ->str: if search_filter is None: return '' filter_clause = ' AND '.join([create_filter(md_key, md_value) for md_key, md_value in search_filter.items()]) return filter_clause def create_filter(md_key: str, md_value: Any) ->str: md_filter_expr = self.config.field_name_mapping[md_key] if md_filter_expr is None: return '' expr = md_filter_expr.split(',') if len(expr) != 2: logger.error( f'filter {md_filter_expr} express is not correct, must contain mapping field and operator.' ) return '' md_filter_key = expr[0].strip() md_filter_operator = expr[1].strip() if isinstance(md_value, numbers.Number): return f'{md_filter_key} {md_filter_operator} {md_value}' return f'{md_filter_key}{md_filter_operator}"{md_value}"' def search_data() ->Dict[str, Any]: request = QueryRequest(table_name=self.config.table_name, namespace= self.config.namespace, vector=embedding, include_vector=True, output_fields=self.config.output_fields, filter= generate_filter_query(), top_k=k) query_result = self.ha3_engine_client.query(request) return json.loads(query_result.body) from alibabacloud_ha3engine_vector.models import QueryRequest try: json_response = search_data() if 'errorCode' in json_response and 'errorMsg' in json_response and len( json_response['errorMsg']) > 0: logger.error( f"query {self.config.endpoint} {self.config.instance_id} failed:{json_response['errorMsg']}." ) else: return json_response except Exception as e: logger.error( f'query instance endpoint:{self.config.endpoint} instance_id:{self.config.instance_id} failed.' , e) return {}
def inner_embedding_query(self, embedding: List[float], search_filter: Optional[Dict[str, Any]]=None, k: int=4) ->Dict[str, Any]: def generate_filter_query() ->str: if search_filter is None: return '' filter_clause = ' AND '.join([create_filter(md_key, md_value) for md_key, md_value in search_filter.items()]) return filter_clause def create_filter(md_key: str, md_value: Any) ->str: md_filter_expr = self.config.field_name_mapping[md_key] if md_filter_expr is None: return '' expr = md_filter_expr.split(',') if len(expr) != 2: logger.error( f'filter {md_filter_expr} express is not correct, must contain mapping field and operator.' ) return '' md_filter_key = expr[0].strip() md_filter_operator = expr[1].strip() if isinstance(md_value, numbers.Number): return f'{md_filter_key} {md_filter_operator} {md_value}' return f'{md_filter_key}{md_filter_operator}"{md_value}"' def search_data() ->Dict[str, Any]: request = QueryRequest(table_name=self.config.table_name, namespace =self.config.namespace, vector=embedding, include_vector=True, output_fields=self.config.output_fields, filter= generate_filter_query(), top_k=k) query_result = self.ha3_engine_client.query(request) return json.loads(query_result.body) from alibabacloud_ha3engine_vector.models import QueryRequest try: json_response = search_data() if ('errorCode' in json_response and 'errorMsg' in json_response and len(json_response['errorMsg']) > 0): logger.error( f"query {self.config.endpoint} {self.config.instance_id} failed:{json_response['errorMsg']}." ) else: return json_response except Exception as e: logger.error( f'query instance endpoint:{self.config.endpoint} instance_id:{self.config.instance_id} failed.' , e) return {}
null
__init__
"""Initialize a LatexTextSplitter.""" separators = self.get_separators_for_language(Language.LATEX) super().__init__(separators=separators, **kwargs)
def __init__(self, **kwargs: Any) ->None: """Initialize a LatexTextSplitter.""" separators = self.get_separators_for_language(Language.LATEX) super().__init__(separators=separators, **kwargs)
Initialize a LatexTextSplitter.
_import_edenai_EdenAiExplicitImageTool
from langchain_community.tools.edenai import EdenAiExplicitImageTool return EdenAiExplicitImageTool
def _import_edenai_EdenAiExplicitImageTool() ->Any: from langchain_community.tools.edenai import EdenAiExplicitImageTool return EdenAiExplicitImageTool
null
test_anthropic_model_kwargs
llm = ChatAnthropic(model_kwargs={'foo': 'bar'}) assert llm.model_kwargs == {'foo': 'bar'}
@pytest.mark.requires('anthropic') def test_anthropic_model_kwargs() ->None: llm = ChatAnthropic(model_kwargs={'foo': 'bar'}) assert llm.model_kwargs == {'foo': 'bar'}
null
from_llm
"""Initialize from LLM.""" qa_chain = LLMChain(llm=llm, prompt=qa_prompt) aql_generation_chain = LLMChain(llm=llm, prompt=aql_generation_prompt) aql_fix_chain = LLMChain(llm=llm, prompt=aql_fix_prompt) return cls(qa_chain=qa_chain, aql_generation_chain=aql_generation_chain, aql_fix_chain=aql_fix_chain, **kwargs)
@classmethod def from_llm(cls, llm: BaseLanguageModel, *, qa_prompt: BasePromptTemplate= AQL_QA_PROMPT, aql_generation_prompt: BasePromptTemplate= AQL_GENERATION_PROMPT, aql_fix_prompt: BasePromptTemplate= AQL_FIX_PROMPT, **kwargs: Any) ->ArangoGraphQAChain: """Initialize from LLM.""" qa_chain = LLMChain(llm=llm, prompt=qa_prompt) aql_generation_chain = LLMChain(llm=llm, prompt=aql_generation_prompt) aql_fix_chain = LLMChain(llm=llm, prompt=aql_fix_prompt) return cls(qa_chain=qa_chain, aql_generation_chain=aql_generation_chain, aql_fix_chain=aql_fix_chain, **kwargs)
Initialize from LLM.
_persist_run
pass
def _persist_run(self, run: Run) ->None: pass
null
_parse_python_function_docstring
"""Parse the function and argument descriptions from the docstring of a function. Assumes the function docstring follows Google Python style guide. """ docstring = inspect.getdoc(function) if docstring: docstring_blocks = docstring.split('\n\n') descriptors = [] args_block = None past_descriptors = False for block in docstring_blocks: if block.startswith('Args:'): args_block = block break elif block.startswith('Returns:') or block.startswith('Example:'): past_descriptors = True elif not past_descriptors: descriptors.append(block) else: continue description = ' '.join(descriptors) else: description = '' args_block = None arg_descriptions = {} if args_block: arg = None for line in args_block.split('\n')[1:]: if ':' in line: arg, desc = line.split(':', maxsplit=1) arg_descriptions[arg.strip()] = desc.strip() elif arg: arg_descriptions[arg.strip()] += ' ' + line.strip() return description, arg_descriptions
def _parse_python_function_docstring(function: Callable) ->Tuple[str, dict]: """Parse the function and argument descriptions from the docstring of a function. Assumes the function docstring follows Google Python style guide. """ docstring = inspect.getdoc(function) if docstring: docstring_blocks = docstring.split('\n\n') descriptors = [] args_block = None past_descriptors = False for block in docstring_blocks: if block.startswith('Args:'): args_block = block break elif block.startswith('Returns:') or block.startswith('Example:'): past_descriptors = True elif not past_descriptors: descriptors.append(block) else: continue description = ' '.join(descriptors) else: description = '' args_block = None arg_descriptions = {} if args_block: arg = None for line in args_block.split('\n')[1:]: if ':' in line: arg, desc = line.split(':', maxsplit=1) arg_descriptions[arg.strip()] = desc.strip() elif arg: arg_descriptions[arg.strip()] += ' ' + line.strip() return description, arg_descriptions
Parse the function and argument descriptions from the docstring of a function. Assumes the function docstring follows Google Python style guide.
get_lc_namespace
"""Get the namespace of the langchain object.""" return ['langchain', 'prompts', 'chat']
@classmethod def get_lc_namespace(cls) ->List[str]: """Get the namespace of the langchain object.""" return ['langchain', 'prompts', 'chat']
Get the namespace of the langchain object.
test_allows_extra_kwargs
"""Test formatting allows extra keyword arguments.""" template = 'This is a {foo} test.' output = formatter.format(template, foo='good', bar='oops') expected_output = 'This is a good test.' assert output == expected_output
def test_allows_extra_kwargs() ->None: """Test formatting allows extra keyword arguments.""" template = 'This is a {foo} test.' output = formatter.format(template, foo='good', bar='oops') expected_output = 'This is a good test.' assert output == expected_output
Test formatting allows extra keyword arguments.
on_tool_error
if parent_run_id is None: self.increment()
def on_tool_error(self, error: BaseException, *, run_id: UUID, parent_run_id: Optional[UUID]=None, **kwargs: Any) ->Any: if parent_run_id is None: self.increment()
null
get_lc_namespace
"""Get the namespace of the langchain object.""" return ['langchain', 'chat_models', 'vertexai']
@classmethod def get_lc_namespace(cls) ->List[str]: """Get the namespace of the langchain object.""" return ['langchain', 'chat_models', 'vertexai']
Get the namespace of the langchain object.
post
...
@abstractmethod def post(self, request: Any, transform_output_fn: Optional[Callable[..., str]]=None) ->Any: ...
null
_refresh_signer
if self.auth.get('signer', None) and hasattr(self.auth['signer'], 'refresh_security_token'): self.auth['signer'].refresh_security_token()
def _refresh_signer(self) ->None: if self.auth.get('signer', None) and hasattr(self.auth['signer'], 'refresh_security_token'): self.auth['signer'].refresh_security_token()
null
on_tool_start
"""Start a trace for a tool run.""" parent_run_id_ = str(parent_run_id) if parent_run_id else None execution_order = self._get_execution_order(parent_run_id_) start_time = datetime.now(timezone.utc) if metadata: kwargs.update({'metadata': metadata}) tool_run = Run(id=run_id, parent_run_id=parent_run_id, serialized= serialized, inputs={'input': input_str}, extra=kwargs, events=[{'name': 'start', 'time': start_time}], start_time=start_time, execution_order= execution_order, child_execution_order=execution_order, child_runs=[], run_type='tool', tags=tags or [], name=name) self._start_trace(tool_run) self._on_tool_start(tool_run) return tool_run
def on_tool_start(self, serialized: Dict[str, Any], input_str: str, *, run_id: UUID, tags: Optional[List[str]]=None, parent_run_id: Optional[ UUID]=None, metadata: Optional[Dict[str, Any]]=None, name: Optional[str ]=None, **kwargs: Any) ->Run: """Start a trace for a tool run.""" parent_run_id_ = str(parent_run_id) if parent_run_id else None execution_order = self._get_execution_order(parent_run_id_) start_time = datetime.now(timezone.utc) if metadata: kwargs.update({'metadata': metadata}) tool_run = Run(id=run_id, parent_run_id=parent_run_id, serialized= serialized, inputs={'input': input_str}, extra=kwargs, events=[{ 'name': 'start', 'time': start_time}], start_time=start_time, execution_order=execution_order, child_execution_order= execution_order, child_runs=[], run_type='tool', tags=tags or [], name=name) self._start_trace(tool_run) self._on_tool_start(tool_run) return tool_run
Start a trace for a tool run.
embed_query
"""Embed a text. Args: text: The text to embed. Returns: Embedding for the text. """ task_type = self.task_type or 'retrieval_query' return self._embed([text], task_type=task_type)[0]
def embed_query(self, text: str) ->List[float]: """Embed a text. Args: text: The text to embed. Returns: Embedding for the text. """ task_type = self.task_type or 'retrieval_query' return self._embed([text], task_type=task_type)[0]
Embed a text. Args: text: The text to embed. Returns: Embedding for the text.
_convert_message_to_dict
if isinstance(message, ChatMessage): message_dict = {'role': message.role, 'content': message.content} elif isinstance(message, HumanMessage): message_dict = {'role': 'user', 'content': message.content} elif isinstance(message, AIMessage): message_dict = {'role': 'assistant', 'content': message.content} if 'function_call' in message.additional_kwargs: message_dict['function_call'] = message.additional_kwargs[ 'function_call'] elif isinstance(message, SystemMessage): message_dict = {'role': 'system', 'content': message.content} elif isinstance(message, FunctionMessage): message_dict = {'role': 'function', 'content': message.content, 'name': message.name} else: raise ValueError(f'Got unknown type {message}') if 'name' in message.additional_kwargs: message_dict['name'] = message.additional_kwargs['name'] return message_dict
def _convert_message_to_dict(message: BaseMessage) ->dict: if isinstance(message, ChatMessage): message_dict = {'role': message.role, 'content': message.content} elif isinstance(message, HumanMessage): message_dict = {'role': 'user', 'content': message.content} elif isinstance(message, AIMessage): message_dict = {'role': 'assistant', 'content': message.content} if 'function_call' in message.additional_kwargs: message_dict['function_call'] = message.additional_kwargs[ 'function_call'] elif isinstance(message, SystemMessage): message_dict = {'role': 'system', 'content': message.content} elif isinstance(message, FunctionMessage): message_dict = {'role': 'function', 'content': message.content, 'name': message.name} else: raise ValueError(f'Got unknown type {message}') if 'name' in message.additional_kwargs: message_dict['name'] = message.additional_kwargs['name'] return message_dict
null
input_keys
"""Return the input keys. Returns: List of input keys. """ return self._input_keys
@property def input_keys(self) ->List[str]: """Return the input keys. Returns: List of input keys. """ return self._input_keys
Return the input keys. Returns: List of input keys.
_call
"""Return next response""" response = self.responses[self.i] if self.i < len(self.responses) - 1: self.i += 1 else: self.i = 0 return response
def _call(self, prompt: str, stop: Optional[List[str]]=None, run_manager: Optional[CallbackManagerForLLMRun]=None, **kwargs: Any) ->str: """Return next response""" response = self.responses[self.i] if self.i < len(self.responses) - 1: self.i += 1 else: self.i = 0 return response
Return next response
test_dereference_refs_multiple_refs
schema = {'type': 'object', 'properties': {'first_name': {'$ref': '#/$defs/name'}, 'other': {'$ref': '#/$defs/other'}}, '$defs': {'name': {'type': 'string'}, 'other': {'type': 'object', 'properties': {'age': 'int', 'height': 'int'}}}} expected = {'type': 'object', 'properties': {'first_name': {'type': 'string'}, 'other': {'type': 'object', 'properties': {'age': 'int', 'height': 'int'}}}, '$defs': {'name': {'type': 'string'}, 'other': { 'type': 'object', 'properties': {'age': 'int', 'height': 'int'}}}} actual = dereference_refs(schema) assert actual == expected
def test_dereference_refs_multiple_refs() ->None: schema = {'type': 'object', 'properties': {'first_name': {'$ref': '#/$defs/name'}, 'other': {'$ref': '#/$defs/other'}}, '$defs': { 'name': {'type': 'string'}, 'other': {'type': 'object', 'properties': {'age': 'int', 'height': 'int'}}}} expected = {'type': 'object', 'properties': {'first_name': {'type': 'string'}, 'other': {'type': 'object', 'properties': {'age': 'int', 'height': 'int'}}}, '$defs': {'name': {'type': 'string'}, 'other': {'type': 'object', 'properties': {'age': 'int', 'height': 'int'}}}} actual = dereference_refs(schema) assert actual == expected
null
get_history
window = 3 data = graph.query( """ MATCH (u:User {id:$user_id})-[:HAS_SESSION]->(s:Session {id:$session_id}), (s)-[:LAST_MESSAGE]->(last_message) MATCH p=(last_message)<-[:NEXT*0..""" + str(window) + """]-() WITH p, length(p) AS length ORDER BY length DESC LIMIT 1 UNWIND reverse(nodes(p)) AS node MATCH (node)-[:HAS_ANSWER]->(answer) RETURN {question:node.text, answer:answer.text} AS result """ , params=input) history = convert_messages(data) return history.messages
def get_history(input: Dict[str, Any]) ->List[Union[HumanMessage, AIMessage]]: window = 3 data = graph.query( """ MATCH (u:User {id:$user_id})-[:HAS_SESSION]->(s:Session {id:$session_id}), (s)-[:LAST_MESSAGE]->(last_message) MATCH p=(last_message)<-[:NEXT*0..""" + str(window) + """]-() WITH p, length(p) AS length ORDER BY length DESC LIMIT 1 UNWIND reverse(nodes(p)) AS node MATCH (node)-[:HAS_ANSWER]->(answer) RETURN {question:node.text, answer:answer.text} AS result """ , params=input) history = convert_messages(data) return history.messages
null
__post_init__
self.header = {'Authorization': self.bearer}
def __post_init__(self) ->None: self.header = {'Authorization': self.bearer}
null
__init__
process_attachments = unstructured_kwargs.get('process_attachments') attachment_partitioner = unstructured_kwargs.get('attachment_partitioner') if process_attachments and attachment_partitioner is None: from unstructured.partition.auto import partition unstructured_kwargs['attachment_partitioner'] = partition super().__init__(file_path=file_path, mode=mode, **unstructured_kwargs)
def __init__(self, file_path: str, mode: str='single', ** unstructured_kwargs: Any): process_attachments = unstructured_kwargs.get('process_attachments') attachment_partitioner = unstructured_kwargs.get('attachment_partitioner') if process_attachments and attachment_partitioner is None: from unstructured.partition.auto import partition unstructured_kwargs['attachment_partitioner'] = partition super().__init__(file_path=file_path, mode=mode, **unstructured_kwargs)
null
embeddings
"""Return the embeddings.""" return self._embedding
@property def embeddings(self) ->Embeddings: """Return the embeddings.""" return self._embedding
Return the embeddings.
set_repo
self.repo = repo
def set_repo(self, repo: str) ->None: self.repo = repo
null
on_chain_start
"""Do nothing when LLM chain starts.""" pass
def on_chain_start(self, serialized: Dict[str, Any], inputs: Dict[str, Any], **kwargs: Any) ->None: """Do nothing when LLM chain starts.""" pass
Do nothing when LLM chain starts.
regex_match_string_evaluator
"""Create a RegexMatchStringEvaluator with default configuration.""" return RegexMatchStringEvaluator()
@pytest.fixture def regex_match_string_evaluator() ->RegexMatchStringEvaluator: """Create a RegexMatchStringEvaluator with default configuration.""" return RegexMatchStringEvaluator()
Create a RegexMatchStringEvaluator with default configuration.
test_unstructured_api_file_loader_multiple_files
"""Test unstructured loader.""" file_paths = [os.path.join(EXAMPLE_DOCS_DIRECTORY, 'layout-parser-paper.pdf'), os.path.join(EXAMPLE_DOCS_DIRECTORY, 'whatsapp_chat.txt')] loader = UnstructuredAPIFileLoader(file_path=file_paths, api_key= 'FAKE_API_KEY', strategy='fast', mode='elements') docs = loader.load() assert len(docs) > 1
def test_unstructured_api_file_loader_multiple_files() ->None: """Test unstructured loader.""" file_paths = [os.path.join(EXAMPLE_DOCS_DIRECTORY, 'layout-parser-paper.pdf'), os.path.join(EXAMPLE_DOCS_DIRECTORY, 'whatsapp_chat.txt')] loader = UnstructuredAPIFileLoader(file_path=file_paths, api_key= 'FAKE_API_KEY', strategy='fast', mode='elements') docs = loader.load() assert len(docs) > 1
Test unstructured loader.
get_media_metadata_manifest
response = requests.get(IMAGE_AND_VIDEO_LIBRARY_URL + '/asset/' + query) return response.json()
def get_media_metadata_manifest(self, query: str) ->str: response = requests.get(IMAGE_AND_VIDEO_LIBRARY_URL + '/asset/' + query) return response.json()
null
__init__
"""Initialize tool.""" super(Tool, self).__init__(name=name, func=func, description=description, **kwargs)
def __init__(self, name: str, func: Optional[Callable], description: str, **kwargs: Any) ->None: """Initialize tool.""" super(Tool, self).__init__(name=name, func=func, description= description, **kwargs)
Initialize tool.
test_agent_iterator_output_structure
"""Test the output structure of AgentExecutorIterator.""" agent = _get_agent() agent_iter = agent.iter(inputs='when was langchain made') for step in agent_iter: assert isinstance(step, dict) if 'intermediate_step' in step: assert isinstance(step['intermediate_step'], list) elif 'output' in step: assert isinstance(step['output'], str) else: assert False, 'Unexpected output structure'
def test_agent_iterator_output_structure() ->None: """Test the output structure of AgentExecutorIterator.""" agent = _get_agent() agent_iter = agent.iter(inputs='when was langchain made') for step in agent_iter: assert isinstance(step, dict) if 'intermediate_step' in step: assert isinstance(step['intermediate_step'], list) elif 'output' in step: assert isinstance(step['output'], str) else: assert False, 'Unexpected output structure'
Test the output structure of AgentExecutorIterator.
on_text
"""Do nothing""" pass
def on_text(self, text: str, **kwargs: Any) ->None: """Do nothing""" pass
Do nothing
__init__
"""Initialize with dataset_id. Example: https://dev.socrata.com/foundry/data.sfgov.org/vw6y-z8j6 e.g., city_id = data.sfgov.org e.g., dataset_id = vw6y-z8j6 Args: city_id: The Open City city identifier. dataset_id: The Open City dataset identifier. limit: The maximum number of documents to load. """ self.city_id = city_id self.dataset_id = dataset_id self.limit = limit
def __init__(self, city_id: str, dataset_id: str, limit: int): """Initialize with dataset_id. Example: https://dev.socrata.com/foundry/data.sfgov.org/vw6y-z8j6 e.g., city_id = data.sfgov.org e.g., dataset_id = vw6y-z8j6 Args: city_id: The Open City city identifier. dataset_id: The Open City dataset identifier. limit: The maximum number of documents to load. """ self.city_id = city_id self.dataset_id = dataset_id self.limit = limit
Initialize with dataset_id. Example: https://dev.socrata.com/foundry/data.sfgov.org/vw6y-z8j6 e.g., city_id = data.sfgov.org e.g., dataset_id = vw6y-z8j6 Args: city_id: The Open City city identifier. dataset_id: The Open City dataset identifier. limit: The maximum number of documents to load.
test_add_documents
documents = [Document(page_content='hello world', metadata={'a': 1}), Document(page_content='foo bar', metadata={'b': 2}), Document( page_content='baz qux', metadata={'c': 3})] ids = retriever.add_documents(documents) assert retriever.client.count(retriever.collection_name, exact=True).count == 3 documents = [Document(page_content='hello world'), Document(page_content= 'foo bar'), Document(page_content='baz qux')] ids = retriever.add_documents(documents) assert len(ids) == 3 assert retriever.client.count(retriever.collection_name, exact=True).count == 6
def test_add_documents(retriever: QdrantSparseVectorRetriever) ->None: documents = [Document(page_content='hello world', metadata={'a': 1}), Document(page_content='foo bar', metadata={'b': 2}), Document( page_content='baz qux', metadata={'c': 3})] ids = retriever.add_documents(documents) assert retriever.client.count(retriever.collection_name, exact=True ).count == 3 documents = [Document(page_content='hello world'), Document( page_content='foo bar'), Document(page_content='baz qux')] ids = retriever.add_documents(documents) assert len(ids) == 3 assert retriever.client.count(retriever.collection_name, exact=True ).count == 6
null
_default_painless_scripting_query
"""For Painless Scripting Search, this is the default query.""" if not pre_filter: pre_filter = MATCH_ALL_QUERY source = __get_painless_scripting_source(space_type, vector_field=vector_field) return {'size': k, 'query': {'script_score': {'query': pre_filter, 'script': {'source': source, 'params': {'field': vector_field, 'query_value': query_vector}}}}}
def _default_painless_scripting_query(query_vector: List[float], k: int=4,
    space_type: str='l2Squared', pre_filter: Optional[Dict]=None,
    vector_field: str='vector_field') ->Dict:
    """Build the default Painless Scripting ``script_score`` query body.

    Args:
        query_vector: The query embedding.
        k: Number of hits to request.
        space_type: Distance space used to pick the script source.
        pre_filter: Optional filter query; defaults to match-all.
        vector_field: Name of the document field holding the vector.
    """
    # Fall back to a match-all clause when no filter is supplied.
    filter_clause = pre_filter if pre_filter else MATCH_ALL_QUERY
    script_source = __get_painless_scripting_source(space_type,
        vector_field=vector_field)
    script = {'source': script_source, 'params': {'field': vector_field,
        'query_value': query_vector}}
    return {'size': k, 'query': {'script_score': {'query': filter_clause,
        'script': script}}}
For Painless Scripting Search, this is the default query.
on_tool_error
self._container.markdown('**Tool encountered an error...**') self._container.exception(error)
def on_tool_error(self, error: BaseException, **kwargs: Any) ->None:
    """Render a tool failure in the attached display container."""
    container = self._container
    container.markdown('**Tool encountered an error...**')
    container.exception(error)
null
test_unexpected_response
mock_response = MagicMock() mock_response.status_code = 200 mock_response.json.return_value = [{'status': 'success'}] mock_post.return_value = mock_response with pytest.raises(RuntimeError): tool._run('some query')
def test_unexpected_response(mock_post: MagicMock) ->None:
    """A 200 response with an unexpected (list) payload must raise RuntimeError."""
    mock_response = MagicMock()
    mock_response.status_code = 200
    # A list body instead of the expected dict shape triggers the failure path.
    mock_response.json.return_value = [{'status': 'success'}]
    mock_post.return_value = mock_response
    # NOTE(review): relies on a module-level `tool` object not visible in this
    # chunk — presumably a fixture/global defined earlier in the file; confirm.
    with pytest.raises(RuntimeError):
        tool._run('some query')
null
_chain_type
return 'refine_documents_chain'
@property
def _chain_type(self) ->str:
    """Identifier used when serializing this chain."""
    chain_type = 'refine_documents_chain'
    return chain_type
null
test_llm_on_kv_singleio_dataset
llm = OpenAI(temperature=0) eval_config = RunEvalConfig(evaluators=[EvaluatorType.QA, EvaluatorType. CRITERIA]) run_on_dataset(dataset_name=kv_singleio_dataset_name, llm_or_chain_factory= llm, client=client, evaluation=eval_config, project_name= eval_project_name, tags=['shouldpass']) _check_all_feedback_passed(eval_project_name, client)
def test_llm_on_kv_singleio_dataset(kv_singleio_dataset_name: str,
    eval_project_name: str, client: Client) ->None:
    """Run QA and criteria evaluators over the key-value single-IO dataset."""
    eval_config = RunEvalConfig(evaluators=[EvaluatorType.QA,
        EvaluatorType.CRITERIA])
    llm = OpenAI(temperature=0)
    run_on_dataset(dataset_name=kv_singleio_dataset_name,
        llm_or_chain_factory=llm, client=client, evaluation=eval_config,
        project_name=eval_project_name, tags=['shouldpass'])
    # All feedback recorded for the project must have passed.
    _check_all_feedback_passed(eval_project_name, client)
null
format_xml
"""Format the intermediate steps as XML. Args: intermediate_steps: The intermediate steps. Returns: The intermediate steps as XML. """ log = '' for action, observation in intermediate_steps: log += ( f'<tool>{action.tool}</tool><tool_input>{action.tool_input}</tool_input><observation>{observation}</observation>' ) return log
def format_xml(intermediate_steps: List[Tuple[AgentAction, str]]) ->str:
    """Render (action, observation) pairs as a flat XML-style transcript.

    Args:
        intermediate_steps: The intermediate steps.

    Returns:
        The intermediate steps as XML.
    """
    fragments = [
        f'<tool>{step.tool}</tool><tool_input>{step.tool_input}</tool_input><observation>{obs}</observation>'
        for step, obs in intermediate_steps]
    return ''.join(fragments)
Format the intermediate steps as XML. Args: intermediate_steps: The intermediate steps. Returns: The intermediate steps as XML.
get_indices_infos
indices = _list_indices(database, include_indices=include_indices, ignore_indices=ignore_indices) mappings = database.indices.get_mapping(index=','.join(indices)) if sample_documents_in_index_info > 0: for k, v in mappings.items(): hits = database.search(index=k, query={'match_all': {}}, size= sample_documents_in_index_info)['hits']['hits'] hits = [str(hit['_source']) for hit in hits] mappings[k]['mappings'] = str(v) + '\n\n/*\n' + '\n'.join(hits ) + '\n*/' return '\n\n'.join(["""Mapping for index {}: {}""".format(index, mappings[ index]['mappings']) for index in mappings])
def get_indices_infos(database, sample_documents_in_index_info=5,
    include_indices=None, ignore_indices=None) ->str:
    """Describe index mappings, optionally appending sampled documents."""
    selected = _list_indices(database, include_indices=include_indices,
        ignore_indices=ignore_indices)
    mappings = database.indices.get_mapping(index=','.join(selected))
    if sample_documents_in_index_info > 0:
        for index_name, mapping in mappings.items():
            raw_hits = database.search(index=index_name, query={
                'match_all': {}}, size=sample_documents_in_index_info)['hits'
                ]['hits']
            samples = [str(hit['_source']) for hit in raw_hits]
            # Append the sampled docs inside a /* ... */ trailer.
            mappings[index_name]['mappings'] = str(mapping
                ) + '\n\n/*\n' + '\n'.join(samples) + '\n*/'
    return '\n\n'.join('Mapping for index {}:\n{}'.format(name, mappings[
        name]['mappings']) for name in mappings)
null
FakeParseFromString
def ParseFromString(self: Any, data: str) ->None: self.uuid = 'fake_uuid' return ParseFromString
def FakeParseFromString(**args: Any) ->Any:
    """Factory returning a stub ParseFromString that stamps a fixed uuid."""

    def ParseFromString(self: Any, data: str) ->None:
        # Ignore the payload entirely; just mark the object as parsed.
        self.uuid = 'fake_uuid'
    return ParseFromString
null
props_to_dict
"""Convert properties to a dictionary.""" properties = {} if not props: return properties for p in props: properties[format_property_key(p.key)] = p.value return properties
def props_to_dict(props) ->dict:
    """Convert properties to a dictionary keyed by their formatted key."""
    if not props:
        return {}
    return {format_property_key(prop.key): prop.value for prop in props}
Convert properties to a dictionary.
wait_for_futures
"""Wait for the given futures to complete.""" wait(self._futures)
def wait_for_futures(self) ->None:
    """Block until every tracked future has completed."""
    pending = self._futures
    wait(pending)
Wait for the given futures to complete.
test_embedding_distance_eval_chain
embedding_distance_eval_chain.distance_metric = EmbeddingDistance.COSINE prediction = 'Hi' reference = 'Hello' result = embedding_distance_eval_chain.evaluate_strings(prediction= prediction, reference=reference) assert result['score'] < 1.0
@pytest.mark.requires('scipy')
def test_embedding_distance_eval_chain(embedding_distance_eval_chain:
    EmbeddingDistanceEvalChain) ->None:
    """Cosine distance between two similar greetings stays below 1.0."""
    embedding_distance_eval_chain.distance_metric = EmbeddingDistance.COSINE
    result = embedding_distance_eval_chain.evaluate_strings(prediction='Hi',
        reference='Hello')
    assert result['score'] < 1.0
null
test_context_eval_chain
"""Test a simple eval chain.""" example = {'query': "What's my name", 'context': 'The name of this person is John Doe'} prediction = {'result': 'John Doe'} fake_qa_eval_chain = chain_cls.from_llm(FakeLLM()) outputs = fake_qa_eval_chain.evaluate([example, example], [prediction, prediction]) assert outputs[0] == outputs[1] assert 'text' in outputs[0] assert outputs[0]['text'] == 'foo'
@pytest.mark.skipif(sys.platform.startswith('win'), reason=
    'Test not supported on Windows')
@pytest.mark.parametrize('chain_cls', [ContextQAEvalChain, CotQAEvalChain])
def test_context_eval_chain(chain_cls: Type[ContextQAEvalChain]) ->None:
    """Test a simple eval chain."""
    example = {'query': "What's my name", 'context':
        'The name of this person is John Doe'}
    prediction = {'result': 'John Doe'}
    evaluator = chain_cls.from_llm(FakeLLM())
    outputs = evaluator.evaluate([example, example], [prediction, prediction])
    first, second = outputs
    # Identical inputs must yield identical graded outputs.
    assert first == second
    assert 'text' in first
    assert first['text'] == 'foo'
Test a simple eval chain.
__init__
"""Initialize with Pinecone client.""" try: import pinecone except ImportError: raise ImportError( 'Could not import pinecone python package. Please install it with `pip install pinecone-client`.' ) if not isinstance(embedding, Embeddings): warnings.warn( 'Passing in `embedding` as a Callable is deprecated. Please pass in an Embeddings object instead.' ) if not isinstance(index, pinecone.index.Index): raise ValueError( f'client should be an instance of pinecone.index.Index, got {type(index)}' ) self._index = index self._embedding = embedding self._text_key = text_key self._namespace = namespace self.distance_strategy = distance_strategy
def __init__(self, index: Any, embedding: Union[Embeddings, Callable],
    text_key: str, namespace: Optional[str]=None, distance_strategy:
    Optional[DistanceStrategy]=DistanceStrategy.COSINE):
    """Initialize with Pinecone client.

    Args:
        index: An existing ``pinecone.index.Index`` instance.
        embedding: Embeddings object (a bare callable is deprecated).
        text_key: Metadata key under which document text is stored.
        namespace: Optional Pinecone namespace to scope operations to.
        distance_strategy: Distance metric used for scoring.
    """
    # Import lazily so the class can be imported without pinecone installed.
    try:
        import pinecone
    except ImportError:
        raise ImportError(
            'Could not import pinecone python package. Please install it with `pip install pinecone-client`.'
            )
    # Callables are still accepted for backwards compatibility, but warned on.
    if not isinstance(embedding, Embeddings):
        warnings.warn(
            'Passing in `embedding` as a Callable is deprecated. Please pass in an Embeddings object instead.'
            )
    if not isinstance(index, pinecone.index.Index):
        raise ValueError(
            f'client should be an instance of pinecone.index.Index, got {type(index)}'
            )
    self._index = index
    self._embedding = embedding
    self._text_key = text_key
    self._namespace = namespace
    self.distance_strategy = distance_strategy
Initialize with Pinecone client.
_identifying_params
"""Get the identifying parameters.""" _model_kwargs = self.model_kwargs or {} return {**{'api_url': self.api_url, 'headers': self.headers}, **{ 'model_kwargs': _model_kwargs}}
@property
def _identifying_params(self) ->Mapping[str, Any]:
    """Get the identifying parameters."""
    # Normalize an unset model_kwargs to an empty dict.
    kwargs = self.model_kwargs or {}
    params = {'api_url': self.api_url, 'headers': self.headers}
    params['model_kwargs'] = kwargs
    return params
Get the identifying parameters.
on_tool_end_common
self.tool_ends += 1 self.ends += 1
def on_tool_end_common(self) ->None:
    """Bump the counters recorded whenever any tool finishes."""
    self.tool_ends = self.tool_ends + 1
    self.ends = self.ends + 1
null
test_max_marginal_relevance_search
"""Test MRR search.""" metadatas = [{'page': i} for i in range(len(texts))] docsearch = DocArrayInMemorySearch.from_texts(texts, FakeEmbeddings(), metadatas=metadatas, metric=metric) output = docsearch.max_marginal_relevance_search('foo', k=2, fetch_k=3) assert output == [Document(page_content='foo', metadata={'page': 0}), Document(page_content='bar', metadata={'page': 1})]
@pytest.mark.parametrize('metric', ['cosine_sim', 'euclidean_dist',
    'sqeuclidean_dist'])
def test_max_marginal_relevance_search(metric: str, texts: List[str]) ->None:
    """Test MRR search."""
    metadatas = [{'page': page} for page in range(len(texts))]
    store = DocArrayInMemorySearch.from_texts(texts, FakeEmbeddings(),
        metadatas=metadatas, metric=metric)
    results = store.max_marginal_relevance_search('foo', k=2, fetch_k=3)
    expected = [Document(page_content='foo', metadata={'page': 0}),
        Document(page_content='bar', metadata={'page': 1})]
    assert results == expected
Test MRR search.
_run
"""Run the tool.""" query = self.api_resource.users().threads().get(userId='me', id=thread_id) thread_data = query.execute() if not isinstance(thread_data, dict): raise ValueError('The output of the query must be a list.') messages = thread_data['messages'] thread_data['messages'] = [] keys_to_keep = ['id', 'snippet', 'snippet'] for message in messages: thread_data['messages'].append({k: message[k] for k in keys_to_keep if k in message}) return thread_data
def _run(self, thread_id: str, run_manager: Optional[
    CallbackManagerForToolRun]=None) ->Dict:
    """Fetch a Gmail thread and strip each message down to id and snippet.

    Args:
        thread_id: Identifier of the thread to fetch.
        run_manager: Optional callback manager (unused).

    Returns:
        The thread payload with `messages` reduced to id/snippet dicts.

    Raises:
        ValueError: If the API response is not a dict.
    """
    query = self.api_resource.users().threads().get(userId='me', id=thread_id)
    thread_data = query.execute()
    if not isinstance(thread_data, dict):
        # Fix: message previously said "list" while the check is for a dict.
        raise ValueError('The output of the query must be a dict.')
    messages = thread_data['messages']
    thread_data['messages'] = []
    # Fix: 'snippet' was listed twice in the original; keep each key once.
    keys_to_keep = ['id', 'snippet']
    for message in messages:
        thread_data['messages'].append({k: message[k] for k in
            keys_to_keep if k in message})
    return thread_data
Run the tool.
test_chat_fireworks
"""Test ChatFireworks wrapper.""" message = HumanMessage(content='What is the weather in Redwood City, CA today') response = chat([message]) assert isinstance(response, BaseMessage) assert isinstance(response.content, str)
@pytest.mark.scheduled
def test_chat_fireworks(chat: ChatFireworks) ->None:
    """Test ChatFireworks wrapper."""
    prompt = HumanMessage(content=
        'What is the weather in Redwood City, CA today')
    reply = chat([prompt])
    assert isinstance(reply, BaseMessage)
    assert isinstance(reply.content, str)
Test ChatFireworks wrapper.
update
"""Update based on prompt and llm_string.""" items = [self.cache_schema(prompt=prompt, llm=llm_string, response=dumps( gen), idx=i) for i, gen in enumerate(return_val)] with Session(self.engine) as session, session.begin(): for item in items: session.merge(item)
def update(self, prompt: str, llm_string: str, return_val: RETURN_VAL_TYPE
    ) ->None:
    """Upsert one cache row per generation for this prompt/llm pair."""
    # Serialize all generations up front, before touching the session.
    rows = [self.cache_schema(prompt=prompt, llm=llm_string, response=
        dumps(generation), idx=idx) for idx, generation in enumerate(
        return_val)]
    with Session(self.engine) as session, session.begin():
        for row in rows:
            session.merge(row)
Update based on prompt and llm_string.
test_nuclia_tool
with mock.patch('nucliadb_protos.writer_pb2.BrokerMessage.ParseFromString', new_callable=FakeParseFromString): with mock.patch('requests.post', new_callable=fakepost): with mock.patch('requests.get', new_callable=fakeget): nua = NucliaUnderstandingAPI(enable_ml=False) uuid = nua.run({'action': 'push', 'id': '1', 'path': str( README_PATH), 'text': None}) assert uuid == 'fake_uuid' data = nua.run({'action': 'pull', 'id': '1', 'path': None, 'text': None}) assert json.loads(data)['uuid'] == 'fake_uuid'
@mock.patch.dict(os.environ, {'NUCLIA_NUA_KEY': '_a_key_'})
@pytest.mark.requires('nucliadb_protos')
def test_nuclia_tool() ->None:
    """Push a document then pull it back through a fully mocked Nuclia API."""
    # Stub out protobuf parsing plus all HTTP traffic so no network is used.
    with mock.patch('nucliadb_protos.writer_pb2.BrokerMessage.ParseFromString',
        new_callable=FakeParseFromString):
        with mock.patch('requests.post', new_callable=fakepost):
            with mock.patch('requests.get', new_callable=fakeget):
                nua = NucliaUnderstandingAPI(enable_ml=False)
                # Pushing returns the uuid stamped by the fake parser.
                uuid = nua.run({'action': 'push', 'id': '1', 'path': str(
                    README_PATH), 'text': None})
                assert uuid == 'fake_uuid'
                # Pulling returns a JSON document carrying the same uuid.
                data = nua.run({'action': 'pull', 'id': '1', 'path': None,
                    'text': None})
                assert json.loads(data)['uuid'] == 'fake_uuid'
null
similarity_search
"""Return docs most similar to query.""" embedding = self._embedding.embed_query(query) documents = self.similarity_search_with_score_by_vector(embedding=embedding, k=k) return [doc for doc, _ in documents]
def similarity_search(self, query: str, k: int=4, **kwargs: Any) ->List[
    Document]:
    """Return docs most similar to query."""
    query_embedding = self._embedding.embed_query(query)
    scored = self.similarity_search_with_score_by_vector(embedding=
        query_embedding, k=k)
    # Drop the scores; callers of this method only want the documents.
    return [document for document, _score in scored]
Return docs most similar to query.
from_llm_and_tools
raise NotImplementedError
@classmethod
def from_llm_and_tools(cls, llm: BaseLanguageModel, tools: Sequence[
    BaseTool], callback_manager: Optional[BaseCallbackManager]=None, **
    kwargs: Any) ->BaseSingleActionAgent:
    """Construct an agent from an LLM and tools.

    Not supported for this agent type; always raises NotImplementedError.
    """
    raise NotImplementedError
null
_invocation_params
if is_openai_v1(): openai_params = {'model': self.deployment_name} else: openai_params = {'engine': self.deployment_name, 'api_type': self. openai_api_type, 'api_version': self.openai_api_version} return {**openai_params, **super()._invocation_params}
@property
def _invocation_params(self) ->Dict[str, Any]:
    """Per-call parameters, shaped for the installed OpenAI SDK flavor."""
    if is_openai_v1():
        # The v1 SDK takes the deployment as the `model` argument.
        base = {'model': self.deployment_name}
    else:
        base = {'engine': self.deployment_name, 'api_type': self.
            openai_api_type, 'api_version': self.openai_api_version}
    return {**base, **super()._invocation_params}
null
_persist_run
"""Persist a run.""" self.runs.append(self._copy_run(run))
def _persist_run(self, run: Run) ->None:
    """Persist a run.

    Appends the run (copied via `_copy_run`) to the in-memory `runs` list.
    """
    self.runs.append(self._copy_run(run))
Persist a run.
generate
"""Generate synthetic data using the given subject string. Args: subject (str): The subject the synthetic data will be about. runs (int): Number of times to generate the data. extra (str): Extra instructions for steerability in data generation. Returns: List[str]: List of generated synthetic data. Usage Example: >>> results = generator.generate(subject="climate change", runs=5, extra="Focus on environmental impacts.") """ if self.llm_chain is None: raise ValueError( 'llm_chain is none, either set either llm_chain or llm at generator construction' ) for _ in range(runs): result = self.llm_chain.run(*args, subject=subject, **kwargs) self.results.append(result) self._update_examples(result) return self.results
def generate(self, subject: str, runs: int, *args: Any, **kwargs: Any) ->List[
    str]:
    """Generate synthetic data using the given subject string.

    Args:
        subject (str): The subject the synthetic data will be about.
        runs (int): Number of times to generate the data.
        *args: Extra positional arguments forwarded to the LLM chain.
        **kwargs: Extra keyword arguments forwarded to the LLM chain,
            e.g. extra="Focus on environmental impacts." for steerability.

    Returns:
        List[str]: List of generated synthetic data (cumulative across calls).

    Raises:
        ValueError: If no LLM chain was configured at construction time.

    Usage Example:
        >>> results = generator.generate(subject="climate change", runs=5,
                extra="Focus on environmental impacts.")
    """
    # Doc fix: the old docstring documented a nonexistent `extra` parameter;
    # steerability arguments actually flow through *args/**kwargs.
    if self.llm_chain is None:
        raise ValueError(
            'llm_chain is none, either set either llm_chain or llm at generator construction'
            )
    for _ in range(runs):
        result = self.llm_chain.run(*args, subject=subject, **kwargs)
        self.results.append(result)
        self._update_examples(result)
    return self.results
Generate synthetic data using the given subject string. Args: subject (str): The subject the synthetic data will be about. runs (int): Number of times to generate the data. extra (str): Extra instructions for steerability in data generation. Returns: List[str]: List of generated synthetic data. Usage Example: >>> results = generator.generate(subject="climate change", runs=5, extra="Focus on environmental impacts.")
_create_message_dicts
params = dict(self._client_params) if stop is not None: if 'stop' in params: raise ValueError('`stop` found in both the input and default params.') params['stop'] = stop message_dicts = [_convert_message_to_dict(m) for m in messages] return message_dicts, params
def _create_message_dicts(self, messages: List[BaseMessage], stop: Optional
    [List[str]]) ->Tuple[List[Dict[str, Any]], Dict[str, Any]]:
    """Convert messages to API dicts and merge the stop sequence into params."""
    params = dict(self._client_params)
    if stop is not None:
        # A caller-supplied stop must not silently override a default one.
        if 'stop' in params:
            raise ValueError(
                '`stop` found in both the input and default params.')
        params['stop'] = stop
    converted = [_convert_message_to_dict(message) for message in messages]
    return converted, params
null
format_to_openai_tool_messages
"""Convert (AgentAction, tool output) tuples into FunctionMessages. Args: intermediate_steps: Steps the LLM has taken to date, along with observations Returns: list of messages to send to the LLM for the next prediction """ messages = [] for agent_action, observation in intermediate_steps: if isinstance(agent_action, OpenAIToolAgentAction): new_messages = list(agent_action.message_log) + [_create_tool_message (agent_action, observation)] messages.extend([new for new in new_messages if new not in messages]) else: messages.append(AIMessage(content=agent_action.log)) return messages
def format_to_openai_tool_messages(intermediate_steps: Sequence[Tuple[
    AgentAction, str]]) ->List[BaseMessage]:
    """Convert (AgentAction, tool output) tuples into FunctionMessages.

    Args:
        intermediate_steps: Steps the LLM has taken to date, along with
            observations

    Returns:
        list of messages to send to the LLM for the next prediction
    """
    messages = []
    for action, observation in intermediate_steps:
        if not isinstance(action, OpenAIToolAgentAction):
            messages.append(AIMessage(content=action.log))
            continue
        candidates = list(action.message_log) + [_create_tool_message(
            action, observation)]
        # Membership is checked against the list as it was before this batch,
        # matching the original snapshot semantics of extend + comprehension.
        fresh = [msg for msg in candidates if msg not in messages]
        messages.extend(fresh)
    return messages
Convert (AgentAction, tool output) tuples into FunctionMessages. Args: intermediate_steps: Steps the LLM has taken to date, along with observations Returns: list of messages to send to the LLM for the next prediction
test_openai_stop_error
"""Test openai stop logic on bad configuration.""" llm = OpenAI(stop='3', temperature=0) with pytest.raises(ValueError): llm('write an ordered list of five items', stop=['\n'])
def test_openai_stop_error() ->None:
    """Test openai stop logic on bad configuration."""
    # `stop` is configured at construction time...
    llm = OpenAI(stop='3', temperature=0)
    with pytest.raises(ValueError):
        # ...so supplying a second stop sequence at call time must raise.
        llm('write an ordered list of five items', stop=['\n'])
Test openai stop logic on bad configuration.
test_max_marginal_relevance_search_with_filter
"""Test end to end construction and MRR search.""" texts = ['foo', 'bar', 'baz'] metadatas = [{'page': i} for i in range(len(texts))] docsearch = Weaviate.from_texts(texts, embedding_openai, metadatas= metadatas, weaviate_url=weaviate_url) where_filter = {'path': ['page'], 'operator': 'Equal', 'valueNumber': 0} standard_ranking = docsearch.similarity_search('foo', k=2, where_filter= where_filter) output = docsearch.max_marginal_relevance_search('foo', k=2, fetch_k=3, lambda_mult=1.0, where_filter=where_filter) assert output == standard_ranking output = docsearch.max_marginal_relevance_search('foo', k=2, fetch_k=3, lambda_mult=0.0, where_filter=where_filter) assert output == [Document(page_content='foo', metadata={'page': 0})]
@pytest.mark.vcr(ignore_localhost=True)
def test_max_marginal_relevance_search_with_filter(self, weaviate_url: str,
    embedding_openai: OpenAIEmbeddings) ->None:
    """Test end to end construction and MRR search."""
    texts = ['foo', 'bar', 'baz']
    metadatas = [{'page': i} for i in range(len(texts))]
    docsearch = Weaviate.from_texts(texts, embedding_openai, metadatas=
        metadatas, weaviate_url=weaviate_url)
    # Restrict all searches to the document whose page metadata equals 0.
    where_filter = {'path': ['page'], 'operator': 'Equal', 'valueNumber': 0}
    standard_ranking = docsearch.similarity_search('foo', k=2, where_filter
        =where_filter)
    # lambda_mult=1.0 ignores diversity, so MMR must match plain similarity.
    output = docsearch.max_marginal_relevance_search('foo', k=2, fetch_k=3,
        lambda_mult=1.0, where_filter=where_filter)
    assert output == standard_ranking
    # lambda_mult=0.0 maximizes diversity; the filter still limits to page 0.
    output = docsearch.max_marginal_relevance_search('foo', k=2, fetch_k=3,
        lambda_mult=0.0, where_filter=where_filter)
    assert output == [Document(page_content='foo', metadata={'page': 0})]
Test end to end construction and MRR search.
test_document_compressor_pipeline
embeddings = OpenAIEmbeddings() splitter = CharacterTextSplitter(chunk_size=20, chunk_overlap=0, separator='. ' ) redundant_filter = EmbeddingsRedundantFilter(embeddings=embeddings) relevant_filter = EmbeddingsFilter(embeddings=embeddings, similarity_threshold=0.8) pipeline_filter = DocumentCompressorPipeline(transformers=[splitter, redundant_filter, relevant_filter]) texts = ['This sentence is about cows', 'This sentence was about cows', 'foo bar baz'] docs = [Document(page_content='. '.join(texts))] actual = pipeline_filter.compress_documents(docs, 'Tell me about farm animals') assert len(actual) == 1 assert actual[0].page_content in texts[:2]
def test_document_compressor_pipeline() ->None:
    """Split -> drop redundancy -> keep relevant: one cow sentence survives."""
    embeddings = OpenAIEmbeddings()
    splitter = CharacterTextSplitter(chunk_size=20, chunk_overlap=0,
        separator='. ')
    redundant_filter = EmbeddingsRedundantFilter(embeddings=embeddings)
    relevant_filter = EmbeddingsFilter(embeddings=embeddings,
        similarity_threshold=0.8)
    pipeline = DocumentCompressorPipeline(transformers=[splitter,
        redundant_filter, relevant_filter])
    texts = ['This sentence is about cows', 'This sentence was about cows',
        'foo bar baz']
    docs = [Document(page_content='. '.join(texts))]
    compressed = pipeline.compress_documents(docs,
        'Tell me about farm animals')
    assert len(compressed) == 1
    # The survivor must be one of the two near-duplicate cow sentences.
    assert compressed[0].page_content in texts[:2]
null
test_json_equality_evaluator_evaluate_lists_permutation_invariant
evaluator = JsonEqualityEvaluator() prediction = '[{"a": 1, "b": 2}, {"a": 2, "b": 3}]' reference = '[{"a": 2, "b": 3}, {"a": 1, "b": 2}]' result = evaluator.evaluate_strings(prediction=prediction, reference=reference) assert result == {'score': True} prediction = '[{"a": 1, "b": 2}, {"a": 2, "b": 3}]' reference = '[{"a": 2, "b": 3}, {"a": 1, "b": 4}]' result = evaluator.evaluate_strings(prediction=prediction, reference=reference) assert result == {'score': False} prediction = '[{"a": 1, "b": 2}, {"a": 2, "b": 3}]' reference = '[{"a": 2, "b": 3}]' result = evaluator.evaluate_strings(prediction=prediction, reference=reference) assert result == {'score': False} prediction = '[{"a": 1, "b": 2}, {"a": 2, "b": 3}]' reference = '[{"a": 2, "b": 3}, {"a": 1, "b": 2}, {"a": 3, "b": 4}]' result = evaluator.evaluate_strings(prediction=prediction, reference=reference) assert result == {'score': False} prediction = '[{"a": 1, "b": 2}, {"a": 2, "b": 3}]' reference = '[{"a": 2, "b": 3}, {"b": 2,"a": 1}, {"a": 3, "b": 4}]' result = evaluator.evaluate_strings(prediction=reference, reference=prediction) assert result == {'score': False} prediction = '[' + ','.join([f'{{"a": {i}, "b": {i + 1}}}' for i in range( 1000)]) + ']' rlist = [f'{{"a": {i}, "b": {i + 1}}}' for i in range(1000)] random.shuffle(rlist) reference = '[' + ','.join(rlist) + ']' result = evaluator.evaluate_strings(prediction=prediction, reference=reference) assert result == {'score': True} prediction = '[' + ','.join([f'{{"b": {i + 1}, "a": {i}}}' for i in range( 1000)]) + ']' reference = '[' + ','.join([f'{{"a": {i + 1}, "b": {i + 2}}}' for i in range(999)] + ['{"a": 1000, "b": 1001}']) + ']' result = evaluator.evaluate_strings(prediction=prediction, reference=reference) assert result == {'score': False}
def test_json_equality_evaluator_evaluate_lists_permutation_invariant() ->None:
    """List equality must ignore element order but not content or length."""
    evaluator = JsonEqualityEvaluator()

    def check(pred, ref):
        return evaluator.evaluate_strings(prediction=pred, reference=ref)

    base = '[{"a": 1, "b": 2}, {"a": 2, "b": 3}]'
    # Same elements, different order -> equal.
    assert check(base, '[{"a": 2, "b": 3}, {"a": 1, "b": 2}]') == {'score':
        True}
    # One differing value -> unequal.
    assert check(base, '[{"a": 2, "b": 3}, {"a": 1, "b": 4}]') == {'score':
        False}
    # Shorter reference -> unequal.
    assert check(base, '[{"a": 2, "b": 3}]') == {'score': False}
    # Longer reference -> unequal.
    assert check(base, '[{"a": 2, "b": 3}, {"a": 1, "b": 2}, {"a": 3, "b": 4}]'
        ) == {'score': False}
    # Swapped roles with an extra element -> unequal.
    assert check('[{"a": 2, "b": 3}, {"b": 2,"a": 1}, {"a": 3, "b": 4}]', base
        ) == {'score': False}
    # Large shuffled list still compares equal.
    big = '[' + ','.join([f'{{"a": {i}, "b": {i + 1}}}' for i in range(1000)]
        ) + ']'
    shuffled = [f'{{"a": {i}, "b": {i + 1}}}' for i in range(1000)]
    random.shuffle(shuffled)
    assert check(big, '[' + ','.join(shuffled) + ']') == {'score': True}
    # Object key order is irrelevant, but shifted values are not.
    swapped_keys = '[' + ','.join([f'{{"b": {i + 1}, "a": {i}}}' for i in
        range(1000)]) + ']'
    off_by_one = '[' + ','.join([f'{{"a": {i + 1}, "b": {i + 2}}}' for i in
        range(999)] + ['{"a": 1000, "b": 1001}']) + ']'
    assert check(swapped_keys, off_by_one) == {'score': False}
null
_validate_google_libraries_installation
"""Validates that Google libraries that are needed are installed.""" try: from google.cloud import aiplatform, storage from google.oauth2 import service_account except ImportError: raise ImportError( 'You must run `pip install --upgrade google-cloud-aiplatform google-cloud-storage`to use the MatchingEngine Vectorstore.' )
def _validate_google_libraries_installation(self) ->None:
    """Validates that Google libraries that are needed are installed.

    Raises:
        ImportError: If the Google Cloud libraries are missing.
    """
    try:
        from google.cloud import aiplatform, storage
        from google.oauth2 import service_account
    except ImportError:
        # Fix: the original message was missing the space before "to use".
        raise ImportError(
            'You must run `pip install --upgrade google-cloud-aiplatform google-cloud-storage` to use the MatchingEngine Vectorstore.'
            )
Validates that Google libraries that are needed are installed.
_import_promptlayer_chat
from langchain_community.llms.promptlayer_openai import PromptLayerOpenAIChat return PromptLayerOpenAIChat
def _import_promptlayer_chat() ->Any:
    """Lazily import and return the PromptLayerOpenAIChat LLM class."""
    from langchain_community.llms.promptlayer_openai import (
        PromptLayerOpenAIChat)
    return PromptLayerOpenAIChat
null
new
""" Creates a new integration package. Should be run from libs/partners """ if not Path.cwd().name == 'partners' or not Path.cwd().parent.name == 'libs': typer.echo( 'This command should be run from the `libs/partners` directory in the langchain-ai/langchain monorepo. Continuing is NOT recommended.' ) typer.confirm('Are you sure you want to continue?', abort=True) try: replacements = _process_name(name) except ValueError as e: typer.echo(e) raise typer.Exit(code=1) if name_class: if not re.match('^[A-Z][a-zA-Z0-9]*$', name_class): typer.echo( 'Name should only contain letters (a-z, A-Z), numbers, and underscores, and start with a capital letter.' ) raise typer.Exit(code=1) replacements['__ModuleName__'] = name_class else: replacements['__ModuleName__'] = typer.prompt( 'Name of integration in PascalCase', default=replacements[ '__ModuleName__']) destination_dir = Path.cwd() / replacements['__package_name_short__'] if destination_dir.exists(): typer.echo(f'Folder {destination_dir} exists.') raise typer.Exit(code=1) project_template_dir = Path(__file__).parents[1] / 'integration_template' shutil.copytree(project_template_dir, destination_dir, dirs_exist_ok=False) package_dir = destination_dir / replacements['__module_name__'] shutil.move(destination_dir / 'integration_template', package_dir) replace_glob(destination_dir, '**/*', replacements) subprocess.run(['poetry', 'install', '--with', 'lint,test,typing,test_integration'], cwd=destination_dir)
@integration_cli.command()
def new(name: Annotated[str, typer.Option(help=
    'The name of the integration to create (e.g. `my-integration`)', prompt
    =True)], name_class: Annotated[Optional[str], typer.Option(help=
    'The name of the integration in PascalCase. e.g. `MyIntegration`. This is used to name classes like `MyIntegrationVectorStore`'
    )]=None):
    """
    Creates a new integration package.

    Should be run from libs/partners
    """
    # Warn (but allow override) when not invoked from libs/partners.
    if not Path.cwd().name == 'partners' or not Path.cwd(
        ).parent.name == 'libs':
        typer.echo(
            'This command should be run from the `libs/partners` directory in the langchain-ai/langchain monorepo. Continuing is NOT recommended.'
            )
        typer.confirm('Are you sure you want to continue?', abort=True)
    # Derive package/module name variants from the user-supplied name.
    try:
        replacements = _process_name(name)
    except ValueError as e:
        typer.echo(e)
        raise typer.Exit(code=1)
    # PascalCase class name: validate if given, otherwise prompt with default.
    if name_class:
        if not re.match('^[A-Z][a-zA-Z0-9]*$', name_class):
            typer.echo(
                'Name should only contain letters (a-z, A-Z), numbers, and underscores, and start with a capital letter.'
                )
            raise typer.Exit(code=1)
        replacements['__ModuleName__'] = name_class
    else:
        replacements['__ModuleName__'] = typer.prompt(
            'Name of integration in PascalCase', default=replacements[
            '__ModuleName__'])
    # Refuse to overwrite an existing package directory.
    destination_dir = Path.cwd() / replacements['__package_name_short__']
    if destination_dir.exists():
        typer.echo(f'Folder {destination_dir} exists.')
        raise typer.Exit(code=1)
    # Copy the template tree, rename its inner package, then fill placeholders.
    project_template_dir = Path(__file__).parents[1] / 'integration_template'
    shutil.copytree(project_template_dir, destination_dir, dirs_exist_ok=False)
    package_dir = destination_dir / replacements['__module_name__']
    shutil.move(destination_dir / 'integration_template', package_dir)
    replace_glob(destination_dir, '**/*', replacements)
    # Install the new package with all dev dependency groups.
    subprocess.run(['poetry', 'install', '--with',
        'lint,test,typing,test_integration'], cwd=destination_dir)
Creates a new integration package. Should be run from libs/partners
test_clearing_conversation_memory
"""Test clearing the conversation memory.""" good_inputs = {'foo': 'bar', 'baz': 'foo'} good_outputs = {'bar': 'foo'} memory.save_context(good_inputs, good_outputs) memory.clear() assert memory.load_memory_variables({}) == {'baz': ''}
@pytest.mark.parametrize('memory', [ConversationBufferMemory(memory_key=
    'baz'), ConversationSummaryMemory(llm=FakeLLM(), memory_key='baz'),
    ConversationBufferWindowMemory(memory_key='baz')])
def test_clearing_conversation_memory(memory: BaseMemory) ->None:
    """Test clearing the conversation memory."""
    good_inputs = {'foo': 'bar', 'baz': 'foo'}
    good_outputs = {'bar': 'foo'}
    memory.save_context(good_inputs, good_outputs)
    # After clearing, only the (empty) memory key should remain.
    memory.clear()
    loaded = memory.load_memory_variables({})
    assert loaded == {'baz': ''}
Test clearing the conversation memory.
_call
"""Call to Modal endpoint.""" params = self.model_kwargs or {} params = {**params, **kwargs} response = requests.post(url=self.endpoint_url, headers={'Content-Type': 'application/json'}, json={'prompt': prompt, **params}) try: if prompt in response.json()['prompt']: response_json = response.json() except KeyError: raise KeyError("LangChain requires 'prompt' key in response.") text = response_json['prompt'] if stop is not None: text = enforce_stop_tokens(text, stop) return text
def _call(self, prompt: str, stop: Optional[List[str]]=None, run_manager:
    Optional[CallbackManagerForLLMRun]=None, **kwargs: Any) ->str:
    """Call the Modal web endpoint and return the generated text.

    Args:
        prompt: The prompt to send to the endpoint.
        stop: Optional stop tokens; the returned text is truncated at the
            first occurrence of any of them.
        run_manager: Callback manager (unused by this implementation).
        kwargs: Extra parameters merged over ``self.model_kwargs``.

    Returns:
        The completion text from the endpoint's 'prompt' field.

    Raises:
        KeyError: If the endpoint response contains no 'prompt' key.
    """
    params = self.model_kwargs or {}
    params = {**params, **kwargs}
    response = requests.post(url=self.endpoint_url, headers={'Content-Type':
        'application/json'}, json={'prompt': prompt, **params})
    # Parse the body exactly once. The previous implementation only bound
    # `response_json` when the input prompt happened to be a substring of
    # the returned text, so any other response crashed with
    # UnboundLocalError instead of the intended KeyError; it also called
    # response.json() up to three times.
    response_json = response.json()
    if 'prompt' not in response_json:
        raise KeyError("LangChain requires 'prompt' key in response.")
    text = response_json['prompt']
    if stop is not None:
        text = enforce_stop_tokens(text, stop)
    return text
Call to Modal endpoint.
create_structured_output_chain
"""[Legacy] Create an LLMChain that uses an Ernie function to get a structured output. Args: output_schema: Either a dictionary or pydantic.BaseModel class. If a dictionary is passed in, it's assumed to already be a valid JsonSchema. For best results, pydantic.BaseModels should have docstrings describing what the schema represents and descriptions for the parameters. llm: Language model to use, assumed to support the Ernie function-calling API. prompt: BasePromptTemplate to pass to the model. output_key: The key to use when returning the output in LLMChain.__call__. output_parser: BaseLLMOutputParser to use for parsing model outputs. By default will be inferred from the function types. If pydantic.BaseModels are passed in, then the OutputParser will try to parse outputs using those. Otherwise model outputs will simply be parsed as JSON. Returns: An LLMChain that will pass the given function to the model. Example: .. code-block:: python from typing import Optional from langchain.chains.ernie_functions import create_structured_output_chain from langchain_community.chat_models import ErnieBotChat from langchain.prompts import ChatPromptTemplate from langchain.pydantic_v1 import BaseModel, Field class Dog(BaseModel): ""\"Identifying information about a dog.""\" name: str = Field(..., description="The dog's name") color: str = Field(..., description="The dog's color") fav_food: Optional[str] = Field(None, description="The dog's favorite food") llm = ErnieBotChat(model_name="ERNIE-Bot-4") prompt = ChatPromptTemplate.from_messages( [ ("user", "Use the given format to extract information from the following input: {input}"), ("assistant", "OK!"), ("user", "Tip: Make sure to answer in the correct format"), ] ) chain = create_structured_output_chain(Dog, llm, prompt) chain.run("Harry was a chubby brown beagle who loved chicken") # -> Dog(name="Harry", color="brown", fav_food="chicken") """ if isinstance(output_schema, dict): function: Any = {'name': 'output_formatter', 
'description': 'Output formatter. Should always be used to format your response to the user.' , 'parameters': output_schema} else: class _OutputFormatter(BaseModel): """Output formatter. Should always be used to format your response to the user.""" output: output_schema function = _OutputFormatter output_parser = output_parser or PydanticAttrOutputFunctionsParser( pydantic_schema=_OutputFormatter, attr_name='output') return create_ernie_fn_chain([function], llm, prompt, output_key=output_key, output_parser=output_parser, **kwargs)
def create_structured_output_chain(output_schema: Union[Dict[str, Any],
    Type[BaseModel]], llm: BaseLanguageModel, prompt: BasePromptTemplate,
    *, output_key: str='function', output_parser: Optional[
    BaseLLMOutputParser]=None, **kwargs: Any) ->LLMChain:
    """[Legacy] Create an LLMChain that uses an Ernie function to get a structured output.

    Args:
        output_schema: Either a dictionary or pydantic.BaseModel class. If a dictionary
            is passed in, it's assumed to already be a valid JsonSchema.
            For best results, pydantic.BaseModels should have docstrings describing what
            the schema represents and descriptions for the parameters.
        llm: Language model to use, assumed to support the Ernie function-calling API.
        prompt: BasePromptTemplate to pass to the model.
        output_key: The key to use when returning the output in LLMChain.__call__.
        output_parser: BaseLLMOutputParser to use for parsing model outputs. By default
            will be inferred from the function types. If pydantic.BaseModels are passed
            in, then the OutputParser will try to parse outputs using those. Otherwise
            model outputs will simply be parsed as JSON.

    Returns:
        An LLMChain that will pass the given function to the model.

    Example:
        .. code-block:: python

                from typing import Optional

                from langchain.chains.ernie_functions import create_structured_output_chain
                from langchain_community.chat_models import ErnieBotChat
                from langchain.prompts import ChatPromptTemplate
                from langchain.pydantic_v1 import BaseModel, Field

                class Dog(BaseModel):
                    ""\"Identifying information about a dog.""\"

                    name: str = Field(..., description="The dog's name")
                    color: str = Field(..., description="The dog's color")
                    fav_food: Optional[str] = Field(None, description="The dog's favorite food")

                llm = ErnieBotChat(model_name="ERNIE-Bot-4")
                prompt = ChatPromptTemplate.from_messages(
                    [
                        ("user", "Use the given format to extract information from the following input: {input}"),
                        ("assistant", "OK!"),
                        ("user", "Tip: Make sure to answer in the correct format"),
                    ]
                )
                chain = create_structured_output_chain(Dog, llm, prompt)
                chain.run("Harry was a chubby brown beagle who loved chicken")
                # -> Dog(name="Harry", color="brown", fav_food="chicken")
    """
    if isinstance(output_schema, dict):
        # Raw JsonSchema: wrap it directly as the Ernie function definition.
        # NOTE: the 'description' string below is sent to the model verbatim.
        function: Any = {'name': 'output_formatter', 'description':
            'Output formatter. Should always be used to format your response to the user.'
            , 'parameters': output_schema}
    else:

        # Pydantic model: wrap it in a single-field model so the model's
        # function call can be parsed and the `.output` attribute extracted.
        class _OutputFormatter(BaseModel):
            """Output formatter. Should always be used to format your response to the user."""
            output: output_schema

        function = _OutputFormatter
        # Default parser extracts the `output` attribute from the parsed
        # wrapper model, returning the caller's schema instance directly.
        output_parser = output_parser or PydanticAttrOutputFunctionsParser(
            pydantic_schema=_OutputFormatter, attr_name='output')
    return create_ernie_fn_chain([function], llm, prompt, output_key=
        output_key, output_parser=output_parser, **kwargs)
[Legacy] Create an LLMChain that uses an Ernie function to get a structured output. Args: output_schema: Either a dictionary or pydantic.BaseModel class. If a dictionary is passed in, it's assumed to already be a valid JsonSchema. For best results, pydantic.BaseModels should have docstrings describing what the schema represents and descriptions for the parameters. llm: Language model to use, assumed to support the Ernie function-calling API. prompt: BasePromptTemplate to pass to the model. output_key: The key to use when returning the output in LLMChain.__call__. output_parser: BaseLLMOutputParser to use for parsing model outputs. By default will be inferred from the function types. If pydantic.BaseModels are passed in, then the OutputParser will try to parse outputs using those. Otherwise model outputs will simply be parsed as JSON. Returns: An LLMChain that will pass the given function to the model. Example: .. code-block:: python from typing import Optional from langchain.chains.ernie_functions import create_structured_output_chain from langchain_community.chat_models import ErnieBotChat from langchain.prompts import ChatPromptTemplate from langchain.pydantic_v1 import BaseModel, Field class Dog(BaseModel): """Identifying information about a dog.""" name: str = Field(..., description="The dog's name") color: str = Field(..., description="The dog's color") fav_food: Optional[str] = Field(None, description="The dog's favorite food") llm = ErnieBotChat(model_name="ERNIE-Bot-4") prompt = ChatPromptTemplate.from_messages( [ ("user", "Use the given format to extract information from the following input: {input}"), ("assistant", "OK!"), ("user", "Tip: Make sure to answer in the correct format"), ] ) chain = create_structured_output_chain(Dog, llm, prompt) chain.run("Harry was a chubby brown beagle who loved chicken") # -> Dog(name="Harry", color="brown", fav_food="chicken")
from_huggingface_tokenizer
"""Text splitter that uses HuggingFace tokenizer to count length.""" try: from transformers import PreTrainedTokenizerBase if not isinstance(tokenizer, PreTrainedTokenizerBase): raise ValueError( 'Tokenizer received was not an instance of PreTrainedTokenizerBase' ) def _huggingface_tokenizer_length(text: str) ->int: return len(tokenizer.encode(text)) except ImportError: raise ValueError( 'Could not import transformers python package. Please install it with `pip install transformers`.' ) return cls(length_function=_huggingface_tokenizer_length, **kwargs)
@classmethod
def from_huggingface_tokenizer(cls, tokenizer: Any, **kwargs: Any
    ) ->TextSplitter:
    """Text splitter that uses HuggingFace tokenizer to count length.

    Args:
        tokenizer: A ``transformers.PreTrainedTokenizerBase`` instance used
            to tokenize text when measuring chunk length.
        kwargs: Extra arguments forwarded to the splitter constructor.

    Returns:
        A splitter whose length function counts tokens, not characters.

    Raises:
        ValueError: If ``transformers`` is not installed, or ``tokenizer``
            is not a ``PreTrainedTokenizerBase``.
    """
    # Keep the try-body minimal: only the import can raise ImportError.
    # Previously the isinstance check and the helper definition also lived
    # inside the try, and the ImportError cause was dropped (no `from`).
    try:
        from transformers import PreTrainedTokenizerBase
    except ImportError as exc:
        raise ValueError(
            'Could not import transformers python package. Please install it with `pip install transformers`.'
            ) from exc
    if not isinstance(tokenizer, PreTrainedTokenizerBase):
        raise ValueError(
            'Tokenizer received was not an instance of PreTrainedTokenizerBase'
            )

    def _huggingface_tokenizer_length(text: str) ->int:
        # Length in tokens as produced by the tokenizer's encode().
        return len(tokenizer.encode(text))

    return cls(length_function=_huggingface_tokenizer_length, **kwargs)
Text splitter that uses HuggingFace tokenizer to count length.
__init__
self.byte_iterator = iter(stream) self.buffer = io.BytesIO() self.read_pos = 0
def __init__(self, stream: Any) ->None:
    """Set up buffered, position-tracked reading over a byte-chunk stream."""
    # Offset of the next byte to hand back to the caller.
    self.read_pos = 0
    # Accumulates the chunks pulled from the underlying stream so far.
    self.buffer = io.BytesIO()
    # Lazy source of raw byte chunks.
    self.byte_iterator = iter(stream)
null
validate_environment
try: import openlm values['client'] = openlm.Completion except ImportError: raise ImportError( 'Could not import openlm python package. Please install it with `pip install openlm`.' ) if values['streaming']: raise ValueError('Streaming not supported with openlm') return values
@root_validator()
def validate_environment(cls, values: Dict) ->Dict:
    """Check that openlm is importable and that streaming is not requested."""
    try:
        import openlm
    except ImportError:
        raise ImportError(
            'Could not import openlm python package. Please install it with `pip install openlm`.'
            )
    # Only reached when the import succeeded.
    values['client'] = openlm.Completion
    if values['streaming']:
        raise ValueError('Streaming not supported with openlm')
    return values
null
on_llm_error
"""Set the error flag.""" self.error = 1
def on_llm_error(self, error: BaseException, **kwargs: Any) ->None:
    """Mark this run as failed by raising the error flag."""
    # Keep the original integer sentinel (downstream code may compare to 1).
    self.error = 1
Set the error flag.
add_one
"""Add one.""" return x + 1
def add_one(x: int) ->int:
    """Return ``x`` incremented by one."""
    successor = x + 1
    return successor
Add one.
_agent_type
"""Return Identifier of an agent type.""" return AgentType.SELF_ASK_WITH_SEARCH
@property
def _agent_type(self) ->str:
    """Identifier of an agent type."""
    # Fixed identifier: this agent is always self-ask-with-search.
    return AgentType.SELF_ASK_WITH_SEARCH
Return Identifier of an agent type.
add_texts
"""Run more texts through the embeddings and add to the vectorstore. Args: texts: Iterable of strings to add to the vectorstore. metadatas: Optional list of metadatas associated with the texts. kwargs: vectorstore specific parameters Returns: List of ids from adding the texts into the vectorstore. """ if ids is None: ids = [str(uuid.uuid1()) for _ in texts] embeddings = self.embedding_function.embed_documents(list(texts)) if not metadatas: metadatas = [{} for _ in texts] self.add_embeddings(texts, embeddings, metadatas, ids, **kwargs) return ids
def add_texts(self, texts: Iterable[str], metadatas: Optional[List[dict]]=
    None, ids: Optional[List[str]]=None, **kwargs: Any) ->List[str]:
    """Run more texts through the embeddings and add to the vectorstore.

    Args:
        texts: Iterable of strings to add to the vectorstore.
        metadatas: Optional list of metadatas associated with the texts.
        ids: Optional ids for the texts; time-based UUIDs are generated
            when omitted.
        kwargs: vectorstore specific parameters

    Returns:
        List of ids from adding the texts into the vectorstore.
    """
    # Materialize once. The previous code iterated `texts` to generate ids
    # and then called list(texts) again, so a one-shot iterator/generator
    # was silently exhausted and nothing was embedded or stored.
    texts = list(texts)
    if ids is None:
        ids = [str(uuid.uuid1()) for _ in texts]
    embeddings = self.embedding_function.embed_documents(texts)
    if not metadatas:
        metadatas = [{} for _ in texts]
    self.add_embeddings(texts, embeddings, metadatas, ids, **kwargs)
    return ids
Run more texts through the embeddings and add to the vectorstore. Args: texts: Iterable of strings to add to the vectorstore. metadatas: Optional list of metadatas associated with the texts. kwargs: vectorstore specific parameters Returns: List of ids from adding the texts into the vectorstore.
on_llm_error
"""Run when LLM errors.""" self.metrics['step'] += 1 self.metrics['errors'] += 1
def on_llm_error(self, error: BaseException, **kwargs: Any) ->None:
    """Count a failed LLM call: one more step, one more error."""
    for counter in ('step', 'errors'):
        self.metrics[counter] += 1
Run when LLM errors.
test_convert_message_to_mistral_chat_message
result = _convert_message_to_mistral_chat_message(message) assert result == expected
@pytest.mark.parametrize(('message', 'expected'), [(SystemMessage(content=
    'Hello'), MistralChatMessage(role='system', content='Hello')), (
    HumanMessage(content='Hello'), MistralChatMessage(role='user', content=
    'Hello')), (AIMessage(content='Hello'), MistralChatMessage(role=
    'assistant', content='Hello')), (ChatMessage(role='assistant', content=
    'Hello'), MistralChatMessage(role='assistant', content='Hello'))])
def test_convert_message_to_mistral_chat_message(message: BaseMessage,
    expected: MistralChatMessage) ->None:
    """Each LangChain message maps onto the matching Mistral chat message."""
    converted = _convert_message_to_mistral_chat_message(message)
    assert converted == expected
null
_make_graph
self._networkx_wrapper = NetworkxEntityGraph() for entity in self.causal_operations.entities: for parent_name in entity.depends_on: self._networkx_wrapper._graph.add_edge(parent_name, entity.name, relation=entity.code) self.causal_operations.entities = [entity for entity in self. causal_operations.entities if entity.name in self._networkx_wrapper. get_topological_sort()]
def _make_graph(self) ->None:
    """Build the entity dependency graph and prune unsortable entities.

    Adds one edge per (parent, child) dependency, annotated with the
    child's code, then keeps only entities whose names appear in the
    graph's topological sort.
    """
    self._networkx_wrapper = NetworkxEntityGraph()
    for entity in self.causal_operations.entities:
        for parent_name in entity.depends_on:
            self._networkx_wrapper._graph.add_edge(parent_name, entity.
                name, relation=entity.code)
    # Compute the topological order once. The previous code called
    # get_topological_sort() inside the filter for every entity, re-running
    # the sort O(n) times; a set also makes each membership test O(1).
    sorted_names = set(self._networkx_wrapper.get_topological_sort())
    self.causal_operations.entities = [entity for entity in self.
        causal_operations.entities if entity.name in sorted_names]
null
on_agent_action
"""Do nothing when agent takes a specific action."""
def on_agent_action(self, action: AgentAction, **kwargs: Any) ->Any:
    """Intentionally a no-op: this handler ignores agent actions."""
    return None
Do nothing when agent takes a specific action.
add_user_message
"""Convenience method for adding a human message string to the store. Args: message: The string contents of a human message. metadata: Optional metadata to attach to the message. """ self.add_message(HumanMessage(content=message), metadata=metadata)
def add_user_message(self, message: str, metadata: Optional[Dict[str, Any]
    ]=None) ->None:
    """Convenience method for adding a human message string to the store.

    Args:
        message: The string contents of a human message.
        metadata: Optional metadata to attach to the message.
    """
    human_message = HumanMessage(content=message)
    self.add_message(human_message, metadata=metadata)
Convenience method for adding a human message string to the store. Args: message: The string contents of a human message. metadata: Optional metadata to attach to the message.
test_elasticsearch_delete_ids
"""Test delete methods from vector store.""" texts = ['foo', 'bar', 'baz', 'gni'] metadatas = [{'page': i} for i in range(len(texts))] docsearch = ElasticsearchStore(embedding=ConsistentFakeEmbeddings(), ** elasticsearch_connection, index_name=index_name) ids = docsearch.add_texts(texts, metadatas) output = docsearch.similarity_search('foo', k=10) assert len(output) == 4 docsearch.delete(ids[1:3]) output = docsearch.similarity_search('foo', k=10) assert len(output) == 2 docsearch.delete(['not-existing']) output = docsearch.similarity_search('foo', k=10) assert len(output) == 2 docsearch.delete([ids[0]]) output = docsearch.similarity_search('foo', k=10) assert len(output) == 1 docsearch.delete([ids[3]]) output = docsearch.similarity_search('gni', k=10) assert len(output) == 0
def test_elasticsearch_delete_ids(self, elasticsearch_connection: dict,
    index_name: str) ->None:
    """Deleting by id (including unknown ids) shrinks results accordingly."""
    texts = ['foo', 'bar', 'baz', 'gni']
    metadatas = [{'page': i} for i in range(len(texts))]
    store = ElasticsearchStore(embedding=ConsistentFakeEmbeddings(), **
        elasticsearch_connection, index_name=index_name)
    ids = store.add_texts(texts, metadatas)

    def count(query: str) ->int:
        return len(store.similarity_search(query, k=10))

    assert count('foo') == 4
    store.delete(ids[1:3])
    assert count('foo') == 2
    # Deleting an unknown id must not remove anything.
    store.delete(['not-existing'])
    assert count('foo') == 2
    store.delete([ids[0]])
    assert count('foo') == 1
    store.delete([ids[3]])
    assert count('gni') == 0
Test delete methods from vector store.
__init__
self.plan = plan self.tasks = [] self.id_task_map = {} self.status = 'pending' for step in self.plan.steps: task = Task(step.task, step.id, step.dep, step.args, step.tool) self.tasks.append(task) self.id_task_map[step.id] = task
def __init__(self, plan: Plan):
    """Index the plan's steps as Task objects, preserving step order."""
    self.plan = plan
    self.status = 'pending'
    # Parallel views of the same Task objects: ordered list + id lookup.
    self.tasks = []
    self.id_task_map = {}
    for step in plan.steps:
        task = Task(step.task, step.id, step.dep, step.args, step.tool)
        self.tasks.append(task)
        self.id_task_map[step.id] = task
null
env_var_is_set
"""Check if an environment variable is set. Args: env_var (str): The name of the environment variable. Returns: bool: True if the environment variable is set, False otherwise. """ return env_var in os.environ and os.environ[env_var] not in ('', '0', 'false', 'False')
def env_var_is_set(env_var: str) ->bool:
    """Check if an environment variable is set.

    Args:
        env_var (str): The name of the environment variable.

    Returns:
        bool: True if the environment variable is set to a value other than
            '', '0', 'false', or 'False'; False otherwise.
    """
    value = os.environ.get(env_var)
    # Missing entirely, or set to a recognised "off" value, counts as unset.
    return value is not None and value not in ('', '0', 'false', 'False')
Check if an environment variable is set. Args: env_var (str): The name of the environment variable. Returns: bool: True if the environment variable is set, False otherwise.
_euclidean_relevance_score_fn
"""Return a similarity score on a scale [0, 1].""" return 1.0 - distance / math.sqrt(2)
@staticmethod def _euclidean_relevance_score_fn(distance: float) ->float: """Return a similarity score on a scale [0, 1].""" return 1.0 - distance / math.sqrt(2)
Return a similarity score on a scale [0, 1].
_generate_rest_batches
from qdrant_client.http import models as rest texts_iterator = iter(texts) metadatas_iterator = iter(metadatas or []) ids_iterator = iter(ids or [uuid.uuid4().hex for _ in iter(texts)]) while (batch_texts := list(islice(texts_iterator, batch_size))): batch_metadatas = list(islice(metadatas_iterator, batch_size)) or None batch_ids = list(islice(ids_iterator, batch_size)) batch_embeddings = self._embed_texts(batch_texts) points = [rest.PointStruct(id=point_id, vector=vector if self. vector_name is None else {self.vector_name: vector}, payload= payload) for point_id, vector, payload in zip(batch_ids, batch_embeddings, self._build_payloads(batch_texts, batch_metadatas, self.content_payload_key, self.metadata_payload_key))] yield batch_ids, points
def _generate_rest_batches(self, texts: Iterable[str], metadatas: Optional[
    List[dict]]=None, ids: Optional[Sequence[str]]=None, batch_size: int=64
    ) ->Generator[Tuple[List[str], List[rest.PointStruct]], None, None]:
    """Yield ``(ids, points)`` batches ready for the Qdrant REST API.

    Args:
        texts: Texts to embed and upload.
        metadatas: Optional per-text payload metadata, aligned with ``texts``.
        ids: Optional point ids; random UUID4 hex strings are generated
            when omitted.
        batch_size: Maximum number of points per yielded batch.

    Yields:
        Tuples of (batch point ids, batch ``rest.PointStruct`` objects).
    """
    from qdrant_client.http import models as rest
    texts_iterator = iter(texts)
    metadatas_iterator = iter(metadatas or [])
    # NOTE(review): the fallback iterates `iter(texts)` a second time to
    # size the generated id list; if `texts` is a one-shot generator this
    # would exhaust it — presumably callers pass a sequence. TODO confirm.
    ids_iterator = iter(ids or [uuid.uuid4().hex for _ in iter(texts)])
    # islice drains up to batch_size items per pass; the walrus assignment
    # terminates the loop on the first empty batch.
    while (batch_texts := list(islice(texts_iterator, batch_size))):
        # A missing metadata batch degrades to None (no payload fields).
        batch_metadatas = list(islice(metadatas_iterator, batch_size)) or None
        batch_ids = list(islice(ids_iterator, batch_size))
        batch_embeddings = self._embed_texts(batch_texts)
        # When a vector name is configured, Qdrant expects a named-vector
        # mapping instead of a bare vector.
        points = [rest.PointStruct(id=point_id, vector=vector if self.
            vector_name is None else {self.vector_name: vector}, payload=
            payload) for point_id, vector, payload in zip(batch_ids,
            batch_embeddings, self._build_payloads(batch_texts,
            batch_metadatas, self.content_payload_key, self.
            metadata_payload_key))]
        yield batch_ids, points
null
__init__
"""Initialize Couchbase document loader. Args: connection_string (str): The connection string to the Couchbase cluster. db_username (str): The username to connect to the Couchbase cluster. db_password (str): The password to connect to the Couchbase cluster. query (str): The SQL++ query to execute. page_content_fields (Optional[List[str]]): The columns to write into the `page_content` field of the document. By default, all columns are written. metadata_fields (Optional[List[str]]): The columns to write into the `metadata` field of the document. By default, no columns are written. """ try: from couchbase.auth import PasswordAuthenticator from couchbase.cluster import Cluster from couchbase.options import ClusterOptions except ImportError as e: raise ImportError( 'Could not import couchbase package.Please install couchbase SDK with `pip install couchbase`.' ) from e if not connection_string: raise ValueError('connection_string must be provided.') if not db_username: raise ValueError('db_username must be provided.') if not db_password: raise ValueError('db_password must be provided.') auth = PasswordAuthenticator(db_username, db_password) self.cluster: Cluster = Cluster(connection_string, ClusterOptions(auth)) self.query = query self.page_content_fields = page_content_fields self.metadata_fields = metadata_fields
def __init__(self, connection_string: str, db_username: str, db_password:
    str, query: str, *, page_content_fields: Optional[List[str]]=None,
    metadata_fields: Optional[List[str]]=None) ->None:
    """Initialize Couchbase document loader.

    Args:
        connection_string (str): The connection string to the Couchbase
            cluster.
        db_username (str): The username to connect to the Couchbase cluster.
        db_password (str): The password to connect to the Couchbase cluster.
        query (str): The SQL++ query to execute.
        page_content_fields (Optional[List[str]]): The columns to write into
            the `page_content` field of the document. By default, all columns
            are written.
        metadata_fields (Optional[List[str]]): The columns to write into the
            `metadata` field of the document. By default, no columns are
            written.
    """
    try:
        from couchbase.auth import PasswordAuthenticator
        from couchbase.cluster import Cluster
        from couchbase.options import ClusterOptions
    except ImportError as e:
        raise ImportError(
            'Could not import couchbase package.Please install couchbase SDK with `pip install couchbase`.'
            ) from e
    # Validate the required connection arguments in the documented order.
    required = (('connection_string', connection_string), ('db_username',
        db_username), ('db_password', db_password))
    for arg_name, arg_value in required:
        if not arg_value:
            raise ValueError(f'{arg_name} must be provided.')
    credentials = PasswordAuthenticator(db_username, db_password)
    self.cluster: Cluster = Cluster(connection_string, ClusterOptions(
        credentials))
    self.query = query
    self.page_content_fields = page_content_fields
    self.metadata_fields = metadata_fields
Initialize Couchbase document loader. Args: connection_string (str): The connection string to the Couchbase cluster. db_username (str): The username to connect to the Couchbase cluster. db_password (str): The password to connect to the Couchbase cluster. query (str): The SQL++ query to execute. page_content_fields (Optional[List[str]]): The columns to write into the `page_content` field of the document. By default, all columns are written. metadata_fields (Optional[List[str]]): The columns to write into the `metadata` field of the document. By default, no columns are written.
to_document
"""Return a Document object.""" return Document(page_content=self.page_content, metadata=self.metadata)
def to_document(self) ->Document:
    """Materialize this record as a ``Document`` object."""
    return Document(
        page_content=self.page_content,
        metadata=self.metadata,
    )
Return a Document object.