method_name
stringlengths
1
78
method_body
stringlengths
3
9.66k
full_code
stringlengths
31
10.7k
docstring
stringlengths
4
4.74k
test_character_text_splitter_longer_words
"""Test splitting by characters when splits not found easily.""" text = 'foo bar baz 123' splitter = CharacterTextSplitter(separator=' ', chunk_size=1, chunk_overlap=1) output = splitter.split_text(text) expected_output = ['foo', 'bar', 'baz', '123'] assert output == expected_output
def test_character_text_splitter_longer_words() ->None: """Test splitting by characters when splits not found easily.""" text = 'foo bar baz 123' splitter = CharacterTextSplitter(separator=' ', chunk_size=1, chunk_overlap=1) output = splitter.split_text(text) expected_output = ['foo', 'bar', 'baz', '123'] assert output == expected_output
Test splitting by characters when splits not found easily.
test_illegal_command_exec_disallowed_code_validation
"""Test the validator.""" with pytest.raises(ValueError): PALChain.validate_code(_SAMPLE_CODE_3, _ILLEGAL_COMMAND_EXEC_VALIDATIONS)
def test_illegal_command_exec_disallowed_code_validation() ->None: """Test the validator.""" with pytest.raises(ValueError): PALChain.validate_code(_SAMPLE_CODE_3, _ILLEGAL_COMMAND_EXEC_VALIDATIONS)
Test the validator.
_call
if self.narrative_chain is None: self.narrative_chain = NarrativeChain.from_univariate_prompt(llm=self.llm) if self.causal_chain is None: self.causal_chain = CausalChain.from_univariate_prompt(llm=self.llm) if self.intervention_chain is None: self.intervention_chain = InterventionChain.from_univariate_prompt(llm= self.llm) if self.query_chain is None: self.query_chain = QueryChain.from_univariate_prompt(llm=self.llm) narrative = self.narrative_chain(inputs[Constant.narrative_input.value])[ Constant.chain_data.value] story = StoryModel(causal_operations=self.causal_chain(narrative.story_plot )[Constant.chain_data.value], intervention=self.intervention_chain( narrative.story_hypothetical)[Constant.chain_data.value], query=self. query_chain(narrative.story_outcome_question)[Constant.chain_data.value]) self._story = story def pretty_print_str(title: str, d: str) ->str: return title + '\n' + d _run_manager = run_manager or CallbackManagerForChainRun.get_noop_manager() _run_manager.on_text(pretty_print_str('story outcome data', story. _outcome_table.to_string()), color='green', end='\n\n', verbose=self. verbose) def pretty_print_dict(title: str, d: dict) ->str: return title + '\n' + json.dumps(d, indent=4) _run_manager.on_text(pretty_print_dict('query data', story.query.dict()), color='blue', end='\n\n', verbose=self.verbose) if story.query._result_table.empty: raise ValueError( f"""unanswerable, query and outcome are incoherent outcome: {story._outcome_table} query: {story.query.dict()}""" ) else: query_result = float(story.query._result_table.values[0][-1]) if False: """TODO: add this back in when demanded by composable chains""" reporting_chain = self.chain human_report = reporting_chain.run(question=story.query.question, query_result=query_result) query_result = {'query_result': query_result, 'human_report': human_report} output = {Constant.chain_data.value: story, self.output_key: query_result, **kwargs} return output
def _call(self, inputs: Dict[str, Any], run_manager: Optional[ CallbackManagerForChainRun]=None, **kwargs: Any) ->Dict[str, Any]: if self.narrative_chain is None: self.narrative_chain = NarrativeChain.from_univariate_prompt(llm= self.llm) if self.causal_chain is None: self.causal_chain = CausalChain.from_univariate_prompt(llm=self.llm) if self.intervention_chain is None: self.intervention_chain = InterventionChain.from_univariate_prompt(llm =self.llm) if self.query_chain is None: self.query_chain = QueryChain.from_univariate_prompt(llm=self.llm) narrative = self.narrative_chain(inputs[Constant.narrative_input.value])[ Constant.chain_data.value] story = StoryModel(causal_operations=self.causal_chain(narrative. story_plot)[Constant.chain_data.value], intervention=self. intervention_chain(narrative.story_hypothetical)[Constant. chain_data.value], query=self.query_chain(narrative. story_outcome_question)[Constant.chain_data.value]) self._story = story def pretty_print_str(title: str, d: str) ->str: return title + '\n' + d _run_manager = run_manager or CallbackManagerForChainRun.get_noop_manager() _run_manager.on_text(pretty_print_str('story outcome data', story. _outcome_table.to_string()), color='green', end='\n\n', verbose= self.verbose) def pretty_print_dict(title: str, d: dict) ->str: return title + '\n' + json.dumps(d, indent=4) _run_manager.on_text(pretty_print_dict('query data', story.query.dict() ), color='blue', end='\n\n', verbose=self.verbose) if story.query._result_table.empty: raise ValueError( f"""unanswerable, query and outcome are incoherent outcome: {story._outcome_table} query: {story.query.dict()}""" ) else: query_result = float(story.query._result_table.values[0][-1]) if False: """TODO: add this back in when demanded by composable chains""" reporting_chain = self.chain human_report = reporting_chain.run(question=story.query. 
question, query_result=query_result) query_result = {'query_result': query_result, 'human_report': human_report} output = {Constant.chain_data.value: story, self.output_key: query_result, **kwargs} return output
null
test_parse_date_value
parsed = cast(Comparison, DEFAULT_PARSER.parse_folder(f'eq("x", {x})')) actual = parsed.value['date'] assert actual == x.strip('\'"')
@pytest.mark.parametrize('x', ('"2022-10-20"', "'2022-10-20'", '2022-10-20')) def test_parse_date_value(x: str) ->None: parsed = cast(Comparison, DEFAULT_PARSER.parse_folder(f'eq("x", {x})')) actual = parsed.value['date'] assert actual == x.strip('\'"')
null
query
knn = {'filter': filter, 'field': vector_query_field, 'k': k, 'num_candidates': fetch_k} if query_vector and not self.query_model_id: knn['query_vector'] = query_vector elif query and self.query_model_id: knn['query_vector_builder'] = {'text_embedding': {'model_id': self. query_model_id, 'model_text': query}} else: raise ValueError( 'You must provide an embedding function or a query_model_id to perform a similarity search.' ) if self.hybrid: query_body = {'knn': knn, 'query': {'bool': {'must': [{'match': { text_field: {'query': query}}}], 'filter': filter}}} if isinstance(self.rrf, dict): query_body['rank'] = {'rrf': self.rrf} elif isinstance(self.rrf, bool) and self.rrf is True: query_body['rank'] = {'rrf': {}} return query_body else: return {'knn': knn}
def query(self, query_vector: Union[List[float], None], query: Union[str, None], k: int, fetch_k: int, vector_query_field: str, text_field: str, filter: List[dict], similarity: Union[DistanceStrategy, None]) ->Dict: knn = {'filter': filter, 'field': vector_query_field, 'k': k, 'num_candidates': fetch_k} if query_vector and not self.query_model_id: knn['query_vector'] = query_vector elif query and self.query_model_id: knn['query_vector_builder'] = {'text_embedding': {'model_id': self. query_model_id, 'model_text': query}} else: raise ValueError( 'You must provide an embedding function or a query_model_id to perform a similarity search.' ) if self.hybrid: query_body = {'knn': knn, 'query': {'bool': {'must': [{'match': { text_field: {'query': query}}}], 'filter': filter}}} if isinstance(self.rrf, dict): query_body['rank'] = {'rrf': self.rrf} elif isinstance(self.rrf, bool) and self.rrf is True: query_body['rank'] = {'rrf': {}} return query_body else: return {'knn': knn}
null
__init__
"""Initialize callback handler.""" clearml = import_clearml() spacy = import_spacy() super().__init__() self.task_type = task_type self.project_name = project_name self.tags = tags self.task_name = task_name self.visualize = visualize self.complexity_metrics = complexity_metrics self.stream_logs = stream_logs self.temp_dir = tempfile.TemporaryDirectory() if clearml.Task.current_task(): self.task = clearml.Task.current_task() else: self.task = clearml.Task.init(task_type=self.task_type, project_name= self.project_name, tags=self.tags, task_name=self.task_name, output_uri=True) self.logger = self.task.get_logger() warning = ( 'The clearml callback is currently in beta and is subject to change based on updates to `langchain`. Please report any issues to https://github.com/allegroai/clearml/issues with the tag `langchain`.' ) self.logger.report_text(warning, level=30, print_console=True) self.callback_columns: list = [] self.action_records: list = [] self.complexity_metrics = complexity_metrics self.visualize = visualize self.nlp = spacy.load('en_core_web_sm')
def __init__(self, task_type: Optional[str]='inference', project_name: Optional[str]='langchain_callback_demo', tags: Optional[Sequence]=None, task_name: Optional[str]=None, visualize: bool=False, complexity_metrics: bool=False, stream_logs: bool=False) ->None: """Initialize callback handler.""" clearml = import_clearml() spacy = import_spacy() super().__init__() self.task_type = task_type self.project_name = project_name self.tags = tags self.task_name = task_name self.visualize = visualize self.complexity_metrics = complexity_metrics self.stream_logs = stream_logs self.temp_dir = tempfile.TemporaryDirectory() if clearml.Task.current_task(): self.task = clearml.Task.current_task() else: self.task = clearml.Task.init(task_type=self.task_type, project_name=self.project_name, tags=self.tags, task_name=self. task_name, output_uri=True) self.logger = self.task.get_logger() warning = ( 'The clearml callback is currently in beta and is subject to change based on updates to `langchain`. Please report any issues to https://github.com/allegroai/clearml/issues with the tag `langchain`.' ) self.logger.report_text(warning, level=30, print_console=True) self.callback_columns: list = [] self.action_records: list = [] self.complexity_metrics = complexity_metrics self.visualize = visualize self.nlp = spacy.load('en_core_web_sm')
Initialize callback handler.
get_breadcrumbs
parents = self.get_parents(run)[::-1] string = ' > '.join( f'{parent.execution_order}:{parent.run_type}:{parent.name}' if i != len (parents) - 1 else f'{parent.execution_order}:{parent.run_type}:{parent.name}' for i, parent in enumerate(parents + [run])) return string
def get_breadcrumbs(self, run: Run) ->str: parents = self.get_parents(run)[::-1] string = ' > '.join( f'{parent.execution_order}:{parent.run_type}:{parent.name}' if i != len(parents) - 1 else f'{parent.execution_order}:{parent.run_type}:{parent.name}' for i, parent in enumerate(parents + [run])) return string
null
_select_relevance_score_fn
""" The underlying VectorTable already returns a "score proper", i.e. one in [0, 1] where higher means more *similar*, so here the final score transformation is not reversing the interval: """ return self._dont_flip_the_cos_score
def _select_relevance_score_fn(self) ->Callable[[float], float]: """ The underlying VectorTable already returns a "score proper", i.e. one in [0, 1] where higher means more *similar*, so here the final score transformation is not reversing the interval: """ return self._dont_flip_the_cos_score
The underlying VectorTable already returns a "score proper", i.e. one in [0, 1] where higher means more *similar*, so here the final score transformation is not reversing the interval:
_type
return 'criteria_result'
@property def _type(self) ->str: return 'criteria_result'
null
test_parse_without_language_without_a_new_line
llm_output = """I can use the `foo` tool to achieve the goal. Action: ```{"action": "foo", "action_input": "bar"}``` """ action, action_input = get_action_and_input(llm_output) assert action == 'foo' assert action_input == 'bar'
def test_parse_without_language_without_a_new_line() ->None: llm_output = """I can use the `foo` tool to achieve the goal. Action: ```{"action": "foo", "action_input": "bar"}``` """ action, action_input = get_action_and_input(llm_output) assert action == 'foo' assert action_input == 'bar'
null
test_loadnotewithmissingcontenttag_emptylistreturned
documents = EverNoteLoader(self.example_notebook_path( 'sample_notebook_missingcontenttag.enex'), False).load() assert len(documents) == 0
def test_loadnotewithmissingcontenttag_emptylistreturned(self) ->None: documents = EverNoteLoader(self.example_notebook_path( 'sample_notebook_missingcontenttag.enex'), False).load() assert len(documents) == 0
null
add_graph_documents
""" Take GraphDocument as input as uses it to construct a graph. """ for document in graph_documents: include_docs_query = ( 'CREATE (d:Document) SET d.text = $document.page_content SET d += $document.metadata WITH d ' ) self.query( f"{include_docs_query if include_source else ''}UNWIND $data AS row CALL apoc.merge.node([row.type], {{id: row.id}}, row.properties, {{}}) YIELD node {'MERGE (d)-[:MENTIONS]->(node) ' if include_source else ''}RETURN distinct 'done' AS result" , {'data': [el.__dict__ for el in document.nodes], 'document': document.source.__dict__}) self.query( "UNWIND $data AS row CALL apoc.merge.node([row.source_label], {id: row.source},{}, {}) YIELD node as source CALL apoc.merge.node([row.target_label], {id: row.target},{}, {}) YIELD node as target CALL apoc.merge.relationship(source, row.type, {}, row.properties, target) YIELD rel RETURN distinct 'done'" , {'data': [{'source': el.source.id, 'source_label': el.source.type, 'target': el.target.id, 'target_label': el.target.type, 'type': el. type.replace(' ', '_').upper(), 'properties': el.properties} for el in document.relationships]})
def add_graph_documents(self, graph_documents: List[GraphDocument], include_source: bool=False) ->None: """ Take GraphDocument as input as uses it to construct a graph. """ for document in graph_documents: include_docs_query = ( 'CREATE (d:Document) SET d.text = $document.page_content SET d += $document.metadata WITH d ' ) self.query( f"{include_docs_query if include_source else ''}UNWIND $data AS row CALL apoc.merge.node([row.type], {{id: row.id}}, row.properties, {{}}) YIELD node {'MERGE (d)-[:MENTIONS]->(node) ' if include_source else ''}RETURN distinct 'done' AS result" , {'data': [el.__dict__ for el in document.nodes], 'document': document.source.__dict__}) self.query( "UNWIND $data AS row CALL apoc.merge.node([row.source_label], {id: row.source},{}, {}) YIELD node as source CALL apoc.merge.node([row.target_label], {id: row.target},{}, {}) YIELD node as target CALL apoc.merge.relationship(source, row.type, {}, row.properties, target) YIELD rel RETURN distinct 'done'" , {'data': [{'source': el.source.id, 'source_label': el.source. type, 'target': el.target.id, 'target_label': el.target.type, 'type': el.type.replace(' ', '_').upper(), 'properties': el. properties} for el in document.relationships]})
Take GraphDocument as input as uses it to construct a graph.
lazy_load
"""Lazy load records from dataframe.""" for _, row in self.data_frame.iterrows(): text = row[self.page_content_column] metadata = row.to_dict() metadata.pop(self.page_content_column) yield Document(page_content=text, metadata=metadata)
def lazy_load(self) ->Iterator[Document]: """Lazy load records from dataframe.""" for _, row in self.data_frame.iterrows(): text = row[self.page_content_column] metadata = row.to_dict() metadata.pop(self.page_content_column) yield Document(page_content=text, metadata=metadata)
Lazy load records from dataframe.
get_ordered_generation_requests
""" Return the body for the model router input. """ from gpt_router.models import GenerationParams, ModelGenerationRequest return [ModelGenerationRequest(model_name=model.name, provider_name=model. provider_name, order=index + 1, prompt_params=GenerationParams(**kwargs )) for index, model in enumerate(models_priority_list)]
def get_ordered_generation_requests(models_priority_list: List[ GPTRouterModel], **kwargs): """ Return the body for the model router input. """ from gpt_router.models import GenerationParams, ModelGenerationRequest return [ModelGenerationRequest(model_name=model.name, provider_name= model.provider_name, order=index + 1, prompt_params= GenerationParams(**kwargs)) for index, model in enumerate( models_priority_list)]
Return the body for the model router input.
_import_powerbi_tool_InfoPowerBITool
from langchain_community.tools.powerbi.tool import InfoPowerBITool return InfoPowerBITool
def _import_powerbi_tool_InfoPowerBITool() ->Any: from langchain_community.tools.powerbi.tool import InfoPowerBITool return InfoPowerBITool
null
clean_email_body
"""Clean email body.""" try: from bs4 import BeautifulSoup try: soup = BeautifulSoup(str(body), 'html.parser') body = soup.get_text() return str(body) except Exception as e: logger.error(e) return str(body) except ImportError: logger.warning('BeautifulSoup not installed. Skipping cleaning.') return str(body)
def clean_email_body(body: str) ->str: """Clean email body.""" try: from bs4 import BeautifulSoup try: soup = BeautifulSoup(str(body), 'html.parser') body = soup.get_text() return str(body) except Exception as e: logger.error(e) return str(body) except ImportError: logger.warning('BeautifulSoup not installed. Skipping cleaning.') return str(body)
Clean email body.
on_text
"""Run when agent ends.""" print_text(text, color=color or self.color, end=end)
def on_text(self, text: str, color: Optional[str]=None, end: str='', ** kwargs: Any) ->None: """Run when agent ends.""" print_text(text, color=color or self.color, end=end)
Run when agent ends.
test_neo4jvector_prefer_indexname
"""Test using when two indexes are found, prefer by index_name.""" Neo4jVector.from_texts(texts=['foo'], embedding= FakeEmbeddingsWithOsDimension(), url=url, username=username, password= password, pre_delete_collection=True) Neo4jVector.from_texts(texts=['bar'], embedding= FakeEmbeddingsWithOsDimension(), url=url, username=username, password= password, index_name='foo', node_label='Test', embedding_node_property= 'vector', text_node_property='info', pre_delete_collection=True) existing_index = Neo4jVector.from_existing_index(embedding= FakeEmbeddingsWithOsDimension(), url=url, username=username, password= password, index_name='foo', text_node_property='info') output = existing_index.similarity_search('bar', k=1) assert output == [Document(page_content='bar', metadata={})] drop_vector_indexes(existing_index)
def test_neo4jvector_prefer_indexname() ->None: """Test using when two indexes are found, prefer by index_name.""" Neo4jVector.from_texts(texts=['foo'], embedding= FakeEmbeddingsWithOsDimension(), url=url, username=username, password=password, pre_delete_collection=True) Neo4jVector.from_texts(texts=['bar'], embedding= FakeEmbeddingsWithOsDimension(), url=url, username=username, password=password, index_name='foo', node_label='Test', embedding_node_property='vector', text_node_property='info', pre_delete_collection=True) existing_index = Neo4jVector.from_existing_index(embedding= FakeEmbeddingsWithOsDimension(), url=url, username=username, password=password, index_name='foo', text_node_property='info') output = existing_index.similarity_search('bar', k=1) assert output == [Document(page_content='bar', metadata={})] drop_vector_indexes(existing_index)
Test using when two indexes are found, prefer by index_name.
test_astradb_cache
set_llm_cache(astradb_cache) llm = FakeLLM() params = llm.dict() params['stop'] = None llm_string = str(sorted([(k, v) for k, v in params.items()])) get_llm_cache().update('foo', llm_string, [Generation(text='fizz')]) output = llm.generate(['foo']) print(output) expected_output = LLMResult(generations=[[Generation(text='fizz')]], llm_output={}) print(expected_output) assert output == expected_output astradb_cache.clear()
def test_astradb_cache(self, astradb_cache: AstraDBCache) ->None: set_llm_cache(astradb_cache) llm = FakeLLM() params = llm.dict() params['stop'] = None llm_string = str(sorted([(k, v) for k, v in params.items()])) get_llm_cache().update('foo', llm_string, [Generation(text='fizz')]) output = llm.generate(['foo']) print(output) expected_output = LLMResult(generations=[[Generation(text='fizz')]], llm_output={}) print(expected_output) assert output == expected_output astradb_cache.clear()
null
_on_retriever_start
"""Process the Retriever Run upon start.""" if run.parent_run_id is None: run.reference_example_id = self.example_id self._submit(self._persist_run_single, _copy(run))
def _on_retriever_start(self, run: Run) ->None: """Process the Retriever Run upon start.""" if run.parent_run_id is None: run.reference_example_id = self.example_id self._submit(self._persist_run_single, _copy(run))
Process the Retriever Run upon start.
_run
try: write_path = self.get_relative_path(file_path) except FileValidationError: return INVALID_PATH_TEMPLATE.format(arg_name='file_path', value=file_path) try: write_path.parent.mkdir(exist_ok=True, parents=False) mode = 'a' if append else 'w' with write_path.open(mode, encoding='utf-8') as f: f.write(text) return f'File written successfully to {file_path}.' except Exception as e: return 'Error: ' + str(e)
def _run(self, file_path: str, text: str, append: bool=False, run_manager: Optional[CallbackManagerForToolRun]=None) ->str: try: write_path = self.get_relative_path(file_path) except FileValidationError: return INVALID_PATH_TEMPLATE.format(arg_name='file_path', value= file_path) try: write_path.parent.mkdir(exist_ok=True, parents=False) mode = 'a' if append else 'w' with write_path.open(mode, encoding='utf-8') as f: f.write(text) return f'File written successfully to {file_path}.' except Exception as e: return 'Error: ' + str(e)
null
put
"""PUT the URL and return the text.""" return self.requests.put(url, data, **kwargs).text
def put(self, url: str, data: Dict[str, Any], **kwargs: Any) ->str: """PUT the URL and return the text.""" return self.requests.put(url, data, **kwargs).text
PUT the URL and return the text.
test_neo4jvector_retriever_search_threshold
"""Test using retriever for searching with threshold.""" metadatas = [{'page': str(i)} for i in range(len(texts))] docsearch = Neo4jVector.from_texts(texts=texts, embedding= FakeEmbeddingsWithOsDimension(), metadatas=metadatas, url=url, username =username, password=password, pre_delete_collection=True) retriever = docsearch.as_retriever(search_type='similarity_score_threshold', search_kwargs={'k': 3, 'score_threshold': 0.9999}) output = retriever.get_relevant_documents('foo') assert output == [Document(page_content='foo', metadata={'page': '0'})] drop_vector_indexes(docsearch)
def test_neo4jvector_retriever_search_threshold() ->None: """Test using retriever for searching with threshold.""" metadatas = [{'page': str(i)} for i in range(len(texts))] docsearch = Neo4jVector.from_texts(texts=texts, embedding= FakeEmbeddingsWithOsDimension(), metadatas=metadatas, url=url, username=username, password=password, pre_delete_collection=True) retriever = docsearch.as_retriever(search_type= 'similarity_score_threshold', search_kwargs={'k': 3, 'score_threshold': 0.9999}) output = retriever.get_relevant_documents('foo') assert output == [Document(page_content='foo', metadata={'page': '0'})] drop_vector_indexes(docsearch)
Test using retriever for searching with threshold.
url
return 'https://api.mathpix.com/v3/pdf'
@property def url(self) ->str: return 'https://api.mathpix.com/v3/pdf'
null
yield_blobs
"""Yield blob implementation.""" yield Blob(data=b'Hello, World!')
def yield_blobs(self) ->Iterable[Blob]: """Yield blob implementation.""" yield Blob(data=b'Hello, World!')
Yield blob implementation.
output_keys
"""Return the singular output key. :meta private: """ if not self.return_intermediate_steps: return [self.output_key] else: return [self.output_key, INTERMEDIATE_STEPS_KEY]
@property def output_keys(self) ->List[str]: """Return the singular output key. :meta private: """ if not self.return_intermediate_steps: return [self.output_key] else: return [self.output_key, INTERMEDIATE_STEPS_KEY]
Return the singular output key. :meta private:
test_pypdfium2_parser
"""Test PyPDFium2 parser.""" _assert_with_parser(PyPDFium2Parser())
@pytest.mark.requires('pypdfium2') def test_pypdfium2_parser() ->None: """Test PyPDFium2 parser.""" _assert_with_parser(PyPDFium2Parser())
Test PyPDFium2 parser.
_import_cohere
from langchain_community.llms.cohere import Cohere return Cohere
def _import_cohere() ->Any: from langchain_community.llms.cohere import Cohere return Cohere
null
_formatted_tag_value
return '|'.join([self.escaper.escape(tag) for tag in self._value])
@property def _formatted_tag_value(self) ->str: return '|'.join([self.escaper.escape(tag) for tag in self._value])
null
_is_aoss_enabled
"""Check if the service is http_auth is set as `aoss`.""" if http_auth is not None and hasattr(http_auth, 'service' ) and http_auth.service == 'aoss': return True return False
def _is_aoss_enabled(http_auth: Any) ->bool: """Check if the service is http_auth is set as `aoss`.""" if http_auth is not None and hasattr(http_auth, 'service' ) and http_auth.service == 'aoss': return True return False
Check if the service is http_auth is set as `aoss`.
get_results
try: from google.cloud.documentai_v1 import BatchProcessMetadata except ImportError as exc: raise ImportError( 'documentai package not found, please install it with `pip install google-cloud-documentai`' ) from exc return [DocAIParsingResults(source_path=status.input_gcs_source, parsed_path=status.output_gcs_destination) for op in operations for status in (op.metadata.individual_process_statuses if isinstance(op. metadata, BatchProcessMetadata) else BatchProcessMetadata.deserialize( op.metadata.value).individual_process_statuses)]
def get_results(self, operations: List['Operation']) ->List[DocAIParsingResults ]: try: from google.cloud.documentai_v1 import BatchProcessMetadata except ImportError as exc: raise ImportError( 'documentai package not found, please install it with `pip install google-cloud-documentai`' ) from exc return [DocAIParsingResults(source_path=status.input_gcs_source, parsed_path=status.output_gcs_destination) for op in operations for status in (op.metadata.individual_process_statuses if isinstance(op .metadata, BatchProcessMetadata) else BatchProcessMetadata. deserialize(op.metadata.value).individual_process_statuses)]
null
test_results
"""Test that call gives correct answer.""" search = SearchApiAPIWrapper() output = search.results('What is the capital of Lithuania?') assert 'Vilnius' in output['answer_box']['answer'] assert 'Vilnius' in output['answer_box']['snippet'] assert 'Vilnius' in output['knowledge_graph']['description'] assert 'Vilnius' in output['organic_results'][0]['snippet']
def test_results() ->None: """Test that call gives correct answer.""" search = SearchApiAPIWrapper() output = search.results('What is the capital of Lithuania?') assert 'Vilnius' in output['answer_box']['answer'] assert 'Vilnius' in output['answer_box']['snippet'] assert 'Vilnius' in output['knowledge_graph']['description'] assert 'Vilnius' in output['organic_results'][0]['snippet']
Test that call gives correct answer.
get_lc_namespace
"""Get the namespace of the langchain object.""" return ['langchain', 'schema', 'runnable']
@classmethod def get_lc_namespace(cls) ->List[str]: """Get the namespace of the langchain object.""" return ['langchain', 'schema', 'runnable']
Get the namespace of the langchain object.
_generate
"""Run the LLM on the given prompt and input.""" generations = [] new_arg_supported = inspect.signature(self._call).parameters.get('run_manager') for prompt in prompts: text = self._call(prompt, stop=stop, run_manager=run_manager, **kwargs ) if new_arg_supported else self._call(prompt, stop=stop, **kwargs) generations.append([Generation(text=text)]) return LLMResult(generations=generations)
def _generate(self, prompts: List[str], stop: Optional[List[str]]=None, run_manager: Optional[CallbackManagerForLLMRun]=None, **kwargs: Any ) ->LLMResult: """Run the LLM on the given prompt and input.""" generations = [] new_arg_supported = inspect.signature(self._call).parameters.get( 'run_manager') for prompt in prompts: text = self._call(prompt, stop=stop, run_manager=run_manager, **kwargs ) if new_arg_supported else self._call(prompt, stop=stop, **kwargs) generations.append([Generation(text=text)]) return LLMResult(generations=generations)
Run the LLM on the given prompt and input.
_import_operator_config
try: from presidio_anonymizer.entities import OperatorConfig except ImportError as e: raise ImportError( 'Could not import presidio_anonymizer, please install with `pip install presidio-anonymizer`.' ) from e return OperatorConfig
def _import_operator_config() ->'OperatorConfig': try: from presidio_anonymizer.entities import OperatorConfig except ImportError as e: raise ImportError( 'Could not import presidio_anonymizer, please install with `pip install presidio-anonymizer`.' ) from e return OperatorConfig
null
_on_chain_start
"""Process the Chain Run upon start.""" if run.parent_run_id is None: run.reference_example_id = self.example_id self._submit(self._persist_run_single, _copy(run))
def _on_chain_start(self, run: Run) ->None: """Process the Chain Run upon start.""" if run.parent_run_id is None: run.reference_example_id = self.example_id self._submit(self._persist_run_single, _copy(run))
Process the Chain Run upon start.
fakerun
async def run(self: Any, **args: Any) ->str: await asyncio.sleep(0.1) data = {'extracted_text': [{'body': {'text': 'Hello World'}}], 'file_extracted_data': [{'language': 'en'}], 'field_metadata': [{ 'metadata': {'metadata': {'paragraphs': [{'end': 66, 'sentences': [ {'start': 1, 'end': 67}]}]}}}]} return json.dumps(data) return run
def fakerun(**args: Any) ->Any: async def run(self: Any, **args: Any) ->str: await asyncio.sleep(0.1) data = {'extracted_text': [{'body': {'text': 'Hello World'}}], 'file_extracted_data': [{'language': 'en'}], 'field_metadata': [{'metadata': {'metadata': {'paragraphs': [{'end': 66, 'sentences': [{'start': 1, 'end': 67}]}]}}}]} return json.dumps(data) return run
null
_While
self.fill('while ') self.dispatch(t.test) self.enter() self.dispatch(t.body) self.leave() if t.orelse: self.fill('else') self.enter() self.dispatch(t.orelse) self.leave()
def _While(self, t): self.fill('while ') self.dispatch(t.test) self.enter() self.dispatch(t.body) self.leave() if t.orelse: self.fill('else') self.enter() self.dispatch(t.orelse) self.leave()
null
_summary_search_result_to_doc
return [Document(page_content=r.summary.content, metadata={'score': r.dist, 'uuid': r.summary.uuid, 'created_at': r.summary.created_at, 'token_count': r.summary.token_count}) for r in results if r.summary]
def _summary_search_result_to_doc(self, results: List[MemorySearchResult] ) ->List[Document]: return [Document(page_content=r.summary.content, metadata={'score': r. dist, 'uuid': r.summary.uuid, 'created_at': r.summary.created_at, 'token_count': r.summary.token_count}) for r in results if r.summary]
null
__add__
if isinstance(other, ChatGenerationChunk): generation_info = ({**self.generation_info or {}, **other. generation_info or {}} if self.generation_info is not None or other .generation_info is not None else None) return ChatGenerationChunk(message=self.message + other.message, generation_info=generation_info) else: raise TypeError( f"unsupported operand type(s) for +: '{type(self)}' and '{type(other)}'" )
def __add__(self, other: ChatGenerationChunk) ->ChatGenerationChunk: if isinstance(other, ChatGenerationChunk): generation_info = ({**self.generation_info or {}, **other. generation_info or {}} if self.generation_info is not None or other.generation_info is not None else None) return ChatGenerationChunk(message=self.message + other.message, generation_info=generation_info) else: raise TypeError( f"unsupported operand type(s) for +: '{type(self)}' and '{type(other)}'" )
null
embed_query
"""Compute query embeddings using a SageMaker inference endpoint. Args: text: The text to embed. Returns: Embeddings for the text. """ return self._embedding_func([text])[0]
def embed_query(self, text: str) ->List[float]:
    """Embed a single query string via the SageMaker inference endpoint.

    Args:
        text: The text to embed.

    Returns:
        The embedding vector for ``text``.
    """
    # _embedding_func is batched; send a one-item batch and unwrap it.
    batch_result = self._embedding_func([text])
    return batch_result[0]
Compute query embeddings using a SageMaker inference endpoint. Args: text: The text to embed. Returns: Embeddings for the text.
__init__
"""Initialize the Cloudflare Workers AI client.""" super().__init__(**kwargs) self.headers = {'Authorization': f'Bearer {self.api_token}'}
def __init__(self, **kwargs: Any):
    """Initialize the Cloudflare Workers AI client.

    Args:
        **kwargs: Forwarded to the parent initializer — presumably this is
            what sets ``self.api_token``; confirm against the base class.
    """
    super().__init__(**kwargs)
    # Bearer-token header reused for all subsequent API requests.
    self.headers = {'Authorization': f'Bearer {self.api_token}'}
Initialize the Cloudflare Workers AI client.
load_memory_variables
"""Return history buffer.""" return {self.memory_key: self.buffer}
def load_memory_variables(self, inputs: Dict[str, Any]) ->Dict[str, Any]:
    """Return the history buffer keyed by this memory's configured key.

    ``inputs`` is accepted for interface compatibility but not consulted.
    """
    key = self.memory_key
    return {key: self.buffer}
Return history buffer.
get_parents
parents = [] current_run = run while current_run.parent_run_id: parent = self.run_map.get(str(current_run.parent_run_id)) if parent: parents.append(parent) current_run = parent else: break return parents
def get_parents(self, run: Run) ->List[Run]:
    """Collect the ancestors of ``run``, nearest parent first.

    Follows ``parent_run_id`` links through ``self.run_map`` and stops at
    the root or at the first parent id missing from the map.
    """
    ancestors: List[Run] = []
    node = run
    while node.parent_run_id:
        parent = self.run_map.get(str(node.parent_run_id))
        if not parent:
            break
        ancestors.append(parent)
        node = parent
    return ancestors
null
_mlflow_extras
return '[genai]'
@property
def _mlflow_extras(self) ->str:
    """Return the pip extras suffix for mlflow (``'[genai]'``)."""
    return '[genai]'
null
accepts_context
"""Check if a callable accepts a context argument.""" try: return signature(callable).parameters.get('context') is not None except ValueError: return False
def accepts_context(callable: Callable[..., Any]) ->bool:
    """Check if a callable accepts a context argument."""
    try:
        params = signature(callable).parameters
    except ValueError:
        # Some builtins / extension callables expose no signature.
        return False
    return 'context' in params
Check if a callable accepts a context argument.
max_marginal_relevance_search
"""Return docs selected using the maximal marginal relevance. Maximal marginal relevance optimizes for similarity to query AND diversity among selected documents. Args: query: Text to look up documents similar to. k: Number of Documents to return. Defaults to 4. fetch_k: Number of Documents to fetch before filtering (if needed) to pass to MMR algorithm. lambda_mult: Number between 0 and 1 that determines the degree of diversity among the results with 0 corresponding to maximum diversity and 1 to minimum diversity. Defaults to 0.5. Returns: List of Documents selected by maximal marginal relevance. """ embedding = self._embed_query(query) docs = self.max_marginal_relevance_search_by_vector(embedding, k=k, fetch_k =fetch_k, lambda_mult=lambda_mult, filter=filter, **kwargs) return docs
def max_marginal_relevance_search(self, query: str, k: int=4, fetch_k: int=
    20, lambda_mult: float=0.5, filter: Optional[Dict[str, Any]]=None, **
    kwargs: Any) ->List[Document]:
    """Select documents via maximal marginal relevance (MMR).

    MMR balances similarity to ``query`` against diversity among the
    returned documents.

    Args:
        query: Text to look up documents similar to.
        k: Number of documents to return. Defaults to 4.
        fetch_k: Number of candidates fetched before MMR re-ranking.
            Defaults to 20.
        lambda_mult: Diversity weight in [0, 1]; 0 yields maximum
            diversity, 1 minimum. Defaults to 0.5.
        filter: Optional metadata filter forwarded to the vector search.

    Returns:
        Documents selected by maximal marginal relevance.
    """
    query_embedding = self._embed_query(query)
    return self.max_marginal_relevance_search_by_vector(
        query_embedding,
        k=k,
        fetch_k=fetch_k,
        lambda_mult=lambda_mult,
        filter=filter,
        **kwargs,
    )
Return docs selected using the maximal marginal relevance. Maximal marginal relevance optimizes for similarity to query AND diversity among selected documents. Args: query: Text to look up documents similar to. k: Number of Documents to return. Defaults to 4. fetch_k: Number of Documents to fetch before filtering (if needed) to pass to MMR algorithm. lambda_mult: Number between 0 and 1 that determines the degree of diversity among the results with 0 corresponding to maximum diversity and 1 to minimum diversity. Defaults to 0.5. Returns: List of Documents selected by maximal marginal relevance.
_type
return 'conversational_chat'
@property
def _type(self) ->str:
    """Return this agent's type identifier: ``'conversational_chat'``."""
    return 'conversational_chat'
null
test_simple_action_strlist_no_emb
str1 = 'test1' str2 = 'test2' str3 = 'test3' expected = [{'a_namespace': str1}, {'a_namespace': str2}, {'a_namespace': str3} ] to_embed: List[Union[str, base._Embed]] = [str1, str2, str3] assert base.embed(to_embed, MockEncoder(), 'a_namespace') == expected
@pytest.mark.requires('vowpal_wabbit_next')
def test_simple_action_strlist_no_emb() ->None:
    """Plain strings should embed to one namespaced dict each, unchanged."""
    actions = ['test1', 'test2', 'test3']
    to_embed: List[Union[str, base._Embed]] = list(actions)
    expected = [{'a_namespace': action} for action in actions]
    assert base.embed(to_embed, MockEncoder(), 'a_namespace') == expected
null
try_json_stringify
""" Try to stringify an object to JSON. Args: obj: Object to stringify. fallback: Fallback string to return if the object cannot be stringified. Returns: A JSON string if the object can be stringified, otherwise the fallback string. """ try: return json.dumps(obj, indent=2, ensure_ascii=False) except Exception: return fallback
def try_json_stringify(obj: Any, fallback: str) ->str:
    """
    Try to stringify an object to JSON.

    Args:
        obj: Object to stringify.
        fallback: Fallback string to return if the object cannot be
            stringified.

    Returns:
        A JSON string if the object can be stringified, otherwise the
        fallback string.
    """
    try:
        stringified = json.dumps(obj, indent=2, ensure_ascii=False)
    except Exception:
        # Anything json can't serialize (sets, custom objects, cycles, ...).
        return fallback
    return stringified
Try to stringify an object to JSON. Args: obj: Object to stringify. fallback: Fallback string to return if the object cannot be stringified. Returns: A JSON string if the object can be stringified, otherwise the fallback string.
__getattr__
if name == 'AINAppOps': return _import_ainetwork_app() elif name == 'AINOwnerOps': return _import_ainetwork_owner() elif name == 'AINRuleOps': return _import_ainetwork_rule() elif name == 'AINTransfer': return _import_ainetwork_transfer() elif name == 'AINValueOps': return _import_ainetwork_value() elif name == 'ArxivQueryRun': return _import_arxiv_tool() elif name == 'AzureCogsFormRecognizerTool': return _import_azure_cognitive_services_AzureCogsFormRecognizerTool() elif name == 'AzureCogsImageAnalysisTool': return _import_azure_cognitive_services_AzureCogsImageAnalysisTool() elif name == 'AzureCogsSpeech2TextTool': return _import_azure_cognitive_services_AzureCogsSpeech2TextTool() elif name == 'AzureCogsText2SpeechTool': return _import_azure_cognitive_services_AzureCogsText2SpeechTool() elif name == 'AzureCogsTextAnalyticsHealthTool': return _import_azure_cognitive_services_AzureCogsTextAnalyticsHealthTool() elif name == 'BingSearchResults': return _import_bing_search_tool_BingSearchResults() elif name == 'BingSearchRun': return _import_bing_search_tool_BingSearchRun() elif name == 'BraveSearch': return _import_brave_search_tool() elif name == 'DuckDuckGoSearchResults': return _import_ddg_search_tool_DuckDuckGoSearchResults() elif name == 'DuckDuckGoSearchRun': return _import_ddg_search_tool_DuckDuckGoSearchRun() elif name == 'EdenAiExplicitImageTool': return _import_edenai_EdenAiExplicitImageTool() elif name == 'EdenAiObjectDetectionTool': return _import_edenai_EdenAiObjectDetectionTool() elif name == 'EdenAiParsingIDTool': return _import_edenai_EdenAiParsingIDTool() elif name == 'EdenAiParsingInvoiceTool': return _import_edenai_EdenAiParsingInvoiceTool() elif name == 'EdenAiSpeechToTextTool': return _import_edenai_EdenAiSpeechToTextTool() elif name == 'EdenAiTextModerationTool': return _import_edenai_EdenAiTextModerationTool() elif name == 'EdenAiTextToSpeechTool': return _import_edenai_EdenAiTextToSpeechTool() elif name == 'EdenaiTool': return 
_import_edenai_EdenaiTool() elif name == 'ElevenLabsText2SpeechTool': return _import_eleven_labs_text2speech() elif name == 'CopyFileTool': return _import_file_management_CopyFileTool() elif name == 'DeleteFileTool': return _import_file_management_DeleteFileTool() elif name == 'FileSearchTool': return _import_file_management_FileSearchTool() elif name == 'ListDirectoryTool': return _import_file_management_ListDirectoryTool() elif name == 'MoveFileTool': return _import_file_management_MoveFileTool() elif name == 'ReadFileTool': return _import_file_management_ReadFileTool() elif name == 'WriteFileTool': return _import_file_management_WriteFileTool() elif name == 'GmailCreateDraft': return _import_gmail_GmailCreateDraft() elif name == 'GmailGetMessage': return _import_gmail_GmailGetMessage() elif name == 'GmailGetThread': return _import_gmail_GmailGetThread() elif name == 'GmailSearch': return _import_gmail_GmailSearch() elif name == 'GmailSendMessage': return _import_gmail_GmailSendMessage() elif name == 'GoogleCloudTextToSpeechTool': return _import_google_cloud_texttospeech() elif name == 'GooglePlacesTool': return _import_google_places_tool() elif name == 'GoogleSearchResults': return _import_google_search_tool_GoogleSearchResults() elif name == 'GoogleSearchRun': return _import_google_search_tool_GoogleSearchRun() elif name == 'GoogleSerperResults': return _import_google_serper_tool_GoogleSerperResults() elif name == 'GoogleSerperRun': return _import_google_serper_tool_GoogleSerperRun() elif name == 'SearchAPIResults': return _import_searchapi_tool_SearchAPIResults() elif name == 'SearchAPIRun': return _import_searchapi_tool_SearchAPIRun() elif name == 'BaseGraphQLTool': return _import_graphql_tool() elif name == 'HumanInputRun': return _import_human_tool() elif name == 'IFTTTWebhook': return _import_ifttt() elif name == 'StdInInquireTool': return _import_interaction_tool() elif name == 'JiraAction': return _import_jira_tool() elif name == 'JsonGetValueTool': 
return _import_json_tool_JsonGetValueTool() elif name == 'JsonListKeysTool': return _import_json_tool_JsonListKeysTool() elif name == 'MerriamWebsterQueryRun': return _import_merriam_webster_tool() elif name == 'MetaphorSearchResults': return _import_metaphor_search() elif name == 'NasaAction': return _import_nasa_tool() elif name == 'O365CreateDraftMessage': return _import_office365_create_draft_message() elif name == 'O365SearchEvents': return _import_office365_events_search() elif name == 'O365SearchEmails': return _import_office365_messages_search() elif name == 'O365SendEvent': return _import_office365_send_event() elif name == 'O365SendMessage': return _import_office365_send_message() elif name == 'authenticate': return _import_office365_utils() elif name == 'APIOperation': return _import_openapi_utils_api_models() elif name == 'OpenAPISpec': return _import_openapi_utils_openapi_utils() elif name == 'OpenWeatherMapQueryRun': return _import_openweathermap_tool() elif name == 'ClickTool': return _import_playwright_ClickTool() elif name == 'CurrentWebPageTool': return _import_playwright_CurrentWebPageTool() elif name == 'ExtractHyperlinksTool': return _import_playwright_ExtractHyperlinksTool() elif name == 'ExtractTextTool': return _import_playwright_ExtractTextTool() elif name == 'GetElementsTool': return _import_playwright_GetElementsTool() elif name == 'NavigateBackTool': return _import_playwright_NavigateBackTool() elif name == 'NavigateTool': return _import_playwright_NavigateTool() elif name == 'AIPluginTool': return _import_plugin() elif name == 'InfoPowerBITool': return _import_powerbi_tool_InfoPowerBITool() elif name == 'ListPowerBITool': return _import_powerbi_tool_ListPowerBITool() elif name == 'QueryPowerBITool': return _import_powerbi_tool_QueryPowerBITool() elif name == 'PubmedQueryRun': return _import_pubmed_tool() elif name == 'PythonAstREPLTool': return _import_python_tool_PythonAstREPLTool() elif name == 'PythonREPLTool': return 
_import_python_tool_PythonREPLTool() elif name == 'RedditSearchRun': return _import_reddit_search_RedditSearchRun() elif name == 'format_tool_to_openai_function': return _import_render() elif name == 'BaseRequestsTool': return _import_requests_tool_BaseRequestsTool() elif name == 'RequestsDeleteTool': return _import_requests_tool_RequestsDeleteTool() elif name == 'RequestsGetTool': return _import_requests_tool_RequestsGetTool() elif name == 'RequestsPatchTool': return _import_requests_tool_RequestsPatchTool() elif name == 'RequestsPostTool': return _import_requests_tool_RequestsPostTool() elif name == 'RequestsPutTool': return _import_requests_tool_RequestsPutTool() elif name == 'SteamWebAPIQueryRun': return _import_steam_webapi_tool() elif name == 'SceneXplainTool': return _import_scenexplain_tool() elif name == 'SearxSearchResults': return _import_searx_search_tool_SearxSearchResults() elif name == 'SearxSearchRun': return _import_searx_search_tool_SearxSearchRun() elif name == 'ShellTool': return _import_shell_tool() elif name == 'SlackGetChannel': return _import_slack_get_channel elif name == 'SlackGetMessage': return _import_slack_get_message elif name == 'SlackScheduleMessage': return _import_slack_schedule_message elif name == 'SlackSendMessage': return _import_slack_send_message elif name == 'SleepTool': return _import_sleep_tool() elif name == 'BaseSparkSQLTool': return _import_spark_sql_tool_BaseSparkSQLTool() elif name == 'InfoSparkSQLTool': return _import_spark_sql_tool_InfoSparkSQLTool() elif name == 'ListSparkSQLTool': return _import_spark_sql_tool_ListSparkSQLTool() elif name == 'QueryCheckerTool': return _import_spark_sql_tool_QueryCheckerTool() elif name == 'QuerySparkSQLTool': return _import_spark_sql_tool_QuerySparkSQLTool() elif name == 'BaseSQLDatabaseTool': return _import_sql_database_tool_BaseSQLDatabaseTool() elif name == 'InfoSQLDatabaseTool': return _import_sql_database_tool_InfoSQLDatabaseTool() elif name == 'ListSQLDatabaseTool': return 
_import_sql_database_tool_ListSQLDatabaseTool() elif name == 'QuerySQLCheckerTool': return _import_sql_database_tool_QuerySQLCheckerTool() elif name == 'QuerySQLDataBaseTool': return _import_sql_database_tool_QuerySQLDataBaseTool() elif name == 'StackExchangeTool': return _import_stackexchange_tool() elif name == 'SteamshipImageGenerationTool': return _import_steamship_image_generation() elif name == 'VectorStoreQATool': return _import_vectorstore_tool_VectorStoreQATool() elif name == 'VectorStoreQAWithSourcesTool': return _import_vectorstore_tool_VectorStoreQAWithSourcesTool() elif name == 'WikipediaQueryRun': return _import_wikipedia_tool() elif name == 'WolframAlphaQueryRun': return _import_wolfram_alpha_tool() elif name == 'YahooFinanceNewsTool': return _import_yahoo_finance_news() elif name == 'YouTubeSearchTool': return _import_youtube_search() elif name == 'ZapierNLAListActions': return _import_zapier_tool_ZapierNLAListActions() elif name == 'ZapierNLARunAction': return _import_zapier_tool_ZapierNLARunAction() elif name == 'BearlyInterpreterTool': return _import_bearly_tool() elif name == 'E2BDataAnalysisTool': return _import_e2b_data_analysis() else: raise AttributeError(f'Could not find: {name}')
# Lookup table: public export name -> name of the module-level importer
# function that lazily loads it. Importers are resolved through
# ``globals()`` at access time so no heavy optional dependency is imported
# until its tool is actually requested.
_TOOL_IMPORTER_NAMES = {
    'AINAppOps': '_import_ainetwork_app',
    'AINOwnerOps': '_import_ainetwork_owner',
    'AINRuleOps': '_import_ainetwork_rule',
    'AINTransfer': '_import_ainetwork_transfer',
    'AINValueOps': '_import_ainetwork_value',
    'ArxivQueryRun': '_import_arxiv_tool',
    'AzureCogsFormRecognizerTool': '_import_azure_cognitive_services_AzureCogsFormRecognizerTool',
    'AzureCogsImageAnalysisTool': '_import_azure_cognitive_services_AzureCogsImageAnalysisTool',
    'AzureCogsSpeech2TextTool': '_import_azure_cognitive_services_AzureCogsSpeech2TextTool',
    'AzureCogsText2SpeechTool': '_import_azure_cognitive_services_AzureCogsText2SpeechTool',
    'AzureCogsTextAnalyticsHealthTool': '_import_azure_cognitive_services_AzureCogsTextAnalyticsHealthTool',
    'BingSearchResults': '_import_bing_search_tool_BingSearchResults',
    'BingSearchRun': '_import_bing_search_tool_BingSearchRun',
    'BraveSearch': '_import_brave_search_tool',
    'DuckDuckGoSearchResults': '_import_ddg_search_tool_DuckDuckGoSearchResults',
    'DuckDuckGoSearchRun': '_import_ddg_search_tool_DuckDuckGoSearchRun',
    'EdenAiExplicitImageTool': '_import_edenai_EdenAiExplicitImageTool',
    'EdenAiObjectDetectionTool': '_import_edenai_EdenAiObjectDetectionTool',
    'EdenAiParsingIDTool': '_import_edenai_EdenAiParsingIDTool',
    'EdenAiParsingInvoiceTool': '_import_edenai_EdenAiParsingInvoiceTool',
    'EdenAiSpeechToTextTool': '_import_edenai_EdenAiSpeechToTextTool',
    'EdenAiTextModerationTool': '_import_edenai_EdenAiTextModerationTool',
    'EdenAiTextToSpeechTool': '_import_edenai_EdenAiTextToSpeechTool',
    'EdenaiTool': '_import_edenai_EdenaiTool',
    'ElevenLabsText2SpeechTool': '_import_eleven_labs_text2speech',
    'CopyFileTool': '_import_file_management_CopyFileTool',
    'DeleteFileTool': '_import_file_management_DeleteFileTool',
    'FileSearchTool': '_import_file_management_FileSearchTool',
    'ListDirectoryTool': '_import_file_management_ListDirectoryTool',
    'MoveFileTool': '_import_file_management_MoveFileTool',
    'ReadFileTool': '_import_file_management_ReadFileTool',
    'WriteFileTool': '_import_file_management_WriteFileTool',
    'GmailCreateDraft': '_import_gmail_GmailCreateDraft',
    'GmailGetMessage': '_import_gmail_GmailGetMessage',
    'GmailGetThread': '_import_gmail_GmailGetThread',
    'GmailSearch': '_import_gmail_GmailSearch',
    'GmailSendMessage': '_import_gmail_GmailSendMessage',
    'GoogleCloudTextToSpeechTool': '_import_google_cloud_texttospeech',
    'GooglePlacesTool': '_import_google_places_tool',
    'GoogleSearchResults': '_import_google_search_tool_GoogleSearchResults',
    'GoogleSearchRun': '_import_google_search_tool_GoogleSearchRun',
    'GoogleSerperResults': '_import_google_serper_tool_GoogleSerperResults',
    'GoogleSerperRun': '_import_google_serper_tool_GoogleSerperRun',
    'SearchAPIResults': '_import_searchapi_tool_SearchAPIResults',
    'SearchAPIRun': '_import_searchapi_tool_SearchAPIRun',
    'BaseGraphQLTool': '_import_graphql_tool',
    'HumanInputRun': '_import_human_tool',
    'IFTTTWebhook': '_import_ifttt',
    'StdInInquireTool': '_import_interaction_tool',
    'JiraAction': '_import_jira_tool',
    'JsonGetValueTool': '_import_json_tool_JsonGetValueTool',
    'JsonListKeysTool': '_import_json_tool_JsonListKeysTool',
    'MerriamWebsterQueryRun': '_import_merriam_webster_tool',
    'MetaphorSearchResults': '_import_metaphor_search',
    'NasaAction': '_import_nasa_tool',
    'O365CreateDraftMessage': '_import_office365_create_draft_message',
    'O365SearchEvents': '_import_office365_events_search',
    'O365SearchEmails': '_import_office365_messages_search',
    'O365SendEvent': '_import_office365_send_event',
    'O365SendMessage': '_import_office365_send_message',
    'authenticate': '_import_office365_utils',
    'APIOperation': '_import_openapi_utils_api_models',
    'OpenAPISpec': '_import_openapi_utils_openapi_utils',
    'OpenWeatherMapQueryRun': '_import_openweathermap_tool',
    'ClickTool': '_import_playwright_ClickTool',
    'CurrentWebPageTool': '_import_playwright_CurrentWebPageTool',
    'ExtractHyperlinksTool': '_import_playwright_ExtractHyperlinksTool',
    'ExtractTextTool': '_import_playwright_ExtractTextTool',
    'GetElementsTool': '_import_playwright_GetElementsTool',
    'NavigateBackTool': '_import_playwright_NavigateBackTool',
    'NavigateTool': '_import_playwright_NavigateTool',
    'AIPluginTool': '_import_plugin',
    'InfoPowerBITool': '_import_powerbi_tool_InfoPowerBITool',
    'ListPowerBITool': '_import_powerbi_tool_ListPowerBITool',
    'QueryPowerBITool': '_import_powerbi_tool_QueryPowerBITool',
    'PubmedQueryRun': '_import_pubmed_tool',
    'PythonAstREPLTool': '_import_python_tool_PythonAstREPLTool',
    'PythonREPLTool': '_import_python_tool_PythonREPLTool',
    'RedditSearchRun': '_import_reddit_search_RedditSearchRun',
    'format_tool_to_openai_function': '_import_render',
    'BaseRequestsTool': '_import_requests_tool_BaseRequestsTool',
    'RequestsDeleteTool': '_import_requests_tool_RequestsDeleteTool',
    'RequestsGetTool': '_import_requests_tool_RequestsGetTool',
    'RequestsPatchTool': '_import_requests_tool_RequestsPatchTool',
    'RequestsPostTool': '_import_requests_tool_RequestsPostTool',
    'RequestsPutTool': '_import_requests_tool_RequestsPutTool',
    'SteamWebAPIQueryRun': '_import_steam_webapi_tool',
    'SceneXplainTool': '_import_scenexplain_tool',
    'SearxSearchResults': '_import_searx_search_tool_SearxSearchResults',
    'SearxSearchRun': '_import_searx_search_tool_SearxSearchRun',
    'ShellTool': '_import_shell_tool',
    'SlackGetChannel': '_import_slack_get_channel',
    'SlackGetMessage': '_import_slack_get_message',
    'SlackScheduleMessage': '_import_slack_schedule_message',
    'SlackSendMessage': '_import_slack_send_message',
    'SleepTool': '_import_sleep_tool',
    'BaseSparkSQLTool': '_import_spark_sql_tool_BaseSparkSQLTool',
    'InfoSparkSQLTool': '_import_spark_sql_tool_InfoSparkSQLTool',
    'ListSparkSQLTool': '_import_spark_sql_tool_ListSparkSQLTool',
    'QueryCheckerTool': '_import_spark_sql_tool_QueryCheckerTool',
    'QuerySparkSQLTool': '_import_spark_sql_tool_QuerySparkSQLTool',
    'BaseSQLDatabaseTool': '_import_sql_database_tool_BaseSQLDatabaseTool',
    'InfoSQLDatabaseTool': '_import_sql_database_tool_InfoSQLDatabaseTool',
    'ListSQLDatabaseTool': '_import_sql_database_tool_ListSQLDatabaseTool',
    'QuerySQLCheckerTool': '_import_sql_database_tool_QuerySQLCheckerTool',
    'QuerySQLDataBaseTool': '_import_sql_database_tool_QuerySQLDataBaseTool',
    'StackExchangeTool': '_import_stackexchange_tool',
    'SteamshipImageGenerationTool': '_import_steamship_image_generation',
    'VectorStoreQATool': '_import_vectorstore_tool_VectorStoreQATool',
    'VectorStoreQAWithSourcesTool': '_import_vectorstore_tool_VectorStoreQAWithSourcesTool',
    'WikipediaQueryRun': '_import_wikipedia_tool',
    'WolframAlphaQueryRun': '_import_wolfram_alpha_tool',
    'YahooFinanceNewsTool': '_import_yahoo_finance_news',
    'YouTubeSearchTool': '_import_youtube_search',
    'ZapierNLAListActions': '_import_zapier_tool_ZapierNLAListActions',
    'ZapierNLARunAction': '_import_zapier_tool_ZapierNLARunAction',
    'BearlyInterpreterTool': '_import_bearly_tool',
    'E2BDataAnalysisTool': '_import_e2b_data_analysis',
}


def __getattr__(name: str) ->Any:
    """Lazily resolve module attribute ``name`` via its registered importer.

    Uses a lookup table instead of the previous long ``elif`` chain. This
    also fixes a bug where the four Slack entries (``SlackGetChannel``,
    ``SlackGetMessage``, ``SlackScheduleMessage``, ``SlackSendMessage``)
    returned the importer *function* instead of calling it: every importer
    is now invoked uniformly.

    Args:
        name: The attribute being accessed on this module.

    Returns:
        Whatever the matching ``_import_*`` importer returns.

    Raises:
        AttributeError: if ``name`` has no registered importer.
    """
    importer_name = _TOOL_IMPORTER_NAMES.get(name)
    if importer_name is None:
        raise AttributeError(f'Could not find: {name}')
    return globals()[importer_name]()
null
test_load_single_confluence_page
loader = ConfluenceLoader(url='https://templates.atlassian.net/wiki/') docs = loader.load(page_ids=['33189']) assert len(docs) == 1 assert docs[0].page_content is not None assert docs[0].metadata['id'] == '33189' assert docs[0].metadata['title'] == 'An easy intro to using Confluence' assert docs[0].metadata['source' ] == 'https://templates.atlassian.net/wiki/spaces/RD/pages/33189/An+easy+intro+to+using+Confluence'
@pytest.mark.skipif(not confluence_installed, reason=
    'Atlassian package not installed')
def test_load_single_confluence_page() ->None:
    """Load one public Confluence page by id and verify its metadata."""
    loader = ConfluenceLoader(url='https://templates.atlassian.net/wiki/')
    docs = loader.load(page_ids=['33189'])
    assert len(docs) == 1
    doc = docs[0]
    assert doc.page_content is not None
    metadata = doc.metadata
    assert metadata['id'] == '33189'
    assert metadata['title'] == 'An easy intro to using Confluence'
    assert metadata['source'
        ] == 'https://templates.atlassian.net/wiki/spaces/RD/pages/33189/An+easy+intro+to+using+Confluence'
null
_compare_run_with_error
if run.child_runs: assert len(expected_run.child_runs) == len(run.child_runs) for received, expected in zip(run.child_runs, expected_run.child_runs): _compare_run_with_error(received, expected) received_dict = run.dict(exclude={'child_runs'}) received_err = received_dict.pop('error') expected_dict = expected_run.dict(exclude={'child_runs'}) expected_err = expected_dict.pop('error') assert received_dict == expected_dict if expected_err is not None: assert received_err is not None assert expected_err in received_err else: assert received_err is None
def _compare_run_with_error(run: Run, expected_run: Run) ->None:
    """Recursively assert that ``run`` matches ``expected_run``.

    All fields except ``error`` must be exactly equal; for ``error`` the
    expected text only needs to be *contained* in the received one, or
    both must be ``None``. Children are compared pairwise first.
    """
    if run.child_runs:
        assert len(expected_run.child_runs) == len(run.child_runs)
        for child, expected_child in zip(run.child_runs, expected_run.child_runs):
            _compare_run_with_error(child, expected_child)
    actual_dict = run.dict(exclude={'child_runs'})
    actual_err = actual_dict.pop('error')
    expected_dict = expected_run.dict(exclude={'child_runs'})
    expected_err = expected_dict.pop('error')
    assert actual_dict == expected_dict
    if expected_err is None:
        assert actual_err is None
    else:
        assert actual_err is not None
        assert expected_err in actual_err
null
get_users_games
return self.steam.users.get_owned_games(steam_id, False, False)
def get_users_games(self, steam_id: str) ->List[str]:
    """Return the owned games for the given Steam user id.

    The two positional ``False`` flags are passed straight to the Steam
    client — presumably include-appinfo / include-free-games; confirm
    against the steam library's ``get_owned_games`` signature.
    """
    users_api = self.steam.users
    return users_api.get_owned_games(steam_id, False, False)
null
test_visit_operation
op = Operation(operator=Operator.AND, arguments=[Comparison(comparator= Comparator.GTE, attribute='qty', value=10), Comparison(comparator= Comparator.LTE, attribute='qty', value=20), Comparison(comparator= Comparator.EQ, attribute='name', value='foo')]) expected = {'$and': [{'qty': {'$gte': 10}}, {'qty': {'$lte': 20}}, {'name': {'$eq': 'foo'}}]} actual = DEFAULT_TRANSLATOR.visit_operation(op) assert expected == actual
def test_visit_operation() ->None:
    """AND of three comparisons translates to a ``$and`` filter dict."""
    comparisons = [
        Comparison(comparator=Comparator.GTE, attribute='qty', value=10),
        Comparison(comparator=Comparator.LTE, attribute='qty', value=20),
        Comparison(comparator=Comparator.EQ, attribute='name', value='foo'),
    ]
    op = Operation(operator=Operator.AND, arguments=comparisons)
    expected = {'$and': [{'qty': {'$gte': 10}}, {'qty': {'$lte': 20}}, {
        'name': {'$eq': 'foo'}}]}
    assert DEFAULT_TRANSLATOR.visit_operation(op) == expected
null
fuse_retrieved_docs
results_map = input['sources'] query = input['question'] embedded_query = embeddings.embed_query(query) names, docs = zip(*((name, doc) for name, docs in results_map.items() for doc in docs)) embedded_docs = embeddings.embed_documents([doc.page_content for doc in docs]) similarity = cosine_similarity([embedded_query], embedded_docs) most_similar = np.flip(np.argsort(similarity[0]))[:5] return [(names[i], docs[i]) for i in most_similar]
def fuse_retrieved_docs(input, *, top_k: int=5):
    """Fuse documents retrieved from multiple sources by query similarity.

    Embeds the question and every candidate document, then keeps the
    ``top_k`` documents most cosine-similar to the question.

    Args:
        input: Mapping with ``'sources'`` (source name -> list of docs)
            and ``'question'`` (the query string).
        top_k: Number of most-similar documents to keep. Defaults to 5,
            matching the previously hard-coded behavior.

    Returns:
        List of ``(source_name, document)`` pairs, most similar first.
    """
    results_map = input['sources']
    query = input['question']
    embedded_query = embeddings.embed_query(query)
    # Flatten (source, doc) pairs while remembering each doc's source.
    names, docs = zip(*((name, doc) for name, docs in results_map.items() for
        doc in docs))
    embedded_docs = embeddings.embed_documents([doc.page_content for doc in
        docs])
    similarity = cosine_similarity([embedded_query], embedded_docs)
    # Indices of the highest-similarity docs, descending.
    most_similar = np.flip(np.argsort(similarity[0]))[:top_k]
    return [(names[i], docs[i]) for i in most_similar]
null
on_tool_end
"""Do nothing when tool ends.""" pass
def on_tool_end(self, output: str, observation_prefix: Optional[str]=None,
    llm_prefix: Optional[str]=None, **kwargs: Any) ->None:
    """Do nothing when tool ends.

    Args:
        output: The tool's string output (ignored).
        observation_prefix: Optional observation prefix (ignored).
        llm_prefix: Optional prefix for the next LLM call (ignored).
        **kwargs: Additional callback arguments (ignored).
    """
    pass
Do nothing when tool ends.
_get_output
"""Return the output from the API call.""" if self.return_intermediate_steps: return {self.output_key: output, 'intermediate_steps': intermediate_steps} else: return {self.output_key: output}
def _get_output(self, output: str, intermediate_steps: dict) ->dict:
    """Return the output from the API call.

    Includes ``'intermediate_steps'`` only when the chain is configured
    to return them.
    """
    result = {self.output_key: output}
    if self.return_intermediate_steps:
        result['intermediate_steps'] = intermediate_steps
    return result
Return the output from the API call.
test_gpt_router_streaming
"""Test streaming tokens from GPTRouter.""" anthropic_claude = GPTRouterModel(name='claude-instant-1.2', provider_name= 'anthropic') chat = GPTRouter(models_priority_list=[anthropic_claude], streaming=True) message = HumanMessage(content='Hello') response = chat([message]) assert isinstance(response, AIMessage) assert isinstance(response.content, str)
def test_gpt_router_streaming() ->None:
    """Test streaming tokens from GPTRouter."""
    model = GPTRouterModel(name='claude-instant-1.2', provider_name='anthropic')
    chat = GPTRouter(models_priority_list=[model], streaming=True)
    response = chat([HumanMessage(content='Hello')])
    assert isinstance(response, AIMessage)
    assert isinstance(response.content, str)
Test streaming tokens from GPTRouter.
test_follow_up
"""Test follow up parsing.""" parser = SelfAskOutputParser() _input = 'Follow up: what is two + 2' output = parser.invoke(_input) expected_output = AgentAction(tool='Intermediate Answer', tool_input= 'what is two + 2', log=_input) assert output == expected_output _input = 'Followup: what is two + 2' output = parser.invoke(_input) expected_output = AgentAction(tool='Intermediate Answer', tool_input= 'what is two + 2', log=_input) assert output == expected_output
def test_follow_up() ->None:
    """Both 'Follow up:' and 'Followup:' prefixes parse to the same action."""
    parser = SelfAskOutputParser()
    for raw in ('Follow up: what is two + 2', 'Followup: what is two + 2'):
        output = parser.invoke(raw)
        expected = AgentAction(tool='Intermediate Answer', tool_input=
            'what is two + 2', log=raw)
        assert output == expected
Test follow up parsing.
test_retrieve_article_no_abstract_available
"""Test that returns 'No abstract available'.""" output = api_client.retrieve_article('10766884', '') assert 'No abstract available' == output['Summary']
def test_retrieve_article_no_abstract_available(api_client: PubMedAPIWrapper
    ) ->None:
    """Test that returns 'No abstract available'."""
    article = api_client.retrieve_article('10766884', '')
    assert article['Summary'] == 'No abstract available'
Test that returns 'No abstract available'.
test_multiple_history
"""Tests multiple history works.""" chat = QianfanChatEndpoint() response = chat(messages=[HumanMessage(content='Hello.'), AIMessage(content ='Hello!'), HumanMessage(content='How are you doing?')]) assert isinstance(response, BaseMessage) assert isinstance(response.content, str)
def test_multiple_history() -> None:
    """Tests multiple history works."""
    chat = QianfanChatEndpoint()
    conversation = [
        HumanMessage(content='Hello.'),
        AIMessage(content='Hello!'),
        HumanMessage(content='How are you doing?'),
    ]
    result = chat(messages=conversation)
    assert isinstance(result, BaseMessage)
    assert isinstance(result.content, str)
Tests multiple history works.
_llm_type
"""Return type of llm.""" return 'gradient'
@property
def _llm_type(self) -> str:
    """Identifier for this LLM implementation ('gradient')."""
    return 'gradient'
Return type of llm.
create_tables_if_not_exists
with self._conn.begin(): Base.metadata.create_all(self._conn)
def create_tables_if_not_exists(self) ->None:
    """Create every table registered on the declarative ``Base`` metadata,
    inside a single transaction on this store's connection.
    """
    with self._conn.begin():
        Base.metadata.create_all(self._conn)
null
import_wandb
"""Import the wandb python package and raise an error if it is not installed.""" try: import wandb except ImportError: raise ImportError( 'To use the wandb callback manager you need to have the `wandb` python package installed. Please install it with `pip install wandb`' ) return wandb
def import_wandb() -> Any:
    """Return the ``wandb`` module, raising a helpful error when it is missing."""
    try:
        import wandb
    except ImportError:
        raise ImportError(
            'To use the wandb callback manager you need to have the `wandb` python package installed. Please install it with `pip install wandb`'
            )
    else:
        return wandb
Import the wandb python package and raise an error if it is not installed.
set_verbose
"""If verbose is None, set it. This allows users to pass in None as verbose to access the global setting. """ if verbose is None: return _get_verbosity() else: return verbose
@validator('verbose', pre=True, always=True)
def set_verbose(cls, verbose: Optional[bool]) ->bool:
    """Resolve ``verbose``: substitute the global verbosity when it is None.

    Passing None lets callers defer to the process-wide setting.
    """
    return _get_verbosity() if verbose is None else verbose
If verbose is None, set it. This allows users to pass in None as verbose to access the global setting.
load
"""Load reddits.""" praw = _dependable_praw_import() reddit = praw.Reddit(client_id=self.client_id, client_secret=self. client_secret, user_agent=self.user_agent) results: List[Document] = [] if self.mode == 'subreddit': for search_query in self.search_queries: for category in self.categories: docs = self._subreddit_posts_loader(search_query=search_query, category=category, reddit=reddit) results.extend(docs) elif self.mode == 'username': for search_query in self.search_queries: for category in self.categories: docs = self._user_posts_loader(search_query=search_query, category=category, reddit=reddit) results.extend(docs) else: raise ValueError( "mode not correct, please enter 'username' or 'subreddit' as mode") return results
def load(self) ->List[Document]:
    """Fetch Reddit posts as documents for every search query/category pair."""
    praw = _dependable_praw_import()
    reddit = praw.Reddit(client_id=self.client_id, client_secret=self.
        client_secret, user_agent=self.user_agent)
    # Pick the loader once; unknown modes fail before any network traffic.
    if self.mode == 'subreddit':
        fetch = self._subreddit_posts_loader
    elif self.mode == 'username':
        fetch = self._user_posts_loader
    else:
        raise ValueError(
            "mode not correct, please enter 'username' or 'subreddit' as mode")
    documents: List[Document] = []
    for search_query in self.search_queries:
        for category in self.categories:
            documents.extend(fetch(search_query=search_query, category=
                category, reddit=reddit))
    return documents
Load reddits.
_run
"""Use the tool.""" try: return self.api_wrapper.raw_results(query, max_results=5, include_answer=True, search_depth='basic')['answer'] except Exception as e: return repr(e)
def _run(self, query: str, run_manager: Optional[CallbackManagerForToolRun]
    =None) ->Union[List[Dict], str]:
    """Run a Tavily search and return its generated answer.

    Any failure (network, key error on 'answer', ...) is returned as a
    repr string rather than raised, so agent loops keep going.
    """
    try:
        response = self.api_wrapper.raw_results(query, max_results=5,
            include_answer=True, search_depth='basic')
        return response['answer']
    except Exception as e:
        return repr(e)
Use the tool.
input_keys
"""Return the singular input key. :meta private: """ return [self.input_key]
@property
def input_keys(self) ->List[str]:
    """Expose the single expected input key as a one-element list.

    :meta private:
    """
    return [self.input_key]
Return the singular input key. :meta private:
split_text
"""Split incoming text and return chunks.""" splits = (s.text for s in self._tokenizer(text).sents) return self._merge_splits(splits, self._separator)
def split_text(self, text: str) ->List[str]:
    """Split incoming text on sentence boundaries and merge into chunks."""
    # Lazily walk spaCy sentence spans; _merge_splits handles chunk sizing.
    sentences = (span.text for span in self._tokenizer(text).sents)
    return self._merge_splits(sentences, self._separator)
Split incoming text and return chunks.
visit_operation
try: from timescale_vector import client except ImportError as e: raise ImportError( 'Cannot import timescale-vector. Please install with `pip install timescale-vector`.' ) from e args = [arg.accept(self) for arg in operation.arguments] return client.Predicates(*args, operator=self._format_func(operation.operator))
def visit_operation(self, operation: Operation) ->client.Predicates:
    """Translate an ``Operation`` node into a timescale-vector ``Predicates`` tree."""
    try:
        from timescale_vector import client
    except ImportError as e:
        raise ImportError(
            'Cannot import timescale-vector. Please install with `pip install timescale-vector`.'
            ) from e
    # Recurse into children first, then combine under the mapped operator.
    sub_predicates = [argument.accept(self) for argument in operation.arguments]
    return client.Predicates(*sub_predicates, operator=self._format_func(
        operation.operator))
null
parse_url
from azure.ai.documentintelligence.models import AnalyzeDocumentRequest poller = self.client.begin_analyze_document(self.api_model, AnalyzeDocumentRequest(url_source=url), output_content_format= 'markdown' if self.mode == 'markdown' else 'text') result = poller.result() if self.mode in ['single', 'markdown']: yield from self._generate_docs_single(result) elif self.mode == ['page']: yield from self._generate_docs_page(result) else: yield from self._generate_docs_object(result)
def parse_url(self, url: str) ->Iterator[Document]:
    """Analyze the document at ``url`` and lazily yield parsed documents.

    Output granularity follows ``self.mode``: one combined document for
    'single'/'markdown', one per page for 'page', otherwise the raw
    analysis result object.
    """
    from azure.ai.documentintelligence.models import AnalyzeDocumentRequest
    poller = self.client.begin_analyze_document(self.api_model,
        AnalyzeDocumentRequest(url_source=url), output_content_format=
        'markdown' if self.mode == 'markdown' else 'text')
    result = poller.result()
    if self.mode in ['single', 'markdown']:
        yield from self._generate_docs_single(result)
    elif self.mode == 'page':
        # Bug fix: the original compared the string mode against the LIST
        # ['page'], which is never equal, so page mode silently fell
        # through to the object branch.
        yield from self._generate_docs_page(result)
    else:
        yield from self._generate_docs_object(result)
null
embed_query
"""Call out to HuggingFaceHub's embedding endpoint for embedding query text. Args: text: The text to embed. Returns: Embeddings for the text. """ response = self.embed_documents([text])[0] return response
def embed_query(self, text: str) ->List[float]:
    """Embed one query string via HuggingFaceHub.

    Args:
        text: The text to embed.

    Returns:
        Embeddings for the text.
    """
    # Reuse the batch endpoint with a singleton batch and unwrap the result.
    return self.embed_documents([text])[0]
Call out to HuggingFaceHub's embedding endpoint for embedding query text. Args: text: The text to embed. Returns: Embeddings for the text.
_get_test_specs
"""Walk the test_specs directory and collect all files with the name 'apispec' in them. """ if not SPECS_DIR.exists(): raise ValueError return (Path(root) / file for root, _, files in os.walk(SPECS_DIR) for file in files if file.startswith('apispec'))
def _get_test_specs() ->Iterable[Path]:
    """Walk the test_specs directory and collect all files with the name
    'apispec' in them.

    Returns a lazy generator of paths; raises ValueError when the spec
    directory is absent.
    """
    if not SPECS_DIR.exists():
        # The original raised a bare ValueError with no hint of which
        # path was missing; include it in the message.
        raise ValueError(f'API spec directory not found: {SPECS_DIR}')
    return (Path(root) / file for root, _, files in os.walk(SPECS_DIR) for
        file in files if file.startswith('apispec'))
Walk the test_specs directory and collect all files with the name 'apispec' in them.
_import_databricks_vector_search
from langchain_community.vectorstores.databricks_vector_search import DatabricksVectorSearch return DatabricksVectorSearch
def _import_databricks_vector_search() ->Any:
    """Deferred-import hook returning the ``DatabricksVectorSearch`` class."""
    from langchain_community.vectorstores.databricks_vector_search import (
        DatabricksVectorSearch,
    )
    return DatabricksVectorSearch
null
create_folder
""" Creates a new folder. """ query_dict, error = load_query(query, fault_tolerant=True) if query_dict is None: return {'Error': error} space_id = self.space_id url = f'{DEFAULT_URL}/space/{space_id}/folder' payload = {'name': query_dict['name']} headers = self.get_headers() response = requests.post(url, json=payload, headers=headers) data = response.json() if 'id' in data: self.list_id = data['id'] return data
def create_folder(self, query: str) ->Dict:
    """Create a new ClickUp folder in this wrapper's space.

    ``query`` is a JSON payload containing at least ``name``; parse
    failures are reported in the returned dict under 'Error'.
    """
    parsed, error = load_query(query, fault_tolerant=True)
    if parsed is None:
        return {'Error': error}
    url = f'{DEFAULT_URL}/space/{self.space_id}/folder'
    response = requests.post(url, json={'name': parsed['name']}, headers=
        self.get_headers())
    data = response.json()
    if 'id' in data:
        # Remember the newly created folder so later calls can target it.
        self.list_id = data['id']
    return data
Creates a new folder.
test_myscale_search_filter
"""Test end to end construction and search with metadata filtering.""" texts = ['far', 'bar', 'baz'] metadatas = [{'first_letter': '{}'.format(text[0])} for text in texts] config = MyScaleSettings() config.table = 'test_myscale_search_filter' docsearch = MyScale.from_texts(texts=texts, embedding=FakeEmbeddings(), metadatas=metadatas, config=config) output = docsearch.similarity_search('far', k=1, where_str= f"{docsearch.metadata_column}.first_letter='f'") assert output == [Document(page_content='far', metadata={'first_letter': 'f'})] output = docsearch.similarity_search('bar', k=1, where_str= f"{docsearch.metadata_column}.first_letter='b'") assert output == [Document(page_content='bar', metadata={'first_letter': 'b'})] docsearch.drop()
def test_myscale_search_filter() ->None:
    """Test end to end construction and search with metadata filtering."""
    texts = ['far', 'bar', 'baz']
    metadatas = [{'first_letter': text[0]} for text in texts]
    config = MyScaleSettings()
    config.table = 'test_myscale_search_filter'
    docsearch = MyScale.from_texts(texts=texts, embedding=FakeEmbeddings(),
        metadatas=metadatas, config=config)
    # Each where_str filter should narrow the hit to the matching document.
    for word, letter in (('far', 'f'), ('bar', 'b')):
        hits = docsearch.similarity_search(word, k=1, where_str=
            f"{docsearch.metadata_column}.first_letter='{letter}'")
        assert hits == [Document(page_content=word, metadata={
            'first_letter': letter})]
    docsearch.drop()
Test end to end construction and search with metadata filtering.
fix_filter_directive
"""Fix invalid filter directive. Args: filter: Filter directive to fix. allowed_comparators: allowed comparators. Defaults to all comparators. allowed_operators: allowed operators. Defaults to all operators. allowed_attributes: allowed attributes. Defaults to all attributes. Returns: Fixed filter directive. """ if not (allowed_comparators or allowed_operators or allowed_attributes ) or not filter: return filter elif isinstance(filter, Comparison): if allowed_comparators and filter.comparator not in allowed_comparators: return None if allowed_attributes and filter.attribute not in allowed_attributes: return None return filter elif isinstance(filter, Operation): if allowed_operators and filter.operator not in allowed_operators: return None args = [fix_filter_directive(arg, allowed_comparators= allowed_comparators, allowed_operators=allowed_operators, allowed_attributes=allowed_attributes) for arg in filter.arguments] args = [arg for arg in args if arg is not None] if not args: return None elif len(args) == 1 and filter.operator in (Operator.AND, Operator.OR): return args[0] else: return Operation(operator=filter.operator, arguments=args) else: return filter
def fix_filter_directive(filter: Optional[FilterDirective], *,
    allowed_comparators: Optional[Sequence[Comparator]]=None,
    allowed_operators: Optional[Sequence[Operator]]=None,
    allowed_attributes: Optional[Sequence[str]]=None) ->Optional[
    FilterDirective]:
    """Fix invalid filter directive.

    Prunes comparisons/operations that use disallowed comparators,
    operators, or attributes, rebuilding the tree bottom-up. Returns None
    when nothing valid survives.

    Args:
        filter: Filter directive to fix.
        allowed_comparators: allowed comparators. Defaults to all comparators.
        allowed_operators: allowed operators. Defaults to all operators.
        allowed_attributes: allowed attributes. Defaults to all attributes.

    Returns:
        Fixed filter directive.
    """
    # No restrictions configured, or nothing to fix: pass through untouched.
    if not (allowed_comparators or allowed_operators or allowed_attributes
        ) or not filter:
        return filter
    elif isinstance(filter, Comparison):
        # Leaf node: drop it entirely if its comparator or attribute is banned.
        if (allowed_comparators and filter.comparator not in
            allowed_comparators):
            return None
        if allowed_attributes and filter.attribute not in allowed_attributes:
            return None
        return filter
    elif isinstance(filter, Operation):
        if allowed_operators and filter.operator not in allowed_operators:
            return None
        # Recursively fix children, discarding any pruned to None.
        args = [fix_filter_directive(arg, allowed_comparators=
            allowed_comparators, allowed_operators=allowed_operators,
            allowed_attributes=allowed_attributes) for arg in filter.arguments]
        args = [arg for arg in args if arg is not None]
        if not args:
            return None
        elif len(args) == 1 and filter.operator in (Operator.AND, Operator.OR):
            # AND/OR over a single surviving child collapses to that child.
            return args[0]
        else:
            return Operation(operator=filter.operator, arguments=args)
    else:
        # Unknown directive types are returned unchanged.
        return filter
Fix invalid filter directive. Args: filter: Filter directive to fix. allowed_comparators: allowed comparators. Defaults to all comparators. allowed_operators: allowed operators. Defaults to all operators. allowed_attributes: allowed attributes. Defaults to all attributes. Returns: Fixed filter directive.
_persist_run
chain_ = self._chains_map[run.id] chain_.set_outputs(outputs=run.outputs) self._chain_api.log_chain(chain_)
def _persist_run(self, run: 'Run') ->None:
    """Attach the finished run's outputs to its tracked chain and log it."""
    tracked_chain = self._chains_map[run.id]
    tracked_chain.set_outputs(outputs=run.outputs)
    self._chain_api.log_chain(tracked_chain)
null
init_elastic
""" cd tests/integration_tests/vectorstores/docker-compose docker-compose -f elasticsearch.yml up """ from docarray import BaseDoc from docarray.index import ElasticDocIndex class MyDoc(BaseDoc): title: str title_embedding: NdArray[32] other_emb: NdArray[32] year: int embeddings = FakeEmbeddings(size=32) elastic_db = ElasticDocIndex[MyDoc](hosts='http://localhost:9200', index_name='docarray_retriever') elastic_db.index([MyDoc(title=f'My document {i}', title_embedding=np.array( embeddings.embed_query(f'fake emb {i}')), other_emb=np.array(embeddings .embed_query(f'other fake emb {i}')), year=i) for i in range(100)]) filter_query = {'range': {'year': {'lte': 90}}} yield elastic_db, filter_query, embeddings elastic_db._client.indices.delete(index='docarray_retriever')
@pytest.fixture
def init_elastic() ->Generator[Tuple[ElasticDocIndex, Dict[str, Any],
    FakeEmbeddings], None, None]:
    """Yield a populated Elasticsearch doc index plus a filter and embeddings.

    Requires a local Elasticsearch:
    cd tests/integration_tests/vectorstores/docker-compose
    docker-compose -f elasticsearch.yml up
    """
    from docarray import BaseDoc
    from docarray.index import ElasticDocIndex

    class MyDoc(BaseDoc):
        title: str
        title_embedding: NdArray[32]
        other_emb: NdArray[32]
        year: int

    embeddings = FakeEmbeddings(size=32)
    index = ElasticDocIndex[MyDoc](hosts='http://localhost:9200',
        index_name='docarray_retriever')
    # 100 docs with deterministic fake embeddings keyed by position.
    docs = []
    for i in range(100):
        docs.append(MyDoc(title=f'My document {i}', title_embedding=np.
            array(embeddings.embed_query(f'fake emb {i}')), other_emb=np.
            array(embeddings.embed_query(f'other fake emb {i}')), year=i))
    index.index(docs)
    filter_query = {'range': {'year': {'lte': 90}}}
    yield index, filter_query, embeddings
    # Teardown: drop the index created for this test.
    index._client.indices.delete(index='docarray_retriever')
cd tests/integration_tests/vectorstores/docker-compose docker-compose -f elasticsearch.yml up
test_rst_code_splitter
splitter = RecursiveCharacterTextSplitter.from_language(Language.RST, chunk_size=CHUNK_SIZE, chunk_overlap=0) code = """ Sample Document =============== Section ------- This is the content of the section. Lists ----- - Item 1 - Item 2 - Item 3 Comment ******* Not a comment .. This is a comment """ chunks = splitter.split_text(code) assert chunks == ['Sample Document', '===============', 'Section', '-------', 'This is the', 'content of the', 'section.', 'Lists', '-----', '- Item 1', '- Item 2', '- Item 3', 'Comment', '*******', 'Not a comment', '.. This is a', 'comment'] code = """harry *** babylon is""" chunks = splitter.split_text(code) assert chunks == ['harry', '***\nbabylon is']
def test_rst_code_splitter() ->None:
    """RST-aware splitting should break on headings, lists and comments."""
    splitter = RecursiveCharacterTextSplitter.from_language(Language.RST,
        chunk_size=CHUNK_SIZE, chunk_overlap=0)
    sample = """
Sample Document
===============

Section
-------

This is the content of the section.

Lists
-----

- Item 1
- Item 2
- Item 3

Comment
*******
Not a comment

.. This is a comment
"""
    assert splitter.split_text(sample) == ['Sample Document',
        '===============', 'Section', '-------', 'This is the',
        'content of the', 'section.', 'Lists', '-----', '- Item 1',
        '- Item 2', '- Item 3', 'Comment', '*******', 'Not a comment',
        '.. This is a', 'comment']
    # A '***' transition mid-text stays attached to the following line.
    assert splitter.split_text('harry\n***\nbabylon is') == ['harry',
        '***\nbabylon is']
null
available_models
"""List the available models that can be invoked.""" if self._available_models is not None: return self._available_models live_fns = [v for v in self.available_functions if v.get('status') == 'ACTIVE'] self._available_models = {v['name']: v['id'] for v in live_fns} return self._available_models
@property
def available_models(self) ->dict:
    """Mapping of invocable model names to ids, computed once and cached."""
    if self._available_models is None:
        # Keep only functions whose status is ACTIVE, keyed name -> id.
        active = (fn for fn in self.available_functions if fn.get('status'
            ) == 'ACTIVE')
        self._available_models = {fn['name']: fn['id'] for fn in active}
    return self._available_models
List the available models that can be invoked.
_llm_type
"""Return the type of llm.""" return 'gpt4all'
@property
def _llm_type(self) ->str:
    """Identifier string for this LLM backend ('gpt4all')."""
    return 'gpt4all'
Return the type of llm.
validate_environment
"""Validate that AWS credentials to and python package exists in environment.""" if values['client'] is not None: return values try: import boto3 if values['credentials_profile_name'] is not None: session = boto3.Session(profile_name=values['credentials_profile_name'] ) else: session = boto3.Session() values['region_name'] = get_from_dict_or_env(values, 'region_name', 'AWS_DEFAULT_REGION', default=session.region_name) client_params = {} if values['region_name']: client_params['region_name'] = values['region_name'] if values['endpoint_url']: client_params['endpoint_url'] = values['endpoint_url'] if values['config']: client_params['config'] = values['config'] values['client'] = session.client('bedrock-runtime', **client_params) except ImportError: raise ModuleNotFoundError( 'Could not import boto3 python package. Please install it with `pip install boto3`.' ) except Exception as e: raise ValueError( 'Could not load credentials to authenticate with AWS client. Please check that credentials in the specified profile name are valid.' ) from e return values
@root_validator()
def validate_environment(cls, values: Dict) ->Dict:
    """Ensure boto3 is available and build the Bedrock runtime client.

    Skips construction when a client was supplied explicitly; otherwise
    resolves region from values/env/session and creates the client.
    """
    if values['client'] is not None:
        # Caller provided a pre-configured client; nothing to build.
        return values
    try:
        import boto3
        profile = values['credentials_profile_name']
        if profile is not None:
            session = boto3.Session(profile_name=profile)
        else:
            session = boto3.Session()
        values['region_name'] = get_from_dict_or_env(values, 'region_name',
            'AWS_DEFAULT_REGION', default=session.region_name)
        # Only forward client options that were actually set.
        client_params = {}
        for key in ('region_name', 'endpoint_url', 'config'):
            if values[key]:
                client_params[key] = values[key]
        values['client'] = session.client('bedrock-runtime', **client_params)
    except ImportError:
        raise ModuleNotFoundError(
            'Could not import boto3 python package. Please install it with `pip install boto3`.'
            )
    except Exception as e:
        raise ValueError(
            'Could not load credentials to authenticate with AWS client. Please check that credentials in the specified profile name are valid.'
            ) from e
    return values
Validate that AWS credentials to and python package exists in environment.
test_initialize_agent_with_str_agent_type
"""Test initialize_agent with a string.""" fake_llm = FakeLLM() agent_executor = initialize_agent([my_tool], fake_llm, 'zero-shot-react-description') assert agent_executor.agent._agent_type == AgentType.ZERO_SHOT_REACT_DESCRIPTION assert isinstance(agent_executor.tags, list) assert 'zero-shot-react-description' in agent_executor.tags
def test_initialize_agent_with_str_agent_type() ->None:
    """Test initialize_agent with a string."""
    executor = initialize_agent([my_tool], FakeLLM(),
        'zero-shot-react-description')
    # The string must resolve to the matching AgentType and be recorded as a tag.
    assert executor.agent._agent_type == AgentType.ZERO_SHOT_REACT_DESCRIPTION
    assert isinstance(executor.tags, list)
    assert 'zero-shot-react-description' in executor.tags
Test initialize_agent with a string.
embed_query
return [5.0, 6.0]
def embed_query(self, text: str) ->List[float]:
    """Return a fixed two-dimensional fake embedding for any query text."""
    return [5.0, 6.0]
null
create_prompt
"""Return default prompt.""" return TEXTWORLD_PROMPT
@classmethod
def create_prompt(cls, tools: Sequence[BaseTool]) ->BasePromptTemplate:
    """Return the canned TextWorld prompt; ``tools`` is accepted but unused."""
    return TEXTWORLD_PROMPT
Return default prompt.
check_response
"""Check the response from the completion call.""" if resp.status_code == 200: return resp elif resp.status_code in [400, 401]: raise ValueError( f"""status_code: {resp.status_code} code: {resp.code} message: {resp.message}""" ) else: raise HTTPError( f"""HTTP error occurred: status_code: {resp.status_code} code: {resp.code} message: {resp.message}""" , response=resp)
def check_response(resp: Any) ->Any:
    """Return ``resp`` when the completion call succeeded, otherwise raise.

    Raises ValueError for client errors (400/401) and HTTPError for any
    other non-200 status.
    """
    if resp.status_code == 200:
        return resp
    if resp.status_code in [400, 401]:
        raise ValueError(
            f"""status_code: {resp.status_code} 
 code: {resp.code} 
 message: {resp.message}"""
            )
    raise HTTPError(
        f"""HTTP error occurred: status_code: {resp.status_code} 
 code: {resp.code} 
 message: {resp.message}"""
        , response=resp)
Check the response from the completion call.
_prepare_intermediate_steps
if isinstance(self.trim_intermediate_steps, int ) and self.trim_intermediate_steps > 0: return intermediate_steps[-self.trim_intermediate_steps:] elif callable(self.trim_intermediate_steps): return self.trim_intermediate_steps(intermediate_steps) else: return intermediate_steps
def _prepare_intermediate_steps(self, intermediate_steps: List[Tuple[
    AgentAction, str]]) ->List[Tuple[AgentAction, str]]:
    """Trim the scratchpad per ``self.trim_intermediate_steps``.

    A positive int keeps only the most recent N steps; a callable is
    applied to the full list; anything else leaves the steps untouched.
    """
    trim = self.trim_intermediate_steps
    if isinstance(trim, int) and trim > 0:
        return intermediate_steps[-trim:]
    if callable(trim):
        return trim(intermediate_steps)
    return intermediate_steps
null
_collect_metrics
all_eval_results: dict = {} for c in self.configs: for callback in cast(list, c['callbacks']): if isinstance(callback, EvaluatorCallbackHandler): eval_results = callback.logged_eval_results for (_, example_id), v in eval_results.items(): all_eval_results.setdefault(str(example_id), {}).update({ 'feedback': v}) elif isinstance(callback, LangChainTracer): run = callback.latest_run execution_time = (run.end_time - run.start_time).total_seconds( ) if run and run.end_time else None run_id = str(run.id) if run else None all_eval_results.setdefault(str(callback.example_id), {}).update({ 'execution_time': execution_time, 'run_id': run_id}) return cast(Dict[str, _RowResult], all_eval_results)
def _collect_metrics(self) ->Dict[str, _RowResult]:
    """Aggregate per-example feedback and timing from all run callbacks.

    Returns a mapping of example id (str) to a dict holding 'feedback'
    (from evaluator callbacks) and/or 'execution_time'/'run_id' (from
    LangChain tracers).
    """
    all_eval_results: dict = {}
    for c in self.configs:
        for callback in cast(list, c['callbacks']):
            if isinstance(callback, EvaluatorCallbackHandler):
                # Evaluator results are keyed by (run_id, example_id);
                # we only keep the example id.
                eval_results = callback.logged_eval_results
                for (_, example_id), v in eval_results.items():
                    all_eval_results.setdefault(str(example_id), {}).update({
                        'feedback': v})
            elif isinstance(callback, LangChainTracer):
                # Timing is only computable for runs that finished.
                run = callback.latest_run
                execution_time = (run.end_time - run.start_time).total_seconds(
                    ) if run and run.end_time else None
                run_id = str(run.id) if run else None
                all_eval_results.setdefault(str(callback.example_id), {}
                    ).update({'execution_time': execution_time, 'run_id':
                    run_id})
    return cast(Dict[str, _RowResult], all_eval_results)
null
on_llm_error_common
self.errors += 1
def on_llm_error_common(self) ->None:
    """Bump the running count of LLM errors observed by this handler."""
    self.errors = self.errors + 1
null
_extract_images_from_page
"""Extract images from page and get the text with RapidOCR.""" import pdfminer def get_image(layout_object: Any) ->Any: if isinstance(layout_object, pdfminer.layout.LTImage): return layout_object if isinstance(layout_object, pdfminer.layout.LTContainer): for child in layout_object: return get_image(child) else: return None images = [] for img in list(filter(bool, map(get_image, page))): if img.stream['Filter'].name in _PDF_FILTER_WITHOUT_LOSS: images.append(np.frombuffer(img.stream.get_data(), dtype=np.uint8). reshape(img.stream['Height'], img.stream['Width'], -1)) elif img.stream['Filter'].name in _PDF_FILTER_WITH_LOSS: images.append(img.stream.get_data()) else: warnings.warn('Unknown PDF Filter!') return extract_from_images_with_rapidocr(images)
def _extract_images_from_page(self, page: pdfminer.layout.LTPage) ->str:
    """Extract images from page and get the text with RapidOCR."""
    import pdfminer

    def get_image(layout_object: Any) ->Any:
        """Depth-first search for the first LTImage under ``layout_object``."""
        if isinstance(layout_object, pdfminer.layout.LTImage):
            return layout_object
        if isinstance(layout_object, pdfminer.layout.LTContainer):
            # Bug fix: the original returned after inspecting only the
            # FIRST child of a container, so images nested under any
            # later child were silently missed.
            for child in layout_object:
                found = get_image(child)
                if found is not None:
                    return found
        return None
    images = []
    for img in filter(bool, map(get_image, page)):
        if img.stream['Filter'].name in _PDF_FILTER_WITHOUT_LOSS:
            # Lossless filters: rebuild the raw pixel array for OCR.
            images.append(np.frombuffer(img.stream.get_data(), dtype=np.
                uint8).reshape(img.stream['Height'], img.stream['Width'], -1))
        elif img.stream['Filter'].name in _PDF_FILTER_WITH_LOSS:
            # Lossy (e.g. JPEG) data can be passed through as-is.
            images.append(img.stream.get_data())
        else:
            warnings.warn('Unknown PDF Filter!')
    return extract_from_images_with_rapidocr(images)
Extract images from page and get the text with RapidOCR.
test_extracts_href
bs_transformer = BeautifulSoupTransformer() multiple_tags_html = ( "<h1>First heading.</h1><p>First paragraph with an <a href='http://example.com'>example</a></p><p>Second paragraph with an <a>a tag without href</a></p>" ) documents = [Document(page_content=multiple_tags_html)] docs_transformed = bs_transformer.transform_documents(documents, tags_to_extract=['p']) assert docs_transformed[0 ].page_content == 'First paragraph with an example (http://example.com) Second paragraph with an a tag without href'
@pytest.mark.requires('bs4')
def test_extracts_href() ->None:
    """Anchor text should be kept, with its href appended in parentheses."""
    html = ("<h1>First heading.</h1>" +
        "<p>First paragraph with an <a href='http://example.com'>example</a></p>"
         + "<p>Second paragraph with an <a>a tag without href</a></p>")
    transformer = BeautifulSoupTransformer()
    docs = transformer.transform_documents([Document(page_content=html)],
        tags_to_extract=['p'])
    expected = (
        'First paragraph with an example (http://example.com) ' +
        'Second paragraph with an a tag without href')
    assert docs[0].page_content == expected
null
_embedding_source_column_name
"""Return the name of the embedding source column. None if the index is not a Databricks-managed embedding index. """ return self._embedding_source_column().get('name')
def _embedding_source_column_name(self) ->Optional[str]:
    """Name of the embedding source column.

    None if the index is not a Databricks-managed embedding index.
    """
    source_column = self._embedding_source_column()
    return source_column.get('name')
Return the name of the embedding source column. None if the index is not a Databricks-managed embedding index.
__init__
"""Initializes the schema dictionary based on the predefined list.""" self.schema = dict() for row in schema_mapping: self.schema[row[0]] = row[1]
def __init__(self) ->None:
    """Build the schema lookup table from the predefined ``schema_mapping`` rows."""
    # Each row is a (name, value) pair.
    self.schema = {row[0]: row[1] for row in schema_mapping}
Initializes the schema dictionary based on the predefined list.
_dump_generations_to_json
"""Dump generations to json. Args: generations (RETURN_VAL_TYPE): A list of language model generations. Returns: str: Json representing a list of generations. Warning: would not work well with arbitrary subclasses of `Generation` """ return json.dumps([generation.dict() for generation in generations])
def _dump_generations_to_json(generations: RETURN_VAL_TYPE) ->str:
    """Serialize a list of generations into a JSON array string.

    Args:
        generations (RETURN_VAL_TYPE): A list of language model generations.

    Returns:
        str: Json representing a list of generations.

    Warning: would not work well with arbitrary subclasses of `Generation`
    """
    payload = [generation.dict() for generation in generations]
    return json.dumps(payload)
Dump generations to json. Args: generations (RETURN_VAL_TYPE): A list of language model generations. Returns: str: Json representing a list of generations. Warning: would not work well with arbitrary subclasses of `Generation`
__post_init__
self.youtube_client = self._build_youtube_client(self.google_api_client.creds)
def __post_init__(self) ->None:
    """Build the YouTube API client from the configured Google credentials."""
    creds = self.google_api_client.creds
    self.youtube_client = self._build_youtube_client(creds)
null
parse
"""Parse text into agent actions/finish."""
@abstractmethod
def parse(self, text: str) ->Union[List[AgentAction], AgentFinish]:
    """Parse text into agent actions/finish.

    Args:
        text: Raw model output to interpret.

    Returns:
        Either a list of actions to execute or a finish signal.
    """
Parse text into agent actions/finish.
test_redis_retriever_score_threshold
texts = ['foo', 'bar', 'baz'] docsearch = Redis.from_texts(texts, FakeEmbeddings(), redis_url=TEST_REDIS_URL) retriever = docsearch.as_retriever(search_type='similarity_score_threshold', search_kwargs={'k': 3, 'score_threshold': 0.91}) results = retriever.get_relevant_documents('foo') assert len(results) == 2 assert drop(docsearch.index_name)
def test_redis_retriever_score_threshold() ->None:
    """The retriever should drop hits scoring below the configured threshold."""
    docsearch = Redis.from_texts(['foo', 'bar', 'baz'], FakeEmbeddings(),
        redis_url=TEST_REDIS_URL)
    retriever = docsearch.as_retriever(search_type=
        'similarity_score_threshold', search_kwargs={'k': 3,
        'score_threshold': 0.91})
    # Only two of the three seeded texts clear the 0.91 threshold.
    assert len(retriever.get_relevant_documents('foo')) == 2
    assert drop(docsearch.index_name)
null
_import_arcee
from langchain_community.utilities.arcee import ArceeWrapper return ArceeWrapper
def _import_arcee() ->Any:
    """Deferred-import hook returning the ``ArceeWrapper`` utility class."""
    from langchain_community.utilities.arcee import ArceeWrapper
    return ArceeWrapper
null