method_name
stringlengths
1
78
method_body
stringlengths
3
9.66k
full_code
stringlengths
31
10.7k
docstring
stringlengths
4
4.74k
__init__
"""Initialize with webpage path and optional filter URLs. Args: web_path: url of the sitemap. can also be a local path filter_urls: a list of regexes. If specified, only URLS that match one of the filter URLs will be loaded. *WARNING* The filter URLs are interpreted as regular expressions. Remember to escape special characters if you do not want them to be interpreted as regular expression syntax. For example, `.` appears frequently in URLs and should be escaped if you want to match a literal `.` rather than any character. restrict_to_same_domain takes precedence over filter_urls when restrict_to_same_domain is True and the sitemap is not a local file. parsing_function: Function to parse bs4.Soup output blocksize: number of sitemap locations per block blocknum: the number of the block that should be loaded - zero indexed. Default: 0 meta_function: Function to parse bs4.Soup output for metadata remember when setting this method to also copy metadata["loc"] to metadata["source"] if you are using this field is_local: whether the sitemap is a local file. Default: False continue_on_failure: whether to continue loading the sitemap if an error occurs loading a url, emitting a warning instead of raising an exception. Setting this to True makes the loader more robust, but also may result in missing data. Default: False restrict_to_same_domain: whether to restrict loading to URLs to the same domain as the sitemap. Attention: This is only applied if the sitemap is not a local file! 
""" if blocksize is not None and blocksize < 1: raise ValueError('Sitemap blocksize should be at least 1') if blocknum < 0: raise ValueError('Sitemap blocknum can not be lower then 0') try: import lxml except ImportError: raise ImportError( 'lxml package not found, please install it with `pip install lxml`') super().__init__(web_paths=[web_path], **kwargs) self.allow_url_patterns = filter_urls self.restrict_to_same_domain = restrict_to_same_domain self.parsing_function = parsing_function or _default_parsing_function self.meta_function = meta_function or _default_meta_function self.blocksize = blocksize self.blocknum = blocknum self.is_local = is_local self.continue_on_failure = continue_on_failure
def __init__(self, web_path: str, filter_urls: Optional[List[str]]=None, parsing_function: Optional[Callable]=None, blocksize: Optional[int]= None, blocknum: int=0, meta_function: Optional[Callable]=None, is_local: bool=False, continue_on_failure: bool=False, restrict_to_same_domain: bool=True, **kwargs: Any): """Initialize with webpage path and optional filter URLs. Args: web_path: url of the sitemap. can also be a local path filter_urls: a list of regexes. If specified, only URLS that match one of the filter URLs will be loaded. *WARNING* The filter URLs are interpreted as regular expressions. Remember to escape special characters if you do not want them to be interpreted as regular expression syntax. For example, `.` appears frequently in URLs and should be escaped if you want to match a literal `.` rather than any character. restrict_to_same_domain takes precedence over filter_urls when restrict_to_same_domain is True and the sitemap is not a local file. parsing_function: Function to parse bs4.Soup output blocksize: number of sitemap locations per block blocknum: the number of the block that should be loaded - zero indexed. Default: 0 meta_function: Function to parse bs4.Soup output for metadata remember when setting this method to also copy metadata["loc"] to metadata["source"] if you are using this field is_local: whether the sitemap is a local file. Default: False continue_on_failure: whether to continue loading the sitemap if an error occurs loading a url, emitting a warning instead of raising an exception. Setting this to True makes the loader more robust, but also may result in missing data. Default: False restrict_to_same_domain: whether to restrict loading to URLs to the same domain as the sitemap. Attention: This is only applied if the sitemap is not a local file! 
""" if blocksize is not None and blocksize < 1: raise ValueError('Sitemap blocksize should be at least 1') if blocknum < 0: raise ValueError('Sitemap blocknum can not be lower then 0') try: import lxml except ImportError: raise ImportError( 'lxml package not found, please install it with `pip install lxml`' ) super().__init__(web_paths=[web_path], **kwargs) self.allow_url_patterns = filter_urls self.restrict_to_same_domain = restrict_to_same_domain self.parsing_function = parsing_function or _default_parsing_function self.meta_function = meta_function or _default_meta_function self.blocksize = blocksize self.blocknum = blocknum self.is_local = is_local self.continue_on_failure = continue_on_failure
Initialize with webpage path and optional filter URLs. Args: web_path: url of the sitemap. can also be a local path filter_urls: a list of regexes. If specified, only URLS that match one of the filter URLs will be loaded. *WARNING* The filter URLs are interpreted as regular expressions. Remember to escape special characters if you do not want them to be interpreted as regular expression syntax. For example, `.` appears frequently in URLs and should be escaped if you want to match a literal `.` rather than any character. restrict_to_same_domain takes precedence over filter_urls when restrict_to_same_domain is True and the sitemap is not a local file. parsing_function: Function to parse bs4.Soup output blocksize: number of sitemap locations per block blocknum: the number of the block that should be loaded - zero indexed. Default: 0 meta_function: Function to parse bs4.Soup output for metadata remember when setting this method to also copy metadata["loc"] to metadata["source"] if you are using this field is_local: whether the sitemap is a local file. Default: False continue_on_failure: whether to continue loading the sitemap if an error occurs loading a url, emitting a warning instead of raising an exception. Setting this to True makes the loader more robust, but also may result in missing data. Default: False restrict_to_same_domain: whether to restrict loading to URLs to the same domain as the sitemap. Attention: This is only applied if the sitemap is not a local file!
sample_gdf
import geopandas path_to_data = geopandas.datasets.get_path('nybb') gdf = geopandas.read_file(path_to_data) gdf['area'] = gdf.area gdf['crs'] = gdf.crs.to_string() return gdf.head(2)
@pytest.mark.requires('geopandas') def sample_gdf() ->GeoDataFrame: import geopandas path_to_data = geopandas.datasets.get_path('nybb') gdf = geopandas.read_file(path_to_data) gdf['area'] = gdf.area gdf['crs'] = gdf.crs.to_string() return gdf.head(2)
null
output_keys
"""Input keys this chain returns.""" return self.output_variables
@property def output_keys(self) ->List[str]: """Input keys this chain returns.""" return self.output_variables
Input keys this chain returns.
validate_top_k
if value < 0: raise ValueError(f'top_k ({value}) cannot be negative.') return value
@validator('top_k') def validate_top_k(cls, value: int) ->int: if value < 0: raise ValueError(f'top_k ({value}) cannot be negative.') return value
null
delete
"""Delete by vector IDs. Args: ids: List of ids to delete. """ if ids is None: raise ValueError('No ids provided to delete.') for id in ids: self._client.data_object.delete(uuid=id)
def delete(self, ids: Optional[List[str]]=None, **kwargs: Any) ->None: """Delete by vector IDs. Args: ids: List of ids to delete. """ if ids is None: raise ValueError('No ids provided to delete.') for id in ids: self._client.data_object.delete(uuid=id)
Delete by vector IDs. Args: ids: List of ids to delete.
_run
return f'{arg1} {arg2} {arg3}'
def _run(self, arg1: int, arg2: bool, arg3: Optional[dict]=None) ->str: return f'{arg1} {arg2} {arg3}'
null
comment_on_issue
""" Adds a comment to a github issue Parameters: comment_query(str): a string which contains the issue number, two newlines, and the comment. for example: "1 Working on it now" adds the comment "working on it now" to issue 1 Returns: str: A success or failure message """ issue_number = int(comment_query.split('\n\n')[0]) comment = comment_query[len(str(issue_number)) + 2:] try: issue = self.github_repo_instance.get_issue(number=issue_number) issue.create_comment(comment) return 'Commented on issue ' + str(issue_number) except Exception as e: return 'Unable to make comment due to error:\n' + str(e)
def comment_on_issue(self, comment_query: str) ->str: """ Adds a comment to a github issue Parameters: comment_query(str): a string which contains the issue number, two newlines, and the comment. for example: "1 Working on it now" adds the comment "working on it now" to issue 1 Returns: str: A success or failure message """ issue_number = int(comment_query.split('\n\n')[0]) comment = comment_query[len(str(issue_number)) + 2:] try: issue = self.github_repo_instance.get_issue(number=issue_number) issue.create_comment(comment) return 'Commented on issue ' + str(issue_number) except Exception as e: return 'Unable to make comment due to error:\n' + str(e)
Adds a comment to a github issue Parameters: comment_query(str): a string which contains the issue number, two newlines, and the comment. for example: "1 Working on it now" adds the comment "working on it now" to issue 1 Returns: str: A success or failure message
from_agent_and_tools
"""Create from agent and tools.""" return cls(agent=agent, tools=tools, callbacks=callbacks, **kwargs)
@classmethod def from_agent_and_tools(cls, agent: Union[BaseSingleActionAgent, BaseMultiActionAgent], tools: Sequence[BaseTool], callbacks: Callbacks= None, **kwargs: Any) ->AgentExecutor: """Create from agent and tools.""" return cls(agent=agent, tools=tools, callbacks=callbacks, **kwargs)
Create from agent and tools.
_load_pubmed_from_universal_entry
from langchain.agents.load_tools import load_tools tools = load_tools(['pubmed'], **kwargs) assert len(tools) == 1, 'loaded more than 1 tool' return tools[0]
def _load_pubmed_from_universal_entry(**kwargs: Any) ->BaseTool: from langchain.agents.load_tools import load_tools tools = load_tools(['pubmed'], **kwargs) assert len(tools) == 1, 'loaded more than 1 tool' return tools[0]
null
get_full_inputs
"""Create the full inputs for the LLMChain from intermediate steps.""" thoughts = self._construct_scratchpad(intermediate_steps) new_inputs = {'agent_scratchpad': thoughts, 'stop': self._stop} full_inputs = {**kwargs, **new_inputs} return full_inputs
def get_full_inputs(self, intermediate_steps: List[Tuple[AgentAction, str]], **kwargs: Any) ->Dict[str, Any]: """Create the full inputs for the LLMChain from intermediate steps.""" thoughts = self._construct_scratchpad(intermediate_steps) new_inputs = {'agent_scratchpad': thoughts, 'stop': self._stop} full_inputs = {**kwargs, **new_inputs} return full_inputs
Create the full inputs for the LLMChain from intermediate steps.
validate_environment
"""Validate that api key and python package exists in environment.""" deepinfra_api_token = get_from_dict_or_env(values, 'deepinfra_api_token', 'DEEPINFRA_API_TOKEN') values['deepinfra_api_token'] = deepinfra_api_token return values
@root_validator() def validate_environment(cls, values: Dict) ->Dict: """Validate that api key and python package exists in environment.""" deepinfra_api_token = get_from_dict_or_env(values, 'deepinfra_api_token', 'DEEPINFRA_API_TOKEN') values['deepinfra_api_token'] = deepinfra_api_token return values
Validate that api key and python package exists in environment.
_get_resource
endpoint = STRIPE_ENDPOINTS.get(self.resource) if endpoint is None: return [] return self._make_request(endpoint)
def _get_resource(self) ->List[Document]: endpoint = STRIPE_ENDPOINTS.get(self.resource) if endpoint is None: return [] return self._make_request(endpoint)
null
_search
""" Perform a search using the query embedding and return top_k documents. Args: query_emb: Query represented as an embedding top_k: Number of documents to return Returns: A list of top_k documents matching the query """ from docarray.index import ElasticDocIndex, WeaviateDocumentIndex filter_args = {} search_field = self.search_field if isinstance(self.index, WeaviateDocumentIndex): filter_args['where_filter'] = self.filters search_field = '' elif isinstance(self.index, ElasticDocIndex): filter_args['query'] = self.filters else: filter_args['filter_query'] = self.filters if self.filters: query = self.index.build_query().find(query=query_emb, search_field= search_field).filter(**filter_args).build(limit=top_k) docs = self.index.execute_query(query) if hasattr(docs, 'documents'): docs = docs.documents docs = docs[:top_k] else: docs = self.index.find(query=query_emb, search_field=search_field, limit=top_k).documents return docs
def _search(self, query_emb: np.ndarray, top_k: int) ->List[Union[Dict[str, Any], Any]]: """ Perform a search using the query embedding and return top_k documents. Args: query_emb: Query represented as an embedding top_k: Number of documents to return Returns: A list of top_k documents matching the query """ from docarray.index import ElasticDocIndex, WeaviateDocumentIndex filter_args = {} search_field = self.search_field if isinstance(self.index, WeaviateDocumentIndex): filter_args['where_filter'] = self.filters search_field = '' elif isinstance(self.index, ElasticDocIndex): filter_args['query'] = self.filters else: filter_args['filter_query'] = self.filters if self.filters: query = self.index.build_query().find(query=query_emb, search_field =search_field).filter(**filter_args).build(limit=top_k) docs = self.index.execute_query(query) if hasattr(docs, 'documents'): docs = docs.documents docs = docs[:top_k] else: docs = self.index.find(query=query_emb, search_field=search_field, limit=top_k).documents return docs
Perform a search using the query embedding and return top_k documents. Args: query_emb: Query represented as an embedding top_k: Number of documents to return Returns: A list of top_k documents matching the query
add_texts
batch_size = kwargs.get('batch_size', DEFAULT_INSERT_BATCH_SIZE) _metadatas: Union[List, Generator] = metadatas or ({} for _ in texts) texts_batch = [] metadatas_batch = [] result_ids = [] for i, (text, metadata) in enumerate(zip(texts, _metadatas)): texts_batch.append(text) metadatas_batch.append(metadata) if (i + 1) % batch_size == 0: result_ids.extend(self._insert_texts(texts_batch, metadatas_batch)) texts_batch = [] metadatas_batch = [] if texts_batch: result_ids.extend(self._insert_texts(texts_batch, metadatas_batch)) return result_ids
def add_texts(self, texts: Iterable[str], metadatas: Optional[List[Dict[str, Any]]]=None, **kwargs: Any) ->List: batch_size = kwargs.get('batch_size', DEFAULT_INSERT_BATCH_SIZE) _metadatas: Union[List, Generator] = metadatas or ({} for _ in texts) texts_batch = [] metadatas_batch = [] result_ids = [] for i, (text, metadata) in enumerate(zip(texts, _metadatas)): texts_batch.append(text) metadatas_batch.append(metadata) if (i + 1) % batch_size == 0: result_ids.extend(self._insert_texts(texts_batch, metadatas_batch)) texts_batch = [] metadatas_batch = [] if texts_batch: result_ids.extend(self._insert_texts(texts_batch, metadatas_batch)) return result_ids
null
test__init__
"""Test initialization from init.""" loader = GenericLoader(FileSystemBlobLoader(toy_dir, suffixes=['.txt']), AsIsParser()) docs = loader.load() assert len(docs) == 3 assert docs[0].page_content == 'This is a test.txt file.'
def test__init__(toy_dir: str) ->None: """Test initialization from init.""" loader = GenericLoader(FileSystemBlobLoader(toy_dir, suffixes=['.txt']), AsIsParser()) docs = loader.load() assert len(docs) == 3 assert docs[0].page_content == 'This is a test.txt file.'
Test initialization from init.
process_pdf
try: import pytesseract from pdf2image import convert_from_bytes except ImportError: raise ImportError( '`pytesseract` or `pdf2image` package not found, please run `pip install pytesseract pdf2image`' ) response = self.confluence.request(path=link, absolute=True) text = '' if response.status_code != 200 or response.content == b'' or response.content is None: return text try: images = convert_from_bytes(response.content) except ValueError: return text for i, image in enumerate(images): image_text = pytesseract.image_to_string(image, lang=ocr_languages) text += f'Page {i + 1}:\n{image_text}\n\n' return text
def process_pdf(self, link: str, ocr_languages: Optional[str]=None) ->str: try: import pytesseract from pdf2image import convert_from_bytes except ImportError: raise ImportError( '`pytesseract` or `pdf2image` package not found, please run `pip install pytesseract pdf2image`' ) response = self.confluence.request(path=link, absolute=True) text = '' if (response.status_code != 200 or response.content == b'' or response. content is None): return text try: images = convert_from_bytes(response.content) except ValueError: return text for i, image in enumerate(images): image_text = pytesseract.image_to_string(image, lang=ocr_languages) text += f'Page {i + 1}:\n{image_text}\n\n' return text
null
test_append
zep_chat.add_message(AIMessage(content='test message')) zep_chat.zep_client.memory.add_memory.assert_called_once()
@pytest.mark.requires('zep_python') def test_append(mocker: MockerFixture, zep_chat: ZepChatMessageHistory) ->None: zep_chat.add_message(AIMessage(content='test message')) zep_chat.zep_client.memory.add_memory.assert_called_once()
null
create_spark_dataframe_agent
"""Construct a Spark agent from an LLM and dataframe.""" if not _validate_spark_df(df) and not _validate_spark_connect_df(df): raise ImportError('Spark is not installed. run `pip install pyspark`.') if input_variables is None: input_variables = ['df', 'input', 'agent_scratchpad'] tools = [PythonAstREPLTool(locals={'df': df})] prompt = ZeroShotAgent.create_prompt(tools, prefix=prefix, suffix=suffix, input_variables=input_variables) partial_prompt = prompt.partial(df=str(df.first())) llm_chain = LLMChain(llm=llm, prompt=partial_prompt, callback_manager= callback_manager) tool_names = [tool.name for tool in tools] agent = ZeroShotAgent(llm_chain=llm_chain, allowed_tools=tool_names, callback_manager=callback_manager, **kwargs) return AgentExecutor.from_agent_and_tools(agent=agent, tools=tools, callback_manager=callback_manager, verbose=verbose, return_intermediate_steps=return_intermediate_steps, max_iterations= max_iterations, max_execution_time=max_execution_time, early_stopping_method=early_stopping_method, **agent_executor_kwargs or {})
def create_spark_dataframe_agent(llm: BaseLLM, df: Any, callback_manager: Optional[BaseCallbackManager]=None, prefix: str=PREFIX, suffix: str= SUFFIX, input_variables: Optional[List[str]]=None, verbose: bool=False, return_intermediate_steps: bool=False, max_iterations: Optional[int]=15, max_execution_time: Optional[float]=None, early_stopping_method: str= 'force', agent_executor_kwargs: Optional[Dict[str, Any]]=None, **kwargs: Any) ->AgentExecutor: """Construct a Spark agent from an LLM and dataframe.""" if not _validate_spark_df(df) and not _validate_spark_connect_df(df): raise ImportError('Spark is not installed. run `pip install pyspark`.') if input_variables is None: input_variables = ['df', 'input', 'agent_scratchpad'] tools = [PythonAstREPLTool(locals={'df': df})] prompt = ZeroShotAgent.create_prompt(tools, prefix=prefix, suffix= suffix, input_variables=input_variables) partial_prompt = prompt.partial(df=str(df.first())) llm_chain = LLMChain(llm=llm, prompt=partial_prompt, callback_manager= callback_manager) tool_names = [tool.name for tool in tools] agent = ZeroShotAgent(llm_chain=llm_chain, allowed_tools=tool_names, callback_manager=callback_manager, **kwargs) return AgentExecutor.from_agent_and_tools(agent=agent, tools=tools, callback_manager=callback_manager, verbose=verbose, return_intermediate_steps=return_intermediate_steps, max_iterations =max_iterations, max_execution_time=max_execution_time, early_stopping_method=early_stopping_method, ** agent_executor_kwargs or {})
Construct a Spark agent from an LLM and dataframe.
__copy__
"""Copy the tracer.""" return self
def __copy__(self) ->BaseTracer: """Copy the tracer.""" return self
Copy the tracer.
_create_retry_decorator
min_seconds = 1 max_seconds = 4 return retry(reraise=True, stop=stop_after_attempt(llm.max_retries), wait= wait_exponential(multiplier=1, min=min_seconds, max=max_seconds), retry =retry_if_exception_type(HTTPError), before_sleep=before_sleep_log( logger, logging.WARNING))
def _create_retry_decorator(llm: Tongyi) ->Callable[[Any], Any]: min_seconds = 1 max_seconds = 4 return retry(reraise=True, stop=stop_after_attempt(llm.max_retries), wait=wait_exponential(multiplier=1, min=min_seconds, max= max_seconds), retry=retry_if_exception_type(HTTPError), before_sleep=before_sleep_log(logger, logging.WARNING))
null
_get_llm_math
return Tool(name='Calculator', description= 'Useful for when you need to answer questions about math.', func= LLMMathChain.from_llm(llm=llm).run, coroutine=LLMMathChain.from_llm(llm =llm).arun)
def _get_llm_math(llm: BaseLanguageModel) ->BaseTool: return Tool(name='Calculator', description= 'Useful for when you need to answer questions about math.', func= LLMMathChain.from_llm(llm=llm).run, coroutine=LLMMathChain.from_llm (llm=llm).arun)
null
_submit
"""Submit a function to the executor.""" if self.executor is None: function(run) else: self._futures.add(self.executor.submit(function, run))
def _submit(self, function: Callable[[Run], None], run: Run) ->None: """Submit a function to the executor.""" if self.executor is None: function(run) else: self._futures.add(self.executor.submit(function, run))
Submit a function to the executor.
search_api
"""Search the API for the query.""" assert isinstance(query, str) return f'API result - {query}'
@tool('search') def search_api(query: str) ->str: """Search the API for the query.""" assert isinstance(query, str) return f'API result - {query}'
Search the API for the query.
validate_prompt_input_variables
"""Validate that prompt input variables are consistent.""" prompt_variables = values['prompt'].input_variables expected_keys = {'summary', 'new_lines'} if expected_keys != set(prompt_variables): raise ValueError( f'Got unexpected prompt input variables. The prompt expects {prompt_variables}, but it should have {expected_keys}.' ) return values
@root_validator() def validate_prompt_input_variables(cls, values: Dict) ->Dict: """Validate that prompt input variables are consistent.""" prompt_variables = values['prompt'].input_variables expected_keys = {'summary', 'new_lines'} if expected_keys != set(prompt_variables): raise ValueError( f'Got unexpected prompt input variables. The prompt expects {prompt_variables}, but it should have {expected_keys}.' ) return values
Validate that prompt input variables are consistent.
format_prompt
for k, prompt in self.pipeline_prompts: _inputs = _get_inputs(kwargs, prompt.input_variables) if isinstance(prompt, BaseChatPromptTemplate): kwargs[k] = prompt.format_messages(**_inputs) else: kwargs[k] = prompt.format(**_inputs) _inputs = _get_inputs(kwargs, self.final_prompt.input_variables) return self.final_prompt.format_prompt(**_inputs)
def format_prompt(self, **kwargs: Any) ->PromptValue: for k, prompt in self.pipeline_prompts: _inputs = _get_inputs(kwargs, prompt.input_variables) if isinstance(prompt, BaseChatPromptTemplate): kwargs[k] = prompt.format_messages(**_inputs) else: kwargs[k] = prompt.format(**_inputs) _inputs = _get_inputs(kwargs, self.final_prompt.input_variables) return self.final_prompt.format_prompt(**_inputs)
null
_fstring_JoinedStr
for value in t.values: meth = getattr(self, '_fstring_' + type(value).__name__) meth(value, write)
def _fstring_JoinedStr(self, t, write): for value in t.values: meth = getattr(self, '_fstring_' + type(value).__name__) meth(value, write)
null
test_similarity_search_with_metadata
"""Test end to end construction and search with metadata.""" texts = ['foo', 'bar', 'baz'] metadatas = [{'page': i} for i in range(len(texts))] docsearch = ElasticsearchStore.from_texts(texts, ConsistentFakeEmbeddings(), metadatas=metadatas, **elasticsearch_connection, index_name=index_name) output = docsearch.similarity_search('foo', k=1) assert output == [Document(page_content='foo', metadata={'page': 0})] output = docsearch.similarity_search('bar', k=1) assert output == [Document(page_content='bar', metadata={'page': 1})]
def test_similarity_search_with_metadata(self, elasticsearch_connection: dict, index_name: str) ->None: """Test end to end construction and search with metadata.""" texts = ['foo', 'bar', 'baz'] metadatas = [{'page': i} for i in range(len(texts))] docsearch = ElasticsearchStore.from_texts(texts, ConsistentFakeEmbeddings(), metadatas=metadatas, ** elasticsearch_connection, index_name=index_name) output = docsearch.similarity_search('foo', k=1) assert output == [Document(page_content='foo', metadata={'page': 0})] output = docsearch.similarity_search('bar', k=1) assert output == [Document(page_content='bar', metadata={'page': 1})]
Test end to end construction and search with metadata.
on_chain_end
"""Run when chain ends running.""" self.metrics['step'] += 1 self.metrics['chain_ends'] += 1 self.metrics['ends'] += 1 chain_ends = self.metrics['chain_ends'] resp: Dict[str, Any] = {} chain_output = ','.join([f'{k}={v}' for k, v in outputs.items()]) resp.update({'action': 'on_chain_end', 'outputs': chain_output}) resp.update(self.metrics) self.mlflg.metrics(self.metrics, step=self.metrics['step']) self.records['on_chain_end_records'].append(resp) self.records['action_records'].append(resp) self.mlflg.jsonf(resp, f'chain_end_{chain_ends}')
def on_chain_end(self, outputs: Dict[str, Any], **kwargs: Any) ->None: """Run when chain ends running.""" self.metrics['step'] += 1 self.metrics['chain_ends'] += 1 self.metrics['ends'] += 1 chain_ends = self.metrics['chain_ends'] resp: Dict[str, Any] = {} chain_output = ','.join([f'{k}={v}' for k, v in outputs.items()]) resp.update({'action': 'on_chain_end', 'outputs': chain_output}) resp.update(self.metrics) self.mlflg.metrics(self.metrics, step=self.metrics['step']) self.records['on_chain_end_records'].append(resp) self.records['action_records'].append(resp) self.mlflg.jsonf(resp, f'chain_end_{chain_ends}')
Run when chain ends running.
_import_vectorstore_tool_VectorStoreQAWithSourcesTool
from langchain_community.tools.vectorstore.tool import VectorStoreQAWithSourcesTool return VectorStoreQAWithSourcesTool
def _import_vectorstore_tool_VectorStoreQAWithSourcesTool() ->Any: from langchain_community.tools.vectorstore.tool import VectorStoreQAWithSourcesTool return VectorStoreQAWithSourcesTool
null
test_integration_question
"""Test question about integration that needs sympy""" question = 'What is the integral of e^-x from 0 to infinity?' output = fake_llm_symbolic_math_chain.run(question) assert output == 'Answer: 1'
def test_integration_question(fake_llm_symbolic_math_chain: LLMSymbolicMathChain) ->None: """Test question about integration that needs sympy""" question = 'What is the integral of e^-x from 0 to infinity?' output = fake_llm_symbolic_math_chain.run(question) assert output == 'Answer: 1'
Test question about integration that needs sympy
reset_deanonymizer_mapping
"""Abstract method to reset deanonymizer mapping"""
@abstractmethod def reset_deanonymizer_mapping(self) ->None: """Abstract method to reset deanonymizer mapping"""
Abstract method to reset deanonymizer mapping
lazy_load
"""Lazy load Documents from URLs.""" for url in self.urls: if self.text_content: response = requests.post('https://chrome.browserless.io/scrape', params={'token': self.api_token}, json={'url': url, 'elements': [{'selector': 'body'}]}) yield Document(page_content=response.json()['data'][0]['results'][0 ]['text'], metadata={'source': url}) else: response = requests.post('https://chrome.browserless.io/content', params={'token': self.api_token}, json={'url': url}) yield Document(page_content=response.text, metadata={'source': url})
def lazy_load(self) ->Iterator[Document]: """Lazy load Documents from URLs.""" for url in self.urls: if self.text_content: response = requests.post('https://chrome.browserless.io/scrape', params={'token': self.api_token}, json={'url': url, 'elements': [{'selector': 'body'}]}) yield Document(page_content=response.json()['data'][0][ 'results'][0]['text'], metadata={'source': url}) else: response = requests.post('https://chrome.browserless.io/content', params={'token': self.api_token}, json={'url': url}) yield Document(page_content=response.text, metadata={'source': url} )
Lazy load Documents from URLs.
test_astradb_vectorstore_metadata
"""Metadata filtering.""" store_someemb.add_documents([Document(page_content='q', metadata={'ord': ord('q'), 'group': 'consonant'}), Document(page_content='w', metadata={ 'ord': ord('w'), 'group': 'consonant'}), Document(page_content='r', metadata={'ord': ord('r'), 'group': 'consonant'}), Document( page_content='e', metadata={'ord': ord('e'), 'group': 'vowel'}), Document(page_content='i', metadata={'ord': ord('i'), 'group': 'vowel'} ), Document(page_content='o', metadata={'ord': ord('o'), 'group': 'vowel'})]) res0 = store_someemb.similarity_search('x', k=10) assert {doc.page_content for doc in res0} == set('qwreio') res1 = store_someemb.similarity_search('x', k=10, filter={'group': 'vowel'}) assert {doc.page_content for doc in res1} == set('eio') res2 = store_someemb.similarity_search('x', k=10, filter={'group': 'consonant', 'ord': ord('q')}) assert {doc.page_content for doc in res2} == set('q') res3 = store_someemb.similarity_search('x', k=10, filter={'group': 'consonant', 'ord': ord('q'), 'case': 'upper'}) assert res3 == []
def test_astradb_vectorstore_metadata(self, store_someemb: AstraDB) ->None: """Metadata filtering.""" store_someemb.add_documents([Document(page_content='q', metadata={'ord': ord('q'), 'group': 'consonant'}), Document(page_content='w', metadata={'ord': ord('w'), 'group': 'consonant'}), Document( page_content='r', metadata={'ord': ord('r'), 'group': 'consonant'}), Document(page_content='e', metadata={'ord': ord('e'), 'group': 'vowel'}), Document(page_content='i', metadata={'ord': ord('i'), 'group': 'vowel'}), Document(page_content='o', metadata={'ord': ord ('o'), 'group': 'vowel'})]) res0 = store_someemb.similarity_search('x', k=10) assert {doc.page_content for doc in res0} == set('qwreio') res1 = store_someemb.similarity_search('x', k=10, filter={'group': 'vowel'} ) assert {doc.page_content for doc in res1} == set('eio') res2 = store_someemb.similarity_search('x', k=10, filter={'group': 'consonant', 'ord': ord('q')}) assert {doc.page_content for doc in res2} == set('q') res3 = store_someemb.similarity_search('x', k=10, filter={'group': 'consonant', 'ord': ord('q'), 'case': 'upper'}) assert res3 == []
Metadata filtering.
validate_environment
"""Validate that api key and python package exists in environment.""" openai_api_key = get_from_dict_or_env(values, 'openai_api_key', 'OPENAI_API_KEY') openai_organization = get_from_dict_or_env(values, 'openai_organization', 'OPENAI_ORGANIZATION', default='') try: import openai openai.api_key = openai_api_key if openai_organization: openai.organization = openai_organization values['client'] = openai.Moderation except ImportError: raise ImportError( 'Could not import openai python package. Please install it with `pip install openai`.' ) return values
@root_validator()
def validate_environment(cls, values: Dict) ->Dict:
    """Validate that api key and python package exists in environment."""
    api_key = get_from_dict_or_env(values, 'openai_api_key', 'OPENAI_API_KEY')
    organization = get_from_dict_or_env(values, 'openai_organization',
        'OPENAI_ORGANIZATION', default='')
    try:
        import openai
    except ImportError:
        raise ImportError(
            'Could not import openai python package. Please install it with `pip install openai`.'
            )
    # Configure the module-level client and expose the moderation endpoint.
    openai.api_key = api_key
    if organization:
        openai.organization = organization
    values['client'] = openai.Moderation
    return values
Validate that api key and python package exists in environment.
to_doc
"""Converts this item to a Document.""" page_content = page_content_formatter(self) metadata = self.get_additional_metadata() metadata.update({'result_id': self.Id, 'document_id': self.DocumentId, 'source': self.DocumentURI, 'title': self.get_title(), 'excerpt': self. get_excerpt(), 'document_attributes': self.get_document_attributes_dict()}) return Document(page_content=page_content, metadata=metadata)
def to_doc(self, page_content_formatter: Callable[['ResultItem'], str]=
    combined_text) ->Document:
    """Converts this item to a Document."""
    # Base metadata comes from the subclass hook, then the shared fields.
    metadata = self.get_additional_metadata()
    metadata.update({
        'result_id': self.Id,
        'document_id': self.DocumentId,
        'source': self.DocumentURI,
        'title': self.get_title(),
        'excerpt': self.get_excerpt(),
        'document_attributes': self.get_document_attributes_dict(),
    })
    return Document(page_content=page_content_formatter(self), metadata=
        metadata)
Converts this item to a Document.
on_tool_start
"""Run when tool starts running.""" self.step += 1 self.tool_starts += 1 self.starts += 1 resp = self._init_resp() resp.update({'action': 'on_tool_start', 'input_str': input_str}) resp.update(flatten_dict(serialized)) resp.update(self.get_custom_callback_meta()) self.on_tool_start_records.append(resp) self.action_records.append(resp) if self.stream_logs: self.run.log(resp)
def on_tool_start(self, serialized: Dict[str, Any], input_str: str, **
    kwargs: Any) ->None:
    """Run when tool starts running."""
    # Bump the shared step/start counters before recording the event.
    self.step += 1
    self.tool_starts += 1
    self.starts += 1
    record = self._init_resp()
    record.update({'action': 'on_tool_start', 'input_str': input_str})
    record.update(flatten_dict(serialized))
    record.update(self.get_custom_callback_meta())
    # The same record goes to both the per-event and the global streams.
    self.on_tool_start_records.append(record)
    self.action_records.append(record)
    if self.stream_logs:
        self.run.log(record)
Run when tool starts running.
test_vertex_generate
llm = VertexAI(temperature=0.3, n=2, model_name='text-bison@001') output = llm.generate(['Say foo:']) assert isinstance(output, LLMResult) assert len(output.generations) == 1 assert len(output.generations[0]) == 2
@pytest.mark.scheduled
def test_vertex_generate() ->None:
    """With n=2, one prompt should yield two candidate generations."""
    llm = VertexAI(temperature=0.3, n=2, model_name='text-bison@001')
    result = llm.generate(['Say foo:'])
    assert isinstance(result, LLMResult)
    assert len(result.generations) == 1
    assert len(result.generations[0]) == 2
null
lazy_load
"""Lazy load given path as pages.""" if self.web_path: blob = Blob.from_data(open(self.file_path, 'rb').read(), path=self.web_path ) else: blob = Blob.from_path(self.file_path) yield from self.parser.parse_folder(blob)
def lazy_load(self) ->Iterator[Document]:
    """Lazy load given path as pages.

    If ``self.web_path`` is set, the local file's bytes are wrapped in a
    Blob that records the web path as its source; otherwise the blob is
    built directly from the local path.
    """
    if self.web_path:
        # Use a context manager so the file handle is closed promptly
        # instead of leaking until garbage collection.
        with open(self.file_path, 'rb') as f:
            blob = Blob.from_data(f.read(), path=self.web_path)
    else:
        blob = Blob.from_path(self.file_path)
    yield from self.parser.parse_folder(blob)
Lazy load given path as pages.
_lambda
if x == 1: raise ValueError('x is 1') elif x == 2: raise RuntimeError('x is 2') else: return x
def _lambda(x: int) ->Union[int, Runnable]:
    # Sentinel inputs trigger distinct error types; anything else passes
    # straight through.
    if x == 1:
        raise ValueError('x is 1')
    if x == 2:
        raise RuntimeError('x is 2')
    return x
null
login
"""Authenticate using the Slack API.""" try: from slack_sdk import WebClient except ImportError as e: raise ImportError( 'Cannot import slack_sdk. Please install the package with `pip install slack_sdk`.' ) from e if 'SLACK_BOT_TOKEN' in os.environ: token = os.environ['SLACK_BOT_TOKEN'] client = WebClient(token=token) logger.info('slack login success') return client elif 'SLACK_USER_TOKEN' in os.environ: token = os.environ['SLACK_USER_TOKEN'] client = WebClient(token=token) logger.info('slack login success') return client else: logger.error( 'Error: The SLACK_BOT_TOKEN or SLACK_USER_TOKEN environment variable have not been set.' )
def login() ->WebClient:
    """Authenticate using the Slack API."""
    try:
        from slack_sdk import WebClient
    except ImportError as e:
        raise ImportError(
            'Cannot import slack_sdk. Please install the package with `pip install slack_sdk`.'
            ) from e
    # Prefer a bot token, then fall back to a user token.
    for env_var in ('SLACK_BOT_TOKEN', 'SLACK_USER_TOKEN'):
        if env_var in os.environ:
            client = WebClient(token=os.environ[env_var])
            logger.info('slack login success')
            return client
    # Neither token is configured: log and (implicitly) return None,
    # matching the original behavior.
    logger.error(
        'Error: The SLACK_BOT_TOKEN or SLACK_USER_TOKEN environment variable have not been set.'
        )
Authenticate using the Slack API.
_extract_scheme_and_domain
"""Extract the scheme + domain from a given URL. Args: url (str): The input URL. Returns: return a 2-tuple of scheme and domain """ parsed_uri = urlparse(url) return parsed_uri.scheme, parsed_uri.netloc
def _extract_scheme_and_domain(url: str) ->Tuple[str, str]:
    """Extract the scheme + domain from a given URL.

    Args:
        url (str): The input URL.

    Returns:
        return a 2-tuple of scheme and domain
    """
    parts = urlparse(url)
    scheme, domain = parts.scheme, parts.netloc
    return scheme, domain
Extract the scheme + domain from a given URL. Args: url (str): The input URL. Returns: return a 2-tuple of scheme and domain
_get_notes
has_more = True page = 1 while has_more: req_note = urllib.request.Request(self._get_note_url.format(page=page)) with urllib.request.urlopen(req_note) as response: json_data = json.loads(response.read().decode()) for note in json_data['items']: metadata = {'source': LINK_NOTE_TEMPLATE.format(id=note['id']), 'folder': self._get_folder(note['parent_id']), 'tags': self ._get_tags(note['id']), 'title': note['title'], 'created_time': self._convert_date(note['created_time']), 'updated_time': self._convert_date(note['updated_time'])} yield Document(page_content=note['body'], metadata=metadata) has_more = json_data['has_more'] page += 1
def _get_notes(self) ->Iterator[Document]:
    # Walk Joplin's paginated notes endpoint until it reports no more pages.
    page = 1
    while True:
        request = urllib.request.Request(self._get_note_url.format(page=page))
        with urllib.request.urlopen(request) as response:
            payload = json.loads(response.read().decode())
        for note in payload['items']:
            yield Document(page_content=note['body'], metadata={
                'source': LINK_NOTE_TEMPLATE.format(id=note['id']),
                'folder': self._get_folder(note['parent_id']),
                'tags': self._get_tags(note['id']),
                'title': note['title'],
                'created_time': self._convert_date(note['created_time']),
                'updated_time': self._convert_date(note['updated_time']),
            })
        if not payload['has_more']:
            break
        page += 1
null
test_dir
return Path(os.path.join(PROJECT_DIR, 'tests', 'integration_tests'))
@pytest.fixture(scope='module')
def test_dir() ->Path:
    # Module-scoped path to the integration-test tree.
    return Path(PROJECT_DIR) / 'tests' / 'integration_tests'
null
save_context
"""Save context from this conversation to buffer.""" input_str, output_str = self._get_input_output(inputs, outputs) self.chat_memory.add_user_message(input_str) self.chat_memory.add_ai_message(output_str)
def save_context(self, inputs: Dict[str, Any], outputs: Dict[str, str]) ->None:
    """Save context from this conversation to buffer."""
    # Split the turn into the human prompt and the AI reply, then append both.
    human_text, ai_text = self._get_input_output(inputs, outputs)
    self.chat_memory.add_user_message(human_text)
    self.chat_memory.add_ai_message(ai_text)
Save context from this conversation to buffer.
similarity_search
"""Run similarity search Args: query (str): The query text for which to find similar documents. k (int): The number of documents to return. Default is 4. filter (RedisFilterExpression, optional): Optional metadata filter. Defaults to None. return_metadata (bool, optional): Whether to return metadata. Defaults to True. distance_threshold (Optional[float], optional): Maximum vector distance between selected documents and the query vector. Defaults to None. Returns: List[Document]: A list of documents that are most similar to the query text. """ query_embedding = self._embeddings.embed_query(query) return self.similarity_search_by_vector(query_embedding, k=k, filter=filter, return_metadata=return_metadata, distance_threshold=distance_threshold, **kwargs)
def similarity_search(self, query: str, k: int=4, filter: Optional[
    RedisFilterExpression]=None, return_metadata: bool=True,
    distance_threshold: Optional[float]=None, **kwargs: Any) ->List[Document]:
    """Run similarity search

    Args:
        query (str): The query text for which to find similar documents.
        k (int): The number of documents to return. Default is 4.
        filter (RedisFilterExpression, optional): Optional metadata filter.
            Defaults to None.
        return_metadata (bool, optional): Whether to return metadata.
            Defaults to True.
        distance_threshold (Optional[float], optional): Maximum vector
            distance between selected documents and the query vector.
            Defaults to None.

    Returns:
        List[Document]: A list of documents that are most similar to the
            query text.
    """
    # Embed the query once, then defer to the vector-based search.
    embedded_query = self._embeddings.embed_query(query)
    return self.similarity_search_by_vector(embedded_query, k=k, filter=
        filter, return_metadata=return_metadata, distance_threshold=
        distance_threshold, **kwargs)
Run similarity search Args: query (str): The query text for which to find similar documents. k (int): The number of documents to return. Default is 4. filter (RedisFilterExpression, optional): Optional metadata filter. Defaults to None. return_metadata (bool, optional): Whether to return metadata. Defaults to True. distance_threshold (Optional[float], optional): Maximum vector distance between selected documents and the query vector. Defaults to None. Returns: List[Document]: A list of documents that are most similar to the query text.
test_chat_openai_streaming
"""Test that streaming correctly invokes on_llm_new_token callback.""" callback_handler = FakeCallbackHandler() callback_manager = CallbackManager([callback_handler]) chat = ChatOpenAI(max_tokens=10, streaming=True, temperature=0, callback_manager=callback_manager, verbose=True) message = HumanMessage(content='Hello') response = chat([message]) assert callback_handler.llm_streams > 0 assert isinstance(response, BaseMessage)
@pytest.mark.scheduled
def test_chat_openai_streaming() ->None:
    """Test that streaming correctly invokes on_llm_new_token callback."""
    handler = FakeCallbackHandler()
    chat = ChatOpenAI(max_tokens=10, streaming=True, temperature=0,
        callback_manager=CallbackManager([handler]), verbose=True)
    response = chat([HumanMessage(content='Hello')])
    # Streaming must have emitted at least one token callback.
    assert handler.llm_streams > 0
    assert isinstance(response, BaseMessage)
Test that streaming correctly invokes on_llm_new_token callback.
test_titan_takeoff_call
"""Test valid call to Titan Takeoff.""" url = 'http://localhost:8000/generate' responses.add(responses.POST, url, json={'message': '2 + 2 is 4'}, status=200) llm = TitanTakeoff() output = llm('What is 2 + 2?') assert isinstance(output, str)
@responses.activate
def test_titan_takeoff_call() ->None:
    """Test valid call to Titan Takeoff."""
    # Stub the local generate endpoint so no server is needed.
    responses.add(responses.POST, 'http://localhost:8000/generate', json={
        'message': '2 + 2 is 4'}, status=200)
    llm = TitanTakeoff()
    answer = llm('What is 2 + 2?')
    assert isinstance(answer, str)
Test valid call to Titan Takeoff.
dumps
"""Return a json string representation of an object.""" if 'default' in kwargs: raise ValueError('`default` should not be passed to dumps') try: if pretty: indent = kwargs.pop('indent', 2) return json.dumps(obj, default=default, indent=indent, **kwargs) else: return json.dumps(obj, default=default, **kwargs) except TypeError: if pretty: return json.dumps(to_json_not_implemented(obj), indent=indent, **kwargs ) else: return json.dumps(to_json_not_implemented(obj), **kwargs)
def dumps(obj: Any, *, pretty: bool=False, **kwargs: Any) ->str:
    """Return a json string representation of an object."""
    if 'default' in kwargs:
        raise ValueError('`default` should not be passed to dumps')
    if pretty:
        indent = kwargs.pop('indent', 2)
        try:
            return json.dumps(obj, default=default, indent=indent, **kwargs)
        except TypeError:
            # Fall back to the serializable "not implemented" stand-in.
            return json.dumps(to_json_not_implemented(obj), indent=indent,
                **kwargs)
    try:
        return json.dumps(obj, default=default, **kwargs)
    except TypeError:
        return json.dumps(to_json_not_implemented(obj), **kwargs)
Return a json string representation of an object.
test_load_success_all_meta
api_client.load_all_available_meta = True docs = api_client.load('HUNTER X HUNTER') assert len(docs) > 1 assert len(docs) <= 3 assert_docs(docs, all_meta=True)
def test_load_success_all_meta(api_client: WikipediaAPIWrapper) ->None:
    # With full metadata enabled, the loader should still respect top-k bounds.
    api_client.load_all_available_meta = True
    docs = api_client.load('HUNTER X HUNTER')
    assert 1 < len(docs) <= 3
    assert_docs(docs, all_meta=True)
null
batched
iterator = iter(iterable) while (batch := list(islice(iterator, batch_size))): yield batch
def batched(iterable: Iterable[Any], batch_size: int) ->Iterable[Any]:
    """Yield successive lists of at most ``batch_size`` items from ``iterable``."""
    it = iter(iterable)
    while True:
        chunk = list(islice(it, batch_size))
        if not chunk:
            return
        yield chunk
null
test_visit_comparison_ne
comp = Comparison(comparator=Comparator.NE, attribute='name', value='foo') expected = {'name': {'$ne': 'foo'}} actual = DEFAULT_TRANSLATOR.visit_comparison(comp) assert expected == actual
def test_visit_comparison_ne() ->None:
    # NE comparisons must translate to Mongo-style $ne filters.
    comparison = Comparison(comparator=Comparator.NE, attribute='name',
        value='foo')
    assert DEFAULT_TRANSLATOR.visit_comparison(comparison) == {'name': {
        '$ne': 'foo'}}
null
test_mosaicml_embedding_documents_multiple
"""Test MosaicML embeddings with multiple documents.""" documents = ['foo bar', 'bar foo', 'foo'] embedding = MosaicMLInstructorEmbeddings() output = embedding.embed_documents(documents) assert len(output) == 3 assert len(output[0]) == 768 assert len(output[1]) == 768 assert len(output[2]) == 768
def test_mosaicml_embedding_documents_multiple() ->None:
    """Test MosaicML embeddings with multiple documents."""
    documents = ['foo bar', 'bar foo', 'foo']
    output = MosaicMLInstructorEmbeddings().embed_documents(documents)
    assert len(output) == 3
    # Instructor embeddings are 768-dimensional.
    assert all(len(vector) == 768 for vector in output)
Test MosaicML embeddings with multiple documents.
_get_memory
"""Retrieve memory from Zep""" from zep_python import NotFoundError try: zep_memory: Memory = self.zep_client.memory.get_memory(self.session_id) except NotFoundError: logger.warning( f'Session {self.session_id} not found in Zep. Returning None') return None return zep_memory
def _get_memory(self) ->Optional[Memory]:
    """Retrieve memory from Zep"""
    from zep_python import NotFoundError
    try:
        # Unknown sessions are expected; warn and degrade to None.
        return self.zep_client.memory.get_memory(self.session_id)
    except NotFoundError:
        logger.warning(
            f'Session {self.session_id} not found in Zep. Returning None')
        return None
Retrieve memory from Zep
on_llm_error
"""Run when LLM errors. Args: error (Exception or KeyboardInterrupt): The error. kwargs (Any): Additional keyword arguments. - response (LLMResult): The response which was generated before the error occurred. """ handle_event(self.handlers, 'on_llm_error', 'ignore_llm', error, run_id= self.run_id, parent_run_id=self.parent_run_id, tags=self.tags, **kwargs)
def on_llm_error(self, error: BaseException, **kwargs: Any) ->None:
    """Run when LLM errors.

    Args:
        error (Exception or KeyboardInterrupt): The error.
        kwargs (Any): Additional keyword arguments.
            - response (LLMResult): The response which was generated before
                the error occurred.
    """
    # Fan the error out to every registered handler; handlers flagged with
    # ignore_llm are skipped by handle_event.
    handle_event(self.handlers, 'on_llm_error', 'ignore_llm', error, run_id
        =self.run_id, parent_run_id=self.parent_run_id, tags=self.tags, **
        kwargs)
Run when LLM errors. Args: error (Exception or KeyboardInterrupt): The error. kwargs (Any): Additional keyword arguments. - response (LLMResult): The response which was generated before the error occurred.
_prepare_output
""" Prepare the output dictionary. Args: result (Dict[str, Any]): The evaluation results. Returns: Dict[str, Any]: The prepared output dictionary. """ result = {'score': result['score']} if RUN_KEY in result: result[RUN_KEY] = result[RUN_KEY].dict() return result
def _prepare_output(self, result: Dict[str, Any]) ->Dict[str, Any]:
    """
    Prepare the output dictionary.

    Args:
        result (Dict[str, Any]): The evaluation results.

    Returns:
        Dict[str, Any]: The prepared output dictionary.
    """
    prepared = {'score': result['score']}
    # Check the ORIGINAL result for run info: the previous code rebound
    # ``result`` to the new dict first, so this branch could never fire.
    if RUN_KEY in result:
        prepared[RUN_KEY] = result[RUN_KEY].dict()
    return prepared
Prepare the output dictionary. Args: result (Dict[str, Any]): The evaluation results. Returns: Dict[str, Any]: The prepared output dictionary.
client_search
version_num = client.info()['version']['number'][0] version_num = int(version_num) if version_num >= 8: response = client.search(index=index_name, query=script_query, size=size) else: response = client.search(index=index_name, body={'query': script_query, 'size': size}) return response
def client_search(self, client: Any, index_name: str, script_query: Dict,
    size: int) ->Any:
    # Elasticsearch 8 moved the query out of the request body into
    # top-level keyword arguments.
    major_version = int(client.info()['version']['number'][0])
    if major_version >= 8:
        return client.search(index=index_name, query=script_query, size=size)
    return client.search(index=index_name, body={'query': script_query,
        'size': size})
null
on_agent_action
self.on_agent_action_common()
def on_agent_action(self, *args: Any, **kwargs: Any) ->Any:
    """Run on agent action; delegates to the shared common handler."""
    self.on_agent_action_common()
null
test_couchbase_loader
"""Test Couchbase loader.""" loader = CouchbaseLoader(connection_string=self.conn_string, db_username= self.database_user, db_password=self.database_password, query=self. valid_query, page_content_fields=self.valid_page_content_fields, metadata_fields=self.valid_metadata_fields) docs = loader.load() print(docs) assert len(docs) > 0 for doc in docs: print(doc) assert doc.page_content != '' assert 'id' in doc.metadata and doc.metadata['id'] != ''
def test_couchbase_loader(self) ->None:
    """Test Couchbase loader."""
    loader = CouchbaseLoader(connection_string=self.conn_string,
        db_username=self.database_user, db_password=self.database_password,
        query=self.valid_query, page_content_fields=self.
        valid_page_content_fields, metadata_fields=self.valid_metadata_fields)
    docs = loader.load()
    print(docs)
    # Every loaded document must carry content and a non-empty id.
    assert docs
    for doc in docs:
        print(doc)
        assert doc.page_content != ''
        assert doc.metadata.get('id', '') != ''
Test Couchbase loader.
_get_sample_rows
command = select(table).limit(self._sample_rows_in_table_info) columns_str = '\t'.join([col.name for col in table.columns]) try: with self._engine.connect() as connection: sample_rows_result = connection.execute(command) sample_rows = list(map(lambda ls: [str(i)[:100] for i in ls], sample_rows_result)) sample_rows_str = '\n'.join(['\t'.join(row) for row in sample_rows]) except ProgrammingError: sample_rows_str = '' return f"""{self._sample_rows_in_table_info} rows from {table.name} table: {columns_str} {sample_rows_str}"""
def _get_sample_rows(self, table: Table) ->str:
    """Render up to ``_sample_rows_in_table_info`` rows of *table* as a
    tab-separated preview block: a header of column names, then rows.

    The rows section is empty if the dialect cannot run the query.
    """
    command = select(table).limit(self._sample_rows_in_table_info)
    columns_str = '\t'.join(col.name for col in table.columns)
    try:
        with self._engine.connect() as connection:
            # Truncate each value to 100 chars so wide cells stay readable.
            sample_rows = [[str(value)[:100] for value in row] for row in
                connection.execute(command)]
        sample_rows_str = '\n'.join('\t'.join(row) for row in sample_rows)
    except ProgrammingError:
        # Some dialects cannot SELECT from every reflected table.
        sample_rows_str = ''
    return (
        f'{self._sample_rows_in_table_info} rows from {table.name} table:\n'
        f'{columns_str}\n{sample_rows_str}')
null
_load_from_bytes
"""Return a document from a bytes representation.""" return loads(serialized.decode('utf-8'))
def _load_from_bytes(serialized: bytes) ->Serializable:
    """Deserialize a UTF-8 encoded byte string back into a Serializable."""
    return loads(serialized.decode('utf-8'))
Return a document from a bytes representation.
validate_tools_single_input
"""Validate tools for single input.""" for tool in tools: if not tool.is_single_input: raise ValueError( f'{class_name} does not support multi-input tool {tool.name}.')
def validate_tools_single_input(class_name: str, tools: Sequence[BaseTool]
    ) ->None:
    """Validate tools for single input."""
    # Agents named here can only route a single string to each tool, so the
    # first multi-input tool found is a hard error.
    offenders = (tool for tool in tools if not tool.is_single_input)
    for tool in offenders:
        raise ValueError(
            f'{class_name} does not support multi-input tool {tool.name}.')
Validate tools for single input.
test_get_code
"""Test the parser.""" code_lines = output_parser.parse_folder(_SAMPLE_CODE) code = [c for c in code_lines if c.strip()] assert code == code_lines assert code == ['echo hello'] code_lines = output_parser.parse_folder(_SAMPLE_CODE + _SAMPLE_CODE_2_LINES) assert code_lines == ['echo hello', 'echo hello', 'echo world']
def test_get_code(output_parser: BashOutputParser) ->None:
    """Test the parser."""
    code_lines = output_parser.parse_folder(_SAMPLE_CODE)
    # The parser should emit no blank lines.
    assert [line for line in code_lines if line.strip()] == code_lines
    assert code_lines == ['echo hello']
    combined = output_parser.parse_folder(_SAMPLE_CODE + _SAMPLE_CODE_2_LINES)
    assert combined == ['echo hello', 'echo hello', 'echo world']
Test the parser.
__init__
self.folder = Path(folder) self.model_path = self.folder / 'latest.vw' self.with_history = with_history if reset and self.has_history(): logger.warning( 'There is non empty history which is recommended to be cleaned up') if self.model_path.exists(): os.remove(self.model_path) self.folder.mkdir(parents=True, exist_ok=True)
def __init__(self, folder: Union[str, os.PathLike], with_history: bool=True,
    reset: bool=False):
    """Track a VW model file under *folder*, optionally resetting it."""
    self.folder = Path(folder)
    self.model_path = self.folder / 'latest.vw'
    self.with_history = with_history
    if reset and self.has_history():
        # Resetting does not wipe history files; surface that to the user.
        logger.warning(
            'There is non empty history which is recommended to be cleaned up'
            )
    if self.model_path.exists():
        self.model_path.unlink()
    self.folder.mkdir(parents=True, exist_ok=True)
null
test_init_delta_sync_with_self_managed_embeddings
index = mock_index(DELTA_SYNC_INDEX_SELF_MANAGED_EMBEDDINGS) vectorsearch = DatabricksVectorSearch(index, embedding= DEFAULT_EMBEDDING_MODEL, text_column=DEFAULT_TEXT_COLUMN) assert vectorsearch.index == index
@pytest.mark.requires('databricks', 'databricks.vector_search')
def test_init_delta_sync_with_self_managed_embeddings() ->None:
    # Construction should succeed and keep a handle to the given index.
    index = mock_index(DELTA_SYNC_INDEX_SELF_MANAGED_EMBEDDINGS)
    store = DatabricksVectorSearch(index, embedding=DEFAULT_EMBEDDING_MODEL,
        text_column=DEFAULT_TEXT_COLUMN)
    assert store.index == index
null
test_all_imports
assert sorted(EXPECTED_ALL) == sorted(__all__)
def test_all_imports() ->None:
    # __all__ must stay in sync with the expected export list.
    assert sorted(__all__) == sorted(EXPECTED_ALL)
null
test_pass_confluence_kwargs
loader = ConfluenceLoader(url='https://templates.atlassian.net/wiki/', confluence_kwargs={'verify_ssl': False}) assert loader.confluence.verify_ssl is False
@pytest.mark.skipif(not confluence_installed, reason=
    'Atlassian package not installed')
def test_pass_confluence_kwargs() ->None:
    # Extra kwargs must be forwarded to the underlying Confluence client.
    loader = ConfluenceLoader(url='https://templates.atlassian.net/wiki/',
        confluence_kwargs={'verify_ssl': False})
    assert loader.confluence.verify_ssl is False
null
test_api_key_masked_when_passed_via_constructor
llm = CerebriumAI(cerebriumai_api_key='secret-api-key') print(llm.cerebriumai_api_key, end='') captured = capsys.readouterr() assert captured.out == '**********' assert repr(llm.cerebriumai_api_key) == "SecretStr('**********')"
def test_api_key_masked_when_passed_via_constructor(capsys: CaptureFixture
    ) ->None:
    # Printing the secret must emit only the mask, never the raw key.
    llm = CerebriumAI(cerebriumai_api_key='secret-api-key')
    print(llm.cerebriumai_api_key, end='')
    assert capsys.readouterr().out == '**********'
    assert repr(llm.cerebriumai_api_key) == "SecretStr('**********')"
null
test_similarity_search_approx_with_custom_query_fn
"""test that custom query function is called with the query string and query body""" def my_custom_query(query_body: dict, query: str) ->dict: assert query == 'foo' assert query_body == {'knn': {'field': 'vector', 'filter': [], 'k': 1, 'num_candidates': 50, 'query_vector': [1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0]}} return {'query': {'match': {'text': {'query': 'bar'}}}} """Test end to end construction and search with metadata.""" texts = ['foo', 'bar', 'baz'] docsearch = ElasticsearchStore.from_texts(texts, FakeEmbeddings(), ** elasticsearch_connection, index_name=index_name) output = docsearch.similarity_search('foo', k=1, custom_query=my_custom_query) assert output == [Document(page_content='bar')]
def test_similarity_search_approx_with_custom_query_fn(self,
    elasticsearch_connection: dict, index_name: str) ->None:
    """test that custom query function is called with the query string and query body"""

    def my_custom_query(query_body: dict, query: str) ->dict:
        # The store must hand us the original query text and the knn body...
        assert query == 'foo'
        assert query_body == {'knn': {'field': 'vector', 'filter': [],
            'k': 1, 'num_candidates': 50, 'query_vector': [1.0, 1.0, 1.0,
            1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0]}}
        # ...and run whatever body we return instead.
        return {'query': {'match': {'text': {'query': 'bar'}}}}
    docsearch = ElasticsearchStore.from_texts(['foo', 'bar', 'baz'],
        FakeEmbeddings(), **elasticsearch_connection, index_name=index_name)
    output = docsearch.similarity_search('foo', k=1, custom_query=
        my_custom_query)
    assert output == [Document(page_content='bar')]
test that custom query function is called with the query string and query body
format_messages
"""Format the chat template into a list of finalized messages. Args: **kwargs: keyword arguments to use for filling in template variables in all the template messages in this chat template. Returns: list of formatted messages """ kwargs = self._merge_partial_and_user_variables(**kwargs) result = [] for message_template in self.messages: if isinstance(message_template, BaseMessage): result.extend([message_template]) elif isinstance(message_template, (BaseMessagePromptTemplate, BaseChatPromptTemplate)): message = message_template.format_messages(**kwargs) result.extend(message) else: raise ValueError(f'Unexpected input: {message_template}') return result
def format_messages(self, **kwargs: Any) ->List[BaseMessage]:
    """Format the chat template into a list of finalized messages.

    Args:
        **kwargs: keyword arguments to use for filling in template variables
                  in all the template messages in this chat template.

    Returns:
        list of formatted messages
    """
    kwargs = self._merge_partial_and_user_variables(**kwargs)
    result: List[BaseMessage] = []
    for template in self.messages:
        if isinstance(template, BaseMessage):
            # Literal messages pass through untouched.
            result.append(template)
        elif isinstance(template, (BaseMessagePromptTemplate,
            BaseChatPromptTemplate)):
            result.extend(template.format_messages(**kwargs))
        else:
            raise ValueError(f'Unexpected input: {template}')
    return result
Format the chat template into a list of finalized messages. Args: **kwargs: keyword arguments to use for filling in template variables in all the template messages in this chat template. Returns: list of formatted messages
__init__
self._pod = pod self._store = store self._vector_index = vector_index self._vector_type = vector_type self._vector_dimension = vector_dimension self._embedding = embedding try: from jaguardb_http_client.JaguarHttpClient import JaguarHttpClient except ImportError: raise ValueError( 'Could not import jaguardb-http-client python package. Please install it with `pip install -U jaguardb-http-client`' ) self._jag = JaguarHttpClient(url) self._token = ''
def __init__(self, pod: str, store: str, vector_index: str, vector_type:
    str, vector_dimension: int, url: str, embedding: Embeddings):
    """Hold JaguarDB connection settings and build the HTTP client."""
    # Fail fast if the optional client package is missing.
    try:
        from jaguardb_http_client.JaguarHttpClient import JaguarHttpClient
    except ImportError:
        raise ValueError(
            'Could not import jaguardb-http-client python package. Please install it with `pip install -U jaguardb-http-client`'
            )
    self._pod = pod
    self._store = store
    self._vector_index = vector_index
    self._vector_type = vector_type
    self._vector_dimension = vector_dimension
    self._embedding = embedding
    self._jag = JaguarHttpClient(url)
    # Auth token is acquired later, on login.
    self._token = ''
null
test_embed_query_normalized
output = OpenAIEmbeddings().embed_query('foo walked to the market') assert np.isclose(np.linalg.norm(output), 1.0)
@pytest.mark.scheduled
def test_embed_query_normalized() ->None:
    # Query embeddings are expected to be unit vectors.
    vector = OpenAIEmbeddings().embed_query('foo walked to the market')
    assert np.isclose(np.linalg.norm(vector), 1.0)
null
test_token_text_splitter
"""Test no overlap.""" splitter = TokenTextSplitter(chunk_size=5, chunk_overlap=0) output = splitter.split_text('abcdef' * 5) expected_output = ['abcdefabcdefabc', 'defabcdefabcdef'] assert output == expected_output
def test_token_text_splitter() ->None:
    """Test no overlap."""
    splitter = TokenTextSplitter(chunk_size=5, chunk_overlap=0)
    chunks = splitter.split_text('abcdef' * 5)
    # 30 chars split into two 15-char (5-token) chunks with no overlap.
    assert chunks == ['abcdefabcdefabc', 'defabcdefabcdef']
Test no overlap.
embed_query
return self._get_embedding(seed=self._get_seed(text))
def embed_query(self, text: str) ->List[float]:
    """Embed a query string via the seed derived from its text."""
    seed = self._get_seed(text)
    return self._get_embedding(seed=seed)
null
test_tracer_multiple_llm_runs
"""Test the tracer with multiple runs.""" uuid = uuid4() compare_run = Run(id=uuid, name='llm', start_time=datetime.now(timezone.utc ), end_time=datetime.now(timezone.utc), events=[{'name': 'start', 'time': datetime.now(timezone.utc)}, {'name': 'end', 'time': datetime. now(timezone.utc)}], extra={}, execution_order=1, child_execution_order =1, serialized=SERIALIZED, inputs=dict(prompts=[]), outputs=LLMResult( generations=[[]]), error=None, run_type='llm', trace_id=uuid, dotted_order=f'20230101T000000000000Z{uuid}') tracer = FakeTracer() num_runs = 10 for _ in range(num_runs): tracer.on_llm_start(serialized=SERIALIZED, prompts=[], run_id=uuid) tracer.on_llm_end(response=LLMResult(generations=[[]]), run_id=uuid) assert tracer.runs == [compare_run] * num_runs
@freeze_time('2023-01-01')
def test_tracer_multiple_llm_runs() ->None:
    """Test the tracer with multiple runs."""
    uuid = uuid4()
    # Expected run record: time is frozen, so start/end and both events
    # collapse to the same timestamp; the run id doubles as the trace id.
    compare_run = Run(id=uuid, name='llm', start_time=datetime.now(timezone
        .utc), end_time=datetime.now(timezone.utc), events=[{'name':
        'start', 'time': datetime.now(timezone.utc)}, {'name': 'end',
        'time': datetime.now(timezone.utc)}], extra={}, execution_order=1,
        child_execution_order=1, serialized=SERIALIZED, inputs=dict(prompts
        =[]), outputs=LLMResult(generations=[[]]), error=None, run_type=
        'llm', trace_id=uuid, dotted_order=f'20230101T000000000000Z{uuid}')
    tracer = FakeTracer()
    num_runs = 10
    # Reusing one run id: every start/end pair should record an identical run.
    for _ in range(num_runs):
        tracer.on_llm_start(serialized=SERIALIZED, prompts=[], run_id=uuid)
        tracer.on_llm_end(response=LLMResult(generations=[[]]), run_id=uuid)
    assert tracer.runs == [compare_run] * num_runs
Test the tracer with multiple runs.
test_language_loader_for_python
"""Test Python loader with parser enabled.""" file_path = Path(__file__).parent.parent.parent / 'examples' loader = GenericLoader.from_filesystem(file_path, glob='hello_world.py', parser=LanguageParser(parser_threshold=5)) docs = loader.load() assert len(docs) == 2 metadata = docs[0].metadata assert metadata['source'] == str(file_path / 'hello_world.py') assert metadata['content_type'] == 'functions_classes' assert metadata['language'] == 'python' metadata = docs[1].metadata assert metadata['source'] == str(file_path / 'hello_world.py') assert metadata['content_type'] == 'simplified_code' assert metadata['language'] == 'python' assert docs[0].page_content == """def main(): print("Hello World!") return 0""" assert docs[1].page_content == """#!/usr/bin/env python3 import sys # Code for: def main(): if __name__ == "__main__": sys.exit(main())"""
def test_language_loader_for_python() ->None:
    """LanguageParser should split the example file into a functions doc and a simplified-code doc."""
    examples_dir = Path(__file__).parent.parent.parent / 'examples'
    loader = GenericLoader.from_filesystem(examples_dir, glob='hello_world.py',
        parser=LanguageParser(parser_threshold=5))
    docs = loader.load()
    assert len(docs) == 2
    expected_source = str(examples_dir / 'hello_world.py')
    functions_meta = docs[0].metadata
    assert functions_meta['source'] == expected_source
    assert functions_meta['content_type'] == 'functions_classes'
    assert functions_meta['language'] == 'python'
    simplified_meta = docs[1].metadata
    assert simplified_meta['source'] == expected_source
    assert simplified_meta['content_type'] == 'simplified_code'
    assert simplified_meta['language'] == 'python'
    assert docs[0].page_content == """def main():
    print("Hello World!")

    return 0"""
    assert docs[1].page_content == """#!/usr/bin/env python3

import sys


# Code for: def main():


if __name__ == "__main__":
    sys.exit(main())"""
Test Python loader with parser enabled.
load
"""Load documents.""" return list(self.lazy_load())
def load(self) ->List[Document]:
    """Eagerly materialize every document produced by ``lazy_load``."""
    return [*self.lazy_load()]
Load documents.
_encoding_file_extension_map
texttospeech = _import_google_cloud_texttospeech() ENCODING_FILE_EXTENSION_MAP = {texttospeech.AudioEncoding.LINEAR16: '.wav', texttospeech.AudioEncoding.MP3: '.mp3', texttospeech.AudioEncoding. OGG_OPUS: '.ogg', texttospeech.AudioEncoding.MULAW: '.wav', texttospeech.AudioEncoding.ALAW: '.wav'} return ENCODING_FILE_EXTENSION_MAP.get(encoding)
def _encoding_file_extension_map(encoding: texttospeech.AudioEncoding
    ) ->Optional[str]:
    """Map a text-to-speech audio encoding to its output file extension, or None if unknown."""
    texttospeech = _import_google_cloud_texttospeech()
    extension_by_encoding = {
        texttospeech.AudioEncoding.LINEAR16: '.wav',
        texttospeech.AudioEncoding.MP3: '.mp3',
        texttospeech.AudioEncoding.OGG_OPUS: '.ogg',
        texttospeech.AudioEncoding.MULAW: '.wav',
        texttospeech.AudioEncoding.ALAW: '.wav',
    }
    return extension_by_encoding.get(encoding)
null
test_openai_multiple_prompts
"""Test completion with multiple prompts.""" llm = OpenAI(max_tokens=10) output = llm.generate(["I'm Pickle Rick", "I'm Pickle Rick"]) assert isinstance(output, LLMResult) assert isinstance(output.generations, list) assert len(output.generations) == 2
@pytest.mark.scheduled
def test_openai_multiple_prompts() ->None:
    """Generating from several prompts should return one generation list per prompt."""
    llm = OpenAI(max_tokens=10)
    prompts = ["I'm Pickle Rick", "I'm Pickle Rick"]
    output = llm.generate(prompts)
    assert isinstance(output, LLMResult)
    assert isinstance(output.generations, list)
    assert len(output.generations) == len(prompts)
Test completion with multiple prompts.
__init__
self.client = client self.moderation_beacon = {'moderation_chain_id': chain_id, 'moderation_type': 'PromptSafety', 'moderation_status': 'LABELS_NOT_FOUND'} self.callback = callback self.unique_id = unique_id
def __init__(self, client: Any, callback: Optional[Any]=None, unique_id:
    Optional[str]=None, chain_id: Optional[str]=None) ->None:
    """Store the client, optional callback and identifiers for prompt-safety moderation."""
    self.client = client
    self.callback = callback
    self.unique_id = unique_id
    # Beacon payload; status starts as LABELS_NOT_FOUND until moderation runs.
    self.moderation_beacon = {
        'moderation_chain_id': chain_id,
        'moderation_type': 'PromptSafety',
        'moderation_status': 'LABELS_NOT_FOUND',
    }
null
test_stream
"""Test streaming tokens from ChatMistralAI.""" llm = ChatMistralAI() for token in llm.stream("I'm Pickle Rick"): assert isinstance(token.content, str)
def test_stream() ->None:
    """Every streamed chunk from ChatMistralAI must carry string content."""
    chat = ChatMistralAI()
    for chunk in chat.stream("I'm Pickle Rick"):
        assert isinstance(chunk.content, str)
Test streaming tokens from ChatMistralAI.
test_load_fail_wrong_dataset_name
"""Test that fails to load""" with pytest.raises(ValidationError) as exc_info: TensorflowDatasetLoader(dataset_name='wrong_dataset_name', split_name= 'test', load_max_docs=MAX_DOCS, sample_to_document_function= mlqaen_example_to_document) assert 'the dataset name is spelled correctly' in str(exc_info.value)
def test_load_fail_wrong_dataset_name() ->None:
    """A misspelled dataset name must raise ValidationError with a helpful hint."""
    with pytest.raises(ValidationError) as exc_info:
        TensorflowDatasetLoader(
            dataset_name='wrong_dataset_name',
            split_name='test',
            load_max_docs=MAX_DOCS,
            sample_to_document_function=mlqaen_example_to_document,
        )
    assert 'the dataset name is spelled correctly' in str(exc_info.value)
Test that fails to load
run
user_input = ( 'Determine which next command to use, and respond using the format specified above:' ) loop_count = 0 while True: loop_count += 1 assistant_reply = self.chain.run(goals=goals, messages=self. chat_history_memory.messages, memory=self.memory, user_input=user_input ) print(assistant_reply) self.chat_history_memory.add_message(HumanMessage(content=user_input)) self.chat_history_memory.add_message(AIMessage(content=assistant_reply)) action = self.output_parser.parse_folder(assistant_reply) tools = {t.name: t for t in self.tools} if action.name == FINISH_NAME: return action.args['response'] if action.name in tools: tool = tools[action.name] try: observation = tool.run(action.args) except ValidationError as e: observation = ( f'Validation Error in args: {str(e)}, args: {action.args}') except Exception as e: observation = ( f'Error: {str(e)}, {type(e).__name__}, args: {action.args}') result = f'Command {tool.name} returned: {observation}' elif action.name == 'ERROR': result = f'Error: {action.args}. ' else: result = ( f"Unknown command '{action.name}'. Please refer to the 'COMMANDS' list for available commands and only respond in the specified JSON format." ) memory_to_add = f'Assistant Reply: {assistant_reply} \nResult: {result} ' if self.feedback_tool is not None: feedback = f"\n{self.feedback_tool.run('Input: ')}" if feedback in {'q', 'stop'}: print('EXITING') return 'EXITING' memory_to_add += feedback self.memory.add_documents([Document(page_content=memory_to_add)]) self.chat_history_memory.add_message(SystemMessage(content=result))
def run(self, goals: List[str]) ->str:
    """Run the agent loop until the model issues the finish command.

    Args:
        goals: Natural-language goals for the agent to pursue.

    Returns:
        The finish command's ``response`` argument, or ``'EXITING'`` when the
        human feedback tool replies ``'q'`` or ``'stop'``.
    """
    user_input = (
        'Determine which next command to use, and respond using the format specified above:'
        )
    # Loop-invariant: the tool registry does not change between iterations.
    tools = {t.name: t for t in self.tools}
    while True:
        assistant_reply = self.chain.run(goals=goals, messages=self.
            chat_history_memory.messages, memory=self.memory, user_input=
            user_input)
        print(assistant_reply)
        self.chat_history_memory.add_message(HumanMessage(content=user_input))
        self.chat_history_memory.add_message(AIMessage(content=assistant_reply)
            )
        action = self.output_parser.parse_folder(assistant_reply)
        if action.name == FINISH_NAME:
            return action.args['response']
        if action.name in tools:
            tool = tools[action.name]
            try:
                observation = tool.run(action.args)
            except ValidationError as e:
                observation = (
                    f'Validation Error in args: {str(e)}, args: {action.args}')
            except Exception as e:
                observation = (
                    f'Error: {str(e)}, {type(e).__name__}, args: {action.args}'
                    )
            result = f'Command {tool.name} returned: {observation}'
        elif action.name == 'ERROR':
            result = f'Error: {action.args}. '
        else:
            result = (
                f"Unknown command '{action.name}'. Please refer to the 'COMMANDS' list for available commands and only respond in the specified JSON format."
                )
        memory_to_add = (
            f'Assistant Reply: {assistant_reply} \nResult: {result} ')
        if self.feedback_tool is not None:
            # BUG FIX: compare the raw tool output. The original prepended
            # '\n' before the membership test, so 'q'/'stop' could never
            # match and the human exit path was unreachable.
            raw_feedback = self.feedback_tool.run('Input: ')
            if raw_feedback in {'q', 'stop'}:
                print('EXITING')
                return 'EXITING'
            memory_to_add += f'\n{raw_feedback}'
        self.memory.add_documents([Document(page_content=memory_to_add)])
        self.chat_history_memory.add_message(SystemMessage(content=result))
null
test_incorrect_command_return_err_output
"""Test optional returning of shell output on incorrect command.""" session = BashProcess(return_err_output=True) output = session.run(['invalid_command']) assert re.match( '^/bin/sh:.*invalid_command.*(?:not found|Permission denied).*$', output)
@pytest.mark.skipif(sys.platform.startswith('win'), reason=
    'Test not supported on Windows')
def test_incorrect_command_return_err_output() ->None:
    """Test optional returning of shell output on incorrect command."""
    session = BashProcess(return_err_output=True)
    result = session.run(['invalid_command'])
    pattern = '^/bin/sh:.*invalid_command.*(?:not found|Permission denied).*$'
    assert re.match(pattern, result)
Test optional returning of shell output on incorrect command.
summarize_related_memories
"""Summarize memories that are most relevant to an observation.""" prompt = PromptTemplate.from_template( """ {q1}? Context from memory: {relevant_memories} Relevant context: """) entity_name = self._get_entity_from_observation(observation) entity_action = self._get_entity_action(observation, entity_name) q1 = f'What is the relationship between {self.name} and {entity_name}' q2 = f'{entity_name} is {entity_action}' return self.chain(prompt=prompt).run(q1=q1, queries=[q1, q2]).strip()
def summarize_related_memories(self, observation: str) ->str:
    """Summarize memories that are most relevant to an observation."""
    template = PromptTemplate.from_template(
        """
{q1}?
Context from memory:
{relevant_memories}
Relevant context:
"""
        )
    entity = self._get_entity_from_observation(observation)
    entity_action = self._get_entity_action(observation, entity)
    q1 = f'What is the relationship between {self.name} and {entity}'
    q2 = f'{entity} is {entity_action}'
    return self.chain(prompt=template).run(q1=q1, queries=[q1, q2]).strip()
Summarize memories that are most relevant to an observation.
test_self_hosted_huggingface_instructor_embedding_query
"""Test self-hosted huggingface instruct embeddings.""" query = 'foo bar' gpu = get_remote_instance() embedding = SelfHostedHuggingFaceInstructEmbeddings(hardware=gpu) output = embedding.embed_query(query) assert len(output) == 768
def test_self_hosted_huggingface_instructor_embedding_query() ->None:
    """A self-hosted instruct embedding of a query should have 768 dimensions."""
    gpu = get_remote_instance()
    embedding = SelfHostedHuggingFaceInstructEmbeddings(hardware=gpu)
    output = embedding.embed_query('foo bar')
    assert len(output) == 768
Test self-hosted huggingface instruct embeddings.
_select_relevance_score_fn
""" Select and return the appropriate relevance score function based on the distance metric used in the BagelDB cluster. """ if self.override_relevance_score_fn: return self.override_relevance_score_fn distance = 'l2' distance_key = 'hnsw:space' metadata = self._cluster.metadata if metadata and distance_key in metadata: distance = metadata[distance_key] if distance == 'cosine': return self._cosine_relevance_score_fn elif distance == 'l2': return self._euclidean_relevance_score_fn elif distance == 'ip': return self._max_inner_product_relevance_score_fn else: raise ValueError( f'No supported normalization function for distance metric of type: {distance}. Consider providing relevance_score_fn to Bagel constructor.' )
def _select_relevance_score_fn(self) ->Callable[[float], float]:
    """
    Select and return the appropriate relevance score function based on
    the distance metric used in the BagelDB cluster.
    """
    if self.override_relevance_score_fn:
        return self.override_relevance_score_fn
    # Read the configured distance metric from cluster metadata, defaulting to l2.
    distance_key = 'hnsw:space'
    metadata = self._cluster.metadata
    distance = 'l2'
    if metadata and distance_key in metadata:
        distance = metadata[distance_key]
    selectors = {
        'cosine': self._cosine_relevance_score_fn,
        'l2': self._euclidean_relevance_score_fn,
        'ip': self._max_inner_product_relevance_score_fn,
    }
    selected = selectors.get(distance)
    if selected is None:
        raise ValueError(
            f'No supported normalization function for distance metric of type: {distance}. Consider providing relevance_score_fn to Bagel constructor.'
            )
    return selected
Select and return the appropriate relevance score function based on the distance metric used in the BagelDB cluster.
exists
"""Check if entity exists in store.""" pass
@abstractmethod
def exists(self, key: str) ->bool:
    """Return whether an entity with the given key is present in the store."""
    ...
Check if entity exists in store.
match_args
ret: Dict[str, Any] = dict(query_embedding=query) if filter: ret['filter'] = filter return ret
def match_args(self, query: List[float], filter: Optional[Dict[str, Any]]
    ) ->Dict[str, Any]:
    """Build keyword arguments for a match query.

    Args:
        query: The embedding vector to search with.
        filter: Optional metadata filter; included only when truthy.

    Returns:
        A dict containing ``query_embedding`` and, when provided, ``filter``.
    """
    arguments: Dict[str, Any] = {'query_embedding': query}
    if filter:
        arguments['filter'] = filter
    return arguments
null
_str
return ' '.join([f'{i}:{e}' for i, e in enumerate(embedding)])
@staticmethod
def _str(embedding: List[float]) ->str:
    """Render an embedding as space-separated ``index:value`` pairs."""
    return ' '.join(f'{index}:{value}' for index, value in enumerate(embedding)
        )
null
_import_sql_database_tool_ListSQLDatabaseTool
from langchain_community.tools.sql_database.tool import ListSQLDatabaseTool return ListSQLDatabaseTool
def _import_sql_database_tool_ListSQLDatabaseTool() ->Any:
    """Lazily import and return the ``ListSQLDatabaseTool`` class.

    The import is deferred to call time so the dependency is only loaded
    when the tool is actually requested.
    """
    from langchain_community.tools.sql_database.tool import ListSQLDatabaseTool
    return ListSQLDatabaseTool
null
test_chat_google_genai_invoke
"""Test invoke tokens from ChatGoogleGenerativeAI.""" llm = ChatGoogleGenerativeAI(model=_MODEL) result = llm.invoke("This is a test. Say 'foo'", config=dict(tags=['foo']), generation_config=dict(top_k=2, top_p=1, temperature=0.7)) assert isinstance(result.content, str) assert not result.content.startswith(' ')
def test_chat_google_genai_invoke() ->None:
    """Invoking ChatGoogleGenerativeAI should return clean string content."""
    llm = ChatGoogleGenerativeAI(model=_MODEL)
    config = dict(tags=['foo'])
    generation_config = dict(top_k=2, top_p=1, temperature=0.7)
    result = llm.invoke("This is a test. Say 'foo'", config=config,
        generation_config=generation_config)
    assert isinstance(result.content, str)
    assert not result.content.startswith(' ')
Test invoke tokens from ChatGoogleGenerativeAI.
_run
"""Use the tool.""" return self.api_wrapper.run(query)
def _run(self, query: str, run_manager: Optional[CallbackManagerForToolRun]
    =None) ->str:
    """Use the tool by delegating the query to the API wrapper."""
    result = self.api_wrapper.run(query)
    return result
Use the tool.
_create_retry_decorator
"""Returns a tenacity retry decorator, preconfigured to handle PaLM exceptions""" import litellm errors = [litellm.Timeout, litellm.APIError, litellm.APIConnectionError, litellm.RateLimitError] return create_base_retry_decorator(error_types=errors, max_retries=llm. max_retries, run_manager=run_manager)
def _create_retry_decorator(llm: ChatLiteLLM, run_manager: Optional[Union[
    AsyncCallbackManagerForLLMRun, CallbackManagerForLLMRun]]=None) ->Callable[
    [Any], Any]:
    """Returns a tenacity retry decorator, preconfigured to handle PaLM exceptions"""
    import litellm
    retryable_errors = [
        litellm.Timeout,
        litellm.APIError,
        litellm.APIConnectionError,
        litellm.RateLimitError,
    ]
    return create_base_retry_decorator(error_types=retryable_errors,
        max_retries=llm.max_retries, run_manager=run_manager)
Returns a tenacity retry decorator, preconfigured to handle PaLM exceptions
_call
docs = self.text_splitter.create_documents([inputs[self.input_key]]) results = self.llm_chain.generate([{'text': d.page_content} for d in docs], run_manager=run_manager) qa = [json.loads(res[0].text) for res in results.generations] return {self.output_key: qa}
def _call(self, inputs: Dict[str, Any], run_manager: Optional[
    CallbackManagerForChainRun]=None) ->Dict[str, List]:
    """Split the input text, run the LLM chain per chunk, and JSON-decode each first generation."""
    docs = self.text_splitter.create_documents([inputs[self.input_key]])
    chunk_inputs = [{'text': doc.page_content} for doc in docs]
    results = self.llm_chain.generate(chunk_inputs, run_manager=run_manager)
    qa = [json.loads(generation[0].text) for generation in results.generations]
    return {self.output_key: qa}
null
_llm_type
"""Return type of model.""" return 'replicate'
@property
def _llm_type(self) ->str:
    """Return type of model."""
    # Constant identifier string for this LLM implementation.
    return 'replicate'
Return type of model.
test_all_imports
assert sorted(EXPECTED_ALL) == sorted(__all__)
def test_all_imports() ->None:
    """The module's ``__all__`` must match the expected public API exactly."""
    assert sorted(__all__) == sorted(EXPECTED_ALL)
null
_stream
invocation_params = self._invocation_params(stop, **kwargs) headers = {'User-Agent': 'Test Client', 'Authorization': f'{self.eas_service_token}'} if self.version == '1.0': pload = {'input_ids': prompt, **invocation_params} response = requests.post(self.eas_service_url, headers=headers, json= pload, stream=True) res = GenerationChunk(text=response.text) if run_manager: run_manager.on_llm_new_token(res.text) yield res else: pload = {'prompt': prompt, 'use_stream_chat': 'True', **invocation_params} response = requests.post(self.eas_service_url, headers=headers, json= pload, stream=True) for chunk in response.iter_lines(chunk_size=8192, decode_unicode=False, delimiter=b'\x00'): if chunk: data = json.loads(chunk.decode('utf-8')) output = data['response'] stop_seq_found: Optional[str] = None for stop_seq in invocation_params['stop']: if stop_seq in output: stop_seq_found = stop_seq text: Optional[str] = None if stop_seq_found: text = output[:output.index(stop_seq_found)] else: text = output if text: res = GenerationChunk(text=text) yield res if run_manager: run_manager.on_llm_new_token(res.text) if stop_seq_found: break
def _stream(self, prompt: str, stop: Optional[List[str]]=None, run_manager:
    Optional[CallbackManagerForLLMRun]=None, **kwargs: Any) ->Iterator[
    GenerationChunk]:
    """Stream generation chunks from the EAS service endpoint.

    Args:
        prompt: Prompt text sent to the service.
        stop: Optional stop sequences; streaming halts once one appears.
        run_manager: Callback manager notified of each new token.
        **kwargs: Extra parameters merged into the invocation payload.

    Yields:
        GenerationChunk objects containing incremental response text.
    """
    invocation_params = self._invocation_params(stop, **kwargs)
    headers = {'User-Agent': 'Test Client', 'Authorization':
        f'{self.eas_service_token}'}
    if self.version == '1.0':
        # v1.0 endpoints do not actually stream: the whole response body is
        # yielded as a single chunk.
        pload = {'input_ids': prompt, **invocation_params}
        response = requests.post(self.eas_service_url, headers=headers,
            json=pload, stream=True)
        res = GenerationChunk(text=response.text)
        if run_manager:
            run_manager.on_llm_new_token(res.text)
        yield res
    else:
        pload = {'prompt': prompt, 'use_stream_chat': 'True', **
            invocation_params}
        response = requests.post(self.eas_service_url, headers=headers,
            json=pload, stream=True)
        # Server frames each JSON message with a NUL (b'\x00') delimiter.
        for chunk in response.iter_lines(chunk_size=8192, decode_unicode=
            False, delimiter=b'\x00'):
            if chunk:
                data = json.loads(chunk.decode('utf-8'))
                output = data['response']
                # Remember the last stop sequence found in this message, if any.
                # NOTE(review): assumes invocation_params['stop'] is always an
                # iterable here — presumably _invocation_params defaults it; confirm.
                stop_seq_found: Optional[str] = None
                for stop_seq in invocation_params['stop']:
                    if stop_seq in output:
                        stop_seq_found = stop_seq
                text: Optional[str] = None
                if stop_seq_found:
                    # Truncate at the first occurrence of the stop sequence.
                    text = output[:output.index(stop_seq_found)]
                else:
                    text = output
                if text:
                    res = GenerationChunk(text=text)
                    yield res
                    if run_manager:
                        run_manager.on_llm_new_token(res.text)
                if stop_seq_found:
                    # Stop streaming once a stop sequence has been emitted.
                    break
null
from_llm
"""Get the response parser.""" system_template = ( '#1 Task Planning Stage: The AI assistant can parse user input to several tasks: [{{"task": task, "id": task_id, "dep": dependency_task_id, "args": {{"input name": text may contain <resource-dep_id>}}}}]. The special tag "dep_id" refer to the one generated text/image/audio in the dependency task (Please consider whether the dependency task generates resources of this type.) and "dep_id" must be in "dep" list. The "dep" field denotes the ids of the previous prerequisite tasks which generate a new resource that the current task relies on. The task MUST be selected from the following tools (along with tool description, input name and output type): {tools}. There may be multiple tasks of the same type. Think step by step about all the tasks needed to resolve the user\'s request. Parse out as few tasks as possible while ensuring that the user request can be resolved. Pay attention to the dependencies and order among tasks. If the user input can\'t be parsed, you need to reply empty JSON [].' ) human_template = 'Now I input: {input}.' system_message_prompt = SystemMessagePromptTemplate.from_template( system_template) human_message_prompt = HumanMessagePromptTemplate.from_template(human_template) demo_messages: List[Union[HumanMessagePromptTemplate, AIMessagePromptTemplate] ] = [] for demo in demos: if demo['role'] == 'user': demo_messages.append(HumanMessagePromptTemplate.from_template(demo[ 'content'])) else: demo_messages.append(AIMessagePromptTemplate.from_template(demo[ 'content'])) prompt = ChatPromptTemplate.from_messages([system_message_prompt, * demo_messages, human_message_prompt]) return cls(prompt=prompt, llm=llm, verbose=verbose)
@classmethod
def from_llm(cls, llm: BaseLanguageModel, demos: List[Dict]=DEMONSTRATIONS,
    verbose: bool=True) ->LLMChain:
    """Build the task-planning chain from an LLM and few-shot demonstrations.

    Args:
        llm: Language model used to plan tasks from user input.
        demos: Demonstration messages; each dict has a 'role' ('user' or
            assistant) and 'content' used as a few-shot example.
        verbose: Whether the resulting chain logs verbosely.

    Returns:
        An instance of ``cls`` wrapping a chat prompt of
        system message + demonstrations + human message.
    """
    system_template = (
        '#1 Task Planning Stage: The AI assistant can parse user input to several tasks: [{{"task": task, "id": task_id, "dep": dependency_task_id, "args": {{"input name": text may contain <resource-dep_id>}}}}]. The special tag "dep_id" refer to the one generated text/image/audio in the dependency task (Please consider whether the dependency task generates resources of this type.) and "dep_id" must be in "dep" list. The "dep" field denotes the ids of the previous prerequisite tasks which generate a new resource that the current task relies on. The task MUST be selected from the following tools (along with tool description, input name and output type): {tools}. There may be multiple tasks of the same type. Think step by step about all the tasks needed to resolve the user\'s request. Parse out as few tasks as possible while ensuring that the user request can be resolved. Pay attention to the dependencies and order among tasks. If the user input can\'t be parsed, you need to reply empty JSON [].'
        )
    human_template = 'Now I input: {input}.'
    system_message_prompt = SystemMessagePromptTemplate.from_template(
        system_template)
    human_message_prompt = HumanMessagePromptTemplate.from_template(
        human_template)
    # Convert each demonstration into the matching prompt-template type so
    # the few-shot examples alternate human/AI messages in the final prompt.
    demo_messages: List[Union[HumanMessagePromptTemplate,
        AIMessagePromptTemplate]] = []
    for demo in demos:
        if demo['role'] == 'user':
            demo_messages.append(HumanMessagePromptTemplate.from_template(
                demo['content']))
        else:
            demo_messages.append(AIMessagePromptTemplate.from_template(demo
                ['content']))
    prompt = ChatPromptTemplate.from_messages([system_message_prompt, *
        demo_messages, human_message_prompt])
    return cls(prompt=prompt, llm=llm, verbose=verbose)
Get the response parser.
__init__
self.nua = nuclia_tool self.id = str(uuid.uuid4()) self.nua.run({'action': 'push', 'id': self.id, 'path': path, 'text': None})
def __init__(self, path: str, nuclia_tool: NucliaUnderstandingAPI):
    """Register the file at *path* with the understanding API under a fresh UUID."""
    self.nua = nuclia_tool
    self.id = str(uuid.uuid4())
    payload = {'action': 'push', 'id': self.id, 'path': path, 'text': None}
    self.nua.run(payload)
null
ignore_chain
"""Whether to ignore chain callbacks.""" return self.ignore_chain_
@property
def ignore_chain(self) ->bool:
    """Whether to ignore chain callbacks."""
    # Read-only view over the underlying ``ignore_chain_`` attribute.
    return self.ignore_chain_
Whether to ignore chain callbacks.