method_name
stringlengths
1
78
method_body
stringlengths
3
9.66k
full_code
stringlengths
31
10.7k
docstring
stringlengths
4
4.74k
_import_modal
from langchain_community.llms.modal import Modal return Modal
def _import_modal() ->Any: from langchain_community.llms.modal import Modal return Modal
null
get_pull_request
""" Fetches a specific pull request and its first 10 comments, limited by max_tokens. Parameters: pr_number(int): The number for the Github pull max_tokens(int): The maximum number of tokens in the response Returns: dict: A dictionary containing the pull's title, body, and comments as a string """ max_tokens = 2000 pull = self.github_repo_instance.get_pull(number=pr_number) total_tokens = 0 def get_tokens(text: str) ->int: tiktoken = _import_tiktoken() return len(tiktoken.get_encoding('cl100k_base').encode(text)) def add_to_dict(data_dict: Dict[str, Any], key: str, value: str) ->None: nonlocal total_tokens tokens = get_tokens(value) if total_tokens + tokens <= max_tokens: data_dict[key] = value total_tokens += tokens response_dict: Dict[str, str] = {} add_to_dict(response_dict, 'title', pull.title) add_to_dict(response_dict, 'number', str(pr_number)) add_to_dict(response_dict, 'body', pull.body) comments: List[str] = [] page = 0 while len(comments) <= 10: comments_page = pull.get_issue_comments().get_page(page) if len(comments_page) == 0: break for comment in comments_page: comment_str = str({'body': comment.body, 'user': comment.user.login}) if total_tokens + get_tokens(comment_str) > max_tokens: break comments.append(comment_str) total_tokens += get_tokens(comment_str) page += 1 add_to_dict(response_dict, 'comments', str(comments)) commits: List[str] = [] page = 0 while len(commits) <= 10: commits_page = pull.get_commits().get_page(page) if len(commits_page) == 0: break for commit in commits_page: commit_str = str({'message': commit.commit.message}) if total_tokens + get_tokens(commit_str) > max_tokens: break commits.append(commit_str) total_tokens += get_tokens(commit_str) page += 1 add_to_dict(response_dict, 'commits', str(commits)) return response_dict
def get_pull_request(self, pr_number: int) ->Dict[str, Any]: """ Fetches a specific pull request and its first 10 comments, limited by max_tokens. Parameters: pr_number(int): The number for the Github pull max_tokens(int): The maximum number of tokens in the response Returns: dict: A dictionary containing the pull's title, body, and comments as a string """ max_tokens = 2000 pull = self.github_repo_instance.get_pull(number=pr_number) total_tokens = 0 def get_tokens(text: str) ->int: tiktoken = _import_tiktoken() return len(tiktoken.get_encoding('cl100k_base').encode(text)) def add_to_dict(data_dict: Dict[str, Any], key: str, value: str) ->None: nonlocal total_tokens tokens = get_tokens(value) if total_tokens + tokens <= max_tokens: data_dict[key] = value total_tokens += tokens response_dict: Dict[str, str] = {} add_to_dict(response_dict, 'title', pull.title) add_to_dict(response_dict, 'number', str(pr_number)) add_to_dict(response_dict, 'body', pull.body) comments: List[str] = [] page = 0 while len(comments) <= 10: comments_page = pull.get_issue_comments().get_page(page) if len(comments_page) == 0: break for comment in comments_page: comment_str = str({'body': comment.body, 'user': comment.user. login}) if total_tokens + get_tokens(comment_str) > max_tokens: break comments.append(comment_str) total_tokens += get_tokens(comment_str) page += 1 add_to_dict(response_dict, 'comments', str(comments)) commits: List[str] = [] page = 0 while len(commits) <= 10: commits_page = pull.get_commits().get_page(page) if len(commits_page) == 0: break for commit in commits_page: commit_str = str({'message': commit.commit.message}) if total_tokens + get_tokens(commit_str) > max_tokens: break commits.append(commit_str) total_tokens += get_tokens(commit_str) page += 1 add_to_dict(response_dict, 'commits', str(commits)) return response_dict
Fetches a specific pull request and its first 10 comments, limited by max_tokens. Parameters: pr_number(int): The number for the Github pull max_tokens(int): The maximum number of tokens in the response Returns: dict: A dictionary containing the pull's title, body, and comments as a string
write_item
k, v = item if k is None: self.write('**') self.dispatch(v) else: write_key_value_pair(k, v)
def write_item(item): k, v = item if k is None: self.write('**') self.dispatch(v) else: write_key_value_pair(k, v)
null
test_all_imports
assert set(__all__) == set(EXPECTED_ALL)
def test_all_imports() ->None: assert set(__all__) == set(EXPECTED_ALL)
null
requires_reference
return True
@property def requires_reference(self) ->bool: return True
null
test_embed_documents_consistency
"""Test embedding consistency for the same document.""" model = GoogleGenerativeAIEmbeddings(model=_MODEL) doc = 'Consistent document for testing' result1 = model.embed_documents([doc]) result2 = model.embed_documents([doc]) assert result1 == result2
def test_embed_documents_consistency() ->None: """Test embedding consistency for the same document.""" model = GoogleGenerativeAIEmbeddings(model=_MODEL) doc = 'Consistent document for testing' result1 = model.embed_documents([doc]) result2 = model.embed_documents([doc]) assert result1 == result2
Test embedding consistency for the same document.
_import_vectorstore_tool_VectorStoreQATool
from langchain_community.tools.vectorstore.tool import VectorStoreQATool return VectorStoreQATool
def _import_vectorstore_tool_VectorStoreQATool() ->Any: from langchain_community.tools.vectorstore.tool import VectorStoreQATool return VectorStoreQATool
null
similarity_search_with_score
""" Performs a search on the query string and returns results with scores. Args: query (str): The text being searched. k (int, optional): The number of results to return. Default is 4. param (dict): Specifies the search parameters for the index. Default is None. expr (str, optional): Filtering expression. Default is None. timeout (int, optional): The waiting time before a timeout error. Default is None. kwargs: Keyword arguments for Collection.search(). Returns: List[float], List[Tuple[Document, any, any]]: """ if self.col is None: logger.debug('No existing collection to search.') return [] embedding = self.embedding_func.embed_query(query) ret = self.similarity_search_with_score_by_vector(embedding=embedding, k=k, param=param, expr=expr, timeout=timeout, **kwargs) return ret
def similarity_search_with_score(self, query: str, k: int=4, param: Optional[dict]=None, expr: Optional[str]=None, timeout: Optional[int]= None, **kwargs: Any) ->List[Tuple[Document, float]]: """ Performs a search on the query string and returns results with scores. Args: query (str): The text being searched. k (int, optional): The number of results to return. Default is 4. param (dict): Specifies the search parameters for the index. Default is None. expr (str, optional): Filtering expression. Default is None. timeout (int, optional): The waiting time before a timeout error. Default is None. kwargs: Keyword arguments for Collection.search(). Returns: List[float], List[Tuple[Document, any, any]]: """ if self.col is None: logger.debug('No existing collection to search.') return [] embedding = self.embedding_func.embed_query(query) ret = self.similarity_search_with_score_by_vector(embedding=embedding, k=k, param=param, expr=expr, timeout=timeout, **kwargs) return ret
Performs a search on the query string and returns results with scores. Args: query (str): The text being searched. k (int, optional): The number of results to return. Default is 4. param (dict): Specifies the search parameters for the index. Default is None. expr (str, optional): Filtering expression. Default is None. timeout (int, optional): The waiting time before a timeout error. Default is None. kwargs: Keyword arguments for Collection.search(). Returns: List[float], List[Tuple[Document, any, any]]:
flush
self._flush()
def flush(self) ->None: self._flush()
null
_update_neighbors
if len(self._embeddings) == 0: raise SKLearnVectorStoreException( 'No data was added to SKLearnVectorStore.') self._embeddings_np = self._np.asarray(self._embeddings) self._neighbors.fit(self._embeddings_np) self._neighbors_fitted = True
def _update_neighbors(self) ->None: if len(self._embeddings) == 0: raise SKLearnVectorStoreException( 'No data was added to SKLearnVectorStore.') self._embeddings_np = self._np.asarray(self._embeddings) self._neighbors.fit(self._embeddings_np) self._neighbors_fitted = True
null
_identifying_params
"""Get the identifying parameters.""" return {**{'model': self.model}, **self._default_params}
@property def _identifying_params(self) ->Dict[str, Any]: """Get the identifying parameters.""" return {**{'model': self.model}, **self._default_params}
Get the identifying parameters.
test_api_key_is_secret_string
llm = CerebriumAI(cerebriumai_api_key='test-cerebriumai-api-key') assert isinstance(llm.cerebriumai_api_key, SecretStr)
def test_api_key_is_secret_string() ->None: llm = CerebriumAI(cerebriumai_api_key='test-cerebriumai-api-key') assert isinstance(llm.cerebriumai_api_key, SecretStr)
null
__init__
""" Initialize the Atlas Client Args: name (str): The name of your project. If the project already exists, it will be loaded. embedding_function (Optional[Embeddings]): An optional function used for embedding your data. If None, data will be embedded with Nomic's embed model. api_key (str): Your nomic API key description (str): A description for your project. is_public (bool): Whether your project is publicly accessible. True by default. reset_project_if_exists (bool): Whether to reset this project if it already exists. Default False. Generally useful during development and testing. """ try: import nomic from nomic import AtlasProject except ImportError: raise ImportError( 'Could not import nomic python package. Please install it with `pip install nomic`.' ) if api_key is None: raise ValueError('No API key provided. Sign up at atlas.nomic.ai!') nomic.login(api_key) self._embedding_function = embedding_function modality = 'text' if self._embedding_function is not None: modality = 'embedding' self.project = AtlasProject(name=name, description=description, modality= modality, is_public=is_public, reset_project_if_exists= reset_project_if_exists, unique_id_field=AtlasDB._ATLAS_DEFAULT_ID_FIELD) self.project._latest_project_state()
def __init__(self, name: str, embedding_function: Optional[Embeddings]=None, api_key: Optional[str]=None, description: str= 'A description for your project', is_public: bool=True, reset_project_if_exists: bool=False) ->None: """ Initialize the Atlas Client Args: name (str): The name of your project. If the project already exists, it will be loaded. embedding_function (Optional[Embeddings]): An optional function used for embedding your data. If None, data will be embedded with Nomic's embed model. api_key (str): Your nomic API key description (str): A description for your project. is_public (bool): Whether your project is publicly accessible. True by default. reset_project_if_exists (bool): Whether to reset this project if it already exists. Default False. Generally useful during development and testing. """ try: import nomic from nomic import AtlasProject except ImportError: raise ImportError( 'Could not import nomic python package. Please install it with `pip install nomic`.' ) if api_key is None: raise ValueError('No API key provided. Sign up at atlas.nomic.ai!') nomic.login(api_key) self._embedding_function = embedding_function modality = 'text' if self._embedding_function is not None: modality = 'embedding' self.project = AtlasProject(name=name, description=description, modality=modality, is_public=is_public, reset_project_if_exists= reset_project_if_exists, unique_id_field=AtlasDB. _ATLAS_DEFAULT_ID_FIELD) self.project._latest_project_state()
Initialize the Atlas Client Args: name (str): The name of your project. If the project already exists, it will be loaded. embedding_function (Optional[Embeddings]): An optional function used for embedding your data. If None, data will be embedded with Nomic's embed model. api_key (str): Your nomic API key description (str): A description for your project. is_public (bool): Whether your project is publicly accessible. True by default. reset_project_if_exists (bool): Whether to reset this project if it already exists. Default False. Generally useful during development and testing.
test_selector_valid
"""Test LengthBasedExampleSelector can select examples..""" short_question = 'Short question?' output = selector.select_examples({'question': short_question}) assert output == EXAMPLES
def test_selector_valid(selector: LengthBasedExampleSelector) ->None: """Test LengthBasedExampleSelector can select examples..""" short_question = 'Short question?' output = selector.select_examples({'question': short_question}) assert output == EXAMPLES
Test LengthBasedExampleSelector can select examples..
test_opensearch_embedding_size_zero
"""Test to validate indexing when embedding size is zero.""" with pytest.raises(RuntimeError): OpenSearchVectorSearch.from_texts([], FakeEmbeddings(), opensearch_url= DEFAULT_OPENSEARCH_URL)
def test_opensearch_embedding_size_zero() ->None: """Test to validate indexing when embedding size is zero.""" with pytest.raises(RuntimeError): OpenSearchVectorSearch.from_texts([], FakeEmbeddings(), opensearch_url=DEFAULT_OPENSEARCH_URL)
Test to validate indexing when embedding size is zero.
_default_params
"""Get the default parameters for calling Konko API.""" return {'model': self.model, 'request_timeout': self.request_timeout, 'max_tokens': self.max_tokens, 'stream': self.streaming, 'n': self.n, 'temperature': self.temperature, **self.model_kwargs}
@property def _default_params(self) ->Dict[str, Any]: """Get the default parameters for calling Konko API.""" return {'model': self.model, 'request_timeout': self.request_timeout, 'max_tokens': self.max_tokens, 'stream': self.streaming, 'n': self. n, 'temperature': self.temperature, **self.model_kwargs}
Get the default parameters for calling Konko API.
test_verbose_is_settable_via_setter
from langchain import globals from langchain.chains.base import _get_verbosity previous_value = globals._verbose previous_fn_reading = _get_verbosity() assert previous_value == previous_fn_reading set_verbose(not previous_value) new_value = globals._verbose new_fn_reading = _get_verbosity() try: assert new_value != previous_value assert new_value == new_fn_reading assert new_value == get_verbose() finally: set_verbose(previous_value)
def test_verbose_is_settable_via_setter() ->None: from langchain import globals from langchain.chains.base import _get_verbosity previous_value = globals._verbose previous_fn_reading = _get_verbosity() assert previous_value == previous_fn_reading set_verbose(not previous_value) new_value = globals._verbose new_fn_reading = _get_verbosity() try: assert new_value != previous_value assert new_value == new_fn_reading assert new_value == get_verbose() finally: set_verbose(previous_value)
null
max_marginal_relevance_search_by_vector
try: from vald.v1.payload import payload_pb2 from vald.v1.vald import object_pb2_grpc except ImportError: raise ValueError( 'Could not import vald-client-python python package. Please install it with `pip install vald-client-python`.' ) channel = self._get_channel() stub = object_pb2_grpc.ObjectStub(channel) docs_and_scores = self.similarity_search_with_score_by_vector(embedding, fetch_k=fetch_k, radius=radius, epsilon=epsilon, timeout=timeout, grpc_metadata=grpc_metadata) docs = [] embs = [] for doc, _ in docs_and_scores: vec = stub.GetObject(payload_pb2.Object.VectorRequest(id=payload_pb2. Object.ID(id=doc.page_content)), metadata=grpc_metadata) embs.append(vec.vector) docs.append(doc) mmr = maximal_marginal_relevance(np.array(embedding), embs, lambda_mult= lambda_mult, k=k) channel.close() return [docs[i] for i in mmr]
def max_marginal_relevance_search_by_vector(self, embedding: List[float], k: int=4, fetch_k: int=20, lambda_mult: float=0.5, radius: float=-1.0, epsilon: float=0.01, timeout: int=3000000000, grpc_metadata: Optional[ Any]=None, **kwargs: Any) ->List[Document]: try: from vald.v1.payload import payload_pb2 from vald.v1.vald import object_pb2_grpc except ImportError: raise ValueError( 'Could not import vald-client-python python package. Please install it with `pip install vald-client-python`.' ) channel = self._get_channel() stub = object_pb2_grpc.ObjectStub(channel) docs_and_scores = self.similarity_search_with_score_by_vector(embedding, fetch_k=fetch_k, radius=radius, epsilon=epsilon, timeout=timeout, grpc_metadata=grpc_metadata) docs = [] embs = [] for doc, _ in docs_and_scores: vec = stub.GetObject(payload_pb2.Object.VectorRequest(id= payload_pb2.Object.ID(id=doc.page_content)), metadata=grpc_metadata ) embs.append(vec.vector) docs.append(doc) mmr = maximal_marginal_relevance(np.array(embedding), embs, lambda_mult =lambda_mult, k=k) channel.close() return [docs[i] for i in mmr]
null
_parse_critique
if 'Fallacy Revision request:' not in output_string: return output_string output_string = output_string.split('Fallacy Revision request:')[0] if '\n\n' in output_string: output_string = output_string.split('\n\n')[0] return output_string
@staticmethod def _parse_critique(output_string: str) ->str: if 'Fallacy Revision request:' not in output_string: return output_string output_string = output_string.split('Fallacy Revision request:')[0] if '\n\n' in output_string: output_string = output_string.split('\n\n')[0] return output_string
null
test_pandas_output_parser_invalid_special_op
try: parser.parse_folder('riemann_sum:chicken') assert False, 'Should have raised OutputParserException' except OutputParserException: assert True
def test_pandas_output_parser_invalid_special_op() ->None: try: parser.parse_folder('riemann_sum:chicken') assert False, 'Should have raised OutputParserException' except OutputParserException: assert True
null
test_visit_operation
op = Operation(operator=Operator.AND, arguments=[Comparison(comparator= Comparator.EQ, attribute='foo', value=2), Comparison(comparator= Comparator.EQ, attribute='bar', value='baz')]) expected = {'bool': {'must': [{'term': {'metadata.foo': 2}}, {'term': { 'metadata.bar.keyword': 'baz'}}]}} actual = DEFAULT_TRANSLATOR.visit_operation(op) assert expected == actual
def test_visit_operation() ->None: op = Operation(operator=Operator.AND, arguments=[Comparison(comparator= Comparator.EQ, attribute='foo', value=2), Comparison(comparator= Comparator.EQ, attribute='bar', value='baz')]) expected = {'bool': {'must': [{'term': {'metadata.foo': 2}}, {'term': { 'metadata.bar.keyword': 'baz'}}]}} actual = DEFAULT_TRANSLATOR.visit_operation(op) assert expected == actual
null
from_texts
"""Construct ElasticsearchStore wrapper from raw documents. Example: .. code-block:: python from langchain_community.vectorstores import ElasticsearchStore from langchain_community.embeddings.openai import OpenAIEmbeddings db = ElasticsearchStore.from_texts( texts, // embeddings optional if using // a strategy that doesn't require inference embeddings, index_name="langchain-demo", es_url="http://localhost:9200" ) Args: texts: List of texts to add to the Elasticsearch index. embedding: Embedding function to use to embed the texts. metadatas: Optional list of metadatas associated with the texts. index_name: Name of the Elasticsearch index to create. es_url: URL of the Elasticsearch instance to connect to. cloud_id: Cloud ID of the Elasticsearch instance to connect to. es_user: Username to use when connecting to Elasticsearch. es_password: Password to use when connecting to Elasticsearch. es_api_key: API key to use when connecting to Elasticsearch. es_connection: Optional pre-existing Elasticsearch connection. vector_query_field: Optional. Name of the field to store the embedding vectors in. query_field: Optional. Name of the field to store the texts in. distance_strategy: Optional. Name of the distance strategy to use. Defaults to "COSINE". can be one of "COSINE", "EUCLIDEAN_DISTANCE", "DOT_PRODUCT". bulk_kwargs: Optional. Additional arguments to pass to Elasticsearch bulk. """ elasticsearchStore = ElasticsearchStore._create_cls_from_kwargs(embedding= embedding, **kwargs) elasticsearchStore.add_texts(texts, metadatas=metadatas, bulk_kwargs= bulk_kwargs) return elasticsearchStore
@classmethod def from_texts(cls, texts: List[str], embedding: Optional[Embeddings]=None, metadatas: Optional[List[Dict[str, Any]]]=None, bulk_kwargs: Optional[ Dict]=None, **kwargs: Any) ->'ElasticsearchStore': """Construct ElasticsearchStore wrapper from raw documents. Example: .. code-block:: python from langchain_community.vectorstores import ElasticsearchStore from langchain_community.embeddings.openai import OpenAIEmbeddings db = ElasticsearchStore.from_texts( texts, // embeddings optional if using // a strategy that doesn't require inference embeddings, index_name="langchain-demo", es_url="http://localhost:9200" ) Args: texts: List of texts to add to the Elasticsearch index. embedding: Embedding function to use to embed the texts. metadatas: Optional list of metadatas associated with the texts. index_name: Name of the Elasticsearch index to create. es_url: URL of the Elasticsearch instance to connect to. cloud_id: Cloud ID of the Elasticsearch instance to connect to. es_user: Username to use when connecting to Elasticsearch. es_password: Password to use when connecting to Elasticsearch. es_api_key: API key to use when connecting to Elasticsearch. es_connection: Optional pre-existing Elasticsearch connection. vector_query_field: Optional. Name of the field to store the embedding vectors in. query_field: Optional. Name of the field to store the texts in. distance_strategy: Optional. Name of the distance strategy to use. Defaults to "COSINE". can be one of "COSINE", "EUCLIDEAN_DISTANCE", "DOT_PRODUCT". bulk_kwargs: Optional. Additional arguments to pass to Elasticsearch bulk. """ elasticsearchStore = ElasticsearchStore._create_cls_from_kwargs(embedding =embedding, **kwargs) elasticsearchStore.add_texts(texts, metadatas=metadatas, bulk_kwargs= bulk_kwargs) return elasticsearchStore
Construct ElasticsearchStore wrapper from raw documents. Example: .. code-block:: python from langchain_community.vectorstores import ElasticsearchStore from langchain_community.embeddings.openai import OpenAIEmbeddings db = ElasticsearchStore.from_texts( texts, // embeddings optional if using // a strategy that doesn't require inference embeddings, index_name="langchain-demo", es_url="http://localhost:9200" ) Args: texts: List of texts to add to the Elasticsearch index. embedding: Embedding function to use to embed the texts. metadatas: Optional list of metadatas associated with the texts. index_name: Name of the Elasticsearch index to create. es_url: URL of the Elasticsearch instance to connect to. cloud_id: Cloud ID of the Elasticsearch instance to connect to. es_user: Username to use when connecting to Elasticsearch. es_password: Password to use when connecting to Elasticsearch. es_api_key: API key to use when connecting to Elasticsearch. es_connection: Optional pre-existing Elasticsearch connection. vector_query_field: Optional. Name of the field to store the embedding vectors in. query_field: Optional. Name of the field to store the texts in. distance_strategy: Optional. Name of the distance strategy to use. Defaults to "COSINE". can be one of "COSINE", "EUCLIDEAN_DISTANCE", "DOT_PRODUCT". bulk_kwargs: Optional. Additional arguments to pass to Elasticsearch bulk.
_create_session_analysis_df
"""Create a dataframe with all the information from the session.""" pd = import_pandas() on_llm_end_records_df = pd.DataFrame(self.on_llm_end_records) llm_input_prompts_df = ClearMLCallbackHandler._build_llm_df(base_df= on_llm_end_records_df, base_df_fields=['step', 'prompts'] + (['name'] if 'name' in on_llm_end_records_df else ['id']), rename_map={'step': 'prompt_step'}) complexity_metrics_columns = [] visualizations_columns: List = [] if self.complexity_metrics: complexity_metrics_columns = ['flesch_reading_ease', 'flesch_kincaid_grade', 'smog_index', 'coleman_liau_index', 'automated_readability_index', 'dale_chall_readability_score', 'difficult_words', 'linsear_write_formula', 'gunning_fog', 'text_standard', 'fernandez_huerta', 'szigriszt_pazos', 'gutierrez_polini', 'crawford', 'gulpease_index', 'osman'] llm_outputs_df = ClearMLCallbackHandler._build_llm_df(on_llm_end_records_df, ['step', 'text', 'token_usage_total_tokens', 'token_usage_prompt_tokens', 'token_usage_completion_tokens'] + complexity_metrics_columns + visualizations_columns, {'step': 'output_step', 'text': 'output'}) session_analysis_df = pd.concat([llm_input_prompts_df, llm_outputs_df], axis=1) return session_analysis_df
def _create_session_analysis_df(self) ->Any: """Create a dataframe with all the information from the session.""" pd = import_pandas() on_llm_end_records_df = pd.DataFrame(self.on_llm_end_records) llm_input_prompts_df = ClearMLCallbackHandler._build_llm_df(base_df= on_llm_end_records_df, base_df_fields=['step', 'prompts'] + ([ 'name'] if 'name' in on_llm_end_records_df else ['id']), rename_map ={'step': 'prompt_step'}) complexity_metrics_columns = [] visualizations_columns: List = [] if self.complexity_metrics: complexity_metrics_columns = ['flesch_reading_ease', 'flesch_kincaid_grade', 'smog_index', 'coleman_liau_index', 'automated_readability_index', 'dale_chall_readability_score', 'difficult_words', 'linsear_write_formula', 'gunning_fog', 'text_standard', 'fernandez_huerta', 'szigriszt_pazos', 'gutierrez_polini', 'crawford', 'gulpease_index', 'osman'] llm_outputs_df = ClearMLCallbackHandler._build_llm_df(on_llm_end_records_df , ['step', 'text', 'token_usage_total_tokens', 'token_usage_prompt_tokens', 'token_usage_completion_tokens'] + complexity_metrics_columns + visualizations_columns, {'step': 'output_step', 'text': 'output'}) session_analysis_df = pd.concat([llm_input_prompts_df, llm_outputs_df], axis=1) return session_analysis_df
Create a dataframe with all the information from the session.
is_arxiv_identifier
"""Check if a query is an arxiv identifier.""" arxiv_identifier_pattern = '\\d{2}(0[1-9]|1[0-2])\\.\\d{4,5}(v\\d+|)|\\d{7}.*' for query_item in query[:self.ARXIV_MAX_QUERY_LENGTH].split(): match_result = re.match(arxiv_identifier_pattern, query_item) if not match_result: return False assert match_result is not None if not match_result.group(0) == query_item: return False return True
def is_arxiv_identifier(self, query: str) ->bool: """Check if a query is an arxiv identifier.""" arxiv_identifier_pattern = ( '\\d{2}(0[1-9]|1[0-2])\\.\\d{4,5}(v\\d+|)|\\d{7}.*') for query_item in query[:self.ARXIV_MAX_QUERY_LENGTH].split(): match_result = re.match(arxiv_identifier_pattern, query_item) if not match_result: return False assert match_result is not None if not match_result.group(0) == query_item: return False return True
Check if a query is an arxiv identifier.
_import_yahoo_finance_news
from langchain_community.tools.yahoo_finance_news import YahooFinanceNewsTool return YahooFinanceNewsTool
def _import_yahoo_finance_news() ->Any: from langchain_community.tools.yahoo_finance_news import YahooFinanceNewsTool return YahooFinanceNewsTool
null
invoke_delete_by_id_with_no_args
vectorstore: AzureCosmosDBVectorSearch = (AzureCosmosDBVectorSearch. from_connection_string(CONNECTION_STRING, NAMESPACE, azure_openai_embeddings, index_name=INDEX_NAME)) vectorstore.delete_document_by_id()
def invoke_delete_by_id_with_no_args(self, azure_openai_embeddings: OpenAIEmbeddings, collection: Any) ->None: vectorstore: AzureCosmosDBVectorSearch = (AzureCosmosDBVectorSearch. from_connection_string(CONNECTION_STRING, NAMESPACE, azure_openai_embeddings, index_name=INDEX_NAME)) vectorstore.delete_document_by_id()
null
test_analyticdb_delete
"""Test end to end construction and search.""" texts = ['foo', 'bar', 'baz'] ids = ['fooid', 'barid', 'bazid'] metadatas = [{'page': str(i)} for i in range(len(texts))] docsearch = AnalyticDB.from_texts(texts=texts, collection_name= 'test_collection_delete', embedding=FakeEmbeddingsWithAdaDimension(), metadatas=metadatas, connection_string=CONNECTION_STRING, ids=ids, pre_delete_collection=True) output = docsearch.similarity_search_with_score('foo', k=1, filter={'page': '2'}) print(output) assert output == [(Document(page_content='baz', metadata={'page': '2'}), 4.0)] docsearch.delete(ids=ids) output = docsearch.similarity_search_with_score('foo', k=1, filter={'page': '2'}) assert output == []
def test_analyticdb_delete() ->None: """Test end to end construction and search.""" texts = ['foo', 'bar', 'baz'] ids = ['fooid', 'barid', 'bazid'] metadatas = [{'page': str(i)} for i in range(len(texts))] docsearch = AnalyticDB.from_texts(texts=texts, collection_name= 'test_collection_delete', embedding=FakeEmbeddingsWithAdaDimension( ), metadatas=metadatas, connection_string=CONNECTION_STRING, ids= ids, pre_delete_collection=True) output = docsearch.similarity_search_with_score('foo', k=1, filter={ 'page': '2'}) print(output) assert output == [(Document(page_content='baz', metadata={'page': '2'}), 4.0)] docsearch.delete(ids=ids) output = docsearch.similarity_search_with_score('foo', k=1, filter={ 'page': '2'}) assert output == []
Test end to end construction and search.
_import_render
from langchain_community.tools.convert_to_openai import format_tool_to_openai_function return format_tool_to_openai_function
def _import_render() ->Any: from langchain_community.tools.convert_to_openai import format_tool_to_openai_function return format_tool_to_openai_function
null
from_texts
"""Construct ElasticVectorSearch wrapper from raw documents. This is a user-friendly interface that: 1. Embeds documents. 2. Creates a new index for the embeddings in the Elasticsearch instance. 3. Adds the documents to the newly created Elasticsearch index. This is intended to be a quick way to get started. Example: .. code-block:: python from langchain_community.vectorstores import ElasticVectorSearch from langchain_community.embeddings import OpenAIEmbeddings embeddings = OpenAIEmbeddings() elastic_vector_search = ElasticVectorSearch.from_texts( texts, embeddings, elasticsearch_url="http://localhost:9200" ) """ elasticsearch_url = get_from_dict_or_env(kwargs, 'elasticsearch_url', 'ELASTICSEARCH_URL') if 'elasticsearch_url' in kwargs: del kwargs['elasticsearch_url'] index_name = index_name or uuid.uuid4().hex vectorsearch = cls(elasticsearch_url, index_name, embedding, **kwargs) vectorsearch.add_texts(texts, metadatas=metadatas, ids=ids, refresh_indices =refresh_indices) return vectorsearch
@classmethod
def from_texts(cls, texts: List[str], embedding: Embeddings, metadatas:
    Optional[List[dict]]=None, ids: Optional[List[str]]=None, index_name:
    Optional[str]=None, refresh_indices: bool=True, **kwargs: Any
    ) ->ElasticVectorSearch:
    """Construct ElasticVectorSearch wrapper from raw documents.

    This is a user-friendly interface that:
        1. Embeds documents.
        2. Creates a new index for the embeddings in the Elasticsearch instance.
        3. Adds the documents to the newly created Elasticsearch index.

    This is intended to be a quick way to get started.

    Example:
        .. code-block:: python

            from langchain_community.vectorstores import ElasticVectorSearch
            from langchain_community.embeddings import OpenAIEmbeddings
            embeddings = OpenAIEmbeddings()
            elastic_vector_search = ElasticVectorSearch.from_texts(
                texts,
                embeddings,
                elasticsearch_url="http://localhost:9200"
            )
    """
    url = get_from_dict_or_env(kwargs, 'elasticsearch_url',
        'ELASTICSEARCH_URL')
    # Remove the url from kwargs so it is not forwarded twice to the
    # constructor below.
    kwargs.pop('elasticsearch_url', None)
    store = cls(url, index_name or uuid.uuid4().hex, embedding, **kwargs)
    store.add_texts(texts, metadatas=metadatas, ids=ids, refresh_indices=
        refresh_indices)
    return store
Construct ElasticVectorSearch wrapper from raw documents. This is a user-friendly interface that: 1. Embeds documents. 2. Creates a new index for the embeddings in the Elasticsearch instance. 3. Adds the documents to the newly created Elasticsearch index. This is intended to be a quick way to get started. Example: .. code-block:: python from langchain_community.vectorstores import ElasticVectorSearch from langchain_community.embeddings import OpenAIEmbeddings embeddings = OpenAIEmbeddings() elastic_vector_search = ElasticVectorSearch.from_texts( texts, embeddings, elasticsearch_url="http://localhost:9200" )
delete_dataset
"""Delete the collection.""" self.delete(delete_all=True)
def delete_dataset(self) ->None:
    """Delete the collection."""
    # Wipe every record by delegating to delete() with delete_all=True.
    self.delete(delete_all=True)
Delete the collection.
embeddings
return self.embedding_function if isinstance(self.embedding_function, Embeddings) else None
@property
def embeddings(self) ->Optional[Embeddings]:
    """The embedding object, or None when a bare callable is configured."""
    if isinstance(self.embedding_function, Embeddings):
        return self.embedding_function
    return None
null
check_only_one_provider_selected
""" This tool has no feature to combine providers results. Therefore we only allow one provider """ if len(v) > 1: raise ValueError( 'Please select only one provider. The feature to combine providers results is not available for this tool.' ) return v
@validator('providers')
def check_only_one_provider_selected(cls, v: List[str]) ->List[str]:
    """Reject a providers list with more than one entry.

    The tool cannot merge results across providers, so at most one may
    be selected.
    """
    if len(v) <= 1:
        return v
    raise ValueError(
        'Please select only one provider. The feature to combine providers results is not available for this tool.'
        )
This tool has no feature to combine providers results. Therefore we only allow one provider
process_thread
thread = self.quip_client.get_thread(thread_id) thread_id = thread['thread']['id'] title = thread['thread']['title'] link = thread['thread']['link'] update_ts = thread['thread']['updated_usec'] sanitized_title = QuipLoader._sanitize_title(title) logger.info( f'processing thread {thread_id} title {sanitized_title} link {link} update_ts {update_ts}' ) if 'html' in thread: try: tree = self.quip_client.parse_document_html(thread['html']) except xml.etree.cElementTree.ParseError as e: logger.error(f'Error parsing thread {title} {thread_id}, skipping, {e}' ) return None metadata = {'title': sanitized_title, 'update_ts': update_ts, 'id': thread_id, 'source': link} text = '' if include_images: text = self.process_thread_images(tree) if include_messages: text = text + '/n' + self.process_thread_messages(thread_id) return Document(page_content=thread['html'] + text, metadata=metadata) return None
def process_thread(self, thread_id: str, include_images: bool,
    include_messages: bool) ->Optional[Document]:
    """Fetch a Quip thread and convert it into a Document.

    Args:
        thread_id: Id of the thread to fetch.
        include_images: Append extracted image text to the content.
        include_messages: Append the thread's messages to the content.

    Returns:
        A Document built from the thread's HTML (plus optional extras),
        or None when the thread has no HTML or its HTML fails to parse.
    """
    thread = self.quip_client.get_thread(thread_id)
    thread_id = thread['thread']['id']
    title = thread['thread']['title']
    link = thread['thread']['link']
    update_ts = thread['thread']['updated_usec']
    sanitized_title = QuipLoader._sanitize_title(title)
    logger.info(
        f'processing thread {thread_id} title {sanitized_title} link {link} update_ts {update_ts}'
        )
    if 'html' not in thread:
        return None
    try:
        tree = self.quip_client.parse_document_html(thread['html'])
    except xml.etree.cElementTree.ParseError as e:
        logger.error(f'Error parsing thread {title} {thread_id}, skipping, {e}'
            )
        return None
    metadata = {'title': sanitized_title, 'update_ts': update_ts, 'id':
        thread_id, 'source': link}
    text = ''
    if include_images:
        text = self.process_thread_images(tree)
    if include_messages:
        # BUG FIX: separator was the two-character literal '/n'; a newline
        # was clearly intended.
        text = text + '\n' + self.process_thread_messages(thread_id)
    return Document(page_content=thread['html'] + text, metadata=metadata)
null
validate_unstructured_version
"""Raise an error if the `Unstructured` version does not exceed the specified minimum.""" if not satisfies_min_unstructured_version(min_unstructured_version): raise ValueError( f'unstructured>={min_unstructured_version} is required in this loader.' )
def validate_unstructured_version(min_unstructured_version: str) ->None:
    """Raise a ValueError unless `unstructured` meets the minimum version."""
    if satisfies_min_unstructured_version(min_unstructured_version):
        return
    raise ValueError(
        f'unstructured>={min_unstructured_version} is required in this loader.'
        )
Raise an error if the `Unstructured` version does not exceed the specified minimum.
test_from_texts
"""Test end to end construction and search.""" unique_id = uuid.uuid4().hex needs = f'foobuu {unique_id} booo' texts.insert(0, needs) docsearch = Pinecone.from_texts(texts=texts, embedding=embedding_openai, index_name=index_name, namespace=namespace_name) output = docsearch.similarity_search(unique_id, k=1, namespace=namespace_name) assert output == [Document(page_content=needs)]
@pytest.mark.vcr()
def test_from_texts(self, texts: List[str], embedding_openai: OpenAIEmbeddings
    ) ->None:
    """End to end: index texts containing a unique marker and retrieve it."""
    marker = uuid.uuid4().hex
    needle = f'foobuu {marker} booo'
    texts.insert(0, needle)
    store = Pinecone.from_texts(texts=texts, embedding=embedding_openai,
        index_name=index_name, namespace=namespace_name)
    hits = store.similarity_search(marker, k=1, namespace=namespace_name)
    assert hits == [Document(page_content=needle)]
Test end to end construction and search.
_tencent_vector_db_from_texts
conn_params = ConnectionParams(url='http://10.0.X.X', key= 'eC4bLRy2va******************************', username='root', timeout=20) return TencentVectorDB.from_texts(fake_texts, FakeEmbeddings(), metadatas= metadatas, connection_params=conn_params, drop_old=drop)
def _tencent_vector_db_from_texts(metadatas: Optional[List[dict]]=None,
    drop: bool=True) ->TencentVectorDB:
    """Build a TencentVectorDB test fixture from the shared fake texts."""
    params = ConnectionParams(
        url='http://10.0.X.X',
        key='eC4bLRy2va******************************',
        username='root',
        timeout=20,
    )
    return TencentVectorDB.from_texts(fake_texts, FakeEmbeddings(),
        metadatas=metadatas, connection_params=params, drop_old=drop)
null
_llm_type
"""Return type of llm.""" return 'ollama-llm'
@property
def _llm_type(self) ->str:
    """Return type of llm."""
    # Stable identifier used for serialization and telemetry.
    llm_type = 'ollama-llm'
    return llm_type
Return type of llm.
from_function
"""Create tool from a given function. A classmethod that helps to create a tool from a function. Args: func: The function from which to create a tool coroutine: The async function from which to create a tool name: The name of the tool. Defaults to the function name description: The description of the tool. Defaults to the function docstring return_direct: Whether to return the result directly or as a callback args_schema: The schema of the tool's input arguments infer_schema: Whether to infer the schema from the function's signature **kwargs: Additional arguments to pass to the tool Returns: The tool Examples: .. code-block:: python def add(a: int, b: int) -> int: ""\"Add two numbers""\" return a + b tool = StructuredTool.from_function(add) tool.run(1, 2) # 3 """ if func is not None: source_function = func elif coroutine is not None: source_function = coroutine else: raise ValueError('Function and/or coroutine must be provided') name = name or source_function.__name__ description = description or source_function.__doc__ if description is None: raise ValueError( 'Function must have a docstring if description not provided.') sig = signature(source_function) description = f'{name}{sig} - {description.strip()}' _args_schema = args_schema if _args_schema is None and infer_schema: _args_schema = create_schema_from_function(f'{name}Schema', source_function ) return cls(name=name, func=func, coroutine=coroutine, args_schema= _args_schema, description=description, return_direct=return_direct, ** kwargs)
@classmethod
def from_function(cls, func: Optional[Callable]=None, coroutine: Optional[
    Callable[..., Awaitable[Any]]]=None, name: Optional[str]=None,
    description: Optional[str]=None, return_direct: bool=False,
    args_schema: Optional[Type[BaseModel]]=None, infer_schema: bool=True,
    **kwargs: Any) ->StructuredTool:
    """Create tool from a given function.

    A classmethod that helps to create a tool from a function.

    Args:
        func: The function from which to create a tool
        coroutine: The async function from which to create a tool
        name: The name of the tool. Defaults to the function name
        description: The description of the tool.
            Defaults to the function docstring
        return_direct: Whether to return the result directly or as a callback
        args_schema: The schema of the tool's input arguments
        infer_schema: Whether to infer the schema from the function's signature
        **kwargs: Additional arguments to pass to the tool

    Returns:
        The tool
    """
    # Prefer the sync callable; fall back to the coroutine.
    source_function = func if func is not None else coroutine
    if source_function is None:
        raise ValueError('Function and/or coroutine must be provided')
    name = name or source_function.__name__
    description = description or source_function.__doc__
    if description is None:
        raise ValueError(
            'Function must have a docstring if description not provided.')
    # Embed the callable's signature into the description shown to the LLM.
    sig = signature(source_function)
    description = f'{name}{sig} - {description.strip()}'
    schema = args_schema
    if schema is None and infer_schema:
        schema = create_schema_from_function(f'{name}Schema', source_function)
    return cls(name=name, func=func, coroutine=coroutine, args_schema=
        schema, description=description, return_direct=return_direct, **kwargs
        )
Create tool from a given function. A classmethod that helps to create a tool from a function. Args: func: The function from which to create a tool coroutine: The async function from which to create a tool name: The name of the tool. Defaults to the function name description: The description of the tool. Defaults to the function docstring return_direct: Whether to return the result directly or as a callback args_schema: The schema of the tool's input arguments infer_schema: Whether to infer the schema from the function's signature **kwargs: Additional arguments to pass to the tool Returns: The tool Examples: .. code-block:: python def add(a: int, b: int) -> int: """Add two numbers""" return a + b tool = StructuredTool.from_function(add) tool.run(1, 2) # 3
_import_sql_database_tool_BaseSQLDatabaseTool
from langchain_community.tools.sql_database.tool import BaseSQLDatabaseTool return BaseSQLDatabaseTool
def _import_sql_database_tool_BaseSQLDatabaseTool() ->Any:
    """Lazily import and return the BaseSQLDatabaseTool class."""
    from langchain_community.tools.sql_database.tool import (
        BaseSQLDatabaseTool,
    )

    return BaseSQLDatabaseTool
null
test_runnable_branch_invoke_call_counts
"""Verify that runnables are invoked only when necessary.""" add = RunnableLambda(lambda x: x + 1) sub = RunnableLambda(lambda x: x - 1) condition = RunnableLambda(lambda x: x > 0) spy = mocker.spy(condition, 'invoke') add_spy = mocker.spy(add, 'invoke') branch = RunnableBranch[int, int]((condition, add), (condition, add), sub) assert spy.call_count == 0 assert add_spy.call_count == 0 assert branch.invoke(1) == 2 assert add_spy.call_count == 1 assert spy.call_count == 1 assert branch.invoke(2) == 3 assert spy.call_count == 2 assert add_spy.call_count == 2 assert branch.invoke(-3) == -4 assert spy.call_count == 4 assert add_spy.call_count == 2
def test_runnable_branch_invoke_call_counts(mocker: MockerFixture) ->None:
    """Verify that runnables are invoked only when necessary."""
    increment = RunnableLambda(lambda x: x + 1)
    decrement = RunnableLambda(lambda x: x - 1)
    is_positive = RunnableLambda(lambda x: x > 0)
    cond_spy = mocker.spy(is_positive, 'invoke')
    inc_spy = mocker.spy(increment, 'invoke')
    branch = RunnableBranch[int, int]((is_positive, increment), (
        is_positive, increment), decrement)
    # Nothing should run at construction time.
    assert cond_spy.call_count == 0
    assert inc_spy.call_count == 0
    # First branch matches on the first check: one condition, one body call.
    assert branch.invoke(1) == 2
    assert inc_spy.call_count == 1
    assert cond_spy.call_count == 1
    assert branch.invoke(2) == 3
    assert cond_spy.call_count == 2
    assert inc_spy.call_count == 2
    # Negative input fails both conditions and falls through to the default.
    assert branch.invoke(-3) == -4
    assert cond_spy.call_count == 4
    assert inc_spy.call_count == 2
Verify that runnables are invoked only when necessary.
similarity_search_by_vector
"""Return docs most similar to embedding vector. Args: embedding (List[float]): Embedding to look up documents similar to. k (int): Number of Documents to return. Defaults to 4. filter (Optional[Dict[str, str]]): Filter by metadata. Defaults to None. Returns: List of Documents most similar to the query vector. """ results = self.__query_collection(query_embeddings=embedding, n_results=k, where=filter, where_document=where_document, **kwargs) return _results_to_docs(results)
def similarity_search_by_vector(self, embedding: List[float], k: int=
    DEFAULT_K, filter: Optional[Dict[str, str]]=None, where_document:
    Optional[Dict[str, str]]=None, **kwargs: Any) ->List[Document]:
    """Return docs most similar to a precomputed embedding vector.

    Args:
        embedding (List[float]): Embedding to look up documents similar to.
        k (int): Number of Documents to return. Defaults to 4.
        filter (Optional[Dict[str, str]]): Filter by metadata.
            Defaults to None.
        where_document: Optional filter on document content.

    Returns:
        List of Documents most similar to the query vector.
    """
    raw_results = self.__query_collection(query_embeddings=embedding,
        n_results=k, where=filter, where_document=where_document, **kwargs)
    return _results_to_docs(raw_results)
Return docs most similar to embedding vector. Args: embedding (List[float]): Embedding to look up documents similar to. k (int): Number of Documents to return. Defaults to 4. filter (Optional[Dict[str, str]]): Filter by metadata. Defaults to None. Returns: List of Documents most similar to the query vector.
_embedding_func
"""Call out to Bedrock embedding endpoint.""" text = text.replace(os.linesep, ' ') provider = self.model_id.split('.')[0] _model_kwargs = self.model_kwargs or {} input_body = {**_model_kwargs} if provider == 'cohere': if 'input_type' not in input_body.keys(): input_body['input_type'] = 'search_document' input_body['texts'] = [text] else: input_body['inputText'] = text body = json.dumps(input_body) try: response = self.client.invoke_model(body=body, modelId=self.model_id, accept='application/json', contentType='application/json') response_body = json.loads(response.get('body').read()) if provider == 'cohere': return response_body.get('embeddings')[0] else: return response_body.get('embedding') except Exception as e: raise ValueError(f'Error raised by inference endpoint: {e}')
def _embedding_func(self, text: str) ->List[float]:
    """Call out to the Bedrock embedding endpoint for a single text.

    Args:
        text: Text to embed; line separators are replaced with spaces.

    Returns:
        The embedding vector returned by the model.

    Raises:
        ValueError: If the inference endpoint raises any error.
    """
    text = text.replace(os.linesep, ' ')
    # The provider prefix of the model id (e.g. 'cohere', 'amazon') selects
    # the request/response schema.
    provider = self.model_id.split('.')[0]
    _model_kwargs = self.model_kwargs or {}
    input_body = {**_model_kwargs}
    if provider == 'cohere':
        # Idiom fix: membership test directly on the dict, not on .keys().
        if 'input_type' not in input_body:
            input_body['input_type'] = 'search_document'
        input_body['texts'] = [text]
    else:
        input_body['inputText'] = text
    body = json.dumps(input_body)
    try:
        response = self.client.invoke_model(body=body, modelId=self.
            model_id, accept='application/json', contentType=
            'application/json')
        response_body = json.loads(response.get('body').read())
        if provider == 'cohere':
            return response_body.get('embeddings')[0]
        else:
            return response_body.get('embedding')
    except Exception as e:
        raise ValueError(f'Error raised by inference endpoint: {e}')
Call out to Bedrock embedding endpoint.
_stream
message_dicts, params = self._create_message_dicts(messages, stop) params = {**params, **kwargs, 'stream': True} default_chunk_class = AIMessageChunk for chunk in self.completion_with_retry(messages=message_dicts, **params): delta = chunk['choices'][0]['delta'] chunk = _convert_delta_to_message_chunk(delta, default_chunk_class) default_chunk_class = chunk.__class__ yield ChatGenerationChunk(message=chunk) if run_manager: run_manager.on_llm_new_token(chunk.content)
def _stream(self, messages: List[BaseMessage], stop: Optional[List[str]]=
    None, run_manager: Optional[CallbackManagerForLLMRun]=None, **kwargs: Any
    ) ->Iterator[ChatGenerationChunk]:
    """Stream the chat completion, yielding one generation chunk per delta."""
    message_dicts, params = self._create_message_dicts(messages, stop)
    params = {**params, **kwargs, 'stream': True}
    default_chunk_class = AIMessageChunk
    for raw_chunk in self.completion_with_retry(messages=message_dicts, **
        params):
        delta = raw_chunk['choices'][0]['delta']
        message_chunk = _convert_delta_to_message_chunk(delta,
            default_chunk_class)
        # Remember the concrete chunk class so later deltas keep the type.
        default_chunk_class = message_chunk.__class__
        yield ChatGenerationChunk(message=message_chunk)
        if run_manager:
            run_manager.on_llm_new_token(message_chunk.content)
null
from_params
"""Construct a map-reduce chain that uses the chain for map and reduce.""" llm_chain = LLMChain(llm=llm, prompt=prompt, callbacks=callbacks) stuff_chain = StuffDocumentsChain(llm_chain=llm_chain, callbacks=callbacks, **reduce_chain_kwargs if reduce_chain_kwargs else {}) reduce_documents_chain = ReduceDocumentsChain(combine_documents_chain= stuff_chain) combine_documents_chain = MapReduceDocumentsChain(llm_chain=llm_chain, reduce_documents_chain=reduce_documents_chain, callbacks=callbacks, ** combine_chain_kwargs if combine_chain_kwargs else {}) return cls(combine_documents_chain=combine_documents_chain, text_splitter= text_splitter, callbacks=callbacks, **kwargs)
@classmethod
def from_params(cls, llm: BaseLanguageModel, prompt: BasePromptTemplate,
    text_splitter: TextSplitter, callbacks: Callbacks=None,
    combine_chain_kwargs: Optional[Mapping[str, Any]]=None,
    reduce_chain_kwargs: Optional[Mapping[str, Any]]=None, **kwargs: Any
    ) ->MapReduceChain:
    """Construct a map-reduce chain that uses the chain for map and reduce."""
    llm_chain = LLMChain(llm=llm, prompt=prompt, callbacks=callbacks)
    reduce_kwargs = dict(reduce_chain_kwargs or {})
    combine_kwargs = dict(combine_chain_kwargs or {})
    # The stuff chain collapses mapped documents back into one text.
    stuff_chain = StuffDocumentsChain(llm_chain=llm_chain, callbacks=
        callbacks, **reduce_kwargs)
    reduce_documents_chain = ReduceDocumentsChain(combine_documents_chain=
        stuff_chain)
    map_reduce = MapReduceDocumentsChain(llm_chain=llm_chain,
        reduce_documents_chain=reduce_documents_chain, callbacks=callbacks,
        **combine_kwargs)
    return cls(combine_documents_chain=map_reduce, text_splitter=
        text_splitter, callbacks=callbacks, **kwargs)
Construct a map-reduce chain that uses the chain for map and reduce.
inference_fn
"""Inference function for testing.""" if isinstance(prompt, list): return [emb[0][-1] for emb in pipeline(prompt)] return pipeline(prompt)[0][-1]
def inference_fn(pipeline: Any, prompt: Union[str, List[str]]) ->Any:
    """Inference function for testing.

    Returns the last element of the first output row for a single prompt,
    or a list of such elements when given a batch of prompts. The prompt
    annotation was tightened from ``str`` to ``Union[str, List[str]]`` to
    match the list branch below.
    """
    if isinstance(prompt, list):
        return [emb[0][-1] for emb in pipeline(prompt)]
    return pipeline(prompt)[0][-1]
Inference function for testing.
load
"""Load given path as single page.""" import docx2txt return [Document(page_content=docx2txt.process(self.file_path), metadata={ 'source': self.file_path})]
def load(self) ->List[Document]:
    """Load given path as single page."""
    import docx2txt
    text = docx2txt.process(self.file_path)
    metadata = {'source': self.file_path}
    return [Document(page_content=text, metadata=metadata)]
Load given path as single page.
llm_prefix
"""Prefix to append the llm call with.""" return 'Thought:'
@property
def llm_prefix(self) ->str:
    """Prefix to append the llm call with."""
    prefix = 'Thought:'
    return prefix
Prefix to append the llm call with.
visit_comparison
return {comparison.attribute: {self._format_func(comparison.comparator): comparison.value}}
def visit_comparison(self, comparison: Comparison) ->Dict:
    """Translate a Comparison node into a metadata filter dict."""
    operator = self._format_func(comparison.comparator)
    return {comparison.attribute: {operator: comparison.value}}
null
get_num_tokens
if self._model_is_anthropic: return get_num_tokens_anthropic(text) else: return super().get_num_tokens(text)
def get_num_tokens(self, text: str) ->int:
    """Count tokens, using Anthropic's tokenizer for Anthropic models."""
    if self._model_is_anthropic:
        return get_num_tokens_anthropic(text)
    # Non-Anthropic models use the base class's default counter.
    return super().get_num_tokens(text)
null
is_local
return self._config['LOCAL']
@property
def is_local(self) ->str:
    """Return the LOCAL flag from the backing config.

    NOTE(review): annotated ``-> str`` but the name suggests a boolean —
    confirm the config schema before relying on the return type.
    """
    config = self._config
    return config['LOCAL']
null
_format_image_analysis_result
formatted_result = [] if 'caption' in image_analysis_result: formatted_result.append('Caption: ' + image_analysis_result['caption']) if 'objects' in image_analysis_result and len(image_analysis_result['objects'] ) > 0: formatted_result.append('Objects: ' + ', '.join(image_analysis_result[ 'objects'])) if 'tags' in image_analysis_result and len(image_analysis_result['tags']) > 0: formatted_result.append('Tags: ' + ', '.join(image_analysis_result['tags']) ) if 'text' in image_analysis_result and len(image_analysis_result['text']) > 0: formatted_result.append('Text: ' + ', '.join(image_analysis_result['text']) ) return '\n'.join(formatted_result)
def _format_image_analysis_result(self, image_analysis_result: Dict) ->str:
    """Render an image-analysis dict as labeled, newline-separated lines.

    Emits 'Caption', 'Objects', 'Tags', and 'Text' sections; list-valued
    sections are comma-joined and skipped when empty. The four near-identical
    if-blocks were collapsed into one table-driven loop.
    """
    sections = [('Caption', 'caption'), ('Objects', 'objects'), ('Tags',
        'tags'), ('Text', 'text')]
    formatted_result = []
    for label, key in sections:
        if key not in image_analysis_result:
            continue
        value = image_analysis_result[key]
        if isinstance(value, str):
            # 'caption' is a plain string and is emitted even when empty,
            # matching the original behavior.
            formatted_result.append(label + ': ' + value)
        elif len(value) > 0:
            formatted_result.append(label + ': ' + ', '.join(value))
    return '\n'.join(formatted_result)
null
_run
"""Use the Zapier NLA tool to return a list of all exposed user actions.""" warn_deprecated(since='0.0.319', message= 'This tool will be deprecated on 2023-11-17. See https://nla.zapier.com/sunset/ for details' ) return self.api_wrapper.run_as_str(self.action_id, instructions, self.params)
def _run(self, instructions: str, run_manager: Optional[
    CallbackManagerForToolRun]=None) ->str:
    """Use the Zapier NLA tool to return a list of all exposed user actions."""
    # Emit the sunset warning on every invocation before delegating.
    warn_deprecated(since='0.0.319', message=
        'This tool will be deprecated on 2023-11-17. See https://nla.zapier.com/sunset/ for details'
        )
    return self.api_wrapper.run_as_str(self.action_id, instructions, self.
        params)
Use the Zapier NLA tool to return a list of all exposed user actions.
_import_searx_search
from langchain_community.utilities.searx_search import SearxSearchWrapper return SearxSearchWrapper
def _import_searx_search() ->Any:
    """Lazily import and return the SearxSearchWrapper class."""
    from langchain_community.utilities.searx_search import (
        SearxSearchWrapper,
    )

    return SearxSearchWrapper
null
test_unstructured_api_file_loader
"""Test unstructured loader.""" file_path = os.path.join(EXAMPLE_DOCS_DIRECTORY, 'layout-parser-paper.pdf') loader = UnstructuredAPIFileLoader(file_path=file_path, api_key= 'FAKE_API_KEY', strategy='fast', mode='elements') docs = loader.load() assert len(docs) > 1
def test_unstructured_api_file_loader() ->None:
    """Test unstructured loader."""
    pdf_path = os.path.join(EXAMPLE_DOCS_DIRECTORY, 'layout-parser-paper.pdf')
    loader = UnstructuredAPIFileLoader(
        file_path=pdf_path,
        api_key='FAKE_API_KEY',
        strategy='fast',
        mode='elements',
    )
    # 'elements' mode should split the PDF into multiple documents.
    assert len(loader.load()) > 1
Test unstructured loader.
_default_script_query
if filter: (key, value), = filter.items() filter = {'match': {f'metadata.{key}.keyword': f'{value}'}} else: filter = {'match_all': {}} return {'script_score': {'query': filter, 'script': {'source': "cosineSimilarity(params.query_vector, 'vector') + 1.0", 'params': { 'query_vector': query_vector}}}}
def _default_script_query(query_vector: List[float], filter: Optional[dict]
    ) ->Dict:
    """Build the default Elasticsearch script-score query.

    A single {key: value} metadata filter becomes a keyword match clause;
    no filter means match_all. Cosine similarity is shifted by +1.0 so
    scores stay non-negative.
    """
    if filter:
        (key, value), = filter.items()
        match_clause: Dict = {'match': {f'metadata.{key}.keyword': f'{value}'}}
    else:
        match_clause = {'match_all': {}}
    script = {'source':
        "cosineSimilarity(params.query_vector, 'vector') + 1.0", 'params':
        {'query_vector': query_vector}}
    return {'script_score': {'query': match_clause, 'script': script}}
null
test_dashvector_from_texts
dashvector = DashVector.from_texts(texts=texts, embedding=FakeEmbeddings(), ids=ids) sleep(0.5) output = dashvector.similarity_search('foo', k=1) assert output == [Document(page_content='foo')]
def test_dashvector_from_texts() ->None:
    """Index the shared texts and check top-1 retrieval."""
    store = DashVector.from_texts(texts=texts, embedding=FakeEmbeddings(),
        ids=ids)
    # Give the service a moment to make the writes searchable.
    sleep(0.5)
    assert store.similarity_search('foo', k=1) == [Document(page_content='foo')
        ]
null
_llm_type
"""Return type of chat model.""" return 'cohere-chat'
@property
def _llm_type(self) ->str:
    """Return type of chat model."""
    # Stable identifier used for serialization and telemetry.
    model_type = 'cohere-chat'
    return model_type
Return type of chat model.
similarity_search_with_score
"""Return docs most similar to query. Args: query: Text to look up documents similar to. k: Number of Documents to return. Defaults to 4. filter (Optional[Dict[str, str]]): Filter by metadata. Defaults to None. fetch_k: (Optional[int]) Number of Documents to fetch before filtering. Defaults to 20. Returns: List of documents most similar to the query text with Distance as float. Lower score represents more similarity. """ embedding = self.embedding_function(query) docs = self.similarity_search_with_score_by_vector(embedding, k=k, filter= filter, fetch_k=fetch_k, **kwargs) return docs
def similarity_search_with_score(self, query: str, *, k: int=4, filter:
    Optional[Dict[str, Any]]=None, fetch_k: int=20, **kwargs: Any) ->List[
    Tuple[Document, float]]:
    """Return docs most similar to *query*, paired with distance scores.

    Args:
        query: Text to look up documents similar to.
        k: Number of Documents to return. Defaults to 4.
        filter (Optional[Dict[str, str]]): Filter by metadata.
            Defaults to None.
        fetch_k: (Optional[int]) Number of Documents to fetch before
            filtering. Defaults to 20.

    Returns:
        Documents most similar to the query text, each with a distance
        float; a lower score means more similar.
    """
    query_vector = self.embedding_function(query)
    return self.similarity_search_with_score_by_vector(query_vector, k=k,
        filter=filter, fetch_k=fetch_k, **kwargs)
Return docs most similar to query. Args: query: Text to look up documents similar to. k: Number of Documents to return. Defaults to 4. filter (Optional[Dict[str, str]]): Filter by metadata. Defaults to None. fetch_k: (Optional[int]) Number of Documents to fetch before filtering. Defaults to 20. Returns: List of documents most similar to the query text with Distance as float. Lower score represents more similarity.
_get_python_function_arguments
"""Get JsonSchema describing a Python functions arguments. Assumes all function arguments are of primitive types (int, float, str, bool) or are subclasses of pydantic.BaseModel. """ properties = {} annotations = inspect.getfullargspec(function).annotations for arg, arg_type in annotations.items(): if arg == 'return': continue if isinstance(arg_type, type) and issubclass(arg_type, BaseModel): properties[arg] = arg_type.schema() elif arg_type.__name__ in PYTHON_TO_JSON_TYPES: properties[arg] = {'type': PYTHON_TO_JSON_TYPES[arg_type.__name__]} if arg in arg_descriptions: if arg not in properties: properties[arg] = {} properties[arg]['description'] = arg_descriptions[arg] return properties
def _get_python_function_arguments(function: Callable, arg_descriptions: dict
    ) ->dict:
    """Get JsonSchema describing a Python function's arguments.

    Assumes all function arguments are of primitive types (int, float, str,
    bool) or are subclasses of pydantic.BaseModel.
    """
    properties = {}
    annotations = inspect.getfullargspec(function).annotations
    for arg, arg_type in annotations.items():
        if arg == 'return':
            continue
        if isinstance(arg_type, type) and issubclass(arg_type, BaseModel):
            properties[arg] = arg_type.schema()
        elif getattr(arg_type, '__name__', None) in PYTHON_TO_JSON_TYPES:
            # getattr guards against typing generics (e.g. List[int]) that
            # have no __name__ and would otherwise raise AttributeError.
            properties[arg] = {'type': PYTHON_TO_JSON_TYPES[arg_type.__name__]}
        if arg in arg_descriptions:
            if arg not in properties:
                properties[arg] = {}
            properties[arg]['description'] = arg_descriptions[arg]
    return properties
Get JsonSchema describing a Python functions arguments. Assumes all function arguments are of primitive types (int, float, str, bool) or are subclasses of pydantic.BaseModel.
visit_structured_query
if structured_query.filter is None: kwargs = {} else: kwargs = {'filter': [structured_query.filter.accept(self)]} return structured_query.query, kwargs
def visit_structured_query(self, structured_query: StructuredQuery) ->Tuple[
    str, dict]:
    """Return the query string plus translated filter kwargs (if any)."""
    kwargs: dict = {}
    if structured_query.filter is not None:
        kwargs = {'filter': [structured_query.filter.accept(self)]}
    return structured_query.query, kwargs
null
__init__
self.llm_chain = llm_chain self.stop = stop
def __init__(self, llm_chain: LLMChain, stop: Optional[List]=None):
    """Store the wrapped chain and optional stop sequences."""
    self.stop = stop
    self.llm_chain = llm_chain
null
format_memories_detail
content = [] for mem in relevant_memories: content.append(self._format_memory_detail(mem, prefix='- ')) return '\n'.join([f'{mem}' for mem in content])
def format_memories_detail(self, relevant_memories: List[Document]) ->str:
    """Format memories as newline-separated, '- '-prefixed detail lines."""
    # The previous implementation re-wrapped each already-formatted string
    # in f'{mem}' (an identity operation on str); join the details directly.
    return '\n'.join(self._format_memory_detail(mem, prefix='- ') for mem in
        relevant_memories)
null
test_wrong_temperature_2
chat = ErnieBotChat() message = HumanMessage(content='Hello') with pytest.raises(ValueError) as e: chat([message], temperature=0) assert 'parameter check failed, temperature range is (0, 1.0]' in str(e)
def test_wrong_temperature_2() ->None:
    """temperature=0 is outside (0, 1.0] and must be rejected."""
    chat = ErnieBotChat()
    with pytest.raises(ValueError) as err:
        chat([HumanMessage(content='Hello')], temperature=0)
    assert 'parameter check failed, temperature range is (0, 1.0]' in str(err)
null
test__convert_dict_to_message_human
message_dict = {'role': 'user', 'content': 'foo'} result = _convert_dict_to_message(message_dict) expected_output = HumanMessage(content='foo') assert result == expected_output
def test__convert_dict_to_message_human() ->None:
    """A 'user'-role dict converts to a HumanMessage."""
    converted = _convert_dict_to_message({'role': 'user', 'content': 'foo'})
    assert converted == HumanMessage(content='foo')
null
test_combining_sequences
prompt = SystemMessagePromptTemplate.from_template('You are a nice assistant.' ) + '{question}' chat = FakeListChatModel(responses=['foo, bar']) parser = CommaSeparatedListOutputParser() chain = prompt | chat | parser assert isinstance(chain, RunnableSequence) assert chain.first == prompt assert chain.middle == [chat] assert chain.last == parser if sys.version_info >= (3, 9): assert dumps(chain, pretty=True) == snapshot prompt2 = SystemMessagePromptTemplate.from_template( 'You are a nicer assistant.') + '{question}' chat2 = FakeListChatModel(responses=['baz, qux']) parser2 = CommaSeparatedListOutputParser() input_formatter: RunnableLambda[List[str], Dict[str, Any]] = RunnableLambda( lambda x: {'question': x[0] + x[1]}) chain2 = cast(RunnableSequence, input_formatter | prompt2 | chat2 | parser2) assert isinstance(chain, RunnableSequence) assert chain2.first == input_formatter assert chain2.middle == [prompt2, chat2] assert chain2.last == parser2 if sys.version_info >= (3, 9): assert dumps(chain2, pretty=True) == snapshot combined_chain = cast(RunnableSequence, chain | chain2) assert combined_chain.first == prompt assert combined_chain.middle == [chat, parser, input_formatter, prompt2, chat2] assert combined_chain.last == parser2 if sys.version_info >= (3, 9): assert dumps(combined_chain, pretty=True) == snapshot tracer = FakeTracer() assert combined_chain.invoke({'question': 'What is your name?'}, dict( callbacks=[tracer])) == ['baz', 'qux'] if sys.version_info >= (3, 9): assert tracer.runs == snapshot
@freeze_time('2023-01-01')
def test_combining_sequences(mocker: MockerFixture, snapshot: SnapshotAssertion
    ) ->None:
    """Composing two RunnableSequences with ``|`` must flatten into one
    sequence (first/middle/last re-partitioned) and still run end-to-end."""
    # First sequence: prompt -> fake chat model -> comma-list parser.
    prompt = SystemMessagePromptTemplate.from_template(
        'You are a nice assistant.') + '{question}'
    chat = FakeListChatModel(responses=['foo, bar'])
    parser = CommaSeparatedListOutputParser()
    chain = prompt | chat | parser
    assert isinstance(chain, RunnableSequence)
    assert chain.first == prompt
    assert chain.middle == [chat]
    assert chain.last == parser
    # Serialized snapshots are only stable on Python >= 3.9.
    if sys.version_info >= (3, 9):
        assert dumps(chain, pretty=True) == snapshot
    # Second sequence, fronted by a lambda that joins the first chain's
    # two-element list output back into a single question string.
    prompt2 = SystemMessagePromptTemplate.from_template(
        'You are a nicer assistant.') + '{question}'
    chat2 = FakeListChatModel(responses=['baz, qux'])
    parser2 = CommaSeparatedListOutputParser()
    input_formatter: RunnableLambda[List[str], Dict[str, Any]
        ] = RunnableLambda(lambda x: {'question': x[0] + x[1]})
    chain2 = cast(RunnableSequence, input_formatter | prompt2 | chat2 | parser2
        )
    assert isinstance(chain, RunnableSequence)
    assert chain2.first == input_formatter
    assert chain2.middle == [prompt2, chat2]
    assert chain2.last == parser2
    if sys.version_info >= (3, 9):
        assert dumps(chain2, pretty=True) == snapshot
    # Combining must flatten: all five inner steps become the middle of one
    # sequence rather than nesting sequence-inside-sequence.
    combined_chain = cast(RunnableSequence, chain | chain2)
    assert combined_chain.first == prompt
    assert combined_chain.middle == [chat, parser, input_formatter, prompt2,
        chat2]
    assert combined_chain.last == parser2
    if sys.version_info >= (3, 9):
        assert dumps(combined_chain, pretty=True) == snapshot
    # End-to-end invoke with tracing: second fake model answers 'baz, qux',
    # which the final parser splits into ['baz', 'qux'].
    tracer = FakeTracer()
    assert combined_chain.invoke({'question': 'What is your name?'}, dict(
        callbacks=[tracer])) == ['baz', 'qux']
    if sys.version_info >= (3, 9):
        assert tracer.runs == snapshot
null
test_add_texts_handle_single_text
index = mock_index(DIRECT_ACCESS_INDEX) vectorsearch = DatabricksVectorSearch(index, embedding= DEFAULT_EMBEDDING_MODEL, text_column=DEFAULT_TEXT_COLUMN) vectors = DEFAULT_EMBEDDING_MODEL.embed_documents(fake_texts) added_ids = vectorsearch.add_texts(fake_texts[0]) index.upsert.assert_called_once_with([{DEFAULT_PRIMARY_KEY: id_, DEFAULT_TEXT_COLUMN: text, DEFAULT_VECTOR_COLUMN: vector} for text, vector, id_ in zip(fake_texts, vectors, added_ids)]) assert len(added_ids) == 1 assert is_valid_uuid(added_ids[0])
@pytest.mark.requires('databricks', 'databricks.vector_search')
def test_add_texts_handle_single_text() ->None:
    """A bare string passed to add_texts should be treated as one text."""
    index = mock_index(DIRECT_ACCESS_INDEX)
    vectorsearch = DatabricksVectorSearch(index, embedding=
        DEFAULT_EMBEDDING_MODEL, text_column=DEFAULT_TEXT_COLUMN)
    vectors = DEFAULT_EMBEDDING_MODEL.embed_documents(fake_texts)
    # Passing fake_texts[0] (a str, not a list) — the store must not iterate
    # it character by character.
    added_ids = vectorsearch.add_texts(fake_texts[0])
    # zip() stops at the shortest input (added_ids has length 1), so the
    # expected upsert payload is exactly one row.
    index.upsert.assert_called_once_with([{DEFAULT_PRIMARY_KEY: id_,
        DEFAULT_TEXT_COLUMN: text, DEFAULT_VECTOR_COLUMN: vector} for text,
        vector, id_ in zip(fake_texts, vectors, added_ids)])
    assert len(added_ids) == 1
    # IDs are auto-generated UUIDs when none are supplied.
    assert is_valid_uuid(added_ids[0])
null
load
"""Load webpage.""" soup = self.scrape() text = soup.select_one("td[class='scrtext']").text metadata = {'source': self.web_path} return [Document(page_content=text, metadata=metadata)]
def load(self) ->List[Document]:
    """Load webpage and return the script text as a single document."""
    page = self.scrape()
    # The script body lives in the single <td class="scrtext"> cell.
    script_text = page.select_one("td[class='scrtext']").text
    return [Document(page_content=script_text, metadata={'source': self.
        web_path})]
Load webpage.
max_marginal_relevance_search
"""Perform a search and return results that are reordered by MMR.""" embedding = self.embedding_func.embed_query(query) return self.max_marginal_relevance_search_by_vector(embedding=embedding, k= k, fetch_k=fetch_k, lambda_mult=lambda_mult, param=param, expr=expr, timeout=timeout, **kwargs)
def max_marginal_relevance_search(self, query: str, k: int=4, fetch_k: int=
    20, lambda_mult: float=0.5, param: Optional[dict]=None, expr: Optional[
    str]=None, timeout: Optional[int]=None, **kwargs: Any) ->List[Document]:
    """Embed the query, then run MMR re-ranking over the vector search.

    Delegates all the heavy lifting to
    ``max_marginal_relevance_search_by_vector``.
    """
    query_vector = self.embedding_func.embed_query(query)
    return self.max_marginal_relevance_search_by_vector(embedding=
        query_vector, k=k, fetch_k=fetch_k, lambda_mult=lambda_mult, param=
        param, expr=expr, timeout=timeout, **kwargs)
Perform a search and return results that are reordered by MMR.
__init__
"""Initializes the WandbTracer. Parameters: run_args: (dict, optional) Arguments to pass to `wandb.init()`. If not provided, `wandb.init()` will be called with no arguments. Please refer to the `wandb.init` for more details. To use W&B to monitor all LangChain activity, add this tracer like any other LangChain callback: ``` from wandb.integration.langchain import WandbTracer tracer = WandbTracer() chain = LLMChain(llm, callbacks=[tracer]) # ...end of notebook / script: tracer.finish() ``` """ super().__init__(**kwargs) try: import wandb from wandb.sdk.data_types import trace_tree except ImportError as e: raise ImportError( 'Could not import wandb python package.Please install it with `pip install -U wandb`.' ) from e self._wandb = wandb self._trace_tree = trace_tree self._run_args = run_args self._ensure_run(should_print_url=wandb.run is None) self.run_processor = RunProcessor(self._wandb, self._trace_tree)
def __init__(self, run_args: Optional[WandbRunArgs]=None, **kwargs: Any
    ) ->None:
    """Initializes the WandbTracer.

    Parameters:
        run_args: (dict, optional) Arguments to pass to `wandb.init()`. If
            not provided, `wandb.init()` will be called with no arguments.
            Please refer to the `wandb.init` for more details.

    To use W&B to monitor all LangChain activity, add this tracer like any
    other LangChain callback:
    ```
    from wandb.integration.langchain import WandbTracer

    tracer = WandbTracer()
    chain = LLMChain(llm, callbacks=[tracer])
    # ...end of notebook / script:
    tracer.finish()
    ```

    Raises:
        ImportError: if the `wandb` package is not installed.
    """
    super().__init__(**kwargs)
    # wandb is an optional dependency; import lazily so the tracer module can
    # be imported without it.
    try:
        import wandb
        from wandb.sdk.data_types import trace_tree
    except ImportError as e:
        # Fixed: the original message ran two sentences together
        # ("package.Please").
        raise ImportError(
            'Could not import wandb python package. Please install it with `pip install -U wandb`.'
            ) from e
    self._wandb = wandb
    self._trace_tree = trace_tree
    self._run_args = run_args
    # Start (or reuse) a W&B run; only print the run URL if we created it.
    self._ensure_run(should_print_url=wandb.run is None)
    self.run_processor = RunProcessor(self._wandb, self._trace_tree)
Initializes the WandbTracer. Parameters: run_args: (dict, optional) Arguments to pass to `wandb.init()`. If not provided, `wandb.init()` will be called with no arguments. Please refer to the `wandb.init` for more details. To use W&B to monitor all LangChain activity, add this tracer like any other LangChain callback: ``` from wandb.integration.langchain import WandbTracer tracer = WandbTracer() chain = LLMChain(llm, callbacks=[tracer]) # ...end of notebook / script: tracer.finish() ```
get_tools
"""Get the tools in the toolkit.""" return [QueryPowerBITool(llm_chain=self._get_chain(), powerbi=self.powerbi, examples=self.examples, max_iterations=self.max_iterations, output_token_limit=self.output_token_limit, tiktoken_model_name=self. tiktoken_model_name), InfoPowerBITool(powerbi=self.powerbi), ListPowerBITool(powerbi=self.powerbi)]
def get_tools(self) ->List[BaseTool]:
    """Get the tools in the toolkit."""
    query_tool = QueryPowerBITool(llm_chain=self._get_chain(), powerbi=
        self.powerbi, examples=self.examples, max_iterations=self.
        max_iterations, output_token_limit=self.output_token_limit,
        tiktoken_model_name=self.tiktoken_model_name)
    info_tool = InfoPowerBITool(powerbi=self.powerbi)
    list_tool = ListPowerBITool(powerbi=self.powerbi)
    return [query_tool, info_tool, list_tool]
Get the tools in the toolkit.
_select_relevance_score_fn
""" The 'correct' relevance function may differ depending on a few things, including: - the distance / similarity metric used by the VectorStore - the scale of your embeddings (OpenAI's are unit normed. Many others are not!) - embedding dimensionality - etc. """ if self.override_relevance_score_fn is not None: return self.override_relevance_score_fn if self._distance_strategy == DistanceStrategy.COSINE: return lambda x: x elif self._distance_strategy == DistanceStrategy.EUCLIDEAN_DISTANCE: return lambda x: x else: raise ValueError( f'No supported normalization function for distance_strategy of {self._distance_strategy}.Consider providing relevance_score_fn to PGVector constructor.' )
def _select_relevance_score_fn(self) ->Callable[[float], float]:
    """
    The 'correct' relevance function may differ depending on a few things,
    including:
    - the distance / similarity metric used by the VectorStore
    - the scale of your embeddings (OpenAI's are unit normed. Many
      others are not!)
    - embedding dimensionality
    - etc.
    """
    # A user-supplied function always wins.
    if self.override_relevance_score_fn is not None:
        return self.override_relevance_score_fn
    # For the supported strategies the raw score is returned unchanged
    # (identity mapping).
    if self._distance_strategy == DistanceStrategy.COSINE:
        return lambda x: x
    elif self._distance_strategy == DistanceStrategy.EUCLIDEAN_DISTANCE:
        return lambda x: x
    else:
        # Fixed: the original message was missing the space after the period
        # ("}.Consider").
        raise ValueError(
            f'No supported normalization function for distance_strategy of {self._distance_strategy}. Consider providing relevance_score_fn to PGVector constructor.'
            )
The 'correct' relevance function may differ depending on a few things, including: - the distance / similarity metric used by the VectorStore - the scale of your embeddings (OpenAI's are unit normed. Many others are not!) - embedding dimensionality - etc.
test_all_imports
assert set(__all__) == set(EXPECTED_ALL)
def test_all_imports() ->None:
    """The public ``__all__`` must match the expected export list exactly."""
    assert set(EXPECTED_ALL) == set(__all__)
null
parse
if f'{self.ai_prefix}:' in text: return AgentFinish({'output': text.split(f'{self.ai_prefix}:')[-1]. strip()}, text) regex = 'Action: (.*?)[\\n]*Action Input: ([\\s\\S]*)' match = re.search(regex, text, re.DOTALL) if not match: raise OutputParserException(f'Could not parse LLM output: `{text}`') action = match.group(1) action_input = match.group(2) return AgentAction(action.strip(), action_input.strip(' ').strip('"'), text)
def parse(self, text: str) ->Union[AgentAction, AgentFinish]:
    """Extract a final answer or a tool action from the LLM output text."""
    # A line containing the AI prefix marks a final answer; take everything
    # after the last occurrence.
    prefix_marker = f'{self.ai_prefix}:'
    if prefix_marker in text:
        final_answer = text.split(prefix_marker)[-1].strip()
        return AgentFinish({'output': final_answer}, text)
    # Otherwise look for the ReAct-style "Action: ... Action Input: ..." pair.
    pattern = 'Action: (.*?)[\\n]*Action Input: ([\\s\\S]*)'
    found = re.search(pattern, text, re.DOTALL)
    if found is None:
        raise OutputParserException(f'Could not parse LLM output: `{text}`')
    tool_name = found.group(1).strip()
    tool_input = found.group(2).strip(' ').strip('"')
    return AgentAction(tool_name, tool_input, text)
null
model_cfg_sys_msg
return Llama2Chat(llm=FakeLLM(), system_message=SystemMessage(content= 'sys-msg'))
@pytest.fixture
def model_cfg_sys_msg() ->Llama2Chat:
    """Fixture: Llama2Chat over a fake LLM with a custom system message."""
    sys_msg = SystemMessage(content='sys-msg')
    return Llama2Chat(llm=FakeLLM(), system_message=sys_msg)
null
_default_params
return {'target_uri': self.target_uri, 'endpoint': self.endpoint, 'temperature': self.temperature, 'n': self.n, 'stop': self.stop, 'max_tokens': self.max_tokens, 'extra_params': self.extra_params}
@property
def _default_params(self) ->Dict[str, Any]:
    """Default request parameters derived from the instance configuration."""
    params: Dict[str, Any] = {}
    params['target_uri'] = self.target_uri
    params['endpoint'] = self.endpoint
    params['temperature'] = self.temperature
    params['n'] = self.n
    params['stop'] = self.stop
    params['max_tokens'] = self.max_tokens
    params['extra_params'] = self.extra_params
    return params
null
validate_environment
"""Validate that api key and python package exists in environment.""" huggingfacehub_api_token = values['huggingfacehub_api_token'] or os.getenv( 'HUGGINGFACEHUB_API_TOKEN') try: from huggingface_hub import InferenceClient if values['model']: values['repo_id'] = values['model'] elif values['repo_id']: values['model'] = values['repo_id'] else: values['model'] = DEFAULT_MODEL values['repo_id'] = DEFAULT_MODEL client = InferenceClient(model=values['model'], token= huggingfacehub_api_token) if values['task'] not in VALID_TASKS: raise ValueError( f"Got invalid task {values['task']}, currently only {VALID_TASKS} are supported" ) values['client'] = client except ImportError: raise ImportError( 'Could not import huggingface_hub python package. Please install it with `pip install huggingface_hub`.' ) return values
@root_validator()
def validate_environment(cls, values: Dict) ->Dict:
    """Validate that api key and python package exists in environment."""
    # Prefer an explicitly passed token; fall back to the environment
    # variable.
    huggingfacehub_api_token = values['huggingfacehub_api_token'] or os.getenv(
        'HUGGINGFACEHUB_API_TOKEN')
    try:
        from huggingface_hub import InferenceClient
        # Keep 'model' and 'repo_id' aliases in sync, defaulting both when
        # neither was provided. 'model' takes precedence over 'repo_id'.
        if values['model']:
            values['repo_id'] = values['model']
        elif values['repo_id']:
            values['model'] = values['repo_id']
        else:
            values['model'] = DEFAULT_MODEL
            values['repo_id'] = DEFAULT_MODEL
        client = InferenceClient(model=values['model'], token=
            huggingfacehub_api_token)
        # NOTE(review): this rejects values['task'] not in VALID_TASKS even
        # when task is unset — confirm a None task is intended to fail here.
        if values['task'] not in VALID_TASKS:
            raise ValueError(
                f"Got invalid task {values['task']}, currently only {VALID_TASKS} are supported"
                )
        values['client'] = client
    except ImportError:
        raise ImportError(
            'Could not import huggingface_hub python package. Please install it with `pip install huggingface_hub`.'
            )
    return values
Validate that api key and python package exists in environment.
test_chat_fireworks_generate
"""Test ChatFireworks wrapper with generate.""" chat = ChatFireworks(model_kwargs={'n': 2}) message = HumanMessage(content='Hello') response = chat.generate([[message], [message]]) assert isinstance(response, LLMResult) assert len(response.generations) == 2 for generations in response.generations: assert len(generations) == 2 for generation in generations: assert isinstance(generation, ChatGeneration) assert isinstance(generation.text, str) assert generation.text == generation.message.content
@pytest.mark.scheduled
def test_chat_fireworks_generate() ->None:
    """Test ChatFireworks wrapper with generate."""
    model = ChatFireworks(model_kwargs={'n': 2})
    msg = HumanMessage(content='Hello')
    result = model.generate([[msg], [msg]])
    assert isinstance(result, LLMResult)
    # Two prompts in, two generation lists out; n=2 candidates per prompt.
    assert len(result.generations) == 2
    for candidates in result.generations:
        assert len(candidates) == 2
        for candidate in candidates:
            assert isinstance(candidate, ChatGeneration)
            assert isinstance(candidate.text, str)
            assert candidate.text == candidate.message.content
Test ChatFireworks wrapper with generate.
test_zilliz_add_extra
"""Test end to end construction and MRR search.""" texts = ['foo', 'bar', 'baz'] metadatas = [{'page': i} for i in range(len(texts))] docsearch = _zilliz_from_texts(metadatas=metadatas) docsearch.add_texts(texts, metadatas) output = docsearch.similarity_search('foo', k=10) assert len(output) == 6
def test_zilliz_add_extra() ->None:
    """End-to-end: texts added twice should all be returned by search."""
    corpus = ['foo', 'bar', 'baz']
    metas = [{'page': i} for i in range(len(corpus))]
    store = _zilliz_from_texts(metadatas=metas)
    # The constructor already inserted the corpus once; add it again.
    store.add_texts(corpus, metas)
    hits = store.similarity_search('foo', k=10)
    # 3 original + 3 re-added documents.
    assert len(hits) == 6
Test end to end construction and MRR search.
parse
"""Eagerly parse the blob into a document or documents. This is a convenience method for interactive development environment. Production applications should favor the lazy_parse method instead. Subclasses should generally not over-ride this parse method. Args: blob: Blob instance Returns: List of documents """ return list(self.lazy_parse(blob))
def parse(self, blob: Blob) ->List[Document]:
    """Eagerly parse the blob into a document or documents.

    This is a convenience method for interactive development environment.

    Production applications should favor the lazy_parse method instead.

    Subclasses should generally not over-ride this parse method.

    Args:
        blob: Blob instance

    Returns:
        List of documents
    """
    # Drain the lazy generator into a concrete list.
    documents = self.lazy_parse(blob)
    return list(documents)
Eagerly parse the blob into a document or documents. This is a convenience method for interactive development environment. Production applications should favor the lazy_parse method instead. Subclasses should generally not over-ride this parse method. Args: blob: Blob instance Returns: List of documents
_call
"""Generate nGQL statement, use it to look up in db and answer question.""" _run_manager = run_manager or CallbackManagerForChainRun.get_noop_manager() callbacks = _run_manager.get_child() question = inputs[self.input_key] generated_ngql = self.ngql_generation_chain.run({'question': question, 'schema': self.graph.get_schema}, callbacks=callbacks) _run_manager.on_text('Generated nGQL:', end='\n', verbose=self.verbose) _run_manager.on_text(generated_ngql, color='green', end='\n', verbose=self. verbose) context = self.graph.query(generated_ngql) _run_manager.on_text('Full Context:', end='\n', verbose=self.verbose) _run_manager.on_text(str(context), color='green', end='\n', verbose=self. verbose) result = self.qa_chain({'question': question, 'context': context}, callbacks=callbacks) return {self.output_key: result[self.qa_chain.output_key]}
def _call(self, inputs: Dict[str, Any], run_manager: Optional[
    CallbackManagerForChainRun]=None) ->Dict[str, str]:
    """Generate nGQL statement, use it to look up in db and answer question."""
    _run_manager = run_manager or CallbackManagerForChainRun.get_noop_manager()
    callbacks = _run_manager.get_child()
    question = inputs[self.input_key]
    # Step 1: LLM translates the natural-language question into nGQL, guided
    # by the graph schema.
    generated_ngql = self.ngql_generation_chain.run({'question': question,
        'schema': self.graph.get_schema}, callbacks=callbacks)
    _run_manager.on_text('Generated nGQL:', end='\n', verbose=self.verbose)
    _run_manager.on_text(generated_ngql, color='green', end='\n', verbose=
        self.verbose)
    # Step 2: execute the generated query against the NebulaGraph database.
    context = self.graph.query(generated_ngql)
    _run_manager.on_text('Full Context:', end='\n', verbose=self.verbose)
    _run_manager.on_text(str(context), color='green', end='\n', verbose=
        self.verbose)
    # Step 3: LLM answers the question from the retrieved context.
    result = self.qa_chain({'question': question, 'context': context},
        callbacks=callbacks)
    return {self.output_key: result[self.qa_chain.output_key]}
Generate nGQL statement, use it to look up in db and answer question.
__init__
"""original doc""" pass
def __init__(self) ->None:
    """original doc"""
    # NOTE(review): intentionally a no-op; the literal docstring text above
    # appears to be a fixture asset (e.g. for docstring-copying tests) —
    # confirm before changing it.
    pass
original doc
_call_api
"""Call Cloudflare Workers API""" headers = {'Authorization': f'Bearer {self.api_token}'} data = {'prompt': prompt, 'stream': self.streaming, **params} response = requests.post(self.endpoint_url, headers=headers, json=data) return response
def _call_api(self, prompt: str, params: Dict[str, Any]) ->requests.Response:
    """Call Cloudflare Workers AI API.

    Args:
        prompt: The prompt text to send to the model.
        params: Extra request parameters merged into the JSON payload.

    Returns:
        The raw ``requests.Response`` from the endpoint.
    """
    headers = {'Authorization': f'Bearer {self.api_token}'}
    data = {'prompt': prompt, 'stream': self.streaming, **params}
    # Fixed: when streaming is requested in the payload, the HTTP response
    # must also be opened in streaming mode so callers can iterate the
    # server-sent events instead of buffering the whole body.
    response = requests.post(self.endpoint_url, headers=headers, json=data,
        stream=self.streaming)
    return response
Call Cloudflare Workers API
test_max_marginal_relevance_search
"""Test max marginal relevance search.""" pepperoni_pizza = 'pepperoni pizza' cheese_pizza = 'cheese pizza' hot_dog = 'hot dog' vector_store.add_texts([pepperoni_pizza, cheese_pizza, hot_dog]) wait() search_results = vector_store.similarity_search('pizza', k=2) assert search_results == [Document(page_content=pepperoni_pizza, metadata={ }), Document(page_content=cheese_pizza, metadata={})] search_results = vector_store.max_marginal_relevance_search(query='pizza', k=2) assert search_results == [Document(page_content=pepperoni_pizza, metadata={ }), Document(page_content=hot_dog, metadata={})]
def test_max_marginal_relevance_search(vector_store: MomentoVectorIndex
    ) ->None:
    """Test max marginal relevance search."""
    pepperoni_pizza = 'pepperoni pizza'
    cheese_pizza = 'cheese pizza'
    hot_dog = 'hot dog'
    vector_store.add_texts([pepperoni_pizza, cheese_pizza, hot_dog])
    # Allow the index to become consistent before querying.
    wait()
    # Plain similarity returns the two nearest items (both pizzas).
    search_results = vector_store.similarity_search('pizza', k=2)
    assert search_results == [Document(page_content=pepperoni_pizza,
        metadata={}), Document(page_content=cheese_pizza, metadata={})]
    # MMR trades similarity for diversity: the second hit becomes the
    # dissimilar 'hot dog' instead of the near-duplicate cheese pizza.
    search_results = vector_store.max_marginal_relevance_search(query=
        'pizza', k=2)
    assert search_results == [Document(page_content=pepperoni_pizza,
        metadata={}), Document(page_content=hot_dog, metadata={})]
Test max marginal relevance search.
test_chat_prompt_template
"""Test chat prompt template.""" prompt_template = create_chat_prompt_template() prompt = prompt_template.format_prompt(foo='foo', bar='bar', context='context') assert isinstance(prompt, ChatPromptValue) messages = prompt.to_messages() assert len(messages) == 4 assert messages[0].content == "Here's some context: context" assert messages[1].content == "Hello foo, I'm bar. Thanks for the context" assert messages[2].content == "I'm an AI. I'm foo. I'm bar." assert messages[3].content == "I'm a generic message. I'm foo. I'm bar." string = prompt.to_string() expected = """System: Here's some context: context Human: Hello foo, I'm bar. Thanks for the context AI: I'm an AI. I'm foo. I'm bar. test: I'm a generic message. I'm foo. I'm bar.""" assert string == expected string = prompt_template.format(foo='foo', bar='bar', context='context') assert string == expected
def test_chat_prompt_template() ->None:
    """Test chat prompt template."""
    prompt_template = create_chat_prompt_template()
    prompt = prompt_template.format_prompt(foo='foo', bar='bar', context=
        'context')
    assert isinstance(prompt, ChatPromptValue)
    messages = prompt.to_messages()
    # Template expands to system, human, AI, and one generic-role message.
    assert len(messages) == 4
    assert messages[0].content == "Here's some context: context"
    assert messages[1].content == "Hello foo, I'm bar. Thanks for the context"
    assert messages[2].content == "I'm an AI. I'm foo. I'm bar."
    assert messages[3].content == "I'm a generic message. I'm foo. I'm bar."
    # to_string() renders each message as "<Role>: <content>" lines; the
    # expected value must match byte-for-byte.
    string = prompt.to_string()
    expected = """System: Here's some context: context
Human: Hello foo, I'm bar. Thanks for the context
AI: I'm an AI. I'm foo. I'm bar.
test: I'm a generic message. I'm foo. I'm bar."""
    assert string == expected
    # format() should produce the same flattened string directly.
    string = prompt_template.format(foo='foo', bar='bar', context='context')
    assert string == expected
Test chat prompt template.
__deepcopy__
return self
def __deepcopy__(self, memo: dict) ->'FakeCallbackHandler':
    """Opt out of deep copying: every copy shares this handler instance."""
    return self
null
__init__
"""Initialize the loader.""" self.file_path = path """Path to the directory containing the markdown files.""" self.encoding = encoding """Encoding to use when reading the files.""" self.collect_metadata = collect_metadata """Whether to collect metadata from the front matter."""
def __init__(self, path: str, encoding: str='UTF-8', collect_metadata: bool
    =True):
    """Initialize the loader.

    Args:
        path: Path to the markdown content to load (the original note said
            "directory containing the markdown files" — confirm whether a
            file or directory path is expected).
        encoding: Encoding to use when reading the files.
        collect_metadata: Whether to collect metadata from the front matter.
    """
    # The original code documented each attribute with a bare string literal
    # after the assignment; those are no-op statements, not docstrings, so
    # the documentation now lives in the real docstring above.
    self.file_path = path
    self.encoding = encoding
    self.collect_metadata = collect_metadata
Initialize the loader.
__init__
"""Initialize with dict.""" self._dict = _dict if _dict is not None else {}
def __init__(self, _dict: Optional[Dict[str, Document]]=None):
    """Initialize the in-memory store, defaulting to an empty mapping."""
    if _dict is None:
        _dict = {}
    self._dict = _dict
Initialize with dict.
typed_lambda_impl
return len(x)
def typed_lambda_impl(x: str) ->int:
    """Return the number of characters in ``x``."""
    length = len(x)
    return length
null
on_chain_error
"""Run when chain errors.""" self.metrics['step'] += 1 self.metrics['errors'] += 1
def on_chain_error(self, error: BaseException, **kwargs: Any) ->None:
    """Run when chain errors: advance the step count and tally the error."""
    metrics = self.metrics
    metrics['step'] += 1
    metrics['errors'] += 1
Run when chain errors.
_value_deserializer
"""Deserialize a value.""" return cast(List[float], json.loads(serialized_value.decode()))
def _value_deserializer(serialized_value: bytes) ->List[float]:
    """Decode UTF-8 JSON bytes back into a list of floats."""
    decoded = serialized_value.decode()
    return cast(List[float], json.loads(decoded))
Deserialize a value.
_generate
"""Generate next turn in the conversation. Args: messages: The history of the conversation as a list of messages. stop: The list of stop words (optional). run_manager: The CallbackManager for LLM run, it's not used at the moment. Returns: The ChatResult that contains outputs generated by the model. Raises: ValueError: if the last message in the list is not from human. """ text = completion_with_retry(self, messages=messages) text = text if stop is None else enforce_stop_tokens(text, stop) message = AIMessage(content=text) return ChatResult(generations=[ChatGeneration(message=message)])
def _generate(self, messages: List[BaseMessage], stop: Optional[List[str]]=
    None, run_manager: Optional[CallbackManagerForLLMRun]=None, **kwargs: Any
    ) ->ChatResult:
    """Generate next turn in the conversation.

    Args:
        messages: The history of the conversation as a list of messages.
        stop: The list of stop words (optional).
        run_manager: The CallbackManager for LLM run, it's not used at the
            moment.

    Returns:
        The ChatResult that contains outputs generated by the model.

    Raises:
        ValueError: if the last message in the list is not from human.
    """
    # Completion with automatic retries; stop words are enforced after the
    # fact by truncating the returned text rather than passed to the API.
    text = completion_with_retry(self, messages=messages)
    text = text if stop is None else enforce_stop_tokens(text, stop)
    message = AIMessage(content=text)
    # A single candidate generation is returned per call.
    return ChatResult(generations=[ChatGeneration(message=message)])
Generate next turn in the conversation. Args: messages: The history of the conversation as a list of messages. stop: The list of stop words (optional). run_manager: The CallbackManager for LLM run, it's not used at the moment. Returns: The ChatResult that contains outputs generated by the model. Raises: ValueError: if the last message in the list is not from human.
test_missing_docstring
"""Test error is raised when docstring is missing.""" with pytest.raises(ValueError, match='Function must have a docstring'): @tool def search_api(query: str) ->str: return 'API result'
def test_missing_docstring() ->None:
    """Test error is raised when docstring is missing."""
    with pytest.raises(ValueError, match='Function must have a docstring'):

        # The decorated function deliberately has no docstring, which the
        # @tool decorator must reject at definition time.
        @tool
        def search_api(query: str) ->str:
            return 'API result'
Test error is raised when docstring is missing.
test_input_messages
runnable = RunnableLambda(lambda messages: 'you said: ' + '\n'.join(str(m. content) for m in messages if isinstance(m, HumanMessage))) store: Dict = {} get_session_history = _get_get_session_history(store=store) with_history = RunnableWithMessageHistory(runnable, get_session_history) config: RunnableConfig = {'configurable': {'session_id': '1'}} output = with_history.invoke([HumanMessage(content='hello')], config) assert output == 'you said: hello' output = with_history.invoke([HumanMessage(content='good bye')], config) assert output == """you said: hello good bye""" assert store == {'1': ChatMessageHistory(messages=[HumanMessage(content= 'hello'), AIMessage(content='you said: hello'), HumanMessage(content= 'good bye'), AIMessage(content="""you said: hello good bye""")])}
def test_input_messages() ->None:
    """A runnable taking a bare message list should accumulate history."""
    # Echo runnable: concatenates the content of all human messages it sees.
    runnable = RunnableLambda(lambda messages: 'you said: ' + '\n'.join(str
        (m.content) for m in messages if isinstance(m, HumanMessage)))
    store: Dict = {}
    get_session_history = _get_get_session_history(store=store)
    with_history = RunnableWithMessageHistory(runnable, get_session_history)
    config: RunnableConfig = {'configurable': {'session_id': '1'}}
    output = with_history.invoke([HumanMessage(content='hello')], config)
    assert output == 'you said: hello'
    # Second invoke in the same session: the stored 'hello' is replayed, so
    # the echo covers both human turns.
    output = with_history.invoke([HumanMessage(content='good bye')], config)
    assert output == 'you said: hello\ngood bye'
    # History must record human inputs and AI outputs alternately.
    assert store == {'1': ChatMessageHistory(messages=[HumanMessage(content
        ='hello'), AIMessage(content='you said: hello'), HumanMessage(
        content='good bye'), AIMessage(content=
        """you said: hello
good bye""")])}
null
similarity_search_with_score_by_vector
"""Perform a search on a query string and return results with score. For more information about the search parameters, take a look at the pymilvus documentation found here: https://milvus.io/api-reference/pymilvus/v2.2.6/Collection/search().md Args: embedding (List[float]): The embedding vector being searched. k (int, optional): The amount of results to return. Defaults to 4. param (dict): The search params for the specified index. Defaults to None. expr (str, optional): Filtering expression. Defaults to None. timeout (int, optional): How long to wait before timeout error. Defaults to None. kwargs: Collection.search() keyword arguments. Returns: List[Tuple[Document, float]]: Result doc and score. """ if self.col is None: logger.debug('No existing collection to search.') return [] if param is None: param = self.search_params output_fields = self.fields[:] output_fields.remove(self._vector_field) res = self.col.search(data=[embedding], anns_field=self._vector_field, param=param, limit=k, expr=expr, output_fields=output_fields, timeout= timeout, **kwargs) ret = [] for result in res[0]: data = {x: result.entity.get(x) for x in output_fields} doc = self._parse_document(data) pair = doc, result.score ret.append(pair) return ret
def similarity_search_with_score_by_vector(self, embedding: List[float], k:
    int=4, param: Optional[dict]=None, expr: Optional[str]=None, timeout:
    Optional[int]=None, **kwargs: Any) ->List[Tuple[Document, float]]:
    """Perform a search on a query string and return results with score.

    For more information about the search parameters, take a look at the
    pymilvus documentation found here:
    https://milvus.io/api-reference/pymilvus/v2.2.6/Collection/search().md

    Args:
        embedding (List[float]): The embedding vector being searched.
        k (int, optional): The amount of results to return. Defaults to 4.
        param (dict): The search params for the specified index.
            Defaults to None.
        expr (str, optional): Filtering expression. Defaults to None.
        timeout (int, optional): How long to wait before timeout error.
            Defaults to None.
        kwargs: Collection.search() keyword arguments.

    Returns:
        List[Tuple[Document, float]]: Result doc and score.
    """
    # No collection means nothing to search; return empty rather than raise.
    if self.col is None:
        logger.debug('No existing collection to search.')
        return []
    if param is None:
        param = self.search_params
    # Return every stored field except the vector itself.
    output_fields = self.fields[:]
    output_fields.remove(self._vector_field)
    res = self.col.search(data=[embedding], anns_field=self._vector_field,
        param=param, limit=k, expr=expr, output_fields=output_fields,
        timeout=timeout, **kwargs)
    # res[0] holds the hits for our single query vector.
    ret = []
    for result in res[0]:
        data = {x: result.entity.get(x) for x in output_fields}
        doc = self._parse_document(data)
        pair = doc, result.score
        ret.append(pair)
    return ret
Perform a search on a query string and return results with score. For more information about the search parameters, take a look at the pymilvus documentation found here: https://milvus.io/api-reference/pymilvus/v2.2.6/Collection/search().md Args: embedding (List[float]): The embedding vector being searched. k (int, optional): The amount of results to return. Defaults to 4. param (dict): The search params for the specified index. Defaults to None. expr (str, optional): Filtering expression. Defaults to None. timeout (int, optional): How long to wait before timeout error. Defaults to None. kwargs: Collection.search() keyword arguments. Returns: List[Tuple[Document, float]]: Result doc and score.
default
"""Return a default value for a Serializable object or a SerializedNotImplemented object.""" if isinstance(obj, Serializable): return obj.to_json() else: return to_json_not_implemented(obj)
def default(obj: Any) ->Any:
    """JSON-encoder fallback: serialize Serializable objects via to_json(),
    everything else as a "not implemented" placeholder."""
    if not isinstance(obj, Serializable):
        return to_json_not_implemented(obj)
    return obj.to_json()
Return a default value for a Serializable object or a SerializedNotImplemented object.
parse
cleaned_text = text.strip() if cleaned_text == self.no_output_str: return '' return cleaned_text
def parse(self, text: str) ->str:
    """Strip the LLM output, mapping the designated no-output marker to ''."""
    stripped = text.strip()
    return '' if stripped == self.no_output_str else stripped
null
_import_deepinfra
from langchain_community.llms.deepinfra import DeepInfra return DeepInfra
def _import_deepinfra() ->Any:
    """Lazily import and return the DeepInfra LLM class.

    Deferring the import keeps module load fast and avoids a hard dependency
    until the class is actually requested.
    """
    from langchain_community.llms.deepinfra import DeepInfra
    return DeepInfra
null
requires_reference
"""Whether the chain requires a reference string.""" return True
@property
def requires_reference(self) ->bool:
    """This evaluator always needs a reference string to compare against."""
    return True
Whether the chain requires a reference string.
process_span
"""Converts a LangChain Run into a W&B Trace Span. :param run: The LangChain Run to convert. :return: The converted W&B Trace Span. """ try: span = self._convert_lc_run_to_wb_span(run) return span except Exception as e: if PRINT_WARNINGS: self.wandb.termwarn( f'Skipping trace saving - unable to safely convert LangChain Run into W&B Trace due to: {e}' ) return None
def process_span(self, run: Run) ->Optional['Span']:
    """Converts a LangChain Run into a W&B Trace Span.

    Conversion failures are swallowed: a warning is (optionally) printed and
    None is returned so tracing never breaks the traced program.
    """
    try:
        return self._convert_lc_run_to_wb_span(run)
    except Exception as e:
        if PRINT_WARNINGS:
            self.wandb.termwarn(
                f'Skipping trace saving - unable to safely convert LangChain Run into W&B Trace due to: {e}'
                )
        return None
Converts a LangChain Run into a W&B Trace Span. :param run: The LangChain Run to convert. :return: The converted W&B Trace Span.
__init__
"""Initialize with file path.""" self.file_path = file_path self.encoding = encoding self.autodetect_encoding = autodetect_encoding
def __init__(self, file_path: str, encoding: Optional[str]=None,
    autodetect_encoding: bool=False):
    """Initialize with file path.

    Args:
        file_path: Path of the file to load.
        encoding: Explicit text encoding; None uses the default.
        autodetect_encoding: Whether to try detecting the encoding if
            decoding with `encoding` fails.
    """
    self.file_path = file_path
    self.encoding = encoding
    self.autodetect_encoding = autodetect_encoding
Initialize with file path.