method_name
stringlengths
1
78
method_body
stringlengths
3
9.66k
full_code
stringlengths
31
10.7k
docstring
stringlengths
4
4.74k
_llm_type
"""Return type of llm.""" return 'oci_model_deployment_vllm_endpoint'
@property def _llm_type(self) ->str: """Return type of llm.""" return 'oci_model_deployment_vllm_endpoint'
Return type of llm.
_import_file_management_DeleteFileTool
from langchain_community.tools.file_management import DeleteFileTool return DeleteFileTool
def _import_file_management_DeleteFileTool() ->Any: from langchain_community.tools.file_management import DeleteFileTool return DeleteFileTool
null
test__split_list_works_correctly
"""Test splitting works correctly.""" docs = [Document(page_content='foo'), Document(page_content='bar'), Document(page_content='baz'), Document(page_content='foo' * 2), Document(page_content='bar'), Document(page_content='baz')] doc_list = split_list_of_docs(docs, _fake_docs_len_func, 10) expected_result = [[Document(page_content='foo'), Document(page_content= 'bar'), Document(page_content='baz')], [Document(page_content='foo' * 2 ), Document(page_content='bar')], [Document(page_content='baz')]] assert doc_list == expected_result
def test__split_list_works_correctly() ->None: """Test splitting works correctly.""" docs = [Document(page_content='foo'), Document(page_content='bar'), Document(page_content='baz'), Document(page_content='foo' * 2), Document(page_content='bar'), Document(page_content='baz')] doc_list = split_list_of_docs(docs, _fake_docs_len_func, 10) expected_result = [[Document(page_content='foo'), Document(page_content ='bar'), Document(page_content='baz')], [Document(page_content= 'foo' * 2), Document(page_content='bar')], [Document(page_content= 'baz')]] assert doc_list == expected_result
Test splitting works correctly.
_import_steamship_image_generation
from langchain_community.tools.steamship_image_generation import SteamshipImageGenerationTool return SteamshipImageGenerationTool
def _import_steamship_image_generation() ->Any: from langchain_community.tools.steamship_image_generation import SteamshipImageGenerationTool return SteamshipImageGenerationTool
null
__init__
try: from playwright.sync_api import sync_playwright except ImportError: raise ImportError( 'Could not import playwright python package. Please install it with `pip install playwright`.' ) self.browser: Browser = sync_playwright().start().chromium.launch(headless= False) self.page: Page = self.browser.new_page() self.page.set_viewport_size({'width': 1280, 'height': 1080}) self.page_element_buffer: Dict[int, ElementInViewPort] self.client: CDPSession
def __init__(self) ->None: try: from playwright.sync_api import sync_playwright except ImportError: raise ImportError( 'Could not import playwright python package. Please install it with `pip install playwright`.' ) self.browser: Browser = sync_playwright().start().chromium.launch(headless =False) self.page: Page = self.browser.new_page() self.page.set_viewport_size({'width': 1280, 'height': 1080}) self.page_element_buffer: Dict[int, ElementInViewPort] self.client: CDPSession
null
_format_prompt_with_error_handling
if not isinstance(inner_input, dict): raise TypeError( f'Expected mapping type as input to {self.__class__.__name__}. Received {type(inner_input)}.' ) missing = set(self.input_variables).difference(inner_input) if missing: raise KeyError( f'Input to {self.__class__.__name__} is missing variables {missing}. Expected: {self.input_variables} Received: {list(inner_input.keys())}' ) return self.format_prompt(**inner_input)
def _format_prompt_with_error_handling(self, inner_input: Dict) ->PromptValue: if not isinstance(inner_input, dict): raise TypeError( f'Expected mapping type as input to {self.__class__.__name__}. Received {type(inner_input)}.' ) missing = set(self.input_variables).difference(inner_input) if missing: raise KeyError( f'Input to {self.__class__.__name__} is missing variables {missing}. Expected: {self.input_variables} Received: {list(inner_input.keys())}' ) return self.format_prompt(**inner_input)
null
_filter_results
output = [] types = self.json_result_types if self.json_result_types is not None else [] for task in res.get('tasks', []): for result in task.get('result', []): for item in result.get('items', []): if len(types) == 0 or item.get('type', '') in types: self._cleanup_unnecessary_items(item) if len(item) != 0: output.append(item) if self.top_count is not None and len(output) >= self.top_count: break return output
def _filter_results(self, res: dict) ->list: output = [] types = self.json_result_types if self.json_result_types is not None else [ ] for task in res.get('tasks', []): for result in task.get('result', []): for item in result.get('items', []): if len(types) == 0 or item.get('type', '') in types: self._cleanup_unnecessary_items(item) if len(item) != 0: output.append(item) if self.top_count is not None and len(output ) >= self.top_count: break return output
null
_get_labels
"""Get node and edge labels from the Neptune statistics summary""" summary = self._get_summary() n_labels = summary['nodeLabels'] e_labels = summary['edgeLabels'] return n_labels, e_labels
def _get_labels(self) ->Tuple[List[str], List[str]]: """Get node and edge labels from the Neptune statistics summary""" summary = self._get_summary() n_labels = summary['nodeLabels'] e_labels = summary['edgeLabels'] return n_labels, e_labels
Get node and edge labels from the Neptune statistics summary
create_collection
"""Creates the corresponding collection in SemaDB.""" payload = {'id': self.collection_name, 'vectorSize': self.vector_size, 'distanceMetric': self._get_internal_distance_strategy()} response = requests.post(SemaDB.BASE_URL + '/collections', json=payload, headers=self.headers) return response.status_code == 200
def create_collection(self) ->bool: """Creates the corresponding collection in SemaDB.""" payload = {'id': self.collection_name, 'vectorSize': self.vector_size, 'distanceMetric': self._get_internal_distance_strategy()} response = requests.post(SemaDB.BASE_URL + '/collections', json=payload, headers=self.headers) return response.status_code == 200
Creates the corresponding collection in SemaDB.
embeddings
return self._embed_fn
@property def embeddings(self) ->Embeddings: return self._embed_fn
null
_client_params
"""Get the parameters used for the client.""" return self._default_params
@property def _client_params(self) ->Dict[str, Any]: """Get the parameters used for the client.""" return self._default_params
Get the parameters used for the client.
_on_chat_model_start
"""Process the Chat Model Run upon start."""
def _on_chat_model_start(self, run: Run) ->None: """Process the Chat Model Run upon start."""
Process the Chat Model Run upon start.
is_lc_serializable
"""Return whether this model can be serialized by Langchain.""" return True
@classmethod def is_lc_serializable(cls) ->bool: """Return whether this model can be serialized by Langchain.""" return True
Return whether this model can be serialized by Langchain.
_create_chat_result
generations = [] for res in response['choices']: message = _convert_dict_to_message(res['message']) gen = ChatGeneration(message=message, generation_info=dict( finish_reason=res.get('finish_reason'))) generations.append(gen) token_usage = response.get('usage', {}) set_model_value = self.model if self.model_name is not None: set_model_value = self.model_name llm_output = {'token_usage': token_usage, 'model': set_model_value} return ChatResult(generations=generations, llm_output=llm_output)
def _create_chat_result(self, response: Mapping[str, Any]) ->ChatResult: generations = [] for res in response['choices']: message = _convert_dict_to_message(res['message']) gen = ChatGeneration(message=message, generation_info=dict( finish_reason=res.get('finish_reason'))) generations.append(gen) token_usage = response.get('usage', {}) set_model_value = self.model if self.model_name is not None: set_model_value = self.model_name llm_output = {'token_usage': token_usage, 'model': set_model_value} return ChatResult(generations=generations, llm_output=llm_output)
null
set_cluster_driver_port
if v and values['endpoint_name']: raise ValueError('Cannot set both endpoint_name and cluster_driver_port.') elif values['endpoint_name']: return None elif v is None: raise ValueError( 'Must set cluster_driver_port to connect to a cluster driver.') elif int(v) <= 0: raise ValueError(f'Invalid cluster_driver_port: {v}') else: return v
@validator('cluster_driver_port', always=True) def set_cluster_driver_port(cls, v: Any, values: Dict[str, Any]) ->Optional[str ]: if v and values['endpoint_name']: raise ValueError( 'Cannot set both endpoint_name and cluster_driver_port.') elif values['endpoint_name']: return None elif v is None: raise ValueError( 'Must set cluster_driver_port to connect to a cluster driver.') elif int(v) <= 0: raise ValueError(f'Invalid cluster_driver_port: {v}') else: return v
null
serve
""" Starts a demo app for this template. """ project_dir = get_package_root() pyproject = project_dir / 'pyproject.toml' get_langserve_export(pyproject) host_str = host if host is not None else '127.0.0.1' script = ('langchain_cli.dev_scripts:create_demo_server' if not configurable else 'langchain_cli.dev_scripts:create_demo_server_configurable') import uvicorn uvicorn.run(script, factory=True, reload=True, port=port if port is not None else 8000, host=host_str)
@package_cli.command() def serve(*, port: Annotated[Optional[int], typer.Option(help= 'The port to run the server on')]=None, host: Annotated[Optional[str], typer.Option(help='The host to run the server on')]=None, configurable: Annotated[bool, typer.Option('--configurable/--no-configurable', help= 'Whether to include a configurable route')]=True) ->None: """ Starts a demo app for this template. """ project_dir = get_package_root() pyproject = project_dir / 'pyproject.toml' get_langserve_export(pyproject) host_str = host if host is not None else '127.0.0.1' script = ('langchain_cli.dev_scripts:create_demo_server' if not configurable else 'langchain_cli.dev_scripts:create_demo_server_configurable') import uvicorn uvicorn.run(script, factory=True, reload=True, port=port if port is not None else 8000, host=host_str)
Starts a demo app for this template.
__init__
self.wandb = wandb_module self.trace_tree = trace_module
def __init__(self, wandb_module: Any, trace_module: Any): self.wandb = wandb_module self.trace_tree = trace_module
null
test_elasticsearch_embedding_documents
"""Test Elasticsearch embedding documents.""" documents = ['foo bar', 'bar foo', 'foo'] embedding = ElasticsearchEmbeddings.from_credentials(model_id) output = embedding.embed_documents(documents) assert len(output) == 3 assert len(output[0]) == 768 assert len(output[1]) == 768 assert len(output[2]) == 768
def test_elasticsearch_embedding_documents(model_id: str) ->None: """Test Elasticsearch embedding documents.""" documents = ['foo bar', 'bar foo', 'foo'] embedding = ElasticsearchEmbeddings.from_credentials(model_id) output = embedding.embed_documents(documents) assert len(output) == 3 assert len(output[0]) == 768 assert len(output[1]) == 768 assert len(output[2]) == 768
Test Elasticsearch embedding documents.
flatten
"""Flatten generations into a single list. Unpack List[List[Generation]] -> List[LLMResult] where each returned LLMResult contains only a single Generation. If token usage information is available, it is kept only for the LLMResult corresponding to the top-choice Generation, to avoid over-counting of token usage downstream. Returns: List of LLMResults where each returned LLMResult contains a single Generation. """ llm_results = [] for i, gen_list in enumerate(self.generations): if i == 0: llm_results.append(LLMResult(generations=[gen_list], llm_output= self.llm_output)) else: if self.llm_output is not None: llm_output = deepcopy(self.llm_output) llm_output['token_usage'] = dict() else: llm_output = None llm_results.append(LLMResult(generations=[gen_list], llm_output= llm_output)) return llm_results
def flatten(self) ->List[LLMResult]: """Flatten generations into a single list. Unpack List[List[Generation]] -> List[LLMResult] where each returned LLMResult contains only a single Generation. If token usage information is available, it is kept only for the LLMResult corresponding to the top-choice Generation, to avoid over-counting of token usage downstream. Returns: List of LLMResults where each returned LLMResult contains a single Generation. """ llm_results = [] for i, gen_list in enumerate(self.generations): if i == 0: llm_results.append(LLMResult(generations=[gen_list], llm_output =self.llm_output)) else: if self.llm_output is not None: llm_output = deepcopy(self.llm_output) llm_output['token_usage'] = dict() else: llm_output = None llm_results.append(LLMResult(generations=[gen_list], llm_output =llm_output)) return llm_results
Flatten generations into a single list. Unpack List[List[Generation]] -> List[LLMResult] where each returned LLMResult contains only a single Generation. If token usage information is available, it is kept only for the LLMResult corresponding to the top-choice Generation, to avoid over-counting of token usage downstream. Returns: List of LLMResults where each returned LLMResult contains a single Generation.
resolve_criteria
"""Resolve the criteria for the pairwise evaluator. Args: criteria (Union[CRITERIA_TYPE, str], optional): The criteria to use. Returns: dict: The resolved criteria. """ if criteria is None: _default_criteria = [Criteria.HELPFULNESS, Criteria.RELEVANCE, Criteria .CORRECTNESS, Criteria.DEPTH] return {k.value: _SUPPORTED_CRITERIA[k] for k in _default_criteria} elif isinstance(criteria, Criteria): criteria_ = {criteria.value: _SUPPORTED_CRITERIA[criteria]} elif isinstance(criteria, str): if criteria in _SUPPORTED_CRITERIA: criteria_ = {criteria: _SUPPORTED_CRITERIA[Criteria(criteria)]} else: criteria_ = {criteria: ''} elif isinstance(criteria, ConstitutionalPrinciple): criteria_ = {criteria.name: criteria.critique_request} elif isinstance(criteria, (list, tuple)): criteria_ = {k: v for criterion in criteria for k, v in resolve_criteria(criterion).items()} else: if not criteria: raise ValueError( 'Criteria cannot be empty. Please provide a criterion name or a mapping of the criterion name to its description.' ) criteria_ = dict(criteria) return criteria_
def resolve_criteria(criteria: Optional[Union[CRITERIA_TYPE, str, List[ CRITERIA_TYPE]]]) ->dict: """Resolve the criteria for the pairwise evaluator. Args: criteria (Union[CRITERIA_TYPE, str], optional): The criteria to use. Returns: dict: The resolved criteria. """ if criteria is None: _default_criteria = [Criteria.HELPFULNESS, Criteria.RELEVANCE, Criteria.CORRECTNESS, Criteria.DEPTH] return {k.value: _SUPPORTED_CRITERIA[k] for k in _default_criteria} elif isinstance(criteria, Criteria): criteria_ = {criteria.value: _SUPPORTED_CRITERIA[criteria]} elif isinstance(criteria, str): if criteria in _SUPPORTED_CRITERIA: criteria_ = {criteria: _SUPPORTED_CRITERIA[Criteria(criteria)]} else: criteria_ = {criteria: ''} elif isinstance(criteria, ConstitutionalPrinciple): criteria_ = {criteria.name: criteria.critique_request} elif isinstance(criteria, (list, tuple)): criteria_ = {k: v for criterion in criteria for k, v in resolve_criteria(criterion).items()} else: if not criteria: raise ValueError( 'Criteria cannot be empty. Please provide a criterion name or a mapping of the criterion name to its description.' ) criteria_ = dict(criteria) return criteria_
Resolve the criteria for the pairwise evaluator. Args: criteria (Union[CRITERIA_TYPE, str], optional): The criteria to use. Returns: dict: The resolved criteria.
test_opensearch_script_scoring
"""Test end to end indexing and search using Script Scoring Search.""" pre_filter_val = {'bool': {'filter': {'term': {'text': 'bar'}}}} docsearch = OpenSearchVectorSearch.from_texts(texts, FakeEmbeddings(), opensearch_url=DEFAULT_OPENSEARCH_URL, is_appx_search=False) output = docsearch.similarity_search('foo', k=1, search_type= SCRIPT_SCORING_SEARCH, pre_filter=pre_filter_val) assert output == [Document(page_content='bar')]
def test_opensearch_script_scoring() ->None: """Test end to end indexing and search using Script Scoring Search.""" pre_filter_val = {'bool': {'filter': {'term': {'text': 'bar'}}}} docsearch = OpenSearchVectorSearch.from_texts(texts, FakeEmbeddings(), opensearch_url=DEFAULT_OPENSEARCH_URL, is_appx_search=False) output = docsearch.similarity_search('foo', k=1, search_type= SCRIPT_SCORING_SEARCH, pre_filter=pre_filter_val) assert output == [Document(page_content='bar')]
Test end to end indexing and search using Script Scoring Search.
delete_collection
"""Deletes the corresponding collection in SemaDB.""" response = requests.delete(SemaDB.BASE_URL + f'/collections/{self.collection_name}', headers=self.headers) return response.status_code == 200
def delete_collection(self) ->bool: """Deletes the corresponding collection in SemaDB.""" response = requests.delete(SemaDB.BASE_URL + f'/collections/{self.collection_name}', headers=self.headers) return response.status_code == 200
Deletes the corresponding collection in SemaDB.
_import_playwright_ClickTool
from langchain_community.tools.playwright import ClickTool return ClickTool
def _import_playwright_ClickTool() ->Any: from langchain_community.tools.playwright import ClickTool return ClickTool
null
stream
yield from self.transform(iter([input]), config, **kwargs)
def stream(self, input: Input, config: Optional[RunnableConfig]=None, ** kwargs: Optional[Any]) ->Iterator[Output]: yield from self.transform(iter([input]), config, **kwargs)
null
test_huggingface_text_generation
"""Test valid call to HuggingFace text generation model.""" llm = HuggingFaceHub(repo_id='gpt2', model_kwargs={'max_new_tokens': 10}) output = llm('Say foo:') assert isinstance(output, str)
def test_huggingface_text_generation() ->None: """Test valid call to HuggingFace text generation model.""" llm = HuggingFaceHub(repo_id='gpt2', model_kwargs={'max_new_tokens': 10}) output = llm('Say foo:') assert isinstance(output, str)
Test valid call to HuggingFace text generation model.
test_semantic_search
"""Test on semantic similarity.""" docs = store.similarity_search('food', k=4) print(docs) kinds = [d.metadata['kind'] for d in docs] assert 'fruit' in kinds assert 'treat' in kinds assert 'planet' not in kinds
def test_semantic_search(self, store: BigQueryVectorSearch) ->None: """Test on semantic similarity.""" docs = store.similarity_search('food', k=4) print(docs) kinds = [d.metadata['kind'] for d in docs] assert 'fruit' in kinds assert 'treat' in kinds assert 'planet' not in kinds
Test on semantic similarity.
__init__
""" Set up the RDFlib graph :param source_file: either a path for a local file or a URL :param serialization: serialization of the input :param query_endpoint: SPARQL endpoint for queries, read access :param update_endpoint: SPARQL endpoint for UPDATE queries, write access :param standard: RDF, RDFS, or OWL :param local_copy: new local copy for storing changes """ self.source_file = source_file self.serialization = serialization self.query_endpoint = query_endpoint self.update_endpoint = update_endpoint self.standard = standard self.local_copy = local_copy try: import rdflib from rdflib.graph import DATASET_DEFAULT_GRAPH_ID as default from rdflib.plugins.stores import sparqlstore except ImportError: raise ValueError( 'Could not import rdflib python package. Please install it with `pip install rdflib`.' ) if self.standard not in (supported_standards := ('rdf', 'rdfs', 'owl')): raise ValueError( f'Invalid standard. Supported standards are: {supported_standards}.') if not source_file and not query_endpoint or source_file and (query_endpoint or update_endpoint): raise ValueError( 'Could not unambiguously initialize the graph wrapper. Specify either a file (local or online) via the source_file or a triple store via the endpoints.' ) if source_file: if source_file.startswith('http'): self.mode = 'online' else: self.mode = 'local' if self.local_copy is None: self.local_copy = self.source_file self.graph = rdflib.Graph() self.graph.parse_folder(source_file, format=self.serialization) if query_endpoint: self.mode = 'store' if not update_endpoint: self._store = sparqlstore.SPARQLStore() self._store.open(query_endpoint) else: self._store = sparqlstore.SPARQLUpdateStore() self._store.open((query_endpoint, update_endpoint)) self.graph = rdflib.Graph(self._store, identifier=default) if not len(self.graph): raise AssertionError('The graph is empty.') self.schema = '' self.load_schema()
def __init__(self, source_file: Optional[str]=None, serialization: Optional [str]='ttl', query_endpoint: Optional[str]=None, update_endpoint: Optional[str]=None, standard: Optional[str]='rdf', local_copy: Optional [str]=None) ->None: """ Set up the RDFlib graph :param source_file: either a path for a local file or a URL :param serialization: serialization of the input :param query_endpoint: SPARQL endpoint for queries, read access :param update_endpoint: SPARQL endpoint for UPDATE queries, write access :param standard: RDF, RDFS, or OWL :param local_copy: new local copy for storing changes """ self.source_file = source_file self.serialization = serialization self.query_endpoint = query_endpoint self.update_endpoint = update_endpoint self.standard = standard self.local_copy = local_copy try: import rdflib from rdflib.graph import DATASET_DEFAULT_GRAPH_ID as default from rdflib.plugins.stores import sparqlstore except ImportError: raise ValueError( 'Could not import rdflib python package. Please install it with `pip install rdflib`.' ) if self.standard not in (supported_standards := ('rdf', 'rdfs', 'owl')): raise ValueError( f'Invalid standard. Supported standards are: {supported_standards}.' ) if not source_file and not query_endpoint or source_file and ( query_endpoint or update_endpoint): raise ValueError( 'Could not unambiguously initialize the graph wrapper. Specify either a file (local or online) via the source_file or a triple store via the endpoints.' 
) if source_file: if source_file.startswith('http'): self.mode = 'online' else: self.mode = 'local' if self.local_copy is None: self.local_copy = self.source_file self.graph = rdflib.Graph() self.graph.parse_folder(source_file, format=self.serialization) if query_endpoint: self.mode = 'store' if not update_endpoint: self._store = sparqlstore.SPARQLStore() self._store.open(query_endpoint) else: self._store = sparqlstore.SPARQLUpdateStore() self._store.open((query_endpoint, update_endpoint)) self.graph = rdflib.Graph(self._store, identifier=default) if not len(self.graph): raise AssertionError('The graph is empty.') self.schema = '' self.load_schema()
Set up the RDFlib graph :param source_file: either a path for a local file or a URL :param serialization: serialization of the input :param query_endpoint: SPARQL endpoint for queries, read access :param update_endpoint: SPARQL endpoint for UPDATE queries, write access :param standard: RDF, RDFS, or OWL :param local_copy: new local copy for storing changes
__init__
super().__init__(**kwargs) self._validate_uri() try: from mlflow.deployments import get_deploy_client self._client = get_deploy_client(self.target_uri) except ImportError as e: raise ImportError( f'Failed to create the client. Please run `pip install mlflow{self._mlflow_extras}` to install required dependencies.' ) from e
def __init__(self, **kwargs: Any): super().__init__(**kwargs) self._validate_uri() try: from mlflow.deployments import get_deploy_client self._client = get_deploy_client(self.target_uri) except ImportError as e: raise ImportError( f'Failed to create the client. Please run `pip install mlflow{self._mlflow_extras}` to install required dependencies.' ) from e
null
search
"""Return the fake document.""" document = Document(page_content=_PAGE_CONTENT) return document
def search(self, search: str) ->Union[str, Document]: """Return the fake document.""" document = Document(page_content=_PAGE_CONTENT) return document
Return the fake document.
test_json_schema_evaluator_requires_reference
assert json_schema_evaluator.requires_reference is True
@pytest.mark.requires('jsonschema') def test_json_schema_evaluator_requires_reference(json_schema_evaluator: JsonSchemaEvaluator) ->None: assert json_schema_evaluator.requires_reference is True
null
_import_google_finance
from langchain_community.utilities.google_finance import GoogleFinanceAPIWrapper return GoogleFinanceAPIWrapper
def _import_google_finance() ->Any: from langchain_community.utilities.google_finance import GoogleFinanceAPIWrapper return GoogleFinanceAPIWrapper
null
__init__
"""Initialize the PubMedLoader. Args: query: The query to be passed to the PubMed API. load_max_docs: The maximum number of documents to load. Defaults to 3. """ self.query = query self.load_max_docs = load_max_docs self._client = PubMedAPIWrapper(top_k_results=load_max_docs)
def __init__(self, query: str, load_max_docs: Optional[int]=3): """Initialize the PubMedLoader. Args: query: The query to be passed to the PubMed API. load_max_docs: The maximum number of documents to load. Defaults to 3. """ self.query = query self.load_max_docs = load_max_docs self._client = PubMedAPIWrapper(top_k_results=load_max_docs)
Initialize the PubMedLoader. Args: query: The query to be passed to the PubMed API. load_max_docs: The maximum number of documents to load. Defaults to 3.
test_pandas_output_parser_row_col_1
expected_output = {'1': 2} actual_output = parser.parse_folder('row:1[chicken]') assert actual_output == expected_output
def test_pandas_output_parser_row_col_1() ->None: expected_output = {'1': 2} actual_output = parser.parse_folder('row:1[chicken]') assert actual_output == expected_output
null
split_list_of_docs
"""Split Documents into subsets that each meet a cumulative length constraint. Args: docs: The full list of Documents. length_func: Function for computing the cumulative length of a set of Documents. token_max: The maximum cumulative length of any subset of Documents. **kwargs: Arbitrary additional keyword params to pass to each call of the length_func. Returns: A List[List[Document]]. """ new_result_doc_list = [] _sub_result_docs = [] for doc in docs: _sub_result_docs.append(doc) _num_tokens = length_func(_sub_result_docs, **kwargs) if _num_tokens > token_max: if len(_sub_result_docs) == 1: raise ValueError( 'A single document was longer than the context length, we cannot handle this.' ) new_result_doc_list.append(_sub_result_docs[:-1]) _sub_result_docs = _sub_result_docs[-1:] new_result_doc_list.append(_sub_result_docs) return new_result_doc_list
def split_list_of_docs(docs: List[Document], length_func: Callable, token_max: int, **kwargs: Any) ->List[List[Document]]: """Split Documents into subsets that each meet a cumulative length constraint. Args: docs: The full list of Documents. length_func: Function for computing the cumulative length of a set of Documents. token_max: The maximum cumulative length of any subset of Documents. **kwargs: Arbitrary additional keyword params to pass to each call of the length_func. Returns: A List[List[Document]]. """ new_result_doc_list = [] _sub_result_docs = [] for doc in docs: _sub_result_docs.append(doc) _num_tokens = length_func(_sub_result_docs, **kwargs) if _num_tokens > token_max: if len(_sub_result_docs) == 1: raise ValueError( 'A single document was longer than the context length, we cannot handle this.' ) new_result_doc_list.append(_sub_result_docs[:-1]) _sub_result_docs = _sub_result_docs[-1:] new_result_doc_list.append(_sub_result_docs) return new_result_doc_list
Split Documents into subsets that each meet a cumulative length constraint. Args: docs: The full list of Documents. length_func: Function for computing the cumulative length of a set of Documents. token_max: The maximum cumulative length of any subset of Documents. **kwargs: Arbitrary additional keyword params to pass to each call of the length_func. Returns: A List[List[Document]].
from_params
"""Instantiate retriever from params. Args: url (str): Vespa app URL. content_field (str): Field in results to return as Document page_content. k (Optional[int]): Number of Documents to return. Defaults to None. metadata_fields(Sequence[str] or "*"): Fields in results to include in document metadata. Defaults to empty tuple (). sources (Sequence[str] or "*" or None): Sources to retrieve from. Defaults to None. _filter (Optional[str]): Document filter condition expressed in YQL. Defaults to None. yql (Optional[str]): Full YQL query to be used. Should not be specified if _filter or sources are specified. Defaults to None. kwargs (Any): Keyword arguments added to query body. Returns: VespaRetriever: Instantiated VespaRetriever. """ try: from vespa.application import Vespa except ImportError: raise ImportError( 'pyvespa is not installed, please install with `pip install pyvespa`') app = Vespa(url) body = kwargs.copy() if yql and (sources or _filter): raise ValueError( 'yql should only be specified if both sources and _filter are not specified.' ) else: if metadata_fields == '*': _fields = '*' body['summary'] = 'short' else: _fields = ', '.join([content_field] + list(metadata_fields or [])) _sources = ', '.join(sources) if isinstance(sources, Sequence) else '*' _filter = f' and {_filter}' if _filter else '' yql = ( f'select {_fields} from sources {_sources} where userQuery(){_filter}') body['yql'] = yql if k: body['hits'] = k return cls(app=app, body=body, content_field=content_field, metadata_fields =metadata_fields)
@classmethod
def from_params(cls, url: str, content_field: str, *, k: Optional[int]=None,
    metadata_fields: Union[Sequence[str], Literal['*']]=(), sources: Union[
    Sequence[str], Literal['*'], None]=None, _filter: Optional[str]=None,
    yql: Optional[str]=None, **kwargs: Any) ->VespaRetriever:
    """Instantiate retriever from params.

    Args:
        url (str): Vespa app URL.
        content_field (str): Field in results to return as Document page_content.
        k (Optional[int]): Number of Documents to return. Defaults to None.
        metadata_fields(Sequence[str] or "*"): Fields in results to include in
            document metadata. Defaults to empty tuple ().
        sources (Sequence[str] or "*" or None): Sources to retrieve
            from. Defaults to None.
        _filter (Optional[str]): Document filter condition expressed in YQL.
            Defaults to None.
        yql (Optional[str]): Full YQL query to be used. Should not be specified
            if _filter or sources are specified. Defaults to None.
        kwargs (Any): Keyword arguments added to query body.

    Returns:
        VespaRetriever: Instantiated VespaRetriever.
    """
    try:
        from vespa.application import Vespa
    except ImportError:
        raise ImportError(
            'pyvespa is not installed, please install with `pip install pyvespa`'
            )
    app = Vespa(url)
    body = kwargs.copy()
    if yql:
        if sources or _filter:
            raise ValueError(
                'yql should only be specified if both sources and _filter are not specified.'
                )
        # BUG FIX: previously a caller-supplied ``yql`` (with no sources or
        # _filter) fell through to the query-construction branch below and
        # was silently overwritten by a generated query.
    else:
        if metadata_fields == '*':
            _fields = '*'
            # A short summary avoids returning every stored field.
            body['summary'] = 'short'
        else:
            _fields = ', '.join([content_field] + list(metadata_fields or []))
        _sources = ', '.join(sources) if isinstance(sources, Sequence) else '*'
        _filter = f' and {_filter}' if _filter else ''
        yql = (
            f'select {_fields} from sources {_sources} where userQuery(){_filter}'
            )
    body['yql'] = yql
    if k:
        body['hits'] = k
    return cls(app=app, body=body, content_field=content_field,
        metadata_fields=metadata_fields)
Instantiate retriever from params. Args: url (str): Vespa app URL. content_field (str): Field in results to return as Document page_content. k (Optional[int]): Number of Documents to return. Defaults to None. metadata_fields(Sequence[str] or "*"): Fields in results to include in document metadata. Defaults to empty tuple (). sources (Sequence[str] or "*" or None): Sources to retrieve from. Defaults to None. _filter (Optional[str]): Document filter condition expressed in YQL. Defaults to None. yql (Optional[str]): Full YQL query to be used. Should not be specified if _filter or sources are specified. Defaults to None. kwargs (Any): Keyword arguments added to query body. Returns: VespaRetriever: Instantiated VespaRetriever.
is_lc_serializable
return True
@classmethod
def is_lc_serializable(cls) ->bool:
    """Signal that this class supports LangChain serialization."""
    return True
null
__init__
"""Initialize the embedder. Args: underlying_embeddings: the embedder to use for computing embeddings. document_embedding_store: The store to use for caching document embeddings. """ super().__init__() self.document_embedding_store = document_embedding_store self.underlying_embeddings = underlying_embeddings
def __init__(self, underlying_embeddings: Embeddings,
    document_embedding_store: BaseStore[str, List[float]]) ->None:
    """Initialize the embedder.

    Args:
        underlying_embeddings: the embedder to use for computing embeddings.
        document_embedding_store: The store to use for caching
            document embeddings.
    """
    super().__init__()
    # Keep handles to both the cache store and the wrapped embedder.
    self.underlying_embeddings = underlying_embeddings
    self.document_embedding_store = document_embedding_store
Initialize the embedder. Args: underlying_embeddings: the embedder to use for computing embeddings. document_embedding_store: The store to use for caching document embeddings.
_import_neo4j_vector
from langchain_community.vectorstores.neo4j_vector import Neo4jVector return Neo4jVector
def _import_neo4j_vector() ->Any:
    """Lazily import and return the Neo4jVector vector-store class."""
    from langchain_community.vectorstores.neo4j_vector import Neo4jVector
    return Neo4jVector
null
output_keys
"""Return the output keys. :meta private: """ _output_keys = [self.output_key] return _output_keys
@property
def output_keys(self) ->List[str]:
    """Return the output keys.

    :meta private:
    """
    return [self.output_key]
Return the output keys. :meta private:
evaluate
"""Synchronously process the page and return the resulting text. Args: page: The page to process. browser: The browser instance. response: The response from page.goto(). Returns: text: The text content of the page. """ pass
@abstractmethod
def evaluate(self, page: 'Page', browser: 'Browser', response: 'Response'
    ) ->str:
    """Synchronously process the page and return the resulting text.

    Args:
        page: The page to process.
        browser: The browser instance.
        response: The response from page.goto().

    Returns:
        text: The text content of the page.
    """
Synchronously process the page and return the resulting text. Args: page: The page to process. browser: The browser instance. response: The response from page.goto(). Returns: text: The text content of the page.
test__split_list_long_single_doc
"""Test splitting of a long single doc.""" docs = [Document(page_content='foo' * 100)] with pytest.raises(ValueError): split_list_of_docs(docs, _fake_docs_len_func, 100)
def test__split_list_long_single_doc() ->None:
    """Test splitting of a long single doc."""
    oversized = [Document(page_content='foo' * 100)]
    # A lone document over the limit cannot be split further.
    with pytest.raises(ValueError):
        split_list_of_docs(oversized, _fake_docs_len_func, 100)
Test splitting of a long single doc.
test_create_external_handler
"""If we're using a Streamlit that *does* expose its own callback handler, delegate to that implementation. """ mock_streamlit_module = MagicMock() def external_import_success(name: str, globals: Any, locals: Any, fromlist: Any, level: int) ->Any: if name == 'streamlit.external.langchain': return mock_streamlit_module return self.builtins_import(name, globals, locals, fromlist, level) builtins.__import__ = external_import_success parent_container = MagicMock() thought_labeler = MagicMock() StreamlitCallbackHandler(parent_container, max_thought_containers=1, expand_new_thoughts=True, collapse_completed_thoughts=False, thought_labeler=thought_labeler) mock_streamlit_module.StreamlitCallbackHandler.assert_called_once_with( parent_container, max_thought_containers=1, expand_new_thoughts=True, collapse_completed_thoughts=False, thought_labeler=thought_labeler)
def test_create_external_handler(self) ->None:
    """If we're using a Streamlit that *does* expose its own callback handler,
    delegate to that implementation.
    """
    fake_streamlit = MagicMock()

    def patched_import(name: str, globals: Any, locals: Any, fromlist: Any,
        level: int) ->Any:
        # Pretend Streamlit ships its own LangChain integration module.
        if name == 'streamlit.external.langchain':
            return fake_streamlit
        return self.builtins_import(name, globals, locals, fromlist, level)
    builtins.__import__ = patched_import
    container = MagicMock()
    labeler = MagicMock()
    StreamlitCallbackHandler(container, max_thought_containers=1,
        expand_new_thoughts=True, collapse_completed_thoughts=False,
        thought_labeler=labeler)
    # Construction must have been delegated to Streamlit's own handler.
    fake_streamlit.StreamlitCallbackHandler.assert_called_once_with(container,
        max_thought_containers=1, expand_new_thoughts=True,
        collapse_completed_thoughts=False, thought_labeler=labeler)
If we're using a Streamlit that *does* expose its own callback handler, delegate to that implementation.
__init__
try: from upstash_redis import Redis except ImportError: raise ImportError( 'Could not import upstash redis python package. Please install it with `pip install upstash_redis`.' ) if url == '' or token == '': raise ValueError( 'UPSTASH_REDIS_REST_URL and UPSTASH_REDIS_REST_TOKEN are needed.') try: self.redis_client = Redis(url=url, token=token) except Exception: logger.error('Upstash Redis instance could not be initiated.') self.session_id = session_id self.key_prefix = key_prefix self.ttl = ttl
def __init__(self, session_id: str, url: str='', token: str='', key_prefix:
    str='message_store:', ttl: Optional[int]=None):
    """Initialize an Upstash-Redis-backed message history.

    Args:
        session_id: Identifier used to build the Redis key for this session.
        url: Upstash Redis REST URL.
        token: Upstash Redis REST token.
        key_prefix: Prefix prepended to the session id to form the key.
        ttl: Optional expiry (seconds) applied to the stored key.

    Raises:
        ImportError: If ``upstash_redis`` is not installed.
        ValueError: If ``url`` or ``token`` is empty.
    """
    try:
        from upstash_redis import Redis
    except ImportError:
        raise ImportError(
            'Could not import upstash redis python package. Please install it with `pip install upstash_redis`.'
            )
    if url == '' or token == '':
        raise ValueError(
            'UPSTASH_REDIS_REST_URL and UPSTASH_REDIS_REST_TOKEN are needed.')
    try:
        self.redis_client = Redis(url=url, token=token)
    except Exception:
        # BUG FIX: previously the failure was only logged and swallowed,
        # leaving ``self.redis_client`` unset so the first use raised a
        # confusing AttributeError far from the cause. Log, then re-raise
        # so the failure surfaces at construction time.
        logger.error('Upstash Redis instance could not be initiated.')
        raise
    self.session_id = session_id
    self.key_prefix = key_prefix
    self.ttl = ttl
null
test_extract_images_text_from_pdf
"""Test extract image from pdf and recognize text with rapid ocr""" _assert_with_parser(PyPDFParser(extract_images=True)) _assert_with_parser(PDFMinerParser(extract_images=True)) _assert_with_parser(PyMuPDFParser(extract_images=True)) _assert_with_parser(PyPDFium2Parser(extract_images=True))
@pytest.mark.requires('rapidocr_onnxruntime')
def test_extract_images_text_from_pdf() ->None:
    """Test extract image from pdf and recognize text with rapid ocr"""
    # Exercise every parser implementation with image extraction enabled.
    for parser_cls in (PyPDFParser, PDFMinerParser, PyMuPDFParser,
        PyPDFium2Parser):
        _assert_with_parser(parser_cls(extract_images=True))
Test extract image from pdf and recognize text with rapid ocr
is_lc_serializable
return True
@classmethod
def is_lc_serializable(cls) ->bool:
    """Signal that this class supports LangChain serialization."""
    return True
null
test_load_success
"""Test that returns the correct answer""" output = tfds_client.load() assert isinstance(output, list) assert len(output) == MAX_DOCS assert isinstance(output[0], Document) assert len(output[0].page_content) > 0 assert isinstance(output[0].page_content, str) assert isinstance(output[0].metadata, dict)
def test_load_success(tfds_client: TensorflowDatasets) ->None:
    """Test that returns the correct answer"""
    docs = tfds_client.load()
    assert isinstance(docs, list)
    assert len(docs) == MAX_DOCS
    first = docs[0]
    assert isinstance(first, Document)
    assert len(first.page_content) > 0
    assert isinstance(first.page_content, str)
    assert isinstance(first.metadata, dict)
Test that returns the correct answer
get_indexes
"""Helper to see your available indexes in marqo, useful if the from_texts method was used without an index name specified Returns: List[Dict[str, str]]: The list of indexes """ return self._client.get_indexes()['results']
def get_indexes(self) ->List[Dict[str, str]]:
    """Helper to see your available indexes in marqo, useful if the
    from_texts method was used without an index name specified

    Returns:
        List[Dict[str, str]]: The list of indexes
    """
    response = self._client.get_indexes()
    return response['results']
Helper to see your available indexes in marqo, useful if the from_texts method was used without an index name specified Returns: List[Dict[str, str]]: The list of indexes
get_input_schema
return _seq_input_schema(self.steps, config)
def get_input_schema(self, config: Optional[RunnableConfig]=None) ->Type[
    BaseModel]:
    """Derive this sequence's input schema from its first steps."""
    schema = _seq_input_schema(self.steps, config)
    return schema
null
test_annoy_search_not_found
"""Test what happens when document is not found.""" texts = ['foo', 'bar', 'baz'] docsearch = Annoy.from_texts(texts, FakeEmbeddings()) docsearch.docstore = InMemoryDocstore({}) with pytest.raises(ValueError): docsearch.similarity_search('foo')
def test_annoy_search_not_found() ->None:
    """Test what happens when document is not found."""
    store = Annoy.from_texts(['foo', 'bar', 'baz'], FakeEmbeddings())
    # Empty docstore: ids returned by the index resolve to nothing.
    store.docstore = InMemoryDocstore({})
    with pytest.raises(ValueError):
        store.similarity_search('foo')
Test what happens when document is not found.
test_visit_structured_query
query = 'What is the capital of France?' structured_query = StructuredQuery(query=query, filter=None, limit=None) expected: Tuple[str, Dict] = (query, {}) actual = DEFAULT_TRANSLATOR.visit_structured_query(structured_query) assert expected == actual
def test_visit_structured_query() ->None:
    """A query with no filter/limit should pass through with empty kwargs."""
    question = 'What is the capital of France?'
    sq = StructuredQuery(query=question, filter=None, limit=None)
    expected: Tuple[str, Dict] = (question, {})
    assert DEFAULT_TRANSLATOR.visit_structured_query(sq) == expected
null
_run
try: from amadeus import ResponseError except ImportError as e: raise ImportError( 'Unable to import amadeus, please install with `pip install amadeus`.' ) from e RESULTS_PER_PAGE = 10 client = self.client earliestDeparture = dt.strptime(departureDateTimeEarliest, '%Y-%m-%dT%H:%M:%S') latestDeparture = dt.strptime(departureDateTimeLatest, '%Y-%m-%dT%H:%M:%S') if earliestDeparture.date() != latestDeparture.date(): logger.error( " Error: Earliest and latest departure dates need to be the same date. If you're trying to search for round-trip flights, call this function for the outbound flight first, and then call again for the return flight. " ) return [None] try: response = client.shopping.flight_offers_search.get(originLocationCode= originLocationCode, destinationLocationCode=destinationLocationCode, departureDate=latestDeparture.strftime('%Y-%m-%d'), adults=1) except ResponseError as error: print(error) output = [] for offer in response.data: itinerary: Dict = {} itinerary['price'] = {} itinerary['price']['total'] = offer['price']['total'] currency = offer['price']['currency'] currency = response.result['dictionaries']['currencies'][currency] itinerary['price']['currency'] = {} itinerary['price']['currency'] = currency segments = [] for segment in offer['itineraries'][0]['segments']: flight = {} flight['departure'] = segment['departure'] flight['arrival'] = segment['arrival'] flight['flightNumber'] = segment['number'] carrier = segment['carrierCode'] carrier = response.result['dictionaries']['carriers'][carrier] flight['carrier'] = carrier segments.append(flight) itinerary['segments'] = [] itinerary['segments'] = segments output.append(itinerary) for index, offer in enumerate(output): offerDeparture = dt.strptime(offer['segments'][0]['departure']['at'], '%Y-%m-%dT%H:%M:%S') if offerDeparture > latestDeparture: output.pop(index) startIndex = (page_number - 1) * RESULTS_PER_PAGE endIndex = startIndex + RESULTS_PER_PAGE return output[startIndex:endIndex]
def _run(self, originLocationCode: str, destinationLocationCode: str,
    departureDateTimeEarliest: str, departureDateTimeLatest: str,
    page_number: int=1, run_manager: Optional[CallbackManagerForToolRun]=None
    ) ->list:
    """Search one-way flight offers via the Amadeus API.

    Args:
        originLocationCode: IATA code of the departure airport.
        destinationLocationCode: IATA code of the arrival airport.
        departureDateTimeEarliest: Earliest departure, '%Y-%m-%dT%H:%M:%S'.
        departureDateTimeLatest: Latest departure, same format; must be the
            same calendar date as the earliest.
        page_number: 1-based page of results (10 results per page).
        run_manager: Optional callback manager (unused).

    Returns:
        One page of itinerary dicts; ``[None]`` on invalid input and ``[]``
        on an API error.
    """
    try:
        from amadeus import ResponseError
    except ImportError as e:
        raise ImportError(
            'Unable to import amadeus, please install with `pip install amadeus`.'
            ) from e
    RESULTS_PER_PAGE = 10
    client = self.client
    earliestDeparture = dt.strptime(departureDateTimeEarliest,
        '%Y-%m-%dT%H:%M:%S')
    latestDeparture = dt.strptime(departureDateTimeLatest, '%Y-%m-%dT%H:%M:%S')
    if earliestDeparture.date() != latestDeparture.date():
        logger.error(
            " Error: Earliest and latest departure dates need to be the same date. If you're trying to search for round-trip flights, call this function for the outbound flight first, and then call again for the return flight. "
            )
        return [None]
    try:
        response = client.shopping.flight_offers_search.get(originLocationCode
            =originLocationCode, destinationLocationCode=
            destinationLocationCode, departureDate=latestDeparture.strftime
            ('%Y-%m-%d'), adults=1)
    except ResponseError as error:
        # BUG FIX: previously the error was only printed and execution fell
        # through to use the undefined ``response`` (NameError). Log and
        # return an empty result set instead.
        logger.error(error)
        return []
    output = []
    for offer in response.data:
        itinerary: Dict = {}
        itinerary['price'] = {'total': offer['price']['total']}
        # Resolve the currency code via the response dictionaries.
        currency = response.result['dictionaries']['currencies'][offer[
            'price']['currency']]
        itinerary['price']['currency'] = currency
        segments = []
        for segment in offer['itineraries'][0]['segments']:
            flight = {}
            flight['departure'] = segment['departure']
            flight['arrival'] = segment['arrival']
            flight['flightNumber'] = segment['number']
            flight['carrier'] = response.result['dictionaries']['carriers'][
                segment['carrierCode']]
            segments.append(flight)
        itinerary['segments'] = segments
        output.append(itinerary)
    # BUG FIX: the original popped from ``output`` while enumerating it,
    # which skips the element following every removal. Build a filtered
    # list instead, keeping only offers departing by the latest time.
    output = [offer for offer in output if dt.strptime(offer['segments'][0]
        ['departure']['at'], '%Y-%m-%dT%H:%M:%S') <= latestDeparture]
    startIndex = (page_number - 1) * RESULTS_PER_PAGE
    endIndex = startIndex + RESULTS_PER_PAGE
    return output[startIndex:endIndex]
null
resize_base64_image
""" Resize an image encoded as a Base64 string :param base64_string: Base64 string :param size: Image size :return: Re-sized Base64 string """ img_data = base64.b64decode(base64_string) img = Image.open(io.BytesIO(img_data)) resized_img = img.resize(size, Image.LANCZOS) buffered = io.BytesIO() resized_img.save(buffered, format=img.format) return base64.b64encode(buffered.getvalue()).decode('utf-8')
def resize_base64_image(base64_string, size=(128, 128)):
    """
    Resize an image encoded as a Base64 string

    :param base64_string: Base64 string
    :param size: Image size
    :return: Re-sized Base64 string
    """
    raw = base64.b64decode(base64_string)
    image = Image.open(io.BytesIO(raw))
    scaled = image.resize(size, Image.LANCZOS)
    out = io.BytesIO()
    # Re-serialize in the source image's own format (PNG, JPEG, ...).
    scaled.save(out, format=image.format)
    return base64.b64encode(out.getvalue()).decode('utf-8')
Resize an image encoded as a Base64 string :param base64_string: Base64 string :param size: Image size :return: Re-sized Base64 string
_identifying_params
return {**{'endpoint': self.endpoint, 'model': self.model}, **super(). _identifying_params}
@property
def _identifying_params(self) ->Dict[str, Any]:
    """Endpoint and model name, merged with the base class's params."""
    params: Dict[str, Any] = {'endpoint': self.endpoint, 'model': self.model}
    # Base-class entries win on key collisions, matching the merge order.
    params.update(super()._identifying_params)
    return params
null
_identifying_params
"""Get the identifying parameters.""" _model_kwargs = self.model_kwargs or {} return {'eas_service_url': self.eas_service_url, 'eas_service_token': self. eas_service_token, **_model_kwargs}
@property
def _identifying_params(self) ->Mapping[str, Any]:
    """Get the identifying parameters."""
    extra = self.model_kwargs or {}
    return {'eas_service_url': self.eas_service_url, 'eas_service_token':
        self.eas_service_token, **extra}
Get the identifying parameters.
get_or_create
""" Get or create a collection. Returns [Collection, bool] where the bool is True if the collection was created. """ created = False collection = cls.get_by_name(session, name) if collection: return collection, created collection = cls(name=name, cmetadata=cmetadata) session.add(collection) session.commit() created = True return collection, created
@classmethod
def get_or_create(cls, session: Session, name: str, cmetadata: Optional[
    dict]=None) ->Tuple['CollectionStore', bool]:
    """
    Get or create a collection.
    Returns [Collection, bool] where the bool is True if the collection was created.
    """
    existing = cls.get_by_name(session, name)
    if existing:
        return existing, False
    collection = cls(name=name, cmetadata=cmetadata)
    session.add(collection)
    session.commit()
    return collection, True
Get or create a collection. Returns [Collection, bool] where the bool is True if the collection was created.
validate_environment
"""Validate that the required Python package exists.""" try: from stackapi import StackAPI values['client'] = StackAPI('stackoverflow') except ImportError: raise ImportError( "The 'stackapi' Python package is not installed. Please install it with `pip install stackapi`." ) return values
@root_validator()
def validate_environment(cls, values: Dict) ->Dict:
    """Validate that the required Python package exists."""
    try:
        # Import and construct inside one try so a missing package is
        # reported with an actionable message.
        from stackapi import StackAPI
        values['client'] = StackAPI('stackoverflow')
    except ImportError:
        raise ImportError(
            "The 'stackapi' Python package is not installed. Please install it with `pip install stackapi`."
            )
    return values
Validate that the required Python package exists.
_load_blocks
"""Read a block and its children.""" result_lines_arr: List[str] = [] cur_block_id: str = block_id while cur_block_id: data = self._request(BLOCK_URL.format(block_id=cur_block_id)) for result in data['results']: result_obj = result[result['type']] if 'rich_text' not in result_obj: continue cur_result_text_arr: List[str] = [] for rich_text in result_obj['rich_text']: if 'text' in rich_text: cur_result_text_arr.append('\t' * num_tabs + rich_text[ 'text']['content']) if result['has_children']: children_text = self._load_blocks(result['id'], num_tabs= num_tabs + 1) cur_result_text_arr.append(children_text) result_lines_arr.append('\n'.join(cur_result_text_arr)) cur_block_id = data.get('next_cursor') return '\n'.join(result_lines_arr)
def _load_blocks(self, block_id: str, num_tabs: int=0) ->str:
    """Read a block and its children.

    Follows the API's ``next_cursor`` pagination and recurses into child
    blocks one tab level deeper.
    """
    lines: List[str] = []
    cursor: str = block_id
    while cursor:
        data = self._request(BLOCK_URL.format(block_id=cursor))
        for item in data['results']:
            payload = item[item['type']]
            # Blocks without rich text (dividers, images, ...) add nothing.
            if 'rich_text' not in payload:
                continue
            parts: List[str] = []
            for rich_text in payload['rich_text']:
                if 'text' in rich_text:
                    parts.append('\t' * num_tabs + rich_text['text'][
                        'content'])
            if item['has_children']:
                parts.append(self._load_blocks(item['id'], num_tabs=
                    num_tabs + 1))
            lines.append('\n'.join(parts))
        cursor = data.get('next_cursor')
    return '\n'.join(lines)
Read a block and its children.
_prepare_draft_message
draft_message = EmailMessage() draft_message.set_content(message) draft_message['To'] = ', '.join(to) draft_message['Subject'] = subject if cc is not None: draft_message['Cc'] = ', '.join(cc) if bcc is not None: draft_message['Bcc'] = ', '.join(bcc) encoded_message = base64.urlsafe_b64encode(draft_message.as_bytes()).decode() return {'message': {'raw': encoded_message}}
def _prepare_draft_message(self, message: str, to: List[str], subject: str,
    cc: Optional[List[str]]=None, bcc: Optional[List[str]]=None) ->dict:
    """Build a draft payload: a MIME message, base64url-encoded under 'raw'."""
    mime = EmailMessage()
    mime.set_content(message)
    mime['To'] = ', '.join(to)
    mime['Subject'] = subject
    if cc is not None:
        mime['Cc'] = ', '.join(cc)
    if bcc is not None:
        mime['Bcc'] = ', '.join(bcc)
    raw = base64.urlsafe_b64encode(mime.as_bytes()).decode()
    return {'message': {'raw': raw}}
null
get_prompt
"""Get default prompt for a language model. Args: llm: Language model to get prompt for. Returns: Prompt to use for the language model. """ for condition, prompt in self.conditionals: if condition(llm): return prompt return self.default_prompt
def get_prompt(self, llm: BaseLanguageModel) ->BasePromptTemplate:
    """Get default prompt for a language model.

    Args:
        llm: Language model to get prompt for.

    Returns:
        Prompt to use for the language model.
    """
    # First matching predicate wins; otherwise fall back to the default.
    return next((prompt for condition, prompt in self.conditionals if
        condition(llm)), self.default_prompt)
Get default prompt for a language model. Args: llm: Language model to get prompt for. Returns: Prompt to use for the language model.
handle_endtag
"""Hook when a tag is closed.""" self.depth -= 1 top_of_stack = dict(self.stack.pop(-1)) is_leaf = self.data is not None value = self.data if is_leaf else top_of_stack self.stack[-1][tag].append(value) self.data = None
def handle_endtag(self, tag: str) ->None:
    """Hook when a tag is closed."""
    self.depth -= 1
    closed = dict(self.stack.pop(-1))
    # Pending character data means this element was a leaf node; otherwise
    # attach the accumulated child mapping.
    if self.data is not None:
        value = self.data
    else:
        value = closed
    self.stack[-1][tag].append(value)
    self.data = None
Hook when a tag is closed.
similarity_search_by_vector
"""Perform a similarity search with MyScale by vectors Args: query (str): query string k (int, optional): Top K neighbors to retrieve. Defaults to 4. where_str (Optional[str], optional): where condition string. Defaults to None. NOTE: Please do not let end-user to fill this and always be aware of SQL injection. When dealing with metadatas, remember to use `{self.metadata_column}.attribute` instead of `attribute` alone. The default name for it is `metadata`. Returns: List[Document]: List of (Document, similarity) """ q_str = self._build_qstr(embedding, k, where_str) try: return [Document(page_content=r[self.config.column_map['text']], metadata={k: r[k] for k in self.must_have_cols}) for r in self. client.query(q_str).named_results()] except Exception as e: logger.error(f'\x1b[91m\x1b[1m{type(e)}\x1b[0m \x1b[95m{str(e)}\x1b[0m') return []
def similarity_search_by_vector(self, embedding: List[float], k: int=4,
    where_str: Optional[str]=None, **kwargs: Any) ->List[Document]:
    """Perform a similarity search with MyScale by vectors

    Args:
        query (str): query string
        k (int, optional): Top K neighbors to retrieve. Defaults to 4.
        where_str (Optional[str], optional): where condition string.
            Defaults to None.

        NOTE: Please do not let end-user to fill this and always be aware
              of SQL injection. When dealing with metadatas, remember to
              use `{self.metadata_column}.attribute` instead of `attribute`
              alone. The default name for it is `metadata`.

    Returns:
        List[Document]: List of (Document, similarity)
    """
    query_sql = self._build_qstr(embedding, k, where_str)
    try:
        rows = self.client.query(query_sql).named_results()
        text_col = self.config.column_map['text']
        return [Document(page_content=row[text_col], metadata={col: row[
            col] for col in self.must_have_cols}) for row in rows]
    except Exception as e:
        logger.error(f'\x1b[91m\x1b[1m{type(e)}\x1b[0m \x1b[95m{str(e)}\x1b[0m'
            )
        return []
Perform a similarity search with MyScale by vectors Args: query (str): query string k (int, optional): Top K neighbors to retrieve. Defaults to 4. where_str (Optional[str], optional): where condition string. Defaults to None. NOTE: Please do not let end-user to fill this and always be aware of SQL injection. When dealing with metadatas, remember to use `{self.metadata_column}.attribute` instead of `attribute` alone. The default name for it is `metadata`. Returns: List[Document]: List of (Document, similarity)
add_resource
""" Add a resource to the resources list. Args: resource (str): The resource to be added. """ self.resources.append(resource)
def add_resource(self, resource: str) ->None:
    """
    Add a resource to the resources list.

    Args:
        resource (str): The resource to be added.
    """
    # Mutate the existing list in place so other holders see the update.
    self.resources.append(resource)
Add a resource to the resources list. Args: resource (str): The resource to be added.
_import_huggingface_text_gen_inference
from langchain_community.llms.huggingface_text_gen_inference import HuggingFaceTextGenInference return HuggingFaceTextGenInference
def _import_huggingface_text_gen_inference() ->Any:
    """Lazily import and return the HuggingFaceTextGenInference LLM class."""
    from langchain_community.llms.huggingface_text_gen_inference import HuggingFaceTextGenInference
    return HuggingFaceTextGenInference
null
test_ignore_images
html2text_transformer = Html2TextTransformer(ignore_images=False) multiple_tags_html = ( "<h1>First heading.</h1><p>First paragraph with an <img src='example.jpg' alt='Example image' width='500' height='600'></p>" ) documents = [Document(page_content=multiple_tags_html)] docs_transformed = html2text_transformer.transform_documents(documents) assert docs_transformed[0].page_content == """# First heading. First paragraph with an ![Example image](example.jpg) """ html2text_transformer = Html2TextTransformer(ignore_images=True) docs_transformed = html2text_transformer.transform_documents(documents) assert docs_transformed[0].page_content == """# First heading. First paragraph with an """
@pytest.mark.requires('html2text')
def test_ignore_images() ->None:
    """Image markdown is kept or dropped depending on ignore_images."""
    html = (
        "<h1>First heading.</h1><p>First paragraph with an <img src='example.jpg' alt='Example image' width='500' height='600'></p>"
        )
    docs = [Document(page_content=html)]
    with_images = Html2TextTransformer(ignore_images=False
        ).transform_documents(docs)
    assert with_images[0].page_content == (
        '# First heading.\n\nFirst paragraph with an ![Example image](example.jpg)\n\n'
        )
    without_images = Html2TextTransformer(ignore_images=True
        ).transform_documents(docs)
    assert without_images[0
        ].page_content == '# First heading.\n\nFirst paragraph with an\n\n'
null
_identifying_params
"""Get the identifying parameters.""" return self._default_params
@property
def _identifying_params(self) ->Mapping[str, Any]:
    """Get the identifying parameters."""
    params = self._default_params
    return params
Get the identifying parameters.
custom_document_builder
return Document(page_content='Mock content!', metadata={'page_number': -1, 'original_filename': 'Mock filename!'})
def custom_document_builder(_: Dict) ->Document:
    """Ignore the input record and return a fixed mock Document."""
    meta = {'page_number': -1, 'original_filename': 'Mock filename!'}
    return Document(page_content='Mock content!', metadata=meta)
null
test_raise_error_if_path_is_not_directory
loader = DirectoryLoader(__file__) with pytest.raises(ValueError) as e: loader.load() assert str(e.value) == f"Expected directory, got file: '{__file__}'"
def test_raise_error_if_path_is_not_directory() ->None:
    """Loading a file path (not a directory) must raise a ValueError."""
    file_loader = DirectoryLoader(__file__)
    with pytest.raises(ValueError) as excinfo:
        file_loader.load()
    assert str(excinfo.value) == f"Expected directory, got file: '{__file__}'"
null
from_retrievers
if default_prompt and not default_retriever: raise ValueError( '`default_retriever` must be specified if `default_prompt` is provided. Received only `default_prompt`.' ) destinations = [f"{r['name']}: {r['description']}" for r in retriever_infos] destinations_str = '\n'.join(destinations) router_template = MULTI_RETRIEVAL_ROUTER_TEMPLATE.format(destinations= destinations_str) router_prompt = PromptTemplate(template=router_template, input_variables=[ 'input'], output_parser=RouterOutputParser(next_inputs_inner_key='query')) router_chain = LLMRouterChain.from_llm(llm, router_prompt) destination_chains = {} for r_info in retriever_infos: prompt = r_info.get('prompt') retriever = r_info['retriever'] chain = RetrievalQA.from_llm(llm, prompt=prompt, retriever=retriever) name = r_info['name'] destination_chains[name] = chain if default_chain: _default_chain = default_chain elif default_retriever: _default_chain = RetrievalQA.from_llm(llm, prompt=default_prompt, retriever=default_retriever) else: prompt_template = DEFAULT_TEMPLATE.replace('input', 'query') prompt = PromptTemplate(template=prompt_template, input_variables=[ 'history', 'query']) _default_chain = ConversationChain(llm=ChatOpenAI(), prompt=prompt, input_key='query', output_key='result') return cls(router_chain=router_chain, destination_chains=destination_chains, default_chain=_default_chain, **kwargs)
@classmethod
def from_retrievers(cls, llm: BaseLanguageModel, retriever_infos: List[Dict[str, Any]], default_retriever: Optional[BaseRetriever]=None, default_prompt: Optional[PromptTemplate]=None, default_chain: Optional[Chain]=None, **kwargs: Any) ->MultiRetrievalQAChain:
    """Build a MultiRetrievalQAChain that routes a query to one of several retrievers.

    Args:
        llm: Language model used for the router and each destination QA chain.
        retriever_infos: One dict per destination with keys 'name',
            'description', 'retriever', and optionally 'prompt'.
        default_retriever: Retriever used to build a fallback QA chain.
        default_prompt: Prompt for the fallback QA chain; requires
            ``default_retriever``.
        default_chain: Explicit fallback chain; takes precedence over
            ``default_retriever``/``default_prompt``.
        **kwargs: Forwarded to the class constructor.

    Raises:
        ValueError: If ``default_prompt`` is given without ``default_retriever``.
    """
    if default_prompt and not default_retriever:
        raise ValueError(
            '`default_retriever` must be specified if `default_prompt` is provided. Received only `default_prompt`.'
            )
    # Describe each destination so the router LLM can choose among them.
    destinations = [f"{r['name']}: {r['description']}" for r in retriever_infos]
    destinations_str = '\n'.join(destinations)
    router_template = MULTI_RETRIEVAL_ROUTER_TEMPLATE.format(destinations=destinations_str)
    # The router's output parser exposes the rewritten question under 'query'.
    router_prompt = PromptTemplate(template=router_template, input_variables=['input'], output_parser=RouterOutputParser(next_inputs_inner_key='query'))
    router_chain = LLMRouterChain.from_llm(llm, router_prompt)
    destination_chains = {}
    for r_info in retriever_infos:
        prompt = r_info.get('prompt')  # may be None; RetrievalQA then uses its default
        retriever = r_info['retriever']
        chain = RetrievalQA.from_llm(llm, prompt=prompt, retriever=retriever)
        name = r_info['name']
        destination_chains[name] = chain
    # Fallback precedence: explicit chain > retriever-backed QA > plain conversation.
    if default_chain:
        _default_chain = default_chain
    elif default_retriever:
        _default_chain = RetrievalQA.from_llm(llm, prompt=default_prompt, retriever=default_retriever)
    else:
        prompt_template = DEFAULT_TEMPLATE.replace('input', 'query')
        prompt = PromptTemplate(template=prompt_template, input_variables=['history', 'query'])
        # NOTE(review): this fallback hard-codes ChatOpenAI() instead of using
        # the `llm` argument — confirm that is intended.
        _default_chain = ConversationChain(llm=ChatOpenAI(), prompt=prompt, input_key='query', output_key='result')
    return cls(router_chain=router_chain, destination_chains=destination_chains, default_chain=_default_chain, **kwargs)
null
finish_run
"""To finish the run.""" with self.mlflow.start_run(run_id=self.run.info.run_id, experiment_id=self. mlf_expid): self.mlflow.end_run()
def finish_run(self) -> None:
    """Close out the tracked MLflow run."""
    active_run_id = self.run.info.run_id
    with self.mlflow.start_run(run_id=active_run_id, experiment_id=self.mlf_expid):
        self.mlflow.end_run()
To finish the run.
validate_environment
"""Validate that api key is in your environment variable.""" gplaces_api_key = get_from_dict_or_env(values, 'gplaces_api_key', 'GPLACES_API_KEY') values['gplaces_api_key'] = gplaces_api_key try: import googlemaps values['google_map_client'] = googlemaps.Client(gplaces_api_key) except ImportError: raise ImportError( 'Could not import googlemaps python package. Please install it with `pip install googlemaps`.' ) return values
@root_validator()
def validate_environment(cls, values: Dict) -> Dict:
    """Validate that api key is in your environment variable."""
    api_key = get_from_dict_or_env(values, 'gplaces_api_key', 'GPLACES_API_KEY')
    values['gplaces_api_key'] = api_key
    try:
        import googlemaps
        values['google_map_client'] = googlemaps.Client(api_key)
    except ImportError:
        raise ImportError(
            'Could not import googlemaps python package. Please install it with `pip install googlemaps`.'
        )
    return values
Validate that api key is in your environment variable.
validate_prompt_input_variables
"""Validate that prompt input variables are consistent.""" memory_keys = values['memory'].memory_variables input_key = values['input_key'] if input_key in memory_keys: raise ValueError( f"The input key {input_key} was also found in the memory keys ({memory_keys}) - please provide keys that don't overlap." ) prompt_variables = values['prompt'].input_variables expected_keys = memory_keys + [input_key] if set(expected_keys) != set(prompt_variables): raise ValueError( f'Got unexpected prompt input variables. The prompt expects {prompt_variables}, but got {memory_keys} as inputs from memory, and {input_key} as the normal input key.' ) return values
@root_validator()
def validate_prompt_input_variables(cls, values: Dict) -> Dict:
    """Validate that prompt input variables are consistent."""
    memory_keys = values['memory'].memory_variables
    input_key = values['input_key']
    # The normal input key must not collide with a memory-provided key.
    if input_key in memory_keys:
        raise ValueError(
            f"The input key {input_key} was also found in the memory keys ({memory_keys}) - please provide keys that don't overlap."
        )
    expected_keys = memory_keys + [input_key]
    prompt_variables = values['prompt'].input_variables
    if set(prompt_variables) != set(expected_keys):
        raise ValueError(
            f'Got unexpected prompt input variables. The prompt expects {prompt_variables}, but got {memory_keys} as inputs from memory, and {input_key} as the normal input key.'
        )
    return values
Validate that prompt input variables are consistent.
_format_func
self._validate_func(func) if isinstance(func, Operator): value = self.OPERATOR_MAP[func.value] elif isinstance(func, Comparator): value = self.COMPARATOR_MAP[func.value] return f'{value}'
def _format_func(self, func: Union[Operator, Comparator]) -> str:
    """Translate an operator or comparator into its backend string form."""
    self._validate_func(func)
    if isinstance(func, Operator):
        mapped = self.OPERATOR_MAP[func.value]
    elif isinstance(func, Comparator):
        mapped = self.COMPARATOR_MAP[func.value]
    return f'{mapped}'
null
test_load_returns_limited_doc_content_chars
"""Test that returns limited doc_content_chars_max""" doc_content_chars_max = 100 api_client = ArxivAPIWrapper(doc_content_chars_max=doc_content_chars_max) docs = api_client.load('1605.08386') assert len(docs[0].page_content) == doc_content_chars_max
def test_load_returns_limited_doc_content_chars() -> None:
    """Test that returns limited doc_content_chars_max"""
    max_chars = 100
    wrapper = ArxivAPIWrapper(doc_content_chars_max=max_chars)
    documents = wrapper.load('1605.08386')
    assert len(documents[0].page_content) == max_chars
Test that returns limited doc_content_chars_max
_legacy_stream
prompt = self._format_messages_as_text(messages) for stream_resp in self._create_generate_stream(prompt, stop, **kwargs): if stream_resp: chunk = _stream_response_to_chat_generation_chunk(stream_resp) yield chunk if run_manager: run_manager.on_llm_new_token(chunk.text, verbose=self.verbose)
@deprecated('0.0.3', alternative='_stream')
def _legacy_stream(self, messages: List[BaseMessage], stop: Optional[List[str]]=None, run_manager: Optional[CallbackManagerForLLMRun]=None, **kwargs: Any) ->Iterator[ChatGenerationChunk]:
    """Stream chat generation chunks via the legacy generate endpoint.

    Formats the chat messages into a single prompt string, streams raw
    responses from the model, and yields each non-empty response as a
    ChatGenerationChunk, notifying the callback manager of every new token.
    """
    prompt = self._format_messages_as_text(messages)
    for stream_resp in self._create_generate_stream(prompt, stop, **kwargs):
        if stream_resp:  # skip empty/falsy stream entries
            chunk = _stream_response_to_chat_generation_chunk(stream_resp)
            # Yield first so consumers see the chunk even if the callback raises.
            yield chunk
            if run_manager:
                run_manager.on_llm_new_token(chunk.text, verbose=self.verbose)
null
__init__
"""Initialize the OBSDirectoryLoader with the specified settings. Args: bucket (str): The name of the OBS bucket to be used. endpoint (str): The endpoint URL of your OBS bucket. config (dict): The parameters for connecting to OBS, provided as a dictionary. The dictionary could have the following keys: - "ak" (str, optional): Your OBS access key (required if `get_token_from_ecs` is False and bucket policy is not public read). - "sk" (str, optional): Your OBS secret key (required if `get_token_from_ecs` is False and bucket policy is not public read). - "token" (str, optional): Your security token (required if using temporary credentials). - "get_token_from_ecs" (bool, optional): Whether to retrieve the security token from ECS. Defaults to False if not provided. If set to True, `ak`, `sk`, and `token` will be ignored. prefix (str, optional): The prefix to be added to the OBS key. Defaults to "". Note: Before using this class, make sure you have registered with OBS and have the necessary credentials. The `ak`, `sk`, and `endpoint` values are mandatory unless `get_token_from_ecs` is True or the bucket policy is public read. `token` is required when using temporary credentials. Example: To create a new OBSDirectoryLoader: ``` config = { "ak": "your-access-key", "sk": "your-secret-key" } ``` directory_loader = OBSDirectoryLoader("your-bucket-name", "your-end-endpoint", config, "your-prefix") """ try: from obs import ObsClient except ImportError: raise ImportError( 'Could not import esdk-obs-python python package. Please install it with `pip install esdk-obs-python`.' ) if not config: config = dict() if config.get('get_token_from_ecs'): self.client = ObsClient(server=endpoint, security_provider_policy='ECS') else: self.client = ObsClient(access_key_id=config.get('ak'), secret_access_key=config.get('sk'), security_token=config.get( 'token'), server=endpoint) self.bucket = bucket self.prefix = prefix
def __init__(self, bucket: str, endpoint: str, config: Optional[dict]=None, prefix: str=''):
    """Initialize the OBSDirectoryLoader with the specified settings.

    Args:
        bucket (str): The name of the OBS bucket to be used.
        endpoint (str): The endpoint URL of your OBS bucket.
        config (dict): The parameters for connecting to OBS, provided as a dictionary. The dictionary could have the following keys:
            - "ak" (str, optional): Your OBS access key (required if `get_token_from_ecs` is False and bucket policy is not public read).
            - "sk" (str, optional): Your OBS secret key (required if `get_token_from_ecs` is False and bucket policy is not public read).
            - "token" (str, optional): Your security token (required if using temporary credentials).
            - "get_token_from_ecs" (bool, optional): Whether to retrieve the security token from ECS. Defaults to False if not provided. If set to True, `ak`, `sk`, and `token` will be ignored.
        prefix (str, optional): The prefix to be added to the OBS key. Defaults to "".

    Note:
        Before using this class, make sure you have registered with OBS and have the necessary credentials. The `ak`, `sk`, and `endpoint` values are mandatory unless `get_token_from_ecs` is True or the bucket policy is public read. `token` is required when using temporary credentials.

    Example:
        To create a new OBSDirectoryLoader:
        ```
        config = {
            "ak": "your-access-key",
            "sk": "your-secret-key"
        }
        ```
        directory_loader = OBSDirectoryLoader("your-bucket-name", "your-end-endpoint", config, "your-prefix")
    """
    try:
        from obs import ObsClient
    except ImportError:
        raise ImportError(
            'Could not import esdk-obs-python python package. Please install it with `pip install esdk-obs-python`.'
            )
    if not config:
        config = dict()
    # ECS-hosted deployments fetch credentials automatically; explicit ak/sk/token ignored.
    if config.get('get_token_from_ecs'):
        self.client = ObsClient(server=endpoint, security_provider_policy='ECS')
    else:
        # Missing keys fall through as None, relying on OBS public-read policies.
        self.client = ObsClient(access_key_id=config.get('ak'), secret_access_key=config.get('sk'), security_token=config.get('token'), server=endpoint)
    self.bucket = bucket
    self.prefix = prefix
Initialize the OBSDirectoryLoader with the specified settings. Args: bucket (str): The name of the OBS bucket to be used. endpoint (str): The endpoint URL of your OBS bucket. config (dict): The parameters for connecting to OBS, provided as a dictionary. The dictionary could have the following keys: - "ak" (str, optional): Your OBS access key (required if `get_token_from_ecs` is False and bucket policy is not public read). - "sk" (str, optional): Your OBS secret key (required if `get_token_from_ecs` is False and bucket policy is not public read). - "token" (str, optional): Your security token (required if using temporary credentials). - "get_token_from_ecs" (bool, optional): Whether to retrieve the security token from ECS. Defaults to False if not provided. If set to True, `ak`, `sk`, and `token` will be ignored. prefix (str, optional): The prefix to be added to the OBS key. Defaults to "". Note: Before using this class, make sure you have registered with OBS and have the necessary credentials. The `ak`, `sk`, and `endpoint` values are mandatory unless `get_token_from_ecs` is True or the bucket policy is public read. `token` is required when using temporary credentials. Example: To create a new OBSDirectoryLoader: ``` config = { "ak": "your-access-key", "sk": "your-secret-key" } ``` directory_loader = OBSDirectoryLoader("your-bucket-name", "your-end-endpoint", config, "your-prefix")
list_packages
conn = http.client.HTTPSConnection('api.github.com') headers = {'Accept': 'application/vnd.github+json', 'X-GitHub-Api-Version': '2022-11-28', 'User-Agent': 'langchain-cli'} conn.request('GET', '/repos/langchain-ai/langchain/contents/templates', headers=headers) res = conn.getresponse() res_str = res.read() data = json.loads(res_str) package_names = [p['name'] for p in data if p['type'] == 'dir' and p['name' ] != 'docs'] package_names_filtered = [p for p in package_names if contains in p ] if contains else package_names return package_names_filtered
def list_packages(*, contains: Optional[str]=None):
    """List template package names from the langchain GitHub repository.

    Args:
        contains: If given, only return names containing this substring.

    Returns:
        List of package directory names under ``templates`` (excluding 'docs').
    """
    conn = http.client.HTTPSConnection('api.github.com')
    headers = {'Accept': 'application/vnd.github+json',
        'X-GitHub-Api-Version': '2022-11-28', 'User-Agent': 'langchain-cli'}
    try:
        conn.request('GET', '/repos/langchain-ai/langchain/contents/templates',
            headers=headers)
        res = conn.getresponse()
        data = json.loads(res.read())
    finally:
        # Close the connection even on error; the original leaked the socket.
        conn.close()
    package_names = [p['name'] for p in data if p['type'] == 'dir' and p[
        'name'] != 'docs']
    package_names_filtered = [p for p in package_names if contains in p
        ] if contains else package_names
    return package_names_filtered
null
similarity_search_with_score_by_vector
"""Return docs most similar to embedding vector. Args: embedding: Embedding to look up documents similar to. k: Number of Documents to return. Defaults to 4. filter: Filter on metadata properties, e.g. { "str_property": "foo", "int_property": 123 } brute_force: Whether to use brute force search. Defaults to False. fraction_lists_to_search: Optional percentage of lists to search, must be in range 0.0 and 1.0, exclusive. If Node, uses service's default which is 0.05. Returns: List of Documents most similar to the query vector with distance. """ del kwargs document_tuples = self._search_with_score_and_embeddings_by_vector(embedding, k, filter, brute_force, fraction_lists_to_search) return [(doc, distance) for doc, _, distance in document_tuples]
def similarity_search_with_score_by_vector(self, embedding: List[float], k: int=DEFAULT_TOP_K, filter: Optional[Dict[str, Any]]=None, brute_force: bool=False, fraction_lists_to_search: Optional[float]=None, **kwargs: Any) ->List[Tuple[Document, float]]:
    """Return docs most similar to embedding vector.

    Args:
        embedding: Embedding to look up documents similar to.
        k: Number of Documents to return. Defaults to 4.
        filter: Filter on metadata properties, e.g.
            {
                "str_property": "foo",
                "int_property": 123
            }
        brute_force: Whether to use brute force search. Defaults
            to False.
        fraction_lists_to_search: Optional percentage of lists to search,
            must be in range 0.0 and 1.0, exclusive.
            If None, uses service's default which is 0.05.
    Returns:
        List of Documents most similar to the query vector with distance.
    """
    del kwargs  # accepted for interface compatibility but unused here
    document_tuples = self._search_with_score_and_embeddings_by_vector(embedding, k, filter, brute_force, fraction_lists_to_search)
    # Drop the embedding element from each (doc, embedding, distance) tuple.
    return [(doc, distance) for doc, _, distance in document_tuples]
Return docs most similar to embedding vector. Args: embedding: Embedding to look up documents similar to. k: Number of Documents to return. Defaults to 4. filter: Filter on metadata properties, e.g. { "str_property": "foo", "int_property": 123 } brute_force: Whether to use brute force search. Defaults to False. fraction_lists_to_search: Optional percentage of lists to search, must be in range 0.0 and 1.0, exclusive. If None, uses service's default which is 0.05. Returns: List of Documents most similar to the query vector with distance.
clear
"""Nothing to clear, got a memory like a vault.""" pass
def clear(self) -> None:
    """Nothing to clear, got a memory like a vault."""
    return None
Nothing to clear, got a memory like a vault.
test_finish_custom
"""Test custom finish.""" parser = SelfAskOutputParser(finish_string='Finally: ') _input = 'Finally: 4' output = parser.invoke(_input) expected_output = AgentFinish(return_values={'output': '4'}, log=_input) assert output == expected_output
def test_finish_custom() -> None:
    """Test custom finish."""
    parser = SelfAskOutputParser(finish_string='Finally: ')
    text = 'Finally: 4'
    result = parser.invoke(text)
    assert result == AgentFinish(return_values={'output': '4'}, log=text)
Test custom finish.
input_iter
for token in STREAMED_TOKENS: yield token
def input_iter(_: Any) -> Iterator[str]:
    """Yield the canned streamed tokens one by one, ignoring the input."""
    yield from STREAMED_TOKENS
null
__init__
"""Initialize with a web path.""" if not web_path.startswith('https://www.ifixit.com'): raise ValueError("web path must start with 'https://www.ifixit.com'") path = web_path.replace('https://www.ifixit.com', '') allowed_paths = ['/Device', '/Guide', '/Answers', '/Teardown'] """ TODO: Add /Wiki """ if not any(path.startswith(allowed_path) for allowed_path in allowed_paths): raise ValueError( 'web path must start with /Device, /Guide, /Teardown or /Answers') pieces = [x for x in path.split('/') if x] """Teardowns are just guides by a different name""" self.page_type = pieces[0] if pieces[0] != 'Teardown' else 'Guide' if self.page_type == 'Guide' or self.page_type == 'Answers': self.id = pieces[2] else: self.id = pieces[1] self.web_path = web_path
def __init__(self, web_path: str):
    """Initialize with a web path."""
    site_root = 'https://www.ifixit.com'
    if not web_path.startswith(site_root):
        raise ValueError("web path must start with 'https://www.ifixit.com'")
    path = web_path.replace(site_root, '')
    # TODO: Add /Wiki
    allowed_paths = ['/Device', '/Guide', '/Answers', '/Teardown']
    if not any(path.startswith(allowed) for allowed in allowed_paths):
        raise ValueError(
            'web path must start with /Device, /Guide, /Teardown or /Answers')
    pieces = [segment for segment in path.split('/') if segment]
    # Teardowns are just guides by a different name.
    self.page_type = 'Guide' if pieces[0] == 'Teardown' else pieces[0]
    # Guides and Answers carry the numeric id in the third segment.
    self.id = pieces[2] if self.page_type in ('Guide', 'Answers') else pieces[1]
    self.web_path = web_path
Initialize with a web path.
test_cassandra_add_extra
"""Test end to end construction with further insertions.""" texts = ['foo', 'bar', 'baz'] metadatas = [{'page': i} for i in range(len(texts))] docsearch = _vectorstore_from_texts(texts, metadatas=metadatas) texts2 = ['foo2', 'bar2', 'baz2'] metadatas2 = [{'page': i + 3} for i in range(len(texts))] docsearch.add_texts(texts2, metadatas2) output = docsearch.similarity_search('foo', k=10) assert len(output) == 6
def test_cassandra_add_extra() -> None:
    """Test end to end construction with further insertions."""
    base_texts = ['foo', 'bar', 'baz']
    base_metadatas = [{'page': i} for i in range(len(base_texts))]
    store = _vectorstore_from_texts(base_texts, metadatas=base_metadatas)
    extra_texts = ['foo2', 'bar2', 'baz2']
    extra_metadatas = [{'page': i + 3} for i in range(len(base_texts))]
    store.add_texts(extra_texts, extra_metadatas)
    results = store.similarity_search('foo', k=10)
    assert len(results) == 6
Test end to end construction with further insertions.
f
args_: map[str] = map(str, args) return f' {op_name} '.join(args_)
def f(*args: Any) -> str:
    """Join the stringified arguments with the enclosing operator name."""
    stringified = (str(arg) for arg in args)
    return f' {op_name} '.join(stringified)
null
EmbedAndKeep
return Embed(anything, keep=True)
def EmbedAndKeep(anything: Any) -> Any:
    """Embed the value while flagging that the original should be kept."""
    embedded = Embed(anything, keep=True)
    return embedded
null
test_llm_on_chat_dataset
llm = OpenAI(temperature=0) eval_config = RunEvalConfig(evaluators=[EvaluatorType.QA, EvaluatorType. CRITERIA]) run_on_dataset(dataset_name=chat_dataset_name, llm_or_chain_factory=llm, client=client, evaluation=eval_config, project_name=eval_project_name, tags=['shouldpass']) _check_all_feedback_passed(eval_project_name, client)
def test_llm_on_chat_dataset(chat_dataset_name: str, eval_project_name: str, client: Client) -> None:
    """Run QA and criteria evaluators over an LLM on a chat dataset."""
    llm = OpenAI(temperature=0)
    config = RunEvalConfig(evaluators=[EvaluatorType.QA, EvaluatorType.CRITERIA])
    run_on_dataset(dataset_name=chat_dataset_name, llm_or_chain_factory=llm,
        client=client, evaluation=config, project_name=eval_project_name,
        tags=['shouldpass'])
    _check_all_feedback_passed(eval_project_name, client)
null
load
"""Load from a file path.""" return [doc for doc in self.lazy_load()]
def load(self) ->List[Document]:
    """Load from a file path.

    Returns:
        All documents produced by ``lazy_load``, materialized as a list.
    """
    # list(...) materializes the lazy iterator directly, replacing the
    # redundant identity comprehension of the original.
    return list(self.lazy_load())
Load from a file path.
_call
"""First try to lookup in queries, else return 'foo' or 'bar'.""" response = self.responses[self.i] if self.i < len(self.responses) - 1: self.i += 1 else: self.i = 0 return response
def _call(self, messages: List[BaseMessage], stop: Optional[List[str]]=None, run_manager: Optional[CallbackManagerForLLMRun]=None, **kwargs: Any) ->str:
    """First try to lookup in queries, else return 'foo' or 'bar'."""
    response = self.responses[self.i]
    # Advance the cursor, wrapping back to the first canned response.
    self.i = self.i + 1 if self.i < len(self.responses) - 1 else 0
    return response
First try to lookup in queries, else return 'foo' or 'bar'.
_search_api_results
request_details = self._prepare_request(query, **kwargs) response = requests.get(url=request_details['url'], params=request_details[ 'params'], headers=request_details['headers']) response.raise_for_status() return response.json()
def _search_api_results(self, query: str, **kwargs: Any) ->dict:
    """Execute the prepared search request and return the decoded JSON body."""
    details = self._prepare_request(query, **kwargs)
    response = requests.get(url=details['url'], params=details['params'],
        headers=details['headers'])
    response.raise_for_status()
    return response.json()
null
_default_score_normalizer
return 1 - 1 / (1 + np.exp(val))
def _default_score_normalizer(val: float) ->float:
    """Map a raw score into (0, 1) via a flipped logistic sigmoid."""
    sigmoid = 1 / (1 + np.exp(val))
    return 1 - sigmoid
null
build_extra
"""Build extra kwargs from additional params that were passed in.""" all_required_field_names = get_pydantic_field_names(cls) extra = values.get('model_kwargs', {}) for field_name in list(values): if field_name in extra: raise ValueError(f'Found {field_name} supplied twice.') if field_name not in all_required_field_names: logger.warning( f"""WARNING! {field_name} is not default parameter. {field_name} was transferred to model_kwargs. Please confirm that {field_name} is what you intended.""" ) extra[field_name] = values.pop(field_name) invalid_model_kwargs = all_required_field_names.intersection(extra.keys()) if invalid_model_kwargs: raise ValueError( f'Parameters {invalid_model_kwargs} should be specified explicitly. Instead they were passed in as part of `model_kwargs` parameter.' ) values['model_kwargs'] = extra return values
@root_validator(pre=True)
def build_extra(cls, values: Dict[str, Any]) ->Dict[str, Any]:
    """Build extra kwargs from additional params that were passed in.

    Any field not declared on the model is moved into
    ``values['model_kwargs']`` (with a warning). Declared fields are rejected
    if they also appear inside ``model_kwargs`` or are supplied twice.

    Raises:
        ValueError: If a field is supplied both directly and via
            ``model_kwargs``, or if a declared field is passed inside
            ``model_kwargs``.
    """
    all_required_field_names = get_pydantic_field_names(cls)
    extra = values.get('model_kwargs', {})
    # Iterate over a snapshot of the keys: undeclared fields are popped below.
    for field_name in list(values):
        if field_name in extra:
            raise ValueError(f'Found {field_name} supplied twice.')
        if field_name not in all_required_field_names:
            logger.warning(
                f"""WARNING! {field_name} is not default parameter.
                    {field_name} was transferred to model_kwargs.
                    Please confirm that {field_name} is what you intended."""
                )
            extra[field_name] = values.pop(field_name)
    invalid_model_kwargs = all_required_field_names.intersection(extra.keys())
    if invalid_model_kwargs:
        raise ValueError(
            f'Parameters {invalid_model_kwargs} should be specified explicitly. Instead they were passed in as part of `model_kwargs` parameter.'
            )
    values['model_kwargs'] = extra
    return values
Build extra kwargs from additional params that were passed in.
__init__
"""Initialize the sentence_transformer.""" super().__init__(**kwargs) try: from InstructorEmbedding import INSTRUCTOR self.client = INSTRUCTOR(self.model_name, cache_folder=self. cache_folder, **self.model_kwargs) except ImportError as e: raise ImportError('Dependencies for InstructorEmbedding not found.') from e
def __init__(self, **kwargs: Any):
    """Initialize the sentence_transformer.

    Forwards ``**kwargs`` to the parent embeddings class, then instantiates
    the INSTRUCTOR model from ``self.model_name`` / ``self.cache_folder`` /
    ``self.model_kwargs`` (set by the parent initializer).

    Raises:
        ImportError: If the InstructorEmbedding package is not available.
    """
    super().__init__(**kwargs)
    try:
        from InstructorEmbedding import INSTRUCTOR
        # Construction is kept inside the try so an ImportError raised while
        # loading the model is reported with the same message.
        self.client = INSTRUCTOR(self.model_name, cache_folder=self.
            cache_folder, **self.model_kwargs)
    except ImportError as e:
        raise ImportError('Dependencies for InstructorEmbedding not found.'
            ) from e
Initialize the sentence_transformer.
select_examples
"""Select which examples to use based on semantic similarity.""" if self.input_keys: input_variables = {key: input_variables[key] for key in self.input_keys} vectorstore_kwargs = self.vectorstore_kwargs or {} query = ' '.join(sorted_values(input_variables)) example_docs = self.vectorstore.similarity_search(query, k=self.k, ** vectorstore_kwargs) examples = [dict(e.metadata) for e in example_docs] if self.example_keys: examples = [{k: eg[k] for k in self.example_keys} for eg in examples] return examples
def select_examples(self, input_variables: Dict[str, str]) ->List[dict]:
    """Select which examples to use based on semantic similarity."""
    if self.input_keys:
        # Restrict the query to the configured input keys only.
        input_variables = {key: input_variables[key] for key in self.input_keys}
    search_kwargs = self.vectorstore_kwargs or {}
    query = ' '.join(sorted_values(input_variables))
    docs = self.vectorstore.similarity_search(query, k=self.k, **search_kwargs)
    selected = [dict(doc.metadata) for doc in docs]
    if self.example_keys:
        selected = [{key: ex[key] for key in self.example_keys} for ex in selected]
    return selected
Select which examples to use based on semantic similarity.
retrieve_existing_fts_index
""" Check if the fulltext index exists in the Neo4j database This method queries the Neo4j database for existing fts indexes with the specified name. Returns: (Tuple): keyword index information """ index_information = self.query( "SHOW INDEXES YIELD name, type, labelsOrTypes, properties, options WHERE type = 'FULLTEXT' AND (name = $keyword_index_name OR (labelsOrTypes = [$node_label] AND properties = $text_node_property)) RETURN name, labelsOrTypes, properties, options " , params={'keyword_index_name': self.keyword_index_name, 'node_label': self.node_label, 'text_node_property': text_node_properties or [self. text_node_property]}) index_information = sort_by_index_name(index_information, self.index_name) try: self.keyword_index_name = index_information[0]['name'] self.text_node_property = index_information[0]['properties'][0] node_label = index_information[0]['labelsOrTypes'][0] return node_label except IndexError: return None
def retrieve_existing_fts_index(self, text_node_properties: Optional[List[str]]=None) ->Optional[str]:
    """
    Check if the fulltext index exists in the Neo4j database.

    This method queries the Neo4j database for existing fts indexes
    with the specified name. On a hit it updates
    ``self.keyword_index_name`` and ``self.text_node_property`` from the
    index metadata.

    Args:
        text_node_properties: Properties to match against; falls back to
            ``[self.text_node_property]`` when empty or None.

    Returns:
        (Optional[str]): node label of the existing keyword index, or
        None if no matching index exists.
    """
    # Default changed from a mutable `[]` literal to None; the `or` fallback
    # below treats both identically, so behavior is unchanged (fixes B006).
    index_information = self.query(
        "SHOW INDEXES YIELD name, type, labelsOrTypes, properties, options WHERE type = 'FULLTEXT' AND (name = $keyword_index_name OR (labelsOrTypes = [$node_label] AND properties = $text_node_property)) RETURN name, labelsOrTypes, properties, options "
        , params={'keyword_index_name': self.keyword_index_name,
        'node_label': self.node_label, 'text_node_property': 
        text_node_properties or [self.text_node_property]})
    index_information = sort_by_index_name(index_information, self.index_name)
    try:
        self.keyword_index_name = index_information[0]['name']
        self.text_node_property = index_information[0]['properties'][0]
        node_label = index_information[0]['labelsOrTypes'][0]
        return node_label
    except IndexError:
        # No matching index rows were returned.
        return None
Check if the fulltext index exists in the Neo4j database. This method queries the Neo4j database for existing fts indexes with the specified name. Returns: (Optional[str]): node label associated with the existing keyword index, or None if no matching index exists.
with_listeners
""" Bind lifecycle listeners to a Runnable, returning a new Runnable. on_start: Called before the runnable starts running, with the Run object. on_end: Called after the runnable finishes running, with the Run object. on_error: Called if the runnable throws an error, with the Run object. The Run object contains information about the run, including its id, type, input, output, error, start_time, end_time, and any tags or metadata added to the run. """ return RunnableEach(bound=self.bound.with_listeners(on_start=on_start, on_end=on_end, on_error=on_error))
def with_listeners(self, *, on_start: Optional[Listener]=None, on_end: Optional[Listener]=None, on_error: Optional[Listener]=None) ->RunnableEach[Input, Output]:
    """
    Bind lifecycle listeners to a Runnable, returning a new Runnable.

    on_start: Called before the runnable starts running, with the Run object.
    on_end: Called after the runnable finishes running, with the Run object.
    on_error: Called if the runnable throws an error, with the Run object.

    The Run object contains information about the run, including its id,
    type, input, output, error, start_time, end_time, and any tags or metadata
    added to the run.
    """
    listening_bound = self.bound.with_listeners(on_start=on_start,
        on_end=on_end, on_error=on_error)
    return RunnableEach(bound=listening_bound)
Bind lifecycle listeners to a Runnable, returning a new Runnable. on_start: Called before the runnable starts running, with the Run object. on_end: Called after the runnable finishes running, with the Run object. on_error: Called if the runnable throws an error, with the Run object. The Run object contains information about the run, including its id, type, input, output, error, start_time, end_time, and any tags or metadata added to the run.
_select_relevance_score_fn
""" The 'correct' relevance function may differ depending on a few things, including: - the distance / similarity metric used by the VectorStore - the scale of your embeddings (OpenAI's are unit normed. Many others are not!) - embedding dimensionality - etc. """ if self.override_relevance_score_fn is not None: return self.override_relevance_score_fn if self.distance_strategy == DistanceStrategy.MAX_INNER_PRODUCT: return self._max_inner_product_relevance_score_fn elif self.distance_strategy == DistanceStrategy.EUCLIDEAN_DISTANCE: return self._euclidean_relevance_score_fn elif self.distance_strategy == DistanceStrategy.COSINE: return self._cosine_relevance_score_fn else: raise ValueError( 'Unknown distance strategy, must be cosine, max_inner_product, or euclidean' )
def _select_relevance_score_fn(self) ->Callable[[float], float]:
    """
    The 'correct' relevance function may differ depending on a few things,
    including:
    - the distance / similarity metric used by the VectorStore
    - the scale of your embeddings (OpenAI's are unit normed. Many others are not!)
    - embedding dimensionality
    - etc.
    """
    # An explicit override always wins.
    if self.override_relevance_score_fn is not None:
        return self.override_relevance_score_fn
    strategy_map = {
        DistanceStrategy.MAX_INNER_PRODUCT:
            self._max_inner_product_relevance_score_fn,
        DistanceStrategy.EUCLIDEAN_DISTANCE:
            self._euclidean_relevance_score_fn,
        DistanceStrategy.COSINE: self._cosine_relevance_score_fn,
    }
    selected = strategy_map.get(self.distance_strategy)
    if selected is None:
        raise ValueError(
            'Unknown distance strategy, must be cosine, max_inner_product, or euclidean'
            )
    return selected
The 'correct' relevance function may differ depending on a few things, including: - the distance / similarity metric used by the VectorStore - the scale of your embeddings (OpenAI's are unit normed. Many others are not!) - embedding dimensionality - etc.
test_openai_streaming
"""Test streaming tokens from OpenAI.""" llm = OpenAI(max_tokens=10) generator = llm.stream("I'm Pickle Rick") assert isinstance(generator, Generator) for token in generator: assert isinstance(token, str)
@pytest.mark.scheduled
def test_openai_streaming() ->None:
    """Test streaming tokens from OpenAI."""
    llm = OpenAI(max_tokens=10)
    token_stream = llm.stream("I'm Pickle Rick")
    assert isinstance(token_stream, Generator)
    for chunk in token_stream:
        assert isinstance(chunk, str)
Test streaming tokens from OpenAI.
test_forefrontai_api_key_masked_when_passed_from_env
"""Test that the API key is masked when passed from an environment variable.""" monkeypatch.setenv('FOREFRONTAI_API_KEY', 'secret-api-key') llm = ForefrontAI(temperature=0.2) print(llm.forefrontai_api_key, end='') captured = capsys.readouterr() assert captured.out == '**********'
def test_forefrontai_api_key_masked_when_passed_from_env(monkeypatch:
    MonkeyPatch, capsys: CaptureFixture) -> None:
    """The API key sourced from the environment must never print in clear text."""
    monkeypatch.setenv('FOREFRONTAI_API_KEY', 'secret-api-key')
    model = ForefrontAI(temperature=0.2)
    # A SecretStr renders as asterisks instead of the raw key when printed.
    print(model.forefrontai_api_key, end='')
    assert capsys.readouterr().out == '*' * 10
Test that the API key is masked when passed from an environment variable.
__init__
try: from motor.motor_asyncio import AsyncIOMotorClient except ImportError as e: raise ImportError( 'Cannot import from motor, please install with `pip install motor`.' ) from e if not connection_string: raise ValueError('connection_string must be provided.') if not db_name: raise ValueError('db_name must be provided.') if not collection_name: raise ValueError('collection_name must be provided.') self.client = AsyncIOMotorClient(connection_string) self.db_name = db_name self.collection_name = collection_name self.filter_criteria = filter_criteria or {} self.db = self.client.get_database(db_name) self.collection = self.db.get_collection(collection_name)
def __init__(self, connection_string: str, db_name: str, collection_name:
    str, *, filter_criteria: Optional[Dict]=None) -> None:
    """Set up an async MongoDB client, database, and collection handle.

    Args:
        connection_string: MongoDB URI used to build the motor client.
        db_name: Name of the database to use.
        collection_name: Name of the collection to use.
        filter_criteria: Optional query filter; defaults to an empty filter.

    Raises:
        ImportError: If the ``motor`` package is not installed.
        ValueError: If any required argument is empty.
    """
    try:
        from motor.motor_asyncio import AsyncIOMotorClient
    except ImportError as e:
        raise ImportError(
            'Cannot import from motor, please install with `pip install motor`.'
        ) from e
    # Validate the required arguments in declaration order so the first
    # missing one is reported, matching the original behavior.
    required = (
        (connection_string, 'connection_string must be provided.'),
        (db_name, 'db_name must be provided.'),
        (collection_name, 'collection_name must be provided.'),
    )
    for value, error_message in required:
        if not value:
            raise ValueError(error_message)
    self.client = AsyncIOMotorClient(connection_string)
    self.db_name = db_name
    self.collection_name = collection_name
    self.filter_criteria = filter_criteria or {}
    self.db = self.client.get_database(db_name)
    self.collection = self.db.get_collection(collection_name)
null
save
df = self.pd.DataFrame(data) table = self.pa.Table.from_pandas(df) if os.path.exists(self.persist_path): backup_path = str(self.persist_path) + '-backup' os.rename(self.persist_path, backup_path) try: self.pq.write_table(table, self.persist_path) except Exception as exc: os.rename(backup_path, self.persist_path) raise exc else: os.remove(backup_path) else: self.pq.write_table(table, self.persist_path)
def save(self, data: Any) -> None:
    """Persist *data* to ``self.persist_path`` as a Parquet table.

    If a file already exists at the target path it is first renamed to a
    ``-backup`` sibling. Should the new write fail, the backup is restored
    and the exception propagated; otherwise the backup is removed.
    """
    table = self.pa.Table.from_pandas(self.pd.DataFrame(data))
    if not os.path.exists(self.persist_path):
        # Nothing to protect: write the table directly.
        self.pq.write_table(table, self.persist_path)
        return
    backup = str(self.persist_path) + '-backup'
    os.rename(self.persist_path, backup)
    try:
        self.pq.write_table(table, self.persist_path)
    except Exception:
        # Restore the previous file before propagating the failure.
        os.rename(backup, self.persist_path)
        raise
    os.remove(backup)
null
add_texts
"""Upload texts with metadata (properties) to Marqo. You can either have marqo generate ids for each document or you can provide your own by including a "_id" field in the metadata objects. Args: texts (Iterable[str]): am iterator of texts - assumed to preserve an order that matches the metadatas. metadatas (Optional[List[dict]], optional): a list of metadatas. Raises: ValueError: if metadatas is provided and the number of metadatas differs from the number of texts. Returns: List[str]: The list of ids that were added. """ if self._client.index(self._index_name).get_settings()['index_defaults'][ 'treat_urls_and_pointers_as_images']: raise ValueError( 'Marqo.add_texts is disabled for multimodal indexes. To add documents with a multimodal index use the Python client for Marqo directly.' ) documents: List[Dict[str, str]] = [] num_docs = 0 for i, text in enumerate(texts): doc = {'text': text, 'metadata': json.dumps(metadatas[i]) if metadatas else json.dumps({})} documents.append(doc) num_docs += 1 ids = [] for i in range(0, num_docs, self._document_batch_size): response = self._client.index(self._index_name).add_documents(documents [i:i + self._document_batch_size], tensor_fields=self.tensor_fields, **self._add_documents_settings) if response['errors']: err_msg = ( f'Error in upload for documents in index range [{i},{i + self._document_batch_size}], check Marqo logs.' ) raise RuntimeError(err_msg) ids += [item['_id'] for item in response['items']] return ids
def add_texts(self, texts: Iterable[str], metadatas: Optional[List[dict]]=
    None, **kwargs: Any) ->List[str]:
    """Upload texts with metadata (properties) to Marqo.

    You can either have marqo generate ids for each document or you can
    provide your own by including a "_id" field in the metadata objects.

    Args:
        texts (Iterable[str]): an iterable of texts - assumed to preserve an
         order that matches the metadatas.
        metadatas (Optional[List[dict]], optional): a list of metadatas.

    Raises:
        ValueError: if metadatas is provided and the number of metadatas
        differs from the number of texts, or if the index is multimodal.
        RuntimeError: if Marqo reports errors for an uploaded batch.

    Returns:
        List[str]: The list of ids that were added.
    """
    if self._client.index(self._index_name).get_settings()['index_defaults'][
        'treat_urls_and_pointers_as_images']:
        raise ValueError(
            'Marqo.add_texts is disabled for multimodal indexes. To add documents with a multimodal index use the Python client for Marqo directly.'
            )
    # Materialize once so the iterable can be length-checked and re-read.
    texts = list(texts)
    # Enforce the documented contract: metadatas, when given, must pair
    # one-to-one with texts (previously a short list raised IndexError).
    if metadatas is not None and len(metadatas) != len(texts):
        raise ValueError(
            f'Number of metadatas ({len(metadatas)}) must match number of texts ({len(texts)}).'
            )
    documents: List[Dict[str, str]] = [{'text': text, 'metadata': json.
        dumps(metadatas[i]) if metadatas else json.dumps({})} for i, text in
        enumerate(texts)]
    ids: List[str] = []
    # Upload in fixed-size batches to respect Marqo's document batch limit.
    for start in range(0, len(documents), self._document_batch_size):
        batch = documents[start:start + self._document_batch_size]
        response = self._client.index(self._index_name).add_documents(batch,
            tensor_fields=self.tensor_fields, **self._add_documents_settings)
        if response['errors']:
            err_msg = (
                f'Error in upload for documents in index range [{start},{start + self._document_batch_size}], check Marqo logs.'
                )
            raise RuntimeError(err_msg)
        ids += [item['_id'] for item in response['items']]
    return ids
Upload texts with metadata (properties) to Marqo. You can either have marqo generate ids for each document or you can provide your own by including a "_id" field in the metadata objects. Args: texts (Iterable[str]): an iterable of texts - assumed to preserve an order that matches the metadatas. metadatas (Optional[List[dict]], optional): a list of metadatas. Raises: ValueError: if metadatas is provided and the number of metadatas differs from the number of texts. Returns: List[str]: The list of ids that were added.