method_name
stringlengths
1
78
method_body
stringlengths
3
9.66k
full_code
stringlengths
31
10.7k
docstring
stringlengths
4
4.74k
compress_documents
"""Compress page content of raw documents.""" compressed_docs = [] for doc in documents: _input = self.get_input(query, doc) output = self.llm_chain.predict_and_parse(**_input, callbacks=callbacks) if len(output) == 0: continue compressed_docs.append(Document(page_content=output, metadata=doc.me...
def compress_documents(self, documents: Sequence[Document], query: str, callbacks: Optional[Callbacks]=None) ->Sequence[Document]: """Compress page content of raw documents.""" compressed_docs = [] for doc in documents: _input = self.get_input(query, doc) output = self.llm_chain.predict_...
Compress page content of raw documents.
__init__
""" Initialize the cache with all relevant parameters. Args: collection_name (str): name of the Astra DB collection to create/use. token (Optional[str]): API token for Astra DB usage. api_endpoint (Optional[str]): full URL to the API endpoint, such as...
def __init__(self, *, collection_name: str= ASTRA_DB_CACHE_DEFAULT_COLLECTION_NAME, token: Optional[str]=None, api_endpoint: Optional[str]=None, astra_db_client: Optional[Any]=None, namespace: Optional[str]=None, embedding: Embeddings, metric: Optional[ str]=None, similarity_threshold: float= ASTRA_...
Initialize the cache with all relevant parameters. Args: collection_name (str): name of the Astra DB collection to create/use. token (Optional[str]): API token for Astra DB usage. api_endpoint (Optional[str]): full URL to the API endpoint, such as "https://<DB-ID>-us-east1.apps.astra.datastax.com"....
__init__
"""Initializes private fields.""" super().__init__(**data) api_key = cast(SecretStr, self.arcee_api_key) self._client = ArceeWrapper(arcee_api_key=api_key, arcee_api_url=self. arcee_api_url, arcee_api_version=self.arcee_api_version, model_kwargs= self.model_kwargs, model_name=self.model)
def __init__(self, **data: Any) ->None: """Initializes private fields.""" super().__init__(**data) api_key = cast(SecretStr, self.arcee_api_key) self._client = ArceeWrapper(arcee_api_key=api_key, arcee_api_url=self. arcee_api_url, arcee_api_version=self.arcee_api_version, model_kwargs=se...
Initializes private fields.
_import_pubmed_tool
from langchain_community.tools.pubmed.tool import PubmedQueryRun return PubmedQueryRun
def _import_pubmed_tool() ->Any: from langchain_community.tools.pubmed.tool import PubmedQueryRun return PubmedQueryRun
null
lazy_parse
"""Lazily parse the blob.""" import pypdfium2 with blob.as_bytes_io() as file_path: pdf_reader = pypdfium2.PdfDocument(file_path, autoclose=True) try: for page_number, page in enumerate(pdf_reader): text_page = page.get_textpage() content = text_page.get_text_range() ...
def lazy_parse(self, blob: Blob) ->Iterator[Document]: """Lazily parse the blob.""" import pypdfium2 with blob.as_bytes_io() as file_path: pdf_reader = pypdfium2.PdfDocument(file_path, autoclose=True) try: for page_number, page in enumerate(pdf_reader): text_page ...
Lazily parse the blob.
chain
...
@overload def chain(func: Callable[[Input], Iterator[Output]]) ->Runnable[Input, Output]: ...
null
validate_environment
"""Validate that api key and python package exists in environment.""" nebula_service_url = get_from_dict_or_env(values, 'nebula_service_url', 'NEBULA_SERVICE_URL', DEFAULT_NEBULA_SERVICE_URL) nebula_service_path = get_from_dict_or_env(values, 'nebula_service_path', 'NEBULA_SERVICE_PATH', DEFAULT_NEBULA_SERVICE_...
@root_validator() def validate_environment(cls, values: Dict) ->Dict: """Validate that api key and python package exists in environment.""" nebula_service_url = get_from_dict_or_env(values, 'nebula_service_url', 'NEBULA_SERVICE_URL', DEFAULT_NEBULA_SERVICE_URL) nebula_service_path = get_from_dict_or...
Validate that api key and python package exists in environment.
_import_playwright_NavigateBackTool
from langchain_community.tools.playwright import NavigateBackTool return NavigateBackTool
def _import_playwright_NavigateBackTool() ->Any: from langchain_community.tools.playwright import NavigateBackTool return NavigateBackTool
null
on_tool_error
"""Run when tool errors.""" self.step += 1 self.errors += 1
def on_tool_error(self, error: BaseException, **kwargs: Any) ->None: """Run when tool errors.""" self.step += 1 self.errors += 1
Run when tool errors.
_get_schema_with_defaults
from langchain_community.vectorstores.redis.schema import RedisModel, read_schema schema = RedisModel() if index_schema: schema_values = read_schema(index_schema) schema = RedisModel(**schema_values) schema.add_content_field() try: schema.content_vector if vector_schema: logger.warning( ...
def _get_schema_with_defaults(self, index_schema: Optional[Union[Dict[str, str], str, os.PathLike]]=None, vector_schema: Optional[Dict[str, Union[ str, int]]]=None) ->'RedisModel': from langchain_community.vectorstores.redis.schema import RedisModel, read_schema schema = RedisModel() if index_schema...
null
critique_prompt
return ChatPromptTemplate.from_strings(self.get_prompt_strings('critique'))
def critique_prompt(self) ->ChatPromptTemplate: return ChatPromptTemplate.from_strings(self.get_prompt_strings('critique'))
null
_lazy_import_pexpect
"""Import pexpect only when needed.""" if platform.system() == 'Windows': raise ValueError( 'Persistent bash processes are not yet supported on Windows.') try: import pexpect except ImportError: raise ImportError( 'pexpect required for persistent bash processes. To install, run `pip install ...
@staticmethod def _lazy_import_pexpect() ->pexpect: """Import pexpect only when needed.""" if platform.system() == 'Windows': raise ValueError( 'Persistent bash processes are not yet supported on Windows.') try: import pexpect except ImportError: raise ImportError( ...
Import pexpect only when needed.
_send_request
"""Sends request to the oci data science model deployment endpoint. Args: data (Json serializable): data need to be sent to the endpoint. endpoint (str): The model HTTP endpoint. header (dict, optional): A dictionary of HTTP he...
def _send_request(self, data: Any, endpoint: str, header: Optional[dict]={}, **kwargs: Any) ->Dict: """Sends request to the oci data science model deployment endpoint. Args: data (Json serializable): data need to be sent to the endpoint. endpoint (str): ...
Sends request to the oci data science model deployment endpoint. Args: data (Json serializable): data need to be sent to the endpoint. endpoint (str): The model HTTP endpoint. header (dict, optional): A dictionary of HTTP headers to send to the specified url. Defaults to {}....
docai_parse
"""Runs Google Document AI PDF Batch Processing on a list of blobs. Args: blobs: a list of blobs to be parsed gcs_output_path: a path (folder) on GCS to store results processor_name: name of a Document AI processor. batch_size: amount of documents per batch ...
def docai_parse(self, blobs: Sequence[Blob], *, gcs_output_path: Optional[ str]=None, processor_name: Optional[str]=None, batch_size: int=1000, enable_native_pdf_parsing: bool=True, field_mask: Optional[str]=None ) ->List['Operation']: """Runs Google Document AI PDF Batch Processing on a list of blobs. ...
Runs Google Document AI PDF Batch Processing on a list of blobs. Args: blobs: a list of blobs to be parsed gcs_output_path: a path (folder) on GCS to store results processor_name: name of a Document AI processor. batch_size: amount of documents per batch enable_native_pdf_parsing: a config option f...
from_texts
"""Return VectorStore initialized from texts and embeddings.""" if not client: raise ValueError('Supabase client is required.') if not table_name: raise ValueError('Supabase document table_name is required.') embeddings = embedding.embed_documents(texts) ids = [str(uuid.uuid4()) for _ in texts] docs = cls._text...
@classmethod def from_texts(cls: Type['SupabaseVectorStore'], texts: List[str], embedding: Embeddings, metadatas: Optional[List[dict]]=None, client: Optional[supabase.client.Client]=None, table_name: Optional[str]= 'documents', query_name: Union[str, None]='match_documents', chunk_size: int=500, ids: Op...
Return VectorStore initialized from texts and embeddings.
test_astradb_vectorstore_crud
"""Basic add/delete/update behaviour.""" res0 = store_someemb.similarity_search('Abc', k=2) assert res0 == [] store_someemb.add_texts(texts=['aa', 'bb', 'cc'], metadatas=[{'k': 'a', 'ord': 0}, {'k': 'b', 'ord': 1}, {'k': 'c', 'ord': 2}], ids=['a', 'b', 'c'] ) res1 = store_someemb.similarity_search('Abc', k=5) a...
def test_astradb_vectorstore_crud(self, store_someemb: AstraDB) ->None: """Basic add/delete/update behaviour.""" res0 = store_someemb.similarity_search('Abc', k=2) assert res0 == [] store_someemb.add_texts(texts=['aa', 'bb', 'cc'], metadatas=[{'k': 'a', 'ord': 0}, {'k': 'b', 'ord': 1}, {'k': 'c'...
Basic add/delete/update behaviour.
_import_graphql
from langchain_community.utilities.graphql import GraphQLAPIWrapper return GraphQLAPIWrapper
def _import_graphql() ->Any: from langchain_community.utilities.graphql import GraphQLAPIWrapper return GraphQLAPIWrapper
null
test_tracer_tool_run_on_error
"""Test tracer on a Tool run with an error.""" exception = Exception('test') uuid = uuid4() compare_run = Run(id=str(uuid), start_time=datetime.now(timezone.utc), end_time=datetime.now(timezone.utc), events=[{'name': 'start', 'time': datetime.now(timezone.utc)}, {'name': 'error', 'time': datetime.now( timez...
@freeze_time('2023-01-01') def test_tracer_tool_run_on_error() ->None: """Test tracer on a Tool run with an error.""" exception = Exception('test') uuid = uuid4() compare_run = Run(id=str(uuid), start_time=datetime.now(timezone.utc), end_time=datetime.now(timezone.utc), events=[{'name': 'start',...
Test tracer on a Tool run with an error.
_call_with_config
"""Helper method to transform an Input value to an Output value, with callbacks. Use this method to implement invoke() in subclasses.""" config = ensure_config(config) callback_manager = get_callback_manager_for_config(config) run_manager = callback_manager.on_chain_start(dumpd(self), input, run_type= run_t...
def _call_with_config(self, func: Union[Callable[[Input], Output], Callable [[Input, CallbackManagerForChainRun], Output], Callable[[Input, CallbackManagerForChainRun, RunnableConfig], Output]], input: Input, config: Optional[RunnableConfig], run_type: Optional[str]=None, ** kwargs: Optional[Any]) ->Out...
Helper method to transform an Input value to an Output value, with callbacks. Use this method to implement invoke() in subclasses.
test_all_imports
assert set(__all__) == set(EXPECTED_ALL)
def test_all_imports() ->None: assert set(__all__) == set(EXPECTED_ALL)
null
split_text
return self._split_text(text, self._separators)
def split_text(self, text: str) ->List[str]: return self._split_text(text, self._separators)
null
save
"""Save the agent. Args: file_path: Path to file to save the agent to. Example: .. code-block:: python # If working with agent executor agent.agent.save(file_path="path/agent.yaml") """ if isinstance(file_path, str): save_path = Path(file_path) ...
def save(self, file_path: Union[Path, str]) ->None: """Save the agent. Args: file_path: Path to file to save the agent to. Example: .. code-block:: python # If working with agent executor agent.agent.save(file_path="path/agent.yaml") """ if ...
Save the agent. Args: file_path: Path to file to save the agent to. Example: .. code-block:: python # If working with agent executor agent.agent.save(file_path="path/agent.yaml")
test_llamacpp_inference
"""Test valid llama.cpp inference.""" model_path = get_model() llm = LlamaCpp(model_path=model_path) output = llm('Say foo:') assert isinstance(output, str) assert len(output) > 1
def test_llamacpp_inference() ->None: """Test valid llama.cpp inference.""" model_path = get_model() llm = LlamaCpp(model_path=model_path) output = llm('Say foo:') assert isinstance(output, str) assert len(output) > 1
Test valid llama.cpp inference.
_get_video_info
"""Get important video information. Components are: - title - description - thumbnail url, - publish_date - channel_author - and more. """ try: from pytube import YouTube except ImportError: raise ImportError( 'Coul...
def _get_video_info(self) ->dict: """Get important video information. Components are: - title - description - thumbnail url, - publish_date - channel_author - and more. """ try: from pytube import YouTube except...
Get important video information. Components are: - title - description - thumbnail url, - publish_date - channel_author - and more.
test_respect_user_specified_user_agent
user_specified_user_agent = 'user_specified_user_agent' header_template = {'User-Agent': user_specified_user_agent} url = 'https://www.example.com' loader = WebBaseLoader(url, header_template=header_template) assert loader.session.headers['User-Agent'] == user_specified_user_agent
@pytest.mark.requires('bs4') def test_respect_user_specified_user_agent(self) ->None: user_specified_user_agent = 'user_specified_user_agent' header_template = {'User-Agent': user_specified_user_agent} url = 'https://www.example.com' loader = WebBaseLoader(url, header_template=header_template) asser...
null
_run
"""Use the tool.""" return str(self.api_wrapper.run(query))
def _run(self, query: str, run_manager: Optional[CallbackManagerForToolRun] =None) ->str: """Use the tool.""" return str(self.api_wrapper.run(query))
Use the tool.
_mlflow_extras
return ''
@property def _mlflow_extras(self) ->str: return ''
null
test_format_doc_with_metadata
"""Test format doc on a valid document.""" doc = Document(page_content='foo', metadata={'bar': 'baz'}) prompt = PromptTemplate(input_variables=['page_content', 'bar'], template= '{page_content}, {bar}') expected_output = 'foo, baz' output = format_document(doc, prompt) assert output == expected_output
def test_format_doc_with_metadata() ->None: """Test format doc on a valid document.""" doc = Document(page_content='foo', metadata={'bar': 'baz'}) prompt = PromptTemplate(input_variables=['page_content', 'bar'], template='{page_content}, {bar}') expected_output = 'foo, baz' output = format_d...
Test format doc on a valid document.
_chain_type
return 'graph_cypher_chain'
@property def _chain_type(self) ->str: return 'graph_cypher_chain'
null
test_single_input_agent_raises_error_on_structured_tool
"""Test that older agents raise errors on older tools.""" @tool def the_tool(foo: str, bar: str) ->str: """Return the concat of foo and bar.""" return foo + bar with pytest.raises(ValueError, match= f'{agent_cls.__name__} does not support multi-input tool {the_tool.name}.' ): agent_cls.from_llm_and_...
@pytest.mark.parametrize('agent_cls', [ZeroShotAgent, ChatAgent, ConversationalChatAgent, ConversationalAgent, ReActDocstoreAgent, ReActTextWorldAgent, SelfAskWithSearchAgent]) def test_single_input_agent_raises_error_on_structured_tool(agent_cls: Type [Agent]) ->None: """Test that older agents raise er...
Test that older agents raise errors on older tools.
format_request_payload
input_str = json.dumps({'incorrect_input': {'input_string': [prompt]}, 'parameters': model_kwargs}) return str.encode(input_str)
def format_request_payload(self, prompt: str, model_kwargs: Dict) ->bytes: input_str = json.dumps({'incorrect_input': {'input_string': [prompt]}, 'parameters': model_kwargs}) return str.encode(input_str)
null
_llm_type
return 'fake-openai-chat-model'
@property def _llm_type(self) ->str: return 'fake-openai-chat-model'
null
test_azure_openai_embedding_documents
"""Test openai embeddings.""" documents = ['foo bar'] embedding = _get_embeddings() output = embedding.embed_documents(documents) assert len(output) == 1 assert len(output[0]) == 1536
@pytest.mark.scheduled def test_azure_openai_embedding_documents() ->None: """Test openai embeddings.""" documents = ['foo bar'] embedding = _get_embeddings() output = embedding.embed_documents(documents) assert len(output) == 1 assert len(output[0]) == 1536
Test openai embeddings.
save
if self.example_selector: raise ValueError('Saving an example selector is not currently supported') return super().save(file_path)
def save(self, file_path: Union[Path, str]) ->None: if self.example_selector: raise ValueError( 'Saving an example selector is not currently supported') return super().save(file_path)
null
format
"""Format the prompt with inputs generating a string. Use this method to generate a string representation of a prompt consisting of chat messages. Useful for feeding into a string based completion language model or debugging. Args: **kwargs: keyword arguments to use for fo...
def format(self, **kwargs: Any) ->str: """Format the prompt with inputs generating a string. Use this method to generate a string representation of a prompt consisting of chat messages. Useful for feeding into a string based completion language model or debugging. Args: ...
Format the prompt with inputs generating a string. Use this method to generate a string representation of a prompt consisting of chat messages. Useful for feeding into a string based completion language model or debugging. Args: **kwargs: keyword arguments to use for formatting. Returns: A string representa...
__init__
super().__init__() try: import label_studio_sdk as ls except ImportError: raise ImportError( f"You're using {self.__class__.__name__} in your code, but you don't have the LabelStudio SDK Python package installed or upgraded to the latest version. Please run `pip install -U label-studio-sdk` before using...
def __init__(self, api_key: Optional[str]=None, url: Optional[str]=None, project_id: Optional[int]=None, project_name: str=DEFAULT_PROJECT_NAME, project_config: Optional[str]=None, mode: Union[str, LabelStudioMode]= LabelStudioMode.PROMPT): super().__init__() try: import label_studio_sdk as ...
null
embed_documents
embeddings: List[List[float]] = [] for txt in _chunk(texts, 20): resp = self._client.predict(endpoint=self.endpoint, inputs={'input': txt}) embeddings.extend(r['embedding'] for r in resp['data']) return embeddings
def embed_documents(self, texts: List[str]) ->List[List[float]]: embeddings: List[List[float]] = [] for txt in _chunk(texts, 20): resp = self._client.predict(endpoint=self.endpoint, inputs={'input': txt}) embeddings.extend(r['embedding'] for r in resp['data']) return embeddings
null
test_api_key_masked_when_passed_via_constructor
llm = ChatFireworks(fireworks_api_key='secret-api-key') print(llm.fireworks_api_key, end='') captured = capsys.readouterr() assert captured.out == '**********'
@pytest.mark.requires('fireworks') def test_api_key_masked_when_passed_via_constructor(capsys: CaptureFixture ) ->None: llm = ChatFireworks(fireworks_api_key='secret-api-key') print(llm.fireworks_api_key, end='') captured = capsys.readouterr() assert captured.out == '**********'
null
embed_documents
"""Return consistent embeddings for each text seen so far.""" out_vectors = [] for text in texts: if text not in self.known_texts: self.known_texts.append(text) vector = [float(1.0)] * (self.dimensionality - 1) + [float(self. known_texts.index(text))] out_vectors.append(vector) return out_ve...
def embed_documents(self, texts: List[str]) ->List[List[float]]: """Return consistent embeddings for each text seen so far.""" out_vectors = [] for text in texts: if text not in self.known_texts: self.known_texts.append(text) vector = [float(1.0)] * (self.dimensionality - 1) + [f...
Return consistent embeddings for each text seen so far.
_identifying_params
return {**{'deployment_name': self.deployment_name}, **super(). _identifying_params}
@property def _identifying_params(self) ->Mapping[str, Any]: return {**{'deployment_name': self.deployment_name}, **super(). _identifying_params}
null
authenticate
"""Authenticate using the Microsoft Grah API""" try: from O365 import Account except ImportError as e: raise ImportError( 'Cannot import 0365. Please install the package with `pip install O365`.' ) from e if 'CLIENT_ID' in os.environ and 'CLIENT_SECRET' in os.environ: client_id = os.environ[...
def authenticate() ->Account: """Authenticate using the Microsoft Grah API""" try: from O365 import Account except ImportError as e: raise ImportError( 'Cannot import 0365. Please install the package with `pip install O365`.' ) from e if 'CLIENT_ID' in os.environ ...
Authenticate using the Microsoft Grah API
escape
if not isinstance(value, str): raise TypeError( f'Value must be a string object for token escaping.Got type {type(value)}' ) def escape_symbol(match: re.Match) ->str: value = match.group(0) return f'\\{value}' return self.escaped_chars_re.sub(escape_symbol, value)
def escape(self, value: str) ->str: if not isinstance(value, str): raise TypeError( f'Value must be a string object for token escaping.Got type {type(value)}' ) def escape_symbol(match: re.Match) ->str: value = match.group(0) return f'\\{value}' return self.e...
null
before_record_request
for host in skipped_host: if request.host.startswith(host) or request.host.endswith(host): return None return request
def before_record_request(request: Request) ->Union[Request, None]: for host in skipped_host: if request.host.startswith(host) or request.host.endswith(host): return None return request
null
load_bibtex_entries
"""Load bibtex entries from the bibtex file at the given path.""" import bibtexparser with open(path) as file: entries = bibtexparser.load(file).entries return entries
def load_bibtex_entries(self, path: str) ->List[Dict[str, Any]]: """Load bibtex entries from the bibtex file at the given path.""" import bibtexparser with open(path) as file: entries = bibtexparser.load(file).entries return entries
Load bibtex entries from the bibtex file at the given path.
_get_anthropic_stop
if not self.HUMAN_PROMPT or not self.AI_PROMPT: raise NameError('Please ensure the anthropic package is loaded') if stop is None: stop = [] stop.extend([self.HUMAN_PROMPT]) return stop
def _get_anthropic_stop(self, stop: Optional[List[str]]=None) ->List[str]: if not self.HUMAN_PROMPT or not self.AI_PROMPT: raise NameError('Please ensure the anthropic package is loaded') if stop is None: stop = [] stop.extend([self.HUMAN_PROMPT]) return stop
null
test_neo4jvector_hybrid_deduplicate
"""Test result deduplication with hybrid search.""" text_embeddings = FakeEmbeddingsWithOsDimension().embed_documents(texts) text_embedding_pairs = list(zip(texts, text_embeddings)) docsearch = Neo4jVector.from_embeddings(text_embeddings= text_embedding_pairs, embedding=FakeEmbeddingsWithOsDimension(), url= url...
def test_neo4jvector_hybrid_deduplicate() ->None: """Test result deduplication with hybrid search.""" text_embeddings = FakeEmbeddingsWithOsDimension().embed_documents(texts) text_embedding_pairs = list(zip(texts, text_embeddings)) docsearch = Neo4jVector.from_embeddings(text_embeddings= text_em...
Test result deduplication with hybrid search.
setup
index_stats = self.index.describe_index_stats() for _namespace_name in index_stats['namespaces'].keys(): self.index.delete(delete_all=True, namespace=_namespace_name) reset_pinecone()
@pytest.fixture(autouse=True) def setup(self) ->None: index_stats = self.index.describe_index_stats() for _namespace_name in index_stats['namespaces'].keys(): self.index.delete(delete_all=True, namespace=_namespace_name) reset_pinecone()
null
test__validate_example_inputs_for_chain_single_input_multi_expect
mock_ = mock.MagicMock() mock_.inputs = {'foo': 'bar'} chain = mock.MagicMock() chain.input_keys = ['def not foo', 'oh here is another'] with pytest.raises(InputFormatError, match='Example inputs missing expected'): _validate_example_inputs_for_chain(mock_, chain, None)
def test__validate_example_inputs_for_chain_single_input_multi_expect() ->None: mock_ = mock.MagicMock() mock_.inputs = {'foo': 'bar'} chain = mock.MagicMock() chain.input_keys = ['def not foo', 'oh here is another'] with pytest.raises(InputFormatError, match= 'Example inputs missing expecte...
null
raise_deprecation
if 'llm' in values: warnings.warn( 'Directly instantiating an SQLDatabaseChain with an llm is deprecated. Please instantiate with llm_chain argument or using the from_llm class method.' ) if 'llm_chain' not in values and values['llm'] is not None: database = values['database'] pr...
@root_validator(pre=True) def raise_deprecation(cls, values: Dict) ->Dict: if 'llm' in values: warnings.warn( 'Directly instantiating an SQLDatabaseChain with an llm is deprecated. Please instantiate with llm_chain argument or using the from_llm class method.' ) if 'llm_chain...
null
from_embeddings
"""Construct OpenSearchVectorSearch wrapper from pre-vectorized embeddings. Example: .. code-block:: python from langchain_community.vectorstores import OpenSearchVectorSearch from langchain_community.embeddings import OpenAIEmbeddings embedder = Ope...
@classmethod def from_embeddings(cls, embeddings: List[List[float]], texts: List[str], embedding: Embeddings, metadatas: Optional[List[dict]]=None, bulk_size: int=500, ids: Optional[List[str]]=None, **kwargs: Any ) ->OpenSearchVectorSearch: """Construct OpenSearchVectorSearch wrapper from pre-vectorized...
Construct OpenSearchVectorSearch wrapper from pre-vectorized embeddings. Example: .. code-block:: python from langchain_community.vectorstores import OpenSearchVectorSearch from langchain_community.embeddings import OpenAIEmbeddings embedder = OpenAIEmbeddings() embeddings = embedd...
test_async_recursive_url_loader
url = 'https://docs.python.org/3.9/' loader = RecursiveUrlLoader(url, extractor=lambda _: 'placeholder', use_async=True, max_depth=3, timeout=None, check_response_status=True) docs = loader.load() assert len(docs) == 513 assert docs[0].page_content == 'placeholder'
def test_async_recursive_url_loader() ->None: url = 'https://docs.python.org/3.9/' loader = RecursiveUrlLoader(url, extractor=lambda _: 'placeholder', use_async=True, max_depth=3, timeout=None, check_response_status=True) docs = loader.load() assert len(docs) == 513 assert docs[0].page_conte...
null
prt
with open('/tmp/debugjaguar.log', 'a') as file: print(f'msg={msg}', file=file, flush=True)
def prt(self, msg: str) ->None: with open('/tmp/debugjaguar.log', 'a') as file: print(f'msg={msg}', file=file, flush=True)
null
test_sequential_valid_outputs
"""Test chain runs when valid outputs are specified.""" chain_1 = FakeChain(input_variables=['foo'], output_variables=['bar']) chain_2 = FakeChain(input_variables=['bar'], output_variables=['baz']) chain = SequentialChain(chains=[chain_1, chain_2], input_variables=['foo'], output_variables=['bar', 'baz']) output = ...
def test_sequential_valid_outputs() ->None: """Test chain runs when valid outputs are specified.""" chain_1 = FakeChain(input_variables=['foo'], output_variables=['bar']) chain_2 = FakeChain(input_variables=['bar'], output_variables=['baz']) chain = SequentialChain(chains=[chain_1, chain_2], input_varia...
Test chain runs when valid outputs are specified.
import_langkit
"""Import the langkit python package and raise an error if it is not installed. Args: sentiment: Whether to import the langkit.sentiment module. Defaults to False. toxicity: Whether to import the langkit.toxicity module. Defaults to False. themes: Whether to import the langkit.themes module...
def import_langkit(sentiment: bool=False, toxicity: bool=False, themes: bool=False) ->Any: """Import the langkit python package and raise an error if it is not installed. Args: sentiment: Whether to import the langkit.sentiment module. Defaults to False. toxicity: Whether to import the lang...
Import the langkit python package and raise an error if it is not installed. Args: sentiment: Whether to import the langkit.sentiment module. Defaults to False. toxicity: Whether to import the langkit.toxicity module. Defaults to False. themes: Whether to import the langkit.themes module. Defaults to False...
__repr__
return self.__str__()
def __repr__(self) ->str: return self.__str__()
null
test_vertexai_args_passed
response_text = 'Goodbye' user_prompt = 'Hello' prompt_params = {'max_output_tokens': 1, 'temperature': 10000.0, 'top_k': 10, 'top_p': 0.5} with patch('vertexai.language_models._language_models.ChatModel.start_chat' ) as start_chat: mock_response = MagicMock() mock_response.candidates = [Mock(text=resp...
@pytest.mark.parametrize('stop', [None, 'stop1']) def test_vertexai_args_passed(stop: Optional[str]) ->None: response_text = 'Goodbye' user_prompt = 'Hello' prompt_params = {'max_output_tokens': 1, 'temperature': 10000.0, 'top_k': 10, 'top_p': 0.5} with patch('vertexai.language_models._language_...
null
_default_params
"""Get the default parameters for calling aphrodite.""" return {'n': self.n, 'best_of': self.best_of, 'max_tokens': self.max_tokens, 'top_k': self.top_k, 'top_p': self.top_p, 'top_a': self.top_a, 'min_p': self.min_p, 'temperature': self.temperature, 'presence_penalty': self. presence_penalty, 'frequency_pen...
@property def _default_params(self) ->Dict[str, Any]: """Get the default parameters for calling aphrodite.""" return {'n': self.n, 'best_of': self.best_of, 'max_tokens': self. max_tokens, 'top_k': self.top_k, 'top_p': self.top_p, 'top_a': self .top_a, 'min_p': self.min_p, 'temperature': self.tem...
Get the default parameters for calling aphrodite.
max_marginal_relevance_search_with_score_by_vector
"""Return docs selected using the maximal marginal relevance. Maximal marginal relevance optimizes for similarity to query AND diversity among selected documents. Args: query: Text to look up documents similar to. k: Number of Documents to return. Defaults to 4. ...
def max_marginal_relevance_search_with_score_by_vector(self, embedding: List[float], k: int=4, fetch_k: int=20, lambda_mult: float=0.5, filter: Optional[MetadataFilter]=None, search_params: Optional[common_types. SearchParams]=None, score_threshold: Optional[float]=None, consistency: Optional[common_typ...
Return docs selected using the maximal marginal relevance. Maximal marginal relevance optimizes for similarity to query AND diversity among selected documents. Args: query: Text to look up documents similar to. k: Number of Documents to return. Defaults to 4. fetch_k: Number of Documents to fetch to pass to...
to_json_not_implemented
"""Serialize a "not implemented" object. Args: obj: object to serialize Returns: SerializedNotImplemented """ _id: List[str] = [] try: if hasattr(obj, '__name__'): _id = [*obj.__module__.split('.'), obj.__name__] elif hasattr(obj, '__class__'): _id = [*obj.__class__...
def to_json_not_implemented(obj: object) ->SerializedNotImplemented: """Serialize a "not implemented" object. Args: obj: object to serialize Returns: SerializedNotImplemented """ _id: List[str] = [] try: if hasattr(obj, '__name__'): _id = [*obj.__module__.sp...
Serialize a "not implemented" object. Args: obj: object to serialize Returns: SerializedNotImplemented
test_tiledb_mmr_with_metadatas_and_filter
texts = ['foo', 'foo', 'fou', 'foy'] metadatas = [{'page': i} for i in range(len(texts))] docsearch = TileDB.from_texts(texts=texts, metadatas=metadatas, embedding= ConsistentFakeEmbeddings(), index_uri=f'{str(tmp_path)}/flat', index_type='FLAT') query_vec = ConsistentFakeEmbeddings().embed_query(text='foo') ou...
@pytest.mark.requires('tiledb-vector-search') def test_tiledb_mmr_with_metadatas_and_filter(tmp_path: Path) ->None: texts = ['foo', 'foo', 'fou', 'foy'] metadatas = [{'page': i} for i in range(len(texts))] docsearch = TileDB.from_texts(texts=texts, metadatas=metadatas, embedding=ConsistentFakeEmbedd...
null
test_all_imports
assert set(__all__) == set(EXPECTED_ALL)
def test_all_imports() ->None: assert set(__all__) == set(EXPECTED_ALL)
null
drop_vector_indexes
"""Cleanup all vector indexes""" all_indexes = store.query( """ SHOW INDEXES YIELD name, type WHERE type IN ["VECTOR", "FULLTEXT"] RETURN name """ ) for index in all_indexes: store.query(f"DROP INDEX {index['name']}")
def drop_vector_indexes(store: Neo4jVector) ->None: """Cleanup all vector indexes""" all_indexes = store.query( """ SHOW INDEXES YIELD name, type WHERE type IN ["VECTOR", "FULLTEXT"] RETURN name """ ) for index in all_indexes:...
Cleanup all vector indexes
parse
return self.parse_result([Generation(text=text)])
def parse(self, text: str) -> Any:
    """Wrap ``text`` in a single Generation and delegate to ``parse_result``."""
    generation = Generation(text=text)
    return self.parse_result([generation])
null
input_keys
"""Return the input keys. :meta private: """ return [self.input_key]
@property
def input_keys(self) -> List[str]:
    """Return the single configured input key.

    :meta private:
    """
    return [self.input_key]
Return the input keys. :meta private:
__init__
""" Initialize the object for file processing with Azure Document Intelligence (formerly Form Recognizer). This constructor initializes a AzureAIDocumentIntelligenceParser object to be used for parsing files using the Azure Document Intelligence API. The load method generates Do...
def __init__(self, api_endpoint: str, api_key: str, file_path: Optional[str ]=None, url_path: Optional[str]=None, api_version: Optional[str]=None, api_model: str='prebuilt-layout', mode: str='markdown') ->None: """ Initialize the object for file processing with Azure Document Intelligence (f...
Initialize the object for file processing with Azure Document Intelligence (formerly Form Recognizer). This constructor initializes a AzureAIDocumentIntelligenceParser object to be used for parsing files using the Azure Document Intelligence API. The load method generates Documents whose content representations are de...
_on_chat_model_start
"""Persist an LLM run.""" if run.parent_run_id is None: run.reference_example_id = self.example_id self._submit(self._persist_run_single, _copy(run))
def _on_chat_model_start(self, run: Run) -> None:
    """Persist an LLM run."""
    is_root_run = run.parent_run_id is None
    if is_root_run:
        # Only the root run is linked back to the reference example.
        run.reference_example_id = self.example_id
    self._submit(self._persist_run_single, _copy(run))
Persist an LLM run.
test_initialize_watsonxllm_cpd_bad_path_apikey_without_username
try: WatsonxLLM(model_id='google/flan-ul2', url= 'https://cpd-zen.apps.cpd48.cp.fyre.ibm.com', apikey='test_apikey') except ValueError as e: assert 'WATSONX_USERNAME' in e.__str__()
def test_initialize_watsonxllm_cpd_bad_path_apikey_without_username() -> None:
    """A CPD url with only an apikey must raise, mentioning WATSONX_USERNAME.

    The original test passed silently when no exception was raised at all;
    track that the ValueError actually occurred.
    """
    raised = False
    try:
        WatsonxLLM(model_id='google/flan-ul2', url=
            'https://cpd-zen.apps.cpd48.cp.fyre.ibm.com', apikey='test_apikey')
    except ValueError as e:
        raised = True
        # The error should point the user at the missing username setting.
        assert 'WATSONX_USERNAME' in str(e)
    assert raised, 'expected ValueError when apikey is given without username'
null
similarity_search_by_vector
"""Return docs most similar to embedding vector. Args: embedding: Embedding to look up documents similar to. k: Number of Documents to return. Defaults to 4. filter: Doc fields filter conditions that meet the SQL where clause specification. Retur...
def similarity_search_by_vector(self, embedding: List[float], k: int=4, filter: Optional[str]=None, **kwargs: Any) ->List[Document]: """Return docs most similar to embedding vector. Args: embedding: Embedding to look up documents similar to. k: Number of Documents to return. Def...
Return docs most similar to embedding vector. Args: embedding: Embedding to look up documents similar to. k: Number of Documents to return. Defaults to 4. filter: Doc fields filter conditions that meet the SQL where clause specification. Returns: List of Documents most similar to the query...
_Break
self.fill('break')
def _Break(self, t): self.fill('break')
null
get_spans
for quote in self.substring_quote: yield from self._get_span(quote, context)
def get_spans(self, context: str) -> Iterator[str]:
    """Yield every span located for each stored quote within ``context``."""
    for fragment in self.substring_quote:
        for span in self._get_span(fragment, context):
            yield span
null
remove_dependencies_from_pyproject_toml
"""Remove dependencies from pyproject.toml.""" with open(pyproject_toml, encoding='utf-8') as f: pyproject: Dict[str, Any] = load(f) dependencies = pyproject['tool']['poetry']['dependencies'] for name in local_editable_dependencies: try: del dependencies[name] except KeyError: ...
def remove_dependencies_from_pyproject_toml(pyproject_toml: Path, local_editable_dependencies: Iterable[str]) ->None: """Remove dependencies from pyproject.toml.""" with open(pyproject_toml, encoding='utf-8') as f: pyproject: Dict[str, Any] = load(f) dependencies = pyproject['tool']['poetry'...
Remove dependencies from pyproject.toml.
args
"""The tool's input arguments.""" if self.args_schema is not None: return self.args_schema.schema()['properties'] return {'tool_input': {'type': 'string'}}
@property
def args(self) -> dict:
    """The tool's input arguments."""
    if self.args_schema is None:
        # No schema configured: fall back to a single free-form string input.
        return {'tool_input': {'type': 'string'}}
    return self.args_schema.schema()['properties']
The tool's input arguments.
test_generate
"""Test valid call to volc engine.""" llm = VolcEngineMaasLLM() output = llm.generate(['tell me a joke']) assert isinstance(output, LLMResult) assert isinstance(output.generations, list)
def test_generate() -> None:
    """Test valid call to volc engine."""
    llm = VolcEngineMaasLLM()
    result = llm.generate(['tell me a joke'])
    assert isinstance(result, LLMResult)
    assert isinstance(result.generations, list)
Test valid call to volc engine.
map_to_base_relationship
"""Map the KnowledgeGraph Relationship to the base Relationship.""" source = map_to_base_node(rel.source) target = map_to_base_node(rel.target) properties = props_to_dict(rel.properties) if rel.properties else {} return BaseRelationship(source=source, target=target, type=rel.type, properties=properties)
def map_to_base_relationship(rel: Relationship) ->BaseRelationship: """Map the KnowledgeGraph Relationship to the base Relationship.""" source = map_to_base_node(rel.source) target = map_to_base_node(rel.target) properties = props_to_dict(rel.properties) if rel.properties else {} return BaseRelation...
Map the KnowledgeGraph Relationship to the base Relationship.
_identifying_params
"""Get the identifying parameters.""" return {**{'model_id': self.model_id}, **{'model_kwargs': self.model_kwargs}}
@property def _identifying_params(self) ->Mapping[str, Any]: """Get the identifying parameters.""" return {**{'model_id': self.model_id}, **{'model_kwargs': self. model_kwargs}}
Get the identifying parameters.
test_load_multiple_pages
loader = GitbookLoader(web_page, load_all_paths=True) result = loader.load() print(len(result)) assert len(result) > 10
@pytest.mark.parametrize('web_page', ['https://platform-docs.opentargets.org/']
    )
def test_load_multiple_pages(self, web_page: str) -> None:
    """Loading with ``load_all_paths=True`` should crawl many pages.

    Removed a stray debug ``print`` from the test body.
    """
    loader = GitbookLoader(web_page, load_all_paths=True)
    result = loader.load()
    # A full crawl of the docs site yields many documents, not just the root.
    assert len(result) > 10
null
__init__
"""Initialize by creating the engine and all tables.""" engine = create_engine(f'sqlite:///{database_path}') super().__init__(engine)
def __init__(self, database_path: str='.langchain.db'):
    """Initialize by creating the engine and all tables."""
    super().__init__(create_engine(f'sqlite:///{database_path}'))
Initialize by creating the engine and all tables.
_identifying_params
"""Get the identifying parameters.""" return {**{}, **self._default_params}
@property def _identifying_params(self) ->Mapping[str, Any]: """Get the identifying parameters.""" return {**{}, **self._default_params}
Get the identifying parameters.
lower_case_transform
v = v.lower() return v
@validator('name')
def lower_case_transform(cls, v: str) -> str:
    """Normalize the ``name`` field to lowercase."""
    return v.lower()
null
test_extract_functions_classes
"""Test that functions and classes are extracted correctly.""" segmenter = CobolSegmenter(EXAMPLE_CODE) extracted_code = segmenter.extract_functions_classes() assert extracted_code == [ """A000-INITIALIZE-PARA. DISPLAY 'Initialization Paragraph'. MOVE 'New Value' TO SAMPLE-VAR.""" , """A100-PROCESS-PARA...
def test_extract_functions_classes() ->None: """Test that functions and classes are extracted correctly.""" segmenter = CobolSegmenter(EXAMPLE_CODE) extracted_code = segmenter.extract_functions_classes() assert extracted_code == [ """A000-INITIALIZE-PARA. DISPLAY 'Initialization Paragraph'. ...
Test that functions and classes are extracted correctly.
test__convert_dict_to_message_human
message = {'role': 'user', 'content': 'foo'} result = convert_dict_to_message(message) expected_output = HumanMessage(content='foo') assert result == expected_output
def test__convert_dict_to_message_human() -> None:
    """A user-role dict converts to a HumanMessage with the same content."""
    payload = {'role': 'user', 'content': 'foo'}
    converted = convert_dict_to_message(payload)
    assert converted == HumanMessage(content='foo')
null
test_similarity_search_exact_search
"""Test end to end construction and search with metadata.""" texts = ['foo', 'bar', 'baz'] docsearch = ElasticsearchStore.from_texts(texts, FakeEmbeddings(), ** elasticsearch_connection, index_name=index_name, strategy= ElasticsearchStore.ExactRetrievalStrategy()) expected_query = {'query': {'script_score': {'q...
def test_similarity_search_exact_search(self, elasticsearch_connection: dict, index_name: str) ->None: """Test end to end construction and search with metadata.""" texts = ['foo', 'bar', 'baz'] docsearch = ElasticsearchStore.from_texts(texts, FakeEmbeddings(), ** elasticsearch_connection, index_...
Test end to end construction and search with metadata.
test_api_key_is_string
llm = EmbaasEmbeddings(embaas_api_key='secret-api-key') assert isinstance(llm.embaas_api_key, SecretStr)
def test_api_key_is_string() -> None:
    """The api key is coerced into a SecretStr on the model."""
    embedder = EmbaasEmbeddings(embaas_api_key='secret-api-key')
    assert isinstance(embedder.embaas_api_key, SecretStr)
null
_import_openapi_utils_openapi_utils
from langchain_community.tools.openapi.utils.openapi_utils import OpenAPISpec return OpenAPISpec
def _import_openapi_utils_openapi_utils() -> Any:
    """Lazily import and return ``OpenAPISpec``."""
    from langchain_community.tools.openapi.utils.openapi_utils import (
        OpenAPISpec,
    )

    return OpenAPISpec
null
embeddings
return self.embedding_func
@property
def embeddings(self) -> Optional[Embeddings]:
    """Expose the embedding function backing this store, if any."""
    return self.embedding_func
null
_import_rocksetdb
from langchain_community.vectorstores.rocksetdb import Rockset return Rockset
def _import_rocksetdb() -> Any:
    """Lazily import and return the Rockset vector store class."""
    from langchain_community.vectorstores.rocksetdb import Rockset

    return Rockset
null
similarity_search_with_score
embedding_vector = self.embedding.embed_query(query) return self.similarity_search_with_score_by_vector(embedding_vector, k, filter=filter)
def similarity_search_with_score(self, query: str, k: int=4, filter:
    Optional[Dict[str, str]]=None) -> List[Tuple[Document, float]]:
    """Embed ``query`` and run a scored vector search over the store."""
    return self.similarity_search_with_score_by_vector(
        self.embedding.embed_query(query), k, filter=filter
    )
null
_search
"""Return searched documents result from BES Args: query: Text to look up documents similar to. query_vector: Embedding to look up documents similar to. filter: Array of Baidu ElasticSearch filter clauses to apply to the query. custom_query: Function to modify th...
def _search(self, query: Optional[str]=None, query_vector: Union[List[float ], None]=None, filter: Optional[dict]=None, custom_query: Optional[ Callable[[Dict, Union[str, None]], Dict]]=None, search_params: Dict={} ) ->List[Tuple[Document, float]]: """Return searched documents result from BES A...
Return searched documents result from BES Args: query: Text to look up documents similar to. query_vector: Embedding to look up documents similar to. filter: Array of Baidu ElasticSearch filter clauses to apply to the query. custom_query: Function to modify the query body before it is sent to BES. Ret...
create_json_chat_agent
"""Create an agent that uses JSON to format its logic, build for Chat Models. Examples: .. code-block:: python from langchain import hub from langchain_community.chat_models import ChatOpenAI from langchain.agents import AgentExecutor, create_json_chat_agent ...
def create_json_chat_agent(llm: BaseLanguageModel, tools: Sequence[BaseTool ], prompt: ChatPromptTemplate) ->Runnable: """Create an agent that uses JSON to format its logic, build for Chat Models. Examples: .. code-block:: python from langchain import hub from langchain_c...
Create an agent that uses JSON to format its logic, build for Chat Models. Examples: .. code-block:: python from langchain import hub from langchain_community.chat_models import ChatOpenAI from langchain.agents import AgentExecutor, create_json_chat_agent prompt = hub.pull("hwch...
validate_environment
"""Validate that api key and python package exists in environment.""" try: from bs4 import BeautifulSoup except ImportError: raise ImportError( 'Could not import bs4 python package. Please install it with `pip install bs4`.' ) return values
@root_validator() def validate_environment(cls, values: Dict) ->Dict: """Validate that api key and python package exists in environment.""" try: from bs4 import BeautifulSoup except ImportError: raise ImportError( 'Could not import bs4 python package. Please install it with `pip ...
Validate that api key and python package exists in environment.
check_mypy
"""Run mypy on a file.""" cmd = ( f"mypy {'--strict' if strict else ''} --follow-imports={follow_imports} {filepath}" ) subprocess.check_output(cmd, stderr=subprocess.STDOUT, shell=True, text= True, timeout=3)
def check_mypy(filepath: str, strict: bool=True, follow_imports: str='skip'): """Run mypy on a file.""" cmd = ( f"mypy {'--strict' if strict else ''} --follow-imports={follow_imports} {filepath}" ) subprocess.check_output(cmd, stderr=subprocess.STDOUT, shell=True, text =True, timeout...
Run mypy on a file.
get_current_entities
chain = LLMChain(llm=self.llm, prompt=self.entity_extraction_prompt) buffer_string = get_buffer_string(self.chat_memory.messages[-self.k * 2:], human_prefix=self.human_prefix, ai_prefix=self.ai_prefix) output = chain.predict(history=buffer_string, input=input_string) return get_entities(output)
def get_current_entities(self, input_string: str) ->List[str]: chain = LLMChain(llm=self.llm, prompt=self.entity_extraction_prompt) buffer_string = get_buffer_string(self.chat_memory.messages[-self.k * 2 :], human_prefix=self.human_prefix, ai_prefix=self.ai_prefix) output = chain.predict(history=buf...
null
parse_iter
"""Parse the output of an LLM call.""" return re.finditer(self.pattern, text, re.MULTILINE)
def parse_iter(self, text: str) -> Iterator[re.Match]:
    """Parse the output of an LLM call, yielding one match per occurrence."""
    return re.finditer(self.pattern, text, flags=re.MULTILINE)
Parse the output of an LLM call.
__init__
warnings.warn( '`MlflowAIGateway` is deprecated. Use `Mlflow` or `Databricks` instead.', DeprecationWarning) try: import mlflow.gateway except ImportError as e: raise ImportError( 'Could not import `mlflow.gateway` module. Please install it with `pip install mlflow[gateway]`.' ) from e s...
def __init__(self, **kwargs: Any): warnings.warn( '`MlflowAIGateway` is deprecated. Use `Mlflow` or `Databricks` instead.' , DeprecationWarning) try: import mlflow.gateway except ImportError as e: raise ImportError( 'Could not import `mlflow.gateway` module. Pleas...
null
get_package_root
package_root = Path.cwd() if cwd is None else cwd visited: Set[Path] = set() while package_root not in visited: visited.add(package_root) pyproject_path = package_root / 'pyproject.toml' if pyproject_path.exists(): return package_root package_root = package_root.parent raise FileNotFoundError('N...
def get_package_root(cwd: Optional[Path]=None) ->Path: package_root = Path.cwd() if cwd is None else cwd visited: Set[Path] = set() while package_root not in visited: visited.add(package_root) pyproject_path = package_root / 'pyproject.toml' if pyproject_path.exists(): re...
null
on_tool_start
self.on_tool_start_common()
def on_tool_start(self, *args: Any, **kwargs: Any) ->Any:
    # Delegate to the shared handler; the positional/keyword details of the
    # tool-start event are accepted but not inspected here.
    self.on_tool_start_common()
null
assert_query
assert query_body == {'knn': {'field': 'vector', 'filter': [], 'k': 1, 'num_candidates': 50, 'query_vector': [1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0]}, 'query': {'bool': {'filter': [], 'must': [{ 'match': {'text': {'query': 'foo'}}}]}}, 'rank': {'rrf': {}}} return query_body
def assert_query(query_body: dict, query: str) ->dict: assert query_body == {'knn': {'field': 'vector', 'filter': [], 'k': 1, 'num_candidates': 50, 'query_vector': [1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0]}, 'query': {'bool': {'filter': [], 'must': [{ 'match': {'text': {'query': 'fo...
null
_llm_type
return 'anthropic_functions'
@property def _llm_type(self) ->str: return 'anthropic_functions'
null
run
"""Run query through Merriam-Webster API and return a formatted result.""" quoted_query = quote(query) request_url = ( f'{MERRIAM_WEBSTER_API_URL}/{quoted_query}?key={self.merriam_webster_api_key}' ) response = requests.get(request_url, timeout=MERRIAM_WEBSTER_TIMEOUT) if response.status_code != 200: return...
def run(self, query: str) ->str: """Run query through Merriam-Webster API and return a formatted result.""" quoted_query = quote(query) request_url = ( f'{MERRIAM_WEBSTER_API_URL}/{quoted_query}?key={self.merriam_webster_api_key}' ) response = requests.get(request_url, timeout=MERRIAM_WE...
Run query through Merriam-Webster API and return a formatted result.
_embed
if self.show_progress: try: from tqdm import tqdm iter_ = tqdm(input, desc='OllamaEmbeddings') except ImportError: logger.warning( 'Unable to show progress bar because tqdm could not be imported. Please install with `pip install tqdm`.' ) iter_ = input els...
def _embed(self, input: List[str]) ->List[List[float]]: if self.show_progress: try: from tqdm import tqdm iter_ = tqdm(input, desc='OllamaEmbeddings') except ImportError: logger.warning( 'Unable to show progress bar because tqdm could not be import...
null