method_name
stringlengths
1
78
method_body
stringlengths
3
9.66k
full_code
stringlengths
31
10.7k
docstring
stringlengths
4
4.74k
_validate_aoss_with_engines
"""Validate AOSS with the engine.""" if is_aoss and engine != 'nmslib' and engine != 'faiss': raise ValueError( 'Amazon OpenSearch Service Serverless only supports `nmslib` or `faiss` engines' )
def _validate_aoss_with_engines(is_aoss: bool, engine: str) ->None: """Validate AOSS with the engine.""" if is_aoss and engine != 'nmslib' and engine != 'faiss': raise ValueError( 'Amazon OpenSearch Service Serverless only supports `nmslib` or `faiss` engines' )
Validate AOSS with the engine.
__init__
"""Initialize with url and api key.""" self.url = url self.api_key = api_key
def __init__(self, url: str, api_key: str): """Initialize with url and api key.""" self.url = url self.api_key = api_key
Initialize with url and api key.
_import_gigachat
from langchain_community.llms.gigachat import GigaChat return GigaChat
def _import_gigachat() ->Any: from langchain_community.llms.gigachat import GigaChat return GigaChat
null
_import_replicate
from langchain_community.llms.replicate import Replicate return Replicate
def _import_replicate() ->Any: from langchain_community.llms.replicate import Replicate return Replicate
null
_result_too_large
"""Tokenize the output of the query.""" if self.tiktoken_model_name: tiktoken_ = _import_tiktoken() encoding = tiktoken_.encoding_for_model(self.tiktoken_model_name) length = len(encoding.encode(result)) logger.info('Result length: %s', length) return length > self.output_token_limit, length return ...
def _result_too_large(self, result: str) ->Tuple[bool, int]: """Tokenize the output of the query.""" if self.tiktoken_model_name: tiktoken_ = _import_tiktoken() encoding = tiktoken_.encoding_for_model(self.tiktoken_model_name) length = len(encoding.encode(result)) logger.info('Re...
Tokenize the output of the query.
call_as_llm
return self.predict(message, stop=stop, **kwargs)
def call_as_llm(self, message: str, stop: Optional[List[str]]=None, ** kwargs: Any) ->str: return self.predict(message, stop=stop, **kwargs)
null
_request
"""Request inferencing from the triton server.""" inputs = self._generate_inputs(stream=False, prompt=prompt, **params) outputs = self._generate_outputs() result_queue = self._invoke_triton(self.model_name, inputs, outputs, stop) result_str = '' for token in result_queue: result_str += token self.client.stop_stream...
def _request(self, model_name: str, prompt: Sequence[Sequence[str]], stop: Optional[List[str]]=None, **params: Any) ->str: """Request inferencing from the triton server.""" inputs = self._generate_inputs(stream=False, prompt=prompt, **params) outputs = self._generate_outputs() result_queue = self._i...
Request inferencing from the triton server.
embeddings
return self._embeddings
@property def embeddings(self) ->Optional[Embeddings]: return self._embeddings
null
__init__
"""Initialize with necessary components. Args: embedding (Embeddings): A text embedding model. distance_strategy (DistanceStrategy, optional): Determines the strategy employed for calculating the distance between vectors in the embedding space. ...
def __init__(self, embedding: Embeddings, *, distance_strategy: DistanceStrategy=DEFAULT_DISTANCE_STRATEGY, table_name: str= 'embeddings', content_field: str='content', metadata_field: str= 'metadata', vector_field: str='vector', pool_size: int=5, max_overflow: int=10, timeout: float=30, **kwargs: Any):...
Initialize with necessary components. Args: embedding (Embeddings): A text embedding model. distance_strategy (DistanceStrategy, optional): Determines the strategy employed for calculating the distance between vectors in the embedding space. Defaults to DOT_PRODUCT. Available o...
test_dereference_refs_nested_refs_no_skip
schema = {'type': 'object', 'properties': {'info': {'$ref': '#/$defs/info'} }, '$defs': {'name': {'type': 'string'}, 'info': {'type': 'object', 'properties': {'age': 'int', 'name': {'$ref': '#/$defs/name'}}}}} expected = {'type': 'object', 'properties': {'info': {'type': 'object', 'properties': {'age': 'int...
def test_dereference_refs_nested_refs_no_skip() ->None: schema = {'type': 'object', 'properties': {'info': {'$ref': '#/$defs/info'}}, '$defs': {'name': {'type': 'string'}, 'info': { 'type': 'object', 'properties': {'age': 'int', 'name': {'$ref': '#/$defs/name'}}}}} expected = {'type': 'o...
null
_convert_message_to_dict
message_dict: Dict[str, Any] if isinstance(message, ChatMessage): message_dict = {'role': message.role, 'content': message.content} elif isinstance(message, HumanMessage): message_dict = {'role': 'user', 'content': message.content} elif isinstance(message, AIMessage): message_dict = {'role': 'assistant', 'c...
def _convert_message_to_dict(message: BaseMessage) ->dict: message_dict: Dict[str, Any] if isinstance(message, ChatMessage): message_dict = {'role': message.role, 'content': message.content} elif isinstance(message, HumanMessage): message_dict = {'role': 'user', 'content': message.content} ...
null
_convert_dict_to_message
role = _dict['role'] if role == 'user': return HumanMessage(content=_dict['content']) elif role == 'assistant': content = _dict.get('content') or '' if _dict.get('function_call'): _dict['function_call']['arguments'] = json.dumps(_dict[ 'function_call']['arguments']) additional_kw...
def _convert_dict_to_message(_dict: Mapping[str, Any]) ->BaseMessage: role = _dict['role'] if role == 'user': return HumanMessage(content=_dict['content']) elif role == 'assistant': content = _dict.get('content') or '' if _dict.get('function_call'): _dict['function_call']...
null
test_vectara_add_documents
"""Test add_documents.""" output1 = vectara1.similarity_search('large language model', k=2, n_sentence_context=0) assert len(output1) == 2 assert output1[0].page_content == 'large language model' assert output1[0].metadata['abbr'] == 'llm' assert output1[1].page_content == 'grounded generation' assert output1[1].me...
def test_vectara_add_documents(vectara1) ->None: """Test add_documents.""" output1 = vectara1.similarity_search('large language model', k=2, n_sentence_context=0) assert len(output1) == 2 assert output1[0].page_content == 'large language model' assert output1[0].metadata['abbr'] == 'llm' ...
Test add_documents.
test_raises_error
parser = SimpleJsonOutputParser() with pytest.raises(Exception): parser.invoke('hi')
def test_raises_error() ->None: parser = SimpleJsonOutputParser() with pytest.raises(Exception): parser.invoke('hi')
null
add_documents
"""Upload documents to Weaviate.""" from weaviate.util import get_valid_uuid with self.client.batch as batch: ids = [] for i, doc in enumerate(docs): metadata = doc.metadata or {} data_properties = {self.text_key: doc.page_content, **metadata} if 'uuids' in kwargs: _id = kwar...
def add_documents(self, docs: List[Document], **kwargs: Any) ->List[str]: """Upload documents to Weaviate.""" from weaviate.util import get_valid_uuid with self.client.batch as batch: ids = [] for i, doc in enumerate(docs): metadata = doc.metadata or {} data_propertie...
Upload documents to Weaviate.
lazy_parse
"""Lazily parse the blob.""" yield Document(page_content=blob.as_string(), metadata={'source': blob.source})
def lazy_parse(self, blob: Blob) ->Iterator[Document]: """Lazily parse the blob.""" yield Document(page_content=blob.as_string(), metadata={'source': blob. source})
Lazily parse the blob.
test_load_no_result
loader = WikipediaLoader( 'NORESULTCALL_NORESULTCALL_NORESULTCALL_NORESULTCALL_NORESULTCALL_NORESULTCALL' ) docs = loader.load() assert not docs
def test_load_no_result() ->None: loader = WikipediaLoader( 'NORESULTCALL_NORESULTCALL_NORESULTCALL_NORESULTCALL_NORESULTCALL_NORESULTCALL' ) docs = loader.load() assert not docs
null
embed_query
""" Convert input text to a 'vector' (list of floats). If the text is a number, use it as the angle for the unit vector in units of pi. Any other input text becomes the singular result [0, 0] ! """ try: angle = float(text) return [math.cos(angle * math.pi), math.sin(angle...
def embed_query(self, text: str) ->List[float]: """ Convert input text to a 'vector' (list of floats). If the text is a number, use it as the angle for the unit vector in units of pi. Any other input text becomes the singular result [0, 0] ! """ try: angle = float...
Convert input text to a 'vector' (list of floats). If the text is a number, use it as the angle for the unit vector in units of pi. Any other input text becomes the singular result [0, 0] !
_run
"""Use the tool.""" return self.api_spec
def _run(self, tool_input: Optional[str]='', run_manager: Optional[ CallbackManagerForToolRun]=None) ->str: """Use the tool.""" return self.api_spec
Use the tool.
_import_pubmed
from langchain_community.utilities.pubmed import PubMedAPIWrapper return PubMedAPIWrapper
def _import_pubmed() ->Any: from langchain_community.utilities.pubmed import PubMedAPIWrapper return PubMedAPIWrapper
null
__init__
""" Args: schemas: list of schemas """ self.schemas = schemas
def __init__(self, schemas: List[Schema]): """ Args: schemas: list of schemas """ self.schemas = schemas
Args: schemas: list of schemas
test_does_not_allow_args
"""Test formatting raises error when args are provided.""" template = 'This is a {} test.' with pytest.raises(ValueError): formatter.format(template, 'good')
def test_does_not_allow_args() ->None: """Test formatting raises error when args are provided.""" template = 'This is a {} test.' with pytest.raises(ValueError): formatter.format(template, 'good')
Test formatting raises error when args are provided.
test_sequential_usage_memory
"""Test sequential usage with memory.""" memory = SimpleMemory(memories={'zab': 'rab'}) chain_1 = FakeChain(input_variables=['foo'], output_variables=['bar']) chain_2 = FakeChain(input_variables=['bar'], output_variables=['baz']) chain = SequentialChain(memory=memory, chains=[chain_1, chain_2], input_variables=['fo...
def test_sequential_usage_memory() ->None: """Test sequential usage with memory.""" memory = SimpleMemory(memories={'zab': 'rab'}) chain_1 = FakeChain(input_variables=['foo'], output_variables=['bar']) chain_2 = FakeChain(input_variables=['bar'], output_variables=['baz']) chain = SequentialChain(mem...
Test sequential usage with memory.
visit_operation
args = [arg.accept(self) for arg in operation.arguments] return self._format_func(operation.operator).join(args)
def visit_operation(self, operation: Operation) ->str: args = [arg.accept(self) for arg in operation.arguments] return self._format_func(operation.operator).join(args)
null
test_annoy_vector_sim_by_index
"""Test vector similarity.""" texts = ['foo', 'bar', 'baz'] docsearch = Annoy.from_texts(texts, FakeEmbeddings()) index_to_id = docsearch.index_to_docstore_id expected_docstore = InMemoryDocstore({index_to_id[0]: Document(page_content ='foo'), index_to_id[1]: Document(page_content='bar'), index_to_id[2]: Docume...
def test_annoy_vector_sim_by_index() ->None: """Test vector similarity.""" texts = ['foo', 'bar', 'baz'] docsearch = Annoy.from_texts(texts, FakeEmbeddings()) index_to_id = docsearch.index_to_docstore_id expected_docstore = InMemoryDocstore({index_to_id[0]: Document( page_content='foo'), ind...
Test vector similarity.
_import_supabase
from langchain_community.vectorstores.supabase import SupabaseVectorStore return SupabaseVectorStore
def _import_supabase() ->Any: from langchain_community.vectorstores.supabase import SupabaseVectorStore return SupabaseVectorStore
null
_call
if self.sequential_responses: return self._get_next_response_in_sequence if self.queries is not None: return self.queries[prompt] if stop is None: return 'foo' else: return 'bar'
def _call(self, prompt: str, stop: Optional[List[str]]=None, run_manager: Optional[CallbackManagerForLLMRun]=None, **kwargs: Any) ->str: if self.sequential_responses: return self._get_next_response_in_sequence if self.queries is not None: return self.queries[prompt] if stop is None: ...
null
return_values
return ['output']
@property def return_values(self) ->List[str]: return ['output']
null
_summarize_metrics_for_generated_outputs
pd = import_pandas() metrics_df = pd.DataFrame(metrics) metrics_summary = metrics_df.describe() return metrics_summary.to_dict()
def _summarize_metrics_for_generated_outputs(metrics: Sequence) ->dict: pd = import_pandas() metrics_df = pd.DataFrame(metrics) metrics_summary = metrics_df.describe() return metrics_summary.to_dict()
null
_format_response_payload
"""Formats response""" try: text = json.loads(output)['response'] if stop_sequences: text = enforce_stop_tokens(text, stop_sequences) return text except Exception as e: if isinstance(e, json.decoder.JSONDecodeError): return output.decode('utf-8') raise e
def _format_response_payload(self, output: bytes, stop_sequences: Optional[ List[str]]) ->str: """Formats response""" try: text = json.loads(output)['response'] if stop_sequences: text = enforce_stop_tokens(text, stop_sequences) return text except Exception as e: ...
Formats response
lazy_load
"""A lazy loader for Documents.""" try: from azureml.fsspec import AzureMachineLearningFileSystem except ImportError as exc: raise ImportError( 'Could not import azureml-fspec package.Please install it with `pip install azureml-fsspec`.' ) from exc fs = AzureMachineLearningFileSystem(self.url) i...
def lazy_load(self) ->Iterator[Document]: """A lazy loader for Documents.""" try: from azureml.fsspec import AzureMachineLearningFileSystem except ImportError as exc: raise ImportError( 'Could not import azureml-fspec package.Please install it with `pip install azureml-fsspec`.' ...
A lazy loader for Documents.
run_coroutine_in_new_loop
new_loop = asyncio.new_event_loop() try: asyncio.set_event_loop(new_loop) return new_loop.run_until_complete(coroutine_func(*args, **kwargs)) finally: new_loop.close()
def run_coroutine_in_new_loop(coroutine_func: Any, *args: Dict, **kwargs: Dict ) ->Any: new_loop = asyncio.new_event_loop() try: asyncio.set_event_loop(new_loop) return new_loop.run_until_complete(coroutine_func(*args, **kwargs)) finally: new_loop.close()
null
query_params
"""Create query parameters for GitHub API.""" labels = ','.join(self.labels) if self.labels else self.labels query_params_dict = {'milestone': self.milestone, 'state': self.state, 'assignee': self.assignee, 'creator': self.creator, 'mentioned': self. mentioned, 'labels': labels, 'sort': self.sort, 'direction': ...
@property def query_params(self) ->str: """Create query parameters for GitHub API.""" labels = ','.join(self.labels) if self.labels else self.labels query_params_dict = {'milestone': self.milestone, 'state': self.state, 'assignee': self.assignee, 'creator': self.creator, 'mentioned': self.me...
Create query parameters for GitHub API.
put
"""PUT the URL and return the text.""" return requests.put(url, json=data, headers=self.headers, auth=self.auth, **kwargs)
def put(self, url: str, data: Dict[str, Any], **kwargs: Any ) ->requests.Response: """PUT the URL and return the text.""" return requests.put(url, json=data, headers=self.headers, auth=self. auth, **kwargs)
PUT the URL and return the text.
on_retriever_error
self.on_retriever_error_common()
def on_retriever_error(self, *args: Any, **kwargs: Any) ->Any: self.on_retriever_error_common()
null
_run
"""Use the tool.""" from bs4 import BeautifulSoup if self.sync_browser is None: raise ValueError(f'Synchronous browser not provided to {self.name}') page = get_current_page(self.sync_browser) html_content = page.content() soup = BeautifulSoup(html_content, 'lxml') return ' '.join(text for text in soup.stripped_stri...
def _run(self, run_manager: Optional[CallbackManagerForToolRun]=None) ->str: """Use the tool.""" from bs4 import BeautifulSoup if self.sync_browser is None: raise ValueError(f'Synchronous browser not provided to {self.name}') page = get_current_page(self.sync_browser) html_content = page.con...
Use the tool.
_stream
message_dicts, params = self._create_message_dicts(messages, stop) params = {**params, **kwargs, 'stream': True} default_chunk_class = AIMessageChunk for chunk in self.completion_with_retry(messages=message_dicts, run_manager =run_manager, **params): if len(chunk.choices) == 0: continue delta = chun...
def _stream(self, messages: List[BaseMessage], stop: Optional[List[str]]= None, run_manager: Optional[CallbackManagerForLLMRun]=None, **kwargs: Any ) ->Iterator[ChatGenerationChunk]: message_dicts, params = self._create_message_dicts(messages, stop) params = {**params, **kwargs, 'stream': True} defa...
null
default_preprocessing_func
return text.split()
def default_preprocessing_func(text: str) ->List[str]: return text.split()
null
apply_and_parse
"""Call apply and then parse the results.""" warnings.warn( 'The apply_and_parse method is deprecated, instead pass an output parser directly to LLMChain.' ) result = self.apply(input_list, callbacks=callbacks) return self._parse_generation(result)
def apply_and_parse(self, input_list: List[Dict[str, Any]], callbacks: Callbacks=None) ->Sequence[Union[str, List[str], Dict[str, str]]]: """Call apply and then parse the results.""" warnings.warn( 'The apply_and_parse method is deprecated, instead pass an output parser directly to LLMChain.' ...
Call apply and then parse the results.
__copy__
"""Return a copy of the callback handler.""" return self
def __copy__(self) ->'OpenAICallbackHandler': """Return a copy of the callback handler.""" return self
Return a copy of the callback handler.
clear
""" Clear cache. If `asynchronous` is True, flush asynchronously. This flushes the *whole* db. """ asynchronous = kwargs.get('asynchronous', False) if asynchronous: asynchronous = 'ASYNC' else: asynchronous = 'SYNC' self.redis.flushdb(flush_type=asynchronous)
def clear(self, **kwargs: Any) ->None: """ Clear cache. If `asynchronous` is True, flush asynchronously. This flushes the *whole* db. """ asynchronous = kwargs.get('asynchronous', False) if asynchronous: asynchronous = 'ASYNC' else: asynchronous = 'SYNC' self....
Clear cache. If `asynchronous` is True, flush asynchronously. This flushes the *whole* db.
embed_query
return self.embed_documents([text])[0]
def embed_query(self, text: str) ->List[float]: return self.embed_documents([text])[0]
null
resize_base64_image
""" Resize an image encoded as a Base64 string. :param base64_string: A Base64 encoded string of the image to be resized. :param size: A tuple representing the new size (width, height) for the image. :return: A Base64 encoded string of the resized image. """ img_data = base64.b64decode(base64_strin...
def resize_base64_image(base64_string, size=(128, 128)): """ Resize an image encoded as a Base64 string. :param base64_string: A Base64 encoded string of the image to be resized. :param size: A tuple representing the new size (width, height) for the image. :return: A Base64 encoded string of the re...
Resize an image encoded as a Base64 string. :param base64_string: A Base64 encoded string of the image to be resized. :param size: A tuple representing the new size (width, height) for the image. :return: A Base64 encoded string of the resized image.
test_cpal_chain
""" patch required since `networkx` package is not part of unit test environment """ with mock.patch('langchain_experimental.cpal.models.NetworkxEntityGraph' ) as mock_networkx: graph_instance = mock_networkx.return_value graph_instance.get_topological_sort.return_value = ['cindy', 'marcia',...
def test_cpal_chain(self) ->None: """ patch required since `networkx` package is not part of unit test environment """ with mock.patch('langchain_experimental.cpal.models.NetworkxEntityGraph' ) as mock_networkx: graph_instance = mock_networkx.return_value graph_instance.g...
patch required since `networkx` package is not part of unit test environment
_texts_to_documents
"""Return list of Documents from list of texts and metadatas.""" if metadatas is None: metadatas = repeat({}) docs = [Document(page_content=text, metadata=metadata) for text, metadata in zip(texts, metadatas)] return docs
@staticmethod def _texts_to_documents(texts: Iterable[str], metadatas: Optional[Iterable[ Dict[Any, Any]]]=None) ->List[Document]: """Return list of Documents from list of texts and metadatas.""" if metadatas is None: metadatas = repeat({}) docs = [Document(page_content=text, metadata=metadata) ...
Return list of Documents from list of texts and metadatas.
get_connection_string
connection_string: str = get_from_dict_or_env(data=kwargs, key= 'connection_string', env_key='PGVECTOR_CONNECTION_STRING') if not connection_string: raise ValueError( 'Postgres connection string is requiredEither pass it as a parameteror set the PGVECTOR_CONNECTION_STRING environment variable.' ...
@classmethod def get_connection_string(cls, kwargs: Dict[str, Any]) ->str: connection_string: str = get_from_dict_or_env(data=kwargs, key= 'connection_string', env_key='PGVECTOR_CONNECTION_STRING') if not connection_string: raise ValueError( 'Postgres connection string is requiredEit...
null
test_litellm_generate
"""Test generate method of anthropic.""" chat = ChatLiteLLM(model='test') chat_messages: List[List[BaseMessage]] = [[HumanMessage(content= 'How many toes do dogs have?')]] messages_copy = [messages.copy() for messages in chat_messages] result: LLMResult = chat.generate(chat_messages) assert isinstance(result, LLMRe...
def test_litellm_generate() ->None: """Test generate method of anthropic.""" chat = ChatLiteLLM(model='test') chat_messages: List[List[BaseMessage]] = [[HumanMessage(content= 'How many toes do dogs have?')]] messages_copy = [messages.copy() for messages in chat_messages] result: LLMResult = ...
Test generate method of anthropic.
partial
"""Get a new ChatPromptTemplate with some input variables already filled in. Args: **kwargs: keyword arguments to use for filling in template variables. Ought to be a subset of the input variables. Returns: A new ChatPromptTemplate. Example: ...
def partial(self, **kwargs: Union[str, Callable[[], str]] ) ->ChatPromptTemplate: """Get a new ChatPromptTemplate with some input variables already filled in. Args: **kwargs: keyword arguments to use for filling in template variables. Ought to be a subset of the inpu...
Get a new ChatPromptTemplate with some input variables already filled in. Args: **kwargs: keyword arguments to use for filling in template variables. Ought to be a subset of the input variables. Returns: A new ChatPromptTemplate. Example: .. code-block:: python from langchain_c...
_run
"""Use the Stack Exchange tool.""" return self.api_wrapper.run(query)
def _run(self, query: str, run_manager: Optional[CallbackManagerForToolRun] =None) ->str: """Use the Stack Exchange tool.""" return self.api_wrapper.run(query)
Use the Stack Exchange tool.
_format_params
system, formatted_messages = _format_messages(messages) rtn = {'model': self.model, 'max_tokens': self.max_tokens, 'messages': formatted_messages, 'temperature': self.temperature, 'top_k': self. top_k, 'top_p': self.top_p, 'stop_sequences': stop, 'system': system} rtn = {k: v for k, v in rtn.items() if v is not...
def _format_params(self, *, messages: List[BaseMessage], stop: Optional[ List[str]]=None, **kwargs: Dict) ->Dict: system, formatted_messages = _format_messages(messages) rtn = {'model': self.model, 'max_tokens': self.max_tokens, 'messages': formatted_messages, 'temperature': self.temperature, 'top_k...
null
_get_relevant_documents
from zep_python.memory import MemorySearchPayload if not self.zep_client: raise RuntimeError('Zep client not initialized.') payload = MemorySearchPayload(text=query, metadata=metadata, search_scope= self.search_scope, search_type=self.search_type, mmr_lambda=self.mmr_lambda ) results: List[MemorySearchResul...
def _get_relevant_documents(self, query: str, *, run_manager: CallbackManagerForRetrieverRun, metadata: Optional[Dict[str, Any]]=None ) ->List[Document]: from zep_python.memory import MemorySearchPayload if not self.zep_client: raise RuntimeError('Zep client not initialized.') payload = Memo...
null
clear
"""Clear cache. This is for all LLMs at once.""" self.astra_db.truncate_collection(self.collection_name)
def clear(self, **kwargs: Any) ->None: """Clear cache. This is for all LLMs at once.""" self.astra_db.truncate_collection(self.collection_name)
Clear cache. This is for all LLMs at once.
test_agent_stopped_early
"""Test react chain when max iterations or max execution time is exceeded.""" agent = _get_agent(max_iterations=0) output = agent.run('when was langchain made') assert output == 'Agent stopped due to iteration limit or time limit.' agent = _get_agent(max_execution_time=0.0) output = agent.run('when was langchain made')...
def test_agent_stopped_early() ->None: """Test react chain when max iterations or max execution time is exceeded.""" agent = _get_agent(max_iterations=0) output = agent.run('when was langchain made') assert output == 'Agent stopped due to iteration limit or time limit.' agent = _get_agent(max_execut...
Test react chain when max iterations or max execution time is exceeded.
test_llm_with_callbacks
"""Test LLM callbacks.""" handler = FakeCallbackHandler() llm = FakeListLLM(callbacks=[handler], verbose=True, responses=['foo']) output = llm('foo') assert output == 'foo' assert handler.starts == 1 assert handler.ends == 1 assert handler.errors == 0
def test_llm_with_callbacks() ->None: """Test LLM callbacks.""" handler = FakeCallbackHandler() llm = FakeListLLM(callbacks=[handler], verbose=True, responses=['foo']) output = llm('foo') assert output == 'foo' assert handler.starts == 1 assert handler.ends == 1 assert handler.errors == ...
Test LLM callbacks.
format_chat_history
messages = format_messages(chain_input) return {'chat_history': messages, 'text': chain_input.get('text')}
def format_chat_history(chain_input: dict) ->dict: messages = format_messages(chain_input) return {'chat_history': messages, 'text': chain_input.get('text')}
null
test_from_texts_with_metadatas_and_pre_filter
texts = ['Dogs are tough.', 'Cats have fluff.', 'What is a sandwich?', 'The fence is purple.'] metadatas = [{'a': 1}, {'b': 1}, {'c': 1}, {'d': 1, 'e': 2}] vectorstore = MongoDBAtlasVectorSearch.from_texts(texts, embedding_openai, metadatas=metadatas, collection=collection, index_name=INDEX_NAME) sleep(1) outpu...
def test_from_texts_with_metadatas_and_pre_filter(self, embedding_openai: Embeddings, collection: Any) ->None: texts = ['Dogs are tough.', 'Cats have fluff.', 'What is a sandwich?', 'The fence is purple.'] metadatas = [{'a': 1}, {'b': 1}, {'c': 1}, {'d': 1, 'e': 2}] vectorstore = MongoDBAtlasVec...
null
create_tagging_chain_pydantic
"""Creates a chain that extracts information from a passage based on a pydantic schema. Args: pydantic_schema: The pydantic schema of the entities to extract. llm: The language model to use. Returns: Chain (LLMChain) that can be used to extract information from a passage. """ ...
def create_tagging_chain_pydantic(pydantic_schema: Any, llm: BaseLanguageModel, prompt: Optional[ChatPromptTemplate]=None, **kwargs: Any ) ->Chain: """Creates a chain that extracts information from a passage based on a pydantic schema. Args: pydantic_schema: The pydantic schema of the enti...
Creates a chain that extracts information from a passage based on a pydantic schema. Args: pydantic_schema: The pydantic schema of the entities to extract. llm: The language model to use. Returns: Chain (LLMChain) that can be used to extract information from a passage.
test_wasm_chat_without_service_url
chat = WasmChatService() system_message = SystemMessage(content='You are an AI assistant') user_message = HumanMessage(content='What is the capital of France?') messages = [system_message, user_message] with pytest.raises(ValueError) as e: chat(messages) assert 'Error code: 503' in str(e) assert 'reason: The IP add...
def test_wasm_chat_without_service_url() ->None: chat = WasmChatService() system_message = SystemMessage(content='You are an AI assistant') user_message = HumanMessage(content='What is the capital of France?') messages = [system_message, user_message] with pytest.raises(ValueError) as e: cha...
null
_generate
"""Call out to OpenAI's endpoint with k unique prompts. Args: prompts: The prompts to pass into the model. stop: Optional list of stop words to use when generating. Returns: The full LLM output. Example: .. code-block:: python r...
def _generate(self, prompts: List[str], stop: Optional[List[str]]=None, run_manager: Optional[CallbackManagerForLLMRun]=None, **kwargs: Any ) ->LLMResult: """Call out to OpenAI's endpoint with k unique prompts. Args: prompts: The prompts to pass into the model. stop: Optiona...
Call out to OpenAI's endpoint with k unique prompts. Args: prompts: The prompts to pass into the model. stop: Optional list of stop words to use when generating. Returns: The full LLM output. Example: .. code-block:: python response = openai.generate(["Tell me a joke."])
get_sync
"""Get the equivalent sync RunManager. Returns: CallbackManagerForRetrieverRun: The sync RunManager. """ return CallbackManagerForRetrieverRun(run_id=self.run_id, handlers=self. handlers, inheritable_handlers=self.inheritable_handlers, parent_run_id =self.parent_run_id, tags=self.ta...
def get_sync(self) ->CallbackManagerForRetrieverRun: """Get the equivalent sync RunManager. Returns: CallbackManagerForRetrieverRun: The sync RunManager. """ return CallbackManagerForRetrieverRun(run_id=self.run_id, handlers=self .handlers, inheritable_handlers=self.inherita...
Get the equivalent sync RunManager. Returns: CallbackManagerForRetrieverRun: The sync RunManager.
test_events_call
search = DataForSeoAPIWrapper(params={'location_name': 'Spain', 'language_code': 'es', 'se_type': 'events'}) output = search.results('concerts') assert any('Madrid' in ((i['location_info'] or dict())['address'] or '') for i in output)
def test_events_call() ->None: search = DataForSeoAPIWrapper(params={'location_name': 'Spain', 'language_code': 'es', 'se_type': 'events'}) output = search.results('concerts') assert any('Madrid' in ((i['location_info'] or dict())['address'] or '' ) for i in output)
null
from_llm
"""Load the necessary chains.""" sql_chain = SQLDatabaseChain.from_llm(llm, db, prompt=query_prompt, **kwargs) decider_chain = LLMChain(llm=llm, prompt=decider_prompt, output_key= 'table_names') return cls(sql_chain=sql_chain, decider_chain=decider_chain, **kwargs)
@classmethod def from_llm(cls, llm: BaseLanguageModel, db: SQLDatabase, query_prompt: BasePromptTemplate=PROMPT, decider_prompt: BasePromptTemplate= DECIDER_PROMPT, **kwargs: Any) ->SQLDatabaseSequentialChain: """Load the necessary chains.""" sql_chain = SQLDatabaseChain.from_llm(llm, db, prompt=query_p...
Load the necessary chains.
invoke
"""First evaluates the condition, then delegate to true or false branch.""" config = ensure_config(config) callback_manager = get_callback_manager_for_config(config) run_manager = callback_manager.on_chain_start(dumpd(self), input, name= config.get('run_name')) try: for idx, branch in enumerate(self.branches): ...
def invoke(self, input: Input, config: Optional[RunnableConfig]=None, ** kwargs: Any) ->Output: """First evaluates the condition, then delegate to true or false branch.""" config = ensure_config(config) callback_manager = get_callback_manager_for_config(config) run_manager = callback_manager.on_chai...
First evaluates the condition, then delegate to true or false branch.
test_fake_retriever_v2
callbacks = FakeCallbackHandler() assert fake_retriever_v2._new_arg_supported is True results = fake_retriever_v2.get_relevant_documents('Foo', callbacks=[callbacks] ) assert results[0].page_content == 'Foo' assert callbacks.retriever_starts == 1 assert callbacks.retriever_ends == 1 assert callbacks.retriever_error...
def test_fake_retriever_v2(fake_retriever_v2: BaseRetriever, fake_erroring_retriever_v2: BaseRetriever) ->None: callbacks = FakeCallbackHandler() assert fake_retriever_v2._new_arg_supported is True results = fake_retriever_v2.get_relevant_documents('Foo', callbacks=[ callbacks]) assert resul...
null
test_pypdfium2_parser
"""Test PyPDFium2 parser.""" _assert_with_parser(PyPDFium2Parser())
def test_pypdfium2_parser() ->None:
    """Test PyPDFium2 parser."""
    parser = PyPDFium2Parser()
    _assert_with_parser(parser)
Test PyPDFium2 parser.
_load_template
"""Load template from the path if applicable.""" if f'{var_name}_path' in config: if var_name in config: raise ValueError( f'Both `{var_name}_path` and `{var_name}` cannot be provided.') template_path = Path(config.pop(f'{var_name}_path')) if template_path.suffix == '.txt': with ...
def _load_template(var_name: str, config: dict) ->dict: """Load template from the path if applicable.""" if f'{var_name}_path' in config: if var_name in config: raise ValueError( f'Both `{var_name}_path` and `{var_name}` cannot be provided.') template_path = Path(conf...
Load template from the path if applicable.
test__get_prompts_valid
_get_prompt(inputs)
@pytest.mark.parametrize('inputs', _VALID_PROMPTS)
def test__get_prompts_valid(inputs: Dict[str, Any]) ->None:
    """Each entry in _VALID_PROMPTS must be accepted by _get_prompt without raising."""
    _get_prompt(inputs)
null
validate_environment
"""Validate that python package exists in environment.""" if not values.get('client'): values['client'] = grpcclient.InferenceServerClient(values['server_url']) return values
@root_validator(pre=True, allow_reuse=True) def validate_environment(cls, values: Dict[str, Any]) ->Dict[str, Any]: """Validate that python package exists in environment.""" if not values.get('client'): values['client'] = grpcclient.InferenceServerClient(values[ 'server_url']) return val...
Validate that python package exists in environment.
_identifying_params
"""Get the identifying parameters.""" return {'temperature': self.temperature, 'model': self.model, 'profanity': self.profanity, 'streaming': self.streaming, 'max_tokens': self.max_tokens}
@property def _identifying_params(self) ->Dict[str, Any]: """Get the identifying parameters.""" return {'temperature': self.temperature, 'model': self.model, 'profanity': self.profanity, 'streaming': self.streaming, 'max_tokens': self.max_tokens}
Get the identifying parameters.
predict_messages
if stop is None: _stop = None else: _stop = list(stop) return self(messages, stop=_stop, **kwargs)
def predict_messages(self, messages: List[BaseMessage], *, stop: Optional[
    Sequence[str]]=None, **kwargs: Any) ->BaseMessage:
    """Invoke the model on *messages*, normalizing *stop* to a plain list."""
    _stop = None if stop is None else list(stop)
    return self(messages, stop=_stop, **kwargs)
null
test_evaluate_run
run_mapper = ChainStringRunMapper() example_mapper = MagicMock() string_evaluator = criteria.CriteriaEvalChain.from_llm(fake_llm.FakeLLM()) evaluator = StringRunEvaluatorChain(run_mapper=run_mapper, example_mapper= example_mapper, name='test_evaluator', string_evaluator=string_evaluator) run = MagicMock() example =...
def test_evaluate_run() ->None: run_mapper = ChainStringRunMapper() example_mapper = MagicMock() string_evaluator = criteria.CriteriaEvalChain.from_llm(fake_llm.FakeLLM()) evaluator = StringRunEvaluatorChain(run_mapper=run_mapper, example_mapper=example_mapper, name='test_evaluator', str...
null
test_from_texts
input_texts = ['I have a pen.', 'Do you have a pen?', 'I have a bag.'] knn_retriever = KNNRetriever.from_texts(texts=input_texts, embeddings= FakeEmbeddings(size=100)) assert len(knn_retriever.texts) == 3
def test_from_texts(self) ->None:
    """KNNRetriever.from_texts should retain every input text."""
    input_texts = ['I have a pen.', 'Do you have a pen?', 'I have a bag.']
    retriever = KNNRetriever.from_texts(texts=input_texts, embeddings=
        FakeEmbeddings(size=100))
    assert len(retriever.texts) == 3
null
embeddings
return None
@property
def embeddings(self) ->Optional[Embeddings]:
    """This implementation carries no embeddings object; always returns None."""
    return None
null
_unique_documents
return [doc for i, doc in enumerate(documents) if doc not in documents[:i]]
def _unique_documents(documents: Sequence[Document]) ->List[Document]:
    """Return *documents* with duplicates dropped, keeping first-seen order.

    Documents are compared by equality (they may be unhashable), so a
    running list of accepted items is used instead of a set. Unlike the
    previous `doc not in documents[:i]` form, this avoids allocating a
    fresh slice copy for every element.
    """
    unique: List[Document] = []
    for document in documents:
        if document not in unique:
            unique.append(document)
    return unique
null
test_invalid_sparse_vector_name
with pytest.raises(QdrantException) as e: QdrantSparseVectorRetriever(client=retriever.client, collection_name= retriever.collection_name, sparse_vector_name= 'invalid sparse vector', sparse_encoder=consistent_fake_sparse_encoder) assert 'does not contain sparse vector' in str(e.value)
def test_invalid_sparse_vector_name(retriever: QdrantSparseVectorRetriever ) ->None: with pytest.raises(QdrantException) as e: QdrantSparseVectorRetriever(client=retriever.client, collection_name=retriever.collection_name, sparse_vector_name= 'invalid sparse vector', sparse_encod...
null
scroll
if direction == 'up': self.page.evaluate( '(document.scrollingElement || document.body).scrollTop = (document.scrollingElement || document.body).scrollTop - window.innerHeight;' ) elif direction == 'down': self.page.evaluate( '(document.scrollingElement || document.body).scrollTop = (doc...
def scroll(self, direction: str) ->None: if direction == 'up': self.page.evaluate( '(document.scrollingElement || document.body).scrollTop = (document.scrollingElement || document.body).scrollTop - window.innerHeight;' ) elif direction == 'down': self.page.evaluate( ...
null
test_configured_system_message
messages = [HumanMessage(content='usr-msg-1')] actual = model_cfg_sys_msg.predict_messages(messages).content expected = """<s>[INST] <<SYS>> sys-msg <</SYS>> usr-msg-1 [/INST]""" assert actual == expected
def test_configured_system_message(model_cfg_sys_msg: Llama2Chat) ->None:
    """The configured system message is rendered into the Llama2 prompt."""
    messages = [HumanMessage(content='usr-msg-1')]
    result = model_cfg_sys_msg.predict_messages(messages)
    expected = '<s>[INST] <<SYS>>\nsys-msg\n<</SYS>>\n\nusr-msg-1 [/INST]'
    assert result.content == expected
null
test_embed_query
text = 'query_text' vector = cache_embeddings.embed_query(text) expected_vector = [5.0, 6.0] assert vector == expected_vector
def test_embed_query(cache_embeddings: CacheBackedEmbeddings) ->None:
    """Embedding a query through the cache yields the expected vector."""
    result = cache_embeddings.embed_query('query_text')
    assert result == [5.0, 6.0]
null
deserialize_json_input
"""Use the serialized typescript dictionary. Resolve the path, query params dict, and optional requestBody dict. """ args: dict = json.loads(serialized_args) path = self._construct_path(args) body_params = self._extract_body_params(args) query_params = self._extract_query_params(args) return {'url': pa...
def deserialize_json_input(self, serialized_args: str) ->dict: """Use the serialized typescript dictionary. Resolve the path, query params dict, and optional requestBody dict. """ args: dict = json.loads(serialized_args) path = self._construct_path(args) body_params = self._extract_body...
Use the serialized typescript dictionary. Resolve the path, query params dict, and optional requestBody dict.
_display_prompt
"""Displays the given prompt to the user.""" print(f'\n{prompt}')
def _display_prompt(prompt: str) ->None: """Displays the given prompt to the user.""" print(f'\n{prompt}')
Displays the given prompt to the user.
test_call
"""Test that call gives the correct answer.""" search = GoogleSearchAPIWrapper() output = search.run("What was Obama's first name?") assert 'Barack Hussein Obama II' in output
def test_call() ->None:
    """Test that call gives the correct answer."""
    wrapper = GoogleSearchAPIWrapper()
    result = wrapper.run("What was Obama's first name?")
    assert 'Barack Hussein Obama II' in result
Test that call gives the correct answer.
validate_environment
"""Validate that api key in environment.""" try: import fireworks.client except ImportError as e: raise ImportError( 'Could not import fireworks-ai python package. Please install it with `pip install fireworks-ai`.' ) from e fireworks_api_key = convert_to_secret_str(get_from_dict_or_env(values, ...
@root_validator() def validate_environment(cls, values: Dict) ->Dict: """Validate that api key in environment.""" try: import fireworks.client except ImportError as e: raise ImportError( 'Could not import fireworks-ai python package. Please install it with `pip install fireworks-...
Validate that api key in environment.
_import_openlm
from langchain_community.llms.openlm import OpenLM return OpenLM
def _import_openlm() ->Any:
    """Lazily import and return the OpenLM LLM class."""
    from importlib import import_module

    return import_module('langchain_community.llms.openlm').OpenLM
null
_create_search_params
"""Generate search params based on the current index type""" from pymilvus import Collection if isinstance(self.col, Collection) and self.search_params is None: index = self._get_index() if index is not None: index_type: str = index['index_param']['index_type'] metric_type: str = index['index_pa...
def _create_search_params(self) ->None: """Generate search params based on the current index type""" from pymilvus import Collection if isinstance(self.col, Collection) and self.search_params is None: index = self._get_index() if index is not None: index_type: str = index['index_...
Generate search params based on the current index type
_import_slack_get_channel
from langchain_community.tools.slack.get_channel import SlackGetChannel return SlackGetChannel
def _import_slack_get_channel() ->Any:
    """Lazily import and return the SlackGetChannel tool class."""
    from importlib import import_module

    module = import_module('langchain_community.tools.slack.get_channel')
    return module.SlackGetChannel
null
validate_browser_provided
"""Check that the arguments are valid.""" lazy_import_playwright_browsers() if values.get('async_browser') is None and values.get('sync_browser') is None: raise ValueError('Either async_browser or sync_browser must be specified.') return values
@root_validator def validate_browser_provided(cls, values: dict) ->dict: """Check that the arguments are valid.""" lazy_import_playwright_browsers() if values.get('async_browser') is None and values.get('sync_browser' ) is None: raise ValueError( 'Either async_browser or sync_bro...
Check that the arguments are valid.
_create_index_if_not_exists
"""Create the Elasticsearch index if it doesn't already exist. Args: index_name: Name of the Elasticsearch index to create. dims_length: Length of the embedding vectors. """ if self.client.indices.exists(index=index_name): logger.debug(f'Index {index_name} already exists. Sk...
def _create_index_if_not_exists(self, index_name: str, dims_length: Optional[int]=None) ->None: """Create the Elasticsearch index if it doesn't already exist. Args: index_name: Name of the Elasticsearch index to create. dims_length: Length of the embedding vectors. """ ...
Create the Elasticsearch index if it doesn't already exist. Args: index_name: Name of the Elasticsearch index to create. dims_length: Length of the embedding vectors.
_identifying_params
return self._default_params
@property def _identifying_params(self) ->Mapping[str, Any]: return self._default_params
null
test_find_all_links_multiple
html = ( '<div><a class="blah" href="https://foobar.com">hullo</a></div><div><a class="bleh" href="/baz/cool">buhbye</a></div>' ) actual = find_all_links(html) assert sorted(actual) == ['/baz/cool', 'https://foobar.com']
def test_find_all_links_multiple() ->None:
    """find_all_links collects both absolute and relative hrefs."""
    html = (
        '<div><a class="blah" href="https://foobar.com">hullo</a></div>'
        '<div><a class="bleh" href="/baz/cool">buhbye</a></div>'
        )
    links = find_all_links(html)
    assert sorted(links) == ['/baz/cool', 'https://foobar.com']
null
_replace_secrets
result = root.copy() for path, secret_id in secrets_map.items(): [*parts, last] = path.split('.') current = result for part in parts: if part not in current: break current[part] = current[part].copy() current = current[part] if last in current: current[last] =...
def _replace_secrets(root: Dict[Any, Any], secrets_map: Dict[str, str]) ->Dict[ Any, Any]: result = root.copy() for path, secret_id in secrets_map.items(): [*parts, last] = path.split('.') current = result for part in parts: if part not in current: break ...
null
file_store
with tempfile.TemporaryDirectory() as temp_dir: store = LocalFileStore(temp_dir) yield store
@pytest.fixture
def file_store() ->Generator[LocalFileStore, None, None]:
    """Yield a LocalFileStore rooted in a temp directory (cleaned up after)."""
    with tempfile.TemporaryDirectory() as temp_dir:
        yield LocalFileStore(temp_dir)
null
test_embedding_query
document = 'foo bar' model = VertexAIEmbeddings() output = model.embed_query(document) assert len(output) == 768
def test_embedding_query() ->None:
    """A single query embedding from VertexAI has 768 dimensions."""
    model = VertexAIEmbeddings()
    vector = model.embed_query('foo bar')
    assert len(vector) == 768
null
_call
"""Call out to Nebula Service endpoint. Args: prompt: The prompt to pass into the model. stop: Optional list of stop words to use when generating. Returns: The string generated by the model. Example: .. code-block:: python response ...
def _call(self, prompt: str, stop: Optional[List[str]]=None, run_manager: Optional[CallbackManagerForLLMRun]=None, **kwargs: Any) ->str: """Call out to Nebula Service endpoint. Args: prompt: The prompt to pass into the model. stop: Optional list of stop words to use when generati...
Call out to Nebula Service endpoint. Args: prompt: The prompt to pass into the model. stop: Optional list of stop words to use when generating. Returns: The string generated by the model. Example: .. code-block:: python response = nebula("Tell me a joke.")
parse_log
""" Create Document objects from Datadog log items. """ attributes = log.get('attributes', {}) metadata = {'id': log.get('id', ''), 'status': attributes.get('status'), 'service': attributes.get('service', ''), 'tags': attributes.get('tags', []), 'timestamp': attributes.get('timestamp', '')} mess...
def parse_log(self, log: dict) ->Document: """ Create Document objects from Datadog log items. """ attributes = log.get('attributes', {}) metadata = {'id': log.get('id', ''), 'status': attributes.get('status'), 'service': attributes.get('service', ''), 'tags': attributes.get( ...
Create Document objects from Datadog log items.
_import_office365_utils
from langchain_community.tools.office365.utils import authenticate return authenticate
def _import_office365_utils() ->Any:
    """Lazily import and return the Office365 `authenticate` helper."""
    from importlib import import_module

    module = import_module('langchain_community.tools.office365.utils')
    return module.authenticate
null
test_deepsparse_call
"""Test valid call to DeepSparse.""" config = {'max_generated_tokens': 5, 'use_deepsparse_cache': False} llm = DeepSparse(model= 'zoo:nlg/text_generation/codegen_mono-350m/pytorch/huggingface/bigpython_bigquery_thepile/base-none' , config=config) output = llm('def ') assert isinstance(output, str) assert len(ou...
def test_deepsparse_call() ->None: """Test valid call to DeepSparse.""" config = {'max_generated_tokens': 5, 'use_deepsparse_cache': False} llm = DeepSparse(model= 'zoo:nlg/text_generation/codegen_mono-350m/pytorch/huggingface/bigpython_bigquery_thepile/base-none' , config=config) output...
Test valid call to DeepSparse.
_format_func
self._validate_func(func) if isinstance(func, Operator): value = OPERATOR_TO_TQL[func.value] elif isinstance(func, Comparator): value = COMPARATOR_TO_TQL[func.value] return f'{value}'
def _format_func(self, func: Union[Operator, Comparator]) ->str:
    """Translate a query Operator or Comparator into its TQL token string."""
    # Rejects unsupported operators/comparators before any lookup.
    self._validate_func(func)
    if isinstance(func, Operator):
        value = OPERATOR_TO_TQL[func.value]
    elif isinstance(func, Comparator):
        value = COMPARATOR_TO_TQL[func.value]
    # NOTE(review): if func were neither an Operator nor a Comparator,
    # `value` would be unbound here (NameError). _validate_func presumably
    # rejects that case first -- confirm.
    return f'{value}'
null
similarity_search
"""Return documents most similar to query. Args: query: Text to look up documents similar to. k: Number of Documents to return. Defaults to 4. filter: Array of Elasticsearch filter clauses to apply to the query. Returns: List of Documents most similar to...
def similarity_search(self, query: str, k: int=4, filter: Optional[dict]= None, **kwargs: Any) ->List[Document]: """Return documents most similar to query. Args: query: Text to look up documents similar to. k: Number of Documents to return. Defaults to 4. filter: Arr...
Return documents most similar to query. Args: query: Text to look up documents similar to. k: Number of Documents to return. Defaults to 4. filter: Array of Elasticsearch filter clauses to apply to the query. Returns: List of Documents most similar to the query, in descending order of similarity.
test_uses_actual_secret_value_from_secretstr
"""Test that actual secret is retrieved using `.get_secret_value()`.""" llm = Minimax(minimax_api_key='secret-api-key', minimax_group_id='group_id') assert cast(SecretStr, llm.minimax_api_key).get_secret_value( ) == 'secret-api-key'
def test_uses_actual_secret_value_from_secretstr() ->None:
    """Test that actual secret is retrieved using `.get_secret_value()`."""
    llm = Minimax(minimax_api_key='secret-api-key', minimax_group_id='group_id'
        )
    secret = cast(SecretStr, llm.minimax_api_key)
    assert secret.get_secret_value() == 'secret-api-key'
Test that actual secret is retrieved using `.get_secret_value()`.
_embedding_func
"""Call out to LocalAI's embedding endpoint.""" if self.model.endswith('001'): text = text.replace('\n', ' ') return embed_with_retry(self, input=[text], **self._invocation_params)['data'][ 0]['embedding']
def _embedding_func(self, text: str, *, engine: str) ->List[float]:
    """Call out to LocalAI's embedding endpoint."""
    # NOTE(review): `engine` is accepted (keyword-only) but never used in
    # this body -- confirm whether it should feed _invocation_params.
    if self.model.endswith('001'):
        # Models with the '-001' suffix get newlines collapsed to spaces
        # before embedding.
        text = text.replace('\n', ' ')
    # Single-text request; the API-shaped response nests the vector under
    # data[0]['embedding'].
    return embed_with_retry(self, input=[text], **self._invocation_params)[
        'data'][0]['embedding']
Call out to LocalAI's embedding endpoint.