method_name
stringlengths 1
78
| method_body
stringlengths 3
9.66k
| full_code
stringlengths 31
10.7k
| docstring
stringlengths 4
4.74k
⌀ |
|---|---|---|---|
_import_vearch
|
from langchain_community.vectorstores.vearch import Vearch
return Vearch
|
def _import_vearch() ->Any:
from langchain_community.vectorstores.vearch import Vearch
return Vearch
| null |
test_writer_call
|
"""Test valid call to Writer."""
llm = Writer()
output = llm('Say foo:')
assert isinstance(output, str)
|
def test_writer_call() ->None:
"""Test valid call to Writer."""
llm = Writer()
output = llm('Say foo:')
assert isinstance(output, str)
|
Test valid call to Writer.
|
validate_environment
|
"""Validate that api key and python package exists in environment."""
cohere_api_key = get_from_dict_or_env(values, 'cohere_api_key',
'COHERE_API_KEY')
max_retries = values.get('max_retries')
request_timeout = values.get('request_timeout')
try:
import cohere
client_name = values['user_agent']
values['client'] = cohere.Client(cohere_api_key, max_retries=
max_retries, timeout=request_timeout, client_name=client_name)
values['async_client'] = cohere.AsyncClient(cohere_api_key, max_retries
=max_retries, timeout=request_timeout, client_name=client_name)
except ImportError:
raise ValueError(
'Could not import cohere python package. Please install it with `pip install cohere`.'
)
return values
|
@root_validator()
def validate_environment(cls, values: Dict) ->Dict:
"""Validate that api key and python package exists in environment."""
cohere_api_key = get_from_dict_or_env(values, 'cohere_api_key',
'COHERE_API_KEY')
max_retries = values.get('max_retries')
request_timeout = values.get('request_timeout')
try:
import cohere
client_name = values['user_agent']
values['client'] = cohere.Client(cohere_api_key, max_retries=
max_retries, timeout=request_timeout, client_name=client_name)
values['async_client'] = cohere.AsyncClient(cohere_api_key,
max_retries=max_retries, timeout=request_timeout, client_name=
client_name)
except ImportError:
raise ValueError(
'Could not import cohere python package. Please install it with `pip install cohere`.'
)
return values
|
Validate that api key and python package exists in environment.
|
load
|
brave_client = BraveSearchWrapper(api_key=self.api_key, search_kwargs=self.
search_kwargs)
return brave_client.download_documents(self.query)
|
def load(self) ->List[Document]:
brave_client = BraveSearchWrapper(api_key=self.api_key, search_kwargs=
self.search_kwargs)
return brave_client.download_documents(self.query)
| null |
new_persist_run_single
|
time.sleep(0.01)
old_persist_run_single(run)
|
def new_persist_run_single(run: Run) ->None:
time.sleep(0.01)
old_persist_run_single(run)
| null |
_import_merriam_webster
|
from langchain_community.utilities.merriam_webster import MerriamWebsterAPIWrapper
return MerriamWebsterAPIWrapper
|
def _import_merriam_webster() ->Any:
from langchain_community.utilities.merriam_webster import MerriamWebsterAPIWrapper
return MerriamWebsterAPIWrapper
| null |
_import_scenexplain_tool
|
from langchain_community.tools.scenexplain.tool import SceneXplainTool
return SceneXplainTool
|
def _import_scenexplain_tool() ->Any:
from langchain_community.tools.scenexplain.tool import SceneXplainTool
return SceneXplainTool
| null |
is_lc_serializable
|
return True
|
@classmethod
def is_lc_serializable(cls) ->bool:
return True
| null |
_import_opensearch_vector_search
|
from langchain_community.vectorstores.opensearch_vector_search import OpenSearchVectorSearch
return OpenSearchVectorSearch
|
def _import_opensearch_vector_search() ->Any:
from langchain_community.vectorstores.opensearch_vector_search import OpenSearchVectorSearch
return OpenSearchVectorSearch
| null |
_identifying_params
|
kwargs = self.llm_kwargs or {}
return {**self.client.client_pool.get_current_client().get_model_params(),
**kwargs}
|
@property
def _identifying_params(self) ->Mapping[str, Any]:
kwargs = self.llm_kwargs or {}
return {**self.client.client_pool.get_current_client().get_model_params
(), **kwargs}
| null |
plan
|
"""Given input, decided what to do.
Args:
intermediate_steps: Steps the LLM has taken to date, along with observations
**kwargs: User inputs.
Returns:
Action specifying what tool to use.
"""
agent_scratchpad = format_to_openai_function_messages(intermediate_steps)
selected_inputs = {k: kwargs[k] for k in self.prompt.input_variables if k !=
'agent_scratchpad'}
full_inputs = dict(**selected_inputs, agent_scratchpad=agent_scratchpad)
prompt = self.prompt.format_prompt(**full_inputs)
messages = prompt.to_messages()
if with_functions:
predicted_message = self.llm.predict_messages(messages, functions=self.
functions, callbacks=callbacks)
else:
predicted_message = self.llm.predict_messages(messages, callbacks=callbacks
)
agent_decision = OpenAIFunctionsAgentOutputParser._parse_ai_message(
predicted_message)
return agent_decision
|
def plan(self, intermediate_steps: List[Tuple[AgentAction, str]], callbacks:
Callbacks=None, with_functions: bool=True, **kwargs: Any) ->Union[
AgentAction, AgentFinish]:
"""Given input, decided what to do.
Args:
intermediate_steps: Steps the LLM has taken to date, along with observations
**kwargs: User inputs.
Returns:
Action specifying what tool to use.
"""
agent_scratchpad = format_to_openai_function_messages(intermediate_steps)
selected_inputs = {k: kwargs[k] for k in self.prompt.input_variables if
k != 'agent_scratchpad'}
full_inputs = dict(**selected_inputs, agent_scratchpad=agent_scratchpad)
prompt = self.prompt.format_prompt(**full_inputs)
messages = prompt.to_messages()
if with_functions:
predicted_message = self.llm.predict_messages(messages, functions=
self.functions, callbacks=callbacks)
else:
predicted_message = self.llm.predict_messages(messages, callbacks=
callbacks)
agent_decision = OpenAIFunctionsAgentOutputParser._parse_ai_message(
predicted_message)
return agent_decision
|
Given input, decided what to do.
Args:
intermediate_steps: Steps the LLM has taken to date, along with observations
**kwargs: User inputs.
Returns:
Action specifying what tool to use.
|
_call
|
_run_manager = run_manager or CallbackManagerForChainRun.get_noop_manager()
url = inputs[self.input_url_key]
browser_content = inputs[self.input_browser_content_key]
llm_cmd = self.llm_chain.predict(objective=self.objective, url=url[:100],
previous_command=self.previous_command, browser_content=browser_content
[:4500], callbacks=_run_manager.get_child())
llm_cmd = llm_cmd.strip()
self.previous_command = llm_cmd
return {self.output_key: llm_cmd}
|
def _call(self, inputs: Dict[str, str], run_manager: Optional[
CallbackManagerForChainRun]=None) ->Dict[str, str]:
_run_manager = run_manager or CallbackManagerForChainRun.get_noop_manager()
url = inputs[self.input_url_key]
browser_content = inputs[self.input_browser_content_key]
llm_cmd = self.llm_chain.predict(objective=self.objective, url=url[:100
], previous_command=self.previous_command, browser_content=
browser_content[:4500], callbacks=_run_manager.get_child())
llm_cmd = llm_cmd.strip()
self.previous_command = llm_cmd
return {self.output_key: llm_cmd}
| null |
test_from_texts
|
input_texts = ['I have a pen.', 'Do you have a pen?', 'I have a bag.']
tfidf_retriever = TFIDFRetriever.from_texts(texts=input_texts)
assert len(tfidf_retriever.docs) == 3
assert tfidf_retriever.tfidf_array.toarray().shape == (3, 5)
|
@pytest.mark.requires('sklearn')
def test_from_texts() ->None:
input_texts = ['I have a pen.', 'Do you have a pen?', 'I have a bag.']
tfidf_retriever = TFIDFRetriever.from_texts(texts=input_texts)
assert len(tfidf_retriever.docs) == 3
assert tfidf_retriever.tfidf_array.toarray().shape == (3, 5)
| null |
load
|
"""Load weather data for the given locations."""
return list(self.lazy_load())
|
def load(self) ->List[Document]:
"""Load weather data for the given locations."""
return list(self.lazy_load())
|
Load weather data for the given locations.
|
similarity_search
|
"""Return Elasticsearch documents most similar to query.
Args:
query: Text to look up documents similar to.
k: Number of Documents to return. Defaults to 4.
fetch_k (int): Number of Documents to fetch to pass to knn num_candidates.
filter: Array of Elasticsearch filter clauses to apply to the query.
Returns:
List of Documents most similar to the query,
in descending order of similarity.
"""
results = self._search(query=query, k=k, fetch_k=fetch_k, filter=filter, **
kwargs)
return [doc for doc, _ in results]
|
def similarity_search(self, query: str, k: int=4, fetch_k: int=50, filter:
Optional[List[dict]]=None, **kwargs: Any) ->List[Document]:
"""Return Elasticsearch documents most similar to query.
Args:
query: Text to look up documents similar to.
k: Number of Documents to return. Defaults to 4.
fetch_k (int): Number of Documents to fetch to pass to knn num_candidates.
filter: Array of Elasticsearch filter clauses to apply to the query.
Returns:
List of Documents most similar to the query,
in descending order of similarity.
"""
results = self._search(query=query, k=k, fetch_k=fetch_k, filter=filter,
**kwargs)
return [doc for doc, _ in results]
|
Return Elasticsearch documents most similar to query.
Args:
query: Text to look up documents similar to.
k: Number of Documents to return. Defaults to 4.
fetch_k (int): Number of Documents to fetch to pass to knn num_candidates.
filter: Array of Elasticsearch filter clauses to apply to the query.
Returns:
List of Documents most similar to the query,
in descending order of similarity.
|
set_api_url
|
if 'api_url' not in values:
host = values['host']
endpoint_name = values['endpoint_name']
api_url = f'https://{host}/serving-endpoints/{endpoint_name}/invocations'
values['api_url'] = api_url
return values
|
@root_validator(pre=True)
def set_api_url(cls, values: Dict[str, Any]) ->Dict[str, Any]:
if 'api_url' not in values:
host = values['host']
endpoint_name = values['endpoint_name']
api_url = (
f'https://{host}/serving-endpoints/{endpoint_name}/invocations')
values['api_url'] = api_url
return values
| null |
test_initialization
|
loader = AssemblyAIAudioTranscriptLoader(file_path='./testfile.mp3',
api_key='api_key')
assert loader.file_path == './testfile.mp3'
assert loader.transcript_format == TranscriptFormat.TEXT
|
@pytest.mark.requires('assemblyai')
def test_initialization() ->None:
loader = AssemblyAIAudioTranscriptLoader(file_path='./testfile.mp3',
api_key='api_key')
assert loader.file_path == './testfile.mp3'
assert loader.transcript_format == TranscriptFormat.TEXT
| null |
invoke
|
if isinstance(input, BaseMessage):
return self._call_with_config(lambda inner_input: self.parse_result([
ChatGeneration(message=inner_input)]), input, config, run_type='parser'
)
else:
return self._call_with_config(lambda inner_input: self.parse_result([
Generation(text=inner_input)]), input, config, run_type='parser')
|
def invoke(self, input: Union[str, BaseMessage], config: Optional[
RunnableConfig]=None) ->T:
if isinstance(input, BaseMessage):
return self._call_with_config(lambda inner_input: self.parse_result
([ChatGeneration(message=inner_input)]), input, config,
run_type='parser')
else:
return self._call_with_config(lambda inner_input: self.parse_result
([Generation(text=inner_input)]), input, config, run_type='parser')
| null |
_get_len_safe_embeddings
|
"""
Generate length-safe embeddings for a list of texts.
This method handles tokenization and embedding generation, respecting the
set embedding context length and chunk size. It supports both tiktoken
and HuggingFace tokenizer based on the tiktoken_enabled flag.
Args:
texts (List[str]): A list of texts to embed.
engine (str): The engine or model to use for embeddings.
chunk_size (Optional[int]): The size of chunks for processing embeddings.
Returns:
List[List[float]]: A list of embeddings for each input text.
"""
tokens = []
indices = []
model_name = self.tiktoken_model_name or self.model
_chunk_size = chunk_size or self.chunk_size
if not self.tiktoken_enabled:
try:
from transformers import AutoTokenizer
except ImportError:
raise ValueError(
'Could not import transformers python package. This is needed in order to for OpenAIEmbeddings without `tiktoken`. Please install it with `pip install transformers`. '
)
tokenizer = AutoTokenizer.from_pretrained(pretrained_model_name_or_path
=model_name)
for i, text in enumerate(texts):
tokenized = tokenizer.encode(text, add_special_tokens=False)
for j in range(0, len(tokenized), self.embedding_ctx_length):
token_chunk = tokenized[j:j + self.embedding_ctx_length]
chunk_text = tokenizer.decode(token_chunk)
tokens.append(chunk_text)
indices.append(i)
else:
try:
import tiktoken
except ImportError:
raise ImportError(
'Could not import tiktoken python package. This is needed in order to for OpenAIEmbeddings. Please install it with `pip install tiktoken`.'
)
try:
encoding = tiktoken.encoding_for_model(model_name)
except KeyError:
logger.warning('Warning: model not found. Using cl100k_base encoding.')
model = 'cl100k_base'
encoding = tiktoken.get_encoding(model)
for i, text in enumerate(texts):
if self.model.endswith('001'):
text = text.replace('\n', ' ')
token = encoding.encode(text=text, allowed_special=self.
allowed_special, disallowed_special=self.disallowed_special)
for j in range(0, len(token), self.embedding_ctx_length):
tokens.append(token[j:j + self.embedding_ctx_length])
indices.append(i)
if self.show_progress_bar:
try:
from tqdm.auto import tqdm
_iter = tqdm(range(0, len(tokens), _chunk_size))
except ImportError:
_iter = range(0, len(tokens), _chunk_size)
else:
_iter = range(0, len(tokens), _chunk_size)
batched_embeddings: List[List[float]] = []
for i in _iter:
response = embed_with_retry(self, input=tokens[i:i + _chunk_size], **
self._invocation_params)
if not isinstance(response, dict):
response = response.dict()
batched_embeddings.extend(r['embedding'] for r in response['data'])
results: List[List[List[float]]] = [[] for _ in range(len(texts))]
num_tokens_in_batch: List[List[int]] = [[] for _ in range(len(texts))]
for i in range(len(indices)):
if self.skip_empty and len(batched_embeddings[i]) == 1:
continue
results[indices[i]].append(batched_embeddings[i])
num_tokens_in_batch[indices[i]].append(len(tokens[i]))
embeddings: List[List[float]] = [[] for _ in range(len(texts))]
for i in range(len(texts)):
_result = results[i]
if len(_result) == 0:
average_embedded = embed_with_retry(self, input='', **self.
_invocation_params)
if not isinstance(average_embedded, dict):
average_embedded = average_embedded.dict()
average = average_embedded['data'][0]['embedding']
else:
average = np.average(_result, axis=0, weights=num_tokens_in_batch[i])
embeddings[i] = (average / np.linalg.norm(average)).tolist()
return embeddings
|
def _get_len_safe_embeddings(self, texts: List[str], *, engine: str,
chunk_size: Optional[int]=None) ->List[List[float]]:
"""
Generate length-safe embeddings for a list of texts.
This method handles tokenization and embedding generation, respecting the
set embedding context length and chunk size. It supports both tiktoken
and HuggingFace tokenizer based on the tiktoken_enabled flag.
Args:
texts (List[str]): A list of texts to embed.
engine (str): The engine or model to use for embeddings.
chunk_size (Optional[int]): The size of chunks for processing embeddings.
Returns:
List[List[float]]: A list of embeddings for each input text.
"""
tokens = []
indices = []
model_name = self.tiktoken_model_name or self.model
_chunk_size = chunk_size or self.chunk_size
if not self.tiktoken_enabled:
try:
from transformers import AutoTokenizer
except ImportError:
raise ValueError(
'Could not import transformers python package. This is needed in order to for OpenAIEmbeddings without `tiktoken`. Please install it with `pip install transformers`. '
)
tokenizer = AutoTokenizer.from_pretrained(pretrained_model_name_or_path
=model_name)
for i, text in enumerate(texts):
tokenized = tokenizer.encode(text, add_special_tokens=False)
for j in range(0, len(tokenized), self.embedding_ctx_length):
token_chunk = tokenized[j:j + self.embedding_ctx_length]
chunk_text = tokenizer.decode(token_chunk)
tokens.append(chunk_text)
indices.append(i)
else:
try:
import tiktoken
except ImportError:
raise ImportError(
'Could not import tiktoken python package. This is needed in order to for OpenAIEmbeddings. Please install it with `pip install tiktoken`.'
)
try:
encoding = tiktoken.encoding_for_model(model_name)
except KeyError:
logger.warning(
'Warning: model not found. Using cl100k_base encoding.')
model = 'cl100k_base'
encoding = tiktoken.get_encoding(model)
for i, text in enumerate(texts):
if self.model.endswith('001'):
text = text.replace('\n', ' ')
token = encoding.encode(text=text, allowed_special=self.
allowed_special, disallowed_special=self.disallowed_special)
for j in range(0, len(token), self.embedding_ctx_length):
tokens.append(token[j:j + self.embedding_ctx_length])
indices.append(i)
if self.show_progress_bar:
try:
from tqdm.auto import tqdm
_iter = tqdm(range(0, len(tokens), _chunk_size))
except ImportError:
_iter = range(0, len(tokens), _chunk_size)
else:
_iter = range(0, len(tokens), _chunk_size)
batched_embeddings: List[List[float]] = []
for i in _iter:
response = embed_with_retry(self, input=tokens[i:i + _chunk_size],
**self._invocation_params)
if not isinstance(response, dict):
response = response.dict()
batched_embeddings.extend(r['embedding'] for r in response['data'])
results: List[List[List[float]]] = [[] for _ in range(len(texts))]
num_tokens_in_batch: List[List[int]] = [[] for _ in range(len(texts))]
for i in range(len(indices)):
if self.skip_empty and len(batched_embeddings[i]) == 1:
continue
results[indices[i]].append(batched_embeddings[i])
num_tokens_in_batch[indices[i]].append(len(tokens[i]))
embeddings: List[List[float]] = [[] for _ in range(len(texts))]
for i in range(len(texts)):
_result = results[i]
if len(_result) == 0:
average_embedded = embed_with_retry(self, input='', **self.
_invocation_params)
if not isinstance(average_embedded, dict):
average_embedded = average_embedded.dict()
average = average_embedded['data'][0]['embedding']
else:
average = np.average(_result, axis=0, weights=
num_tokens_in_batch[i])
embeddings[i] = (average / np.linalg.norm(average)).tolist()
return embeddings
|
Generate length-safe embeddings for a list of texts.
This method handles tokenization and embedding generation, respecting the
set embedding context length and chunk size. It supports both tiktoken
and HuggingFace tokenizer based on the tiktoken_enabled flag.
Args:
texts (List[str]): A list of texts to embed.
engine (str): The engine or model to use for embeddings.
chunk_size (Optional[int]): The size of chunks for processing embeddings.
Returns:
List[List[float]]: A list of embeddings for each input text.
|
select_examples
|
"""Select which examples to use based on the input lengths."""
inputs = ' '.join(input_variables.values())
remaining_length = self.max_length - self.get_text_length(inputs)
i = 0
examples = []
while remaining_length > 0 and i < len(self.examples):
new_length = remaining_length - self.example_text_lengths[i]
if new_length < 0:
break
else:
examples.append(self.examples[i])
remaining_length = new_length
i += 1
return examples
|
def select_examples(self, input_variables: Dict[str, str]) ->List[dict]:
"""Select which examples to use based on the input lengths."""
inputs = ' '.join(input_variables.values())
remaining_length = self.max_length - self.get_text_length(inputs)
i = 0
examples = []
while remaining_length > 0 and i < len(self.examples):
new_length = remaining_length - self.example_text_lengths[i]
if new_length < 0:
break
else:
examples.append(self.examples[i])
remaining_length = new_length
i += 1
return examples
|
Select which examples to use based on the input lengths.
|
load
|
"""
Get issues of a GitHub repository.
Returns:
A list of Documents with attributes:
- page_content
- metadata
- url
- title
- creator
- created_at
- last_update_time
- closed_time
- number of comments
- state
- labels
- assignee
- assignees
- milestone
- locked
- number
- is_pull_request
"""
return list(self.lazy_load())
|
def load(self) ->List[Document]:
"""
Get issues of a GitHub repository.
Returns:
A list of Documents with attributes:
- page_content
- metadata
- url
- title
- creator
- created_at
- last_update_time
- closed_time
- number of comments
- state
- labels
- assignee
- assignees
- milestone
- locked
- number
- is_pull_request
"""
return list(self.lazy_load())
|
Get issues of a GitHub repository.
Returns:
A list of Documents with attributes:
- page_content
- metadata
- url
- title
- creator
- created_at
- last_update_time
- closed_time
- number of comments
- state
- labels
- assignee
- assignees
- milestone
- locked
- number
- is_pull_request
|
multi_modal_rag_chain
|
"""
Multi-modal RAG chain,
:param retriever: A function that retrieves the necessary context for the model.
:return: A chain of functions representing the multi-modal RAG process.
"""
model = ChatGoogleGenerativeAI(model='gemini-pro-vision')
chain = {'context': retriever | RunnableLambda(get_resized_images),
'question': RunnablePassthrough()} | RunnableLambda(img_prompt_func
) | model | StrOutputParser()
return chain
|
def multi_modal_rag_chain(retriever):
"""
Multi-modal RAG chain,
:param retriever: A function that retrieves the necessary context for the model.
:return: A chain of functions representing the multi-modal RAG process.
"""
model = ChatGoogleGenerativeAI(model='gemini-pro-vision')
chain = {'context': retriever | RunnableLambda(get_resized_images),
'question': RunnablePassthrough()} | RunnableLambda(img_prompt_func
) | model | StrOutputParser()
return chain
|
Multi-modal RAG chain,
:param retriever: A function that retrieves the necessary context for the model.
:return: A chain of functions representing the multi-modal RAG process.
|
test_all_imports
|
assert set(__all__) == set(EXPECTED_ALL)
|
def test_all_imports() ->None:
assert set(__all__) == set(EXPECTED_ALL)
| null |
test_load_issue_9046
|
"""Test for the fixed issue 9046"""
expected_docs = 3
loader = ArxivLoader(query=
'MetaGPT: Meta Programming for Multi-Agent Collaborative Framework',
load_max_docs=expected_docs)
docs = loader.load()
assert_docs(docs)
assert 'MetaGPT' in docs[0].metadata['Title']
loader = ArxivLoader(query=
'MetaGPT - Meta Programming for Multi-Agent Collaborative Framework',
load_max_docs=expected_docs)
docs = loader.load()
assert_docs(docs)
assert 'MetaGPT' in docs[0].metadata['Title']
|
@pytest.mark.skip(reason='test could be flaky')
def test_load_issue_9046() ->None:
"""Test for the fixed issue 9046"""
expected_docs = 3
loader = ArxivLoader(query=
'MetaGPT: Meta Programming for Multi-Agent Collaborative Framework',
load_max_docs=expected_docs)
docs = loader.load()
assert_docs(docs)
assert 'MetaGPT' in docs[0].metadata['Title']
loader = ArxivLoader(query=
'MetaGPT - Meta Programming for Multi-Agent Collaborative Framework',
load_max_docs=expected_docs)
docs = loader.load()
assert_docs(docs)
assert 'MetaGPT' in docs[0].metadata['Title']
|
Test for the fixed issue 9046
|
similarity_search
|
"""Run similarity search with Chroma.
Args:
query (str): Query text to search for.
k (int): Number of results to return. Defaults to 4.
filter (Optional[Dict[str, str]]): Filter by metadata. Defaults to None.
Returns:
List[Document]: List of documents most similar to the query text.
"""
docs_and_scores = self.similarity_search_with_score(query, k, filter=filter,
**kwargs)
return [doc for doc, _ in docs_and_scores]
|
def similarity_search(self, query: str, k: int=DEFAULT_K, filter: Optional[
Dict[str, str]]=None, **kwargs: Any) ->List[Document]:
"""Run similarity search with Chroma.
Args:
query (str): Query text to search for.
k (int): Number of results to return. Defaults to 4.
filter (Optional[Dict[str, str]]): Filter by metadata. Defaults to None.
Returns:
List[Document]: List of documents most similar to the query text.
"""
docs_and_scores = self.similarity_search_with_score(query, k, filter=
filter, **kwargs)
return [doc for doc, _ in docs_and_scores]
|
Run similarity search with Chroma.
Args:
query (str): Query text to search for.
k (int): Number of results to return. Defaults to 4.
filter (Optional[Dict[str, str]]): Filter by metadata. Defaults to None.
Returns:
List[Document]: List of documents most similar to the query text.
|
evaluation_name
|
"""
Get the evaluation name.
Returns:
str: The evaluation name.
"""
return 'exact_match'
|
@property
def evaluation_name(self) ->str:
"""
Get the evaluation name.
Returns:
str: The evaluation name.
"""
return 'exact_match'
|
Get the evaluation name.
Returns:
str: The evaluation name.
|
test_default_call
|
"""Test default model(`ERNIE-Bot`) call."""
chat = QianfanChatEndpoint()
response = chat(messages=[HumanMessage(content='Hello')])
assert isinstance(response, BaseMessage)
assert isinstance(response.content, str)
|
def test_default_call() ->None:
"""Test default model(`ERNIE-Bot`) call."""
chat = QianfanChatEndpoint()
response = chat(messages=[HumanMessage(content='Hello')])
assert isinstance(response, BaseMessage)
assert isinstance(response.content, str)
|
Test default model(`ERNIE-Bot`) call.
|
_import_pubmed_tool
|
from langchain_community.tools.pubmed.tool import PubmedQueryRun
return PubmedQueryRun
|
def _import_pubmed_tool() ->Any:
from langchain_community.tools.pubmed.tool import PubmedQueryRun
return PubmedQueryRun
| null |
__key
|
"""Compute cache key from prompt and associated model and settings.
Args:
prompt (str): The prompt run through the language model.
llm_string (str): The language model version and settings.
Returns:
str: The cache key.
"""
return _hash(prompt + llm_string)
|
def __key(self, prompt: str, llm_string: str) ->str:
"""Compute cache key from prompt and associated model and settings.
Args:
prompt (str): The prompt run through the language model.
llm_string (str): The language model version and settings.
Returns:
str: The cache key.
"""
return _hash(prompt + llm_string)
|
Compute cache key from prompt and associated model and settings.
Args:
prompt (str): The prompt run through the language model.
llm_string (str): The language model version and settings.
Returns:
str: The cache key.
|
on_chain_end
|
"""Run when chain ends running."""
self.step += 1
self.chain_ends += 1
self.ends += 1
resp: Dict[str, Any] = {}
chain_output = ','.join([f'{k}={v}' for k, v in outputs.items()])
resp.update({'action': 'on_chain_end', 'outputs': chain_output})
resp.update(self.get_custom_callback_meta())
self.deck.append(self.markdown_renderer().to_html('### Chain End'))
self.deck.append(self.table_renderer().to_html(self.pandas.DataFrame([resp]
)) + '\n')
|
def on_chain_end(self, outputs: Dict[str, Any], **kwargs: Any) ->None:
"""Run when chain ends running."""
self.step += 1
self.chain_ends += 1
self.ends += 1
resp: Dict[str, Any] = {}
chain_output = ','.join([f'{k}={v}' for k, v in outputs.items()])
resp.update({'action': 'on_chain_end', 'outputs': chain_output})
resp.update(self.get_custom_callback_meta())
self.deck.append(self.markdown_renderer().to_html('### Chain End'))
self.deck.append(self.table_renderer().to_html(self.pandas.DataFrame([
resp])) + '\n')
|
Run when chain ends running.
|
ddg_installed
|
try:
from duckduckgo_search import DDGS
return True
except Exception as e:
print(f'duckduckgo not installed, skipping test {e}')
return False
|
def ddg_installed() ->bool:
try:
from duckduckgo_search import DDGS
return True
except Exception as e:
print(f'duckduckgo not installed, skipping test {e}')
return False
| null |
_run_llm_or_chain
|
"""
Run the Chain or language model synchronously.
Args:
example: The example to run.
llm_or_chain_factory: The Chain or language model constructor to run.
tags: Optional tags to add to the run.
callbacks: Optional callbacks to use during the run.
Returns:
Union[List[dict], List[str], List[LLMResult], List[ChatResult]]:
The outputs of the model or chain.
"""
chain_or_llm = 'LLM' if isinstance(llm_or_chain_factory, BaseLanguageModel
) else 'Chain'
result = None
try:
if isinstance(llm_or_chain_factory, BaseLanguageModel):
output: Any = _run_llm(llm_or_chain_factory, example.inputs, config
['callbacks'], tags=config['tags'], input_mapper=input_mapper)
else:
chain = llm_or_chain_factory()
output = _run_chain(chain, example.inputs, config['callbacks'],
tags=config['tags'], input_mapper=input_mapper)
result = output
except Exception as e:
error_type = type(e).__name__
logger.warning(
f"""{chain_or_llm} failed for example {example.id} with inputs {example.inputs}
Error Type: {error_type}, Message: {e}"""
)
result = EvalError(Error=e)
return result
|
def _run_llm_or_chain(example: Example, config: RunnableConfig, *,
llm_or_chain_factory: MCF, input_mapper: Optional[Callable[[Dict], Any]
]=None) ->Union[dict, str, LLMResult, ChatResult]:
"""
Run the Chain or language model synchronously.
Args:
example: The example to run.
llm_or_chain_factory: The Chain or language model constructor to run.
tags: Optional tags to add to the run.
callbacks: Optional callbacks to use during the run.
Returns:
Union[List[dict], List[str], List[LLMResult], List[ChatResult]]:
The outputs of the model or chain.
"""
chain_or_llm = 'LLM' if isinstance(llm_or_chain_factory, BaseLanguageModel
) else 'Chain'
result = None
try:
if isinstance(llm_or_chain_factory, BaseLanguageModel):
output: Any = _run_llm(llm_or_chain_factory, example.inputs,
config['callbacks'], tags=config['tags'], input_mapper=
input_mapper)
else:
chain = llm_or_chain_factory()
output = _run_chain(chain, example.inputs, config['callbacks'],
tags=config['tags'], input_mapper=input_mapper)
result = output
except Exception as e:
error_type = type(e).__name__
logger.warning(
f"""{chain_or_llm} failed for example {example.id} with inputs {example.inputs}
Error Type: {error_type}, Message: {e}"""
)
result = EvalError(Error=e)
return result
|
Run the Chain or language model synchronously.
Args:
example: The example to run.
llm_or_chain_factory: The Chain or language model constructor to run.
tags: Optional tags to add to the run.
callbacks: Optional callbacks to use during the run.
Returns:
Union[List[dict], List[str], List[LLMResult], List[ChatResult]]:
The outputs of the model or chain.
|
_generate_command_string
|
output = f'{tool.name}: {tool.description}'
output += f', args json schema: {json.dumps(tool.args)}'
return output
|
def _generate_command_string(self, tool: BaseTool) ->str:
output = f'{tool.name}: {tool.description}'
output += f', args json schema: {json.dumps(tool.args)}'
return output
| null |
get
|
query = f"""
SELECT value
FROM {self.full_table_name}
WHERE key = ?
"""
cursor = self.conn.execute(query, (key,))
result = cursor.fetchone()
if result is not None:
value = result[0]
return value
return default
|
def get(self, key: str, default: Optional[str]=None) ->Optional[str]:
query = f"""
SELECT value
FROM {self.full_table_name}
WHERE key = ?
"""
cursor = self.conn.execute(query, (key,))
result = cursor.fetchone()
if result is not None:
value = result[0]
return value
return default
| null |
test_add_ai_message
|
zep_chat.add_ai_message('test message')
zep_chat.zep_client.memory.add_memory.assert_called_once()
|
@pytest.mark.requires('zep_python')
def test_add_ai_message(mocker: MockerFixture, zep_chat: ZepChatMessageHistory
) ->None:
zep_chat.add_ai_message('test message')
zep_chat.zep_client.memory.add_memory.assert_called_once()
| null |
_generate_outputs
|
"""Generate the expected output structure."""
return [grpcclient.InferRequestedOutput('text_output')]
|
def _generate_outputs(self) ->List[grpcclient.InferRequestedOutput]:
"""Generate the expected output structure."""
return [grpcclient.InferRequestedOutput('text_output')]
|
Generate the expected output structure.
|
_import_bananadev
|
from langchain_community.llms.bananadev import Banana
return Banana
|
def _import_bananadev() ->Any:
from langchain_community.llms.bananadev import Banana
return Banana
| null |
lazy_load
|
"""Lazy load records from dataframe."""
for row in self.data_frame.iter_rows(named=True):
text = row[self.page_content_column]
row.pop(self.page_content_column)
yield Document(page_content=text, metadata=row)
|
def lazy_load(self) ->Iterator[Document]:
    """Lazily yield one Document per dataframe row.

    The configured page-content column becomes the document text; all
    remaining columns become the metadata.
    """
    for record in self.data_frame.iter_rows(named=True):
        content = record.pop(self.page_content_column)
        yield Document(page_content=content, metadata=record)
|
Lazy load records from dataframe.
|
test_openweathermap_api_wrapper
|
"""Test that OpenWeatherMapAPIWrapper returns correct data for London, GB."""
weather = OpenWeatherMapAPIWrapper()
weather_data = weather.run('London,GB')
assert weather_data is not None
assert 'London' in weather_data
assert 'GB' in weather_data
assert 'Detailed status:' in weather_data
assert 'Wind speed:' in weather_data
assert 'direction:' in weather_data
assert 'Humidity:' in weather_data
assert 'Temperature:' in weather_data
assert 'Current:' in weather_data
assert 'High:' in weather_data
assert 'Low:' in weather_data
assert 'Feels like:' in weather_data
assert 'Rain:' in weather_data
assert 'Heat index:' in weather_data
assert 'Cloud cover:' in weather_data
|
def test_openweathermap_api_wrapper() ->None:
    """OpenWeatherMapAPIWrapper should return a full report for London, GB."""
    report = OpenWeatherMapAPIWrapper().run('London,GB')
    assert report is not None
    expected_fragments = ('London', 'GB', 'Detailed status:', 'Wind speed:',
        'direction:', 'Humidity:', 'Temperature:', 'Current:', 'High:',
        'Low:', 'Feels like:', 'Rain:', 'Heat index:', 'Cloud cover:')
    for fragment in expected_fragments:
        assert fragment in report
|
Test that OpenWeatherMapAPIWrapper returns correct data for London, GB.
|
_import_human_tool
|
from langchain_community.tools.human.tool import HumanInputRun
return HumanInputRun
|
def _import_human_tool() ->Any:
    """Lazily import the HumanInputRun tool to avoid a hard dependency."""
    from langchain_community.tools.human.tool import HumanInputRun as tool_cls
    return tool_cls
| null |
clear
|
self.messages = []
|
def clear(self) ->None:
    """Drop every stored message by rebinding to a fresh, empty list."""
    self.messages = list()
| null |
on_retriever_error
|
self.on_retriever_error_common()
|
def on_retriever_error(self, *args: Any, **kwargs: Any) ->Any:
    """Record a retriever error by delegating to the shared common handler."""
    handler = self.on_retriever_error_common
    handler()
| null |
test_memory_with_message_store
|
"""Test the memory with a message store."""
message_history = ElasticsearchChatMessageHistory(**
elasticsearch_connection, index=index_name, session_id='test-session')
memory = ConversationBufferMemory(memory_key='baz', chat_memory=
message_history, return_messages=True)
memory.chat_memory.add_ai_message('This is me, the AI')
memory.chat_memory.add_user_message('This is me, the human')
messages = memory.chat_memory.messages
messages_json = json.dumps([message_to_dict(msg) for msg in messages])
assert 'This is me, the AI' in messages_json
assert 'This is me, the human' in messages_json
memory.chat_memory.clear()
assert memory.chat_memory.messages == []
|
def test_memory_with_message_store(self, elasticsearch_connection: dict,
    index_name: str) ->None:
    """Messages added via memory must round-trip through Elasticsearch."""
    history = ElasticsearchChatMessageHistory(**elasticsearch_connection,
        index=index_name, session_id='test-session')
    memory = ConversationBufferMemory(memory_key='baz', chat_memory=
        history, return_messages=True)
    memory.chat_memory.add_ai_message('This is me, the AI')
    memory.chat_memory.add_user_message('This is me, the human')
    dumped = json.dumps([message_to_dict(msg) for msg in memory.
        chat_memory.messages])
    for expected in ('This is me, the AI', 'This is me, the human'):
        assert expected in dumped
    memory.chat_memory.clear()
    assert memory.chat_memory.messages == []
|
Test the memory with a message store.
|
fn
|
if url.endswith('/processing/upload'):
return FakeUploadResponse()
elif url.endswith('/processing/push'):
return FakePushResponse()
else:
raise Exception('Invalid POST URL')
|
def fn(url: str, **kwargs: Any) ->Any:
    """Dispatch a fake POST response based on the URL suffix."""
    if url.endswith('/processing/upload'):
        return FakeUploadResponse()
    if url.endswith('/processing/push'):
        return FakePushResponse()
    raise Exception('Invalid POST URL')
| null |
get_tools
|
"""Get the tools in the toolkit."""
|
@abstractmethod
def get_tools(self) ->List[BaseTool]:
    """Get the tools in the toolkit.

    Returns:
        The tools that make up this toolkit; concrete subclasses must
        implement this.
    """
|
Get the tools in the toolkit.
|
validate_input_variables
|
dummy_inputs = {input_variable: 'foo' for input_variable in input_variables}
super().format(format_string, **dummy_inputs)
|
def validate_input_variables(self, format_string: str, input_variables:
    List[str]) ->None:
    """Check that ``format_string`` is formattable with ``input_variables``.

    Formats the template with placeholder values so any missing or
    unexpected variable raises immediately.
    """
    placeholder_args = {name: 'foo' for name in input_variables}
    super().format(format_string, **placeholder_args)
| null |
load
|
"""Load toots into documents."""
results: List[Document] = []
for account in self.mastodon_accounts:
user = self.api.account_lookup(account)
toots = self.api.account_statuses(user.id, only_media=False, pinned=
False, exclude_replies=self.exclude_replies, exclude_reblogs=True,
limit=self.number_toots)
docs = self._format_toots(toots, user)
results.extend(docs)
return results
|
def load(self) ->List[Document]:
    """Fetch toots for every configured account and return them as Documents."""
    documents: List[Document] = []
    for account in self.mastodon_accounts:
        user = self.api.account_lookup(account)
        statuses = self.api.account_statuses(user.id, only_media=False,
            pinned=False, exclude_replies=self.exclude_replies,
            exclude_reblogs=True, limit=self.number_toots)
        documents.extend(self._format_toots(statuses, user))
    return documents
|
Load toots into documents.
|
__str__
|
"""Get a string representation of the object for printing."""
cls_name = f'\x1b[1m{self.__class__.__name__}\x1b[0m'
return f"""{cls_name}
Params: {self._identifying_params}"""
|
def __str__(self) ->str:
    """Get a string representation of the object for printing."""
    bold_name = '\x1b[1m' + self.__class__.__name__ + '\x1b[0m'
    return '\n'.join([bold_name, f'Params: {self._identifying_params}'])
|
Get a string representation of the object for printing.
|
test_tiledb_mmr
|
texts = ['foo', 'foo', 'fou', 'foy']
docsearch = TileDB.from_texts(texts=texts, embedding=
ConsistentFakeEmbeddings(), index_uri=f'{str(tmp_path)}/flat',
index_type='FLAT')
query_vec = ConsistentFakeEmbeddings().embed_query(text='foo')
output = docsearch.max_marginal_relevance_search_with_score_by_vector(query_vec
, k=3, lambda_mult=0.1)
assert output[0][0] == Document(page_content='foo')
assert output[0][1] == 0.0
assert output[1][0] != Document(page_content='foo')
assert output[2][0] != Document(page_content='foo')
docsearch = TileDB.from_texts(texts=texts, embedding=
ConsistentFakeEmbeddings(), index_uri=f'{str(tmp_path)}/ivf_flat',
index_type='IVF_FLAT')
query_vec = ConsistentFakeEmbeddings().embed_query(text='foo')
output = docsearch.max_marginal_relevance_search_with_score_by_vector(query_vec
, k=3, lambda_mult=0.1, nprobe=docsearch.vector_index.partitions)
assert output[0][0] == Document(page_content='foo')
assert output[0][1] == 0.0
assert output[1][0] != Document(page_content='foo')
assert output[2][0] != Document(page_content='foo')
|
@pytest.mark.requires('tiledb-vector-search')
def test_tiledb_mmr(tmp_path: Path) ->None:
    """MMR search ranks the exact match first for both TileDB index types."""
    corpus = ['foo', 'foo', 'fou', 'foy']
    for index_type in ('FLAT', 'IVF_FLAT'):
        docsearch = TileDB.from_texts(texts=corpus, embedding=
            ConsistentFakeEmbeddings(), index_uri=
            f'{str(tmp_path)}/{index_type.lower()}', index_type=index_type)
        query_vec = ConsistentFakeEmbeddings().embed_query(text='foo')
        # IVF_FLAT additionally needs an nprobe covering all partitions.
        search_kwargs = {}
        if index_type == 'IVF_FLAT':
            search_kwargs['nprobe'] = docsearch.vector_index.partitions
        output = docsearch.max_marginal_relevance_search_with_score_by_vector(
            query_vec, k=3, lambda_mult=0.1, **search_kwargs)
        assert output[0][0] == Document(page_content='foo')
        assert output[0][1] == 0.0
        assert output[1][0] != Document(page_content='foo')
        assert output[2][0] != Document(page_content='foo')
| null |
_get_invocation_params
|
"""Get the parameters used to invoke the model FOR THE CALLBACKS."""
return {**self._default_params, **super()._get_invocation_params(stop=stop,
**kwargs)}
|
def _get_invocation_params(self, stop: Optional[List[str]]=None, **kwargs: Any
    ) ->Dict[str, Any]:
    """Merge this model's default params into the base invocation params
    reported to callbacks."""
    base_params = super()._get_invocation_params(stop=stop, **kwargs)
    return {**self._default_params, **base_params}
|
Get the parameters used to invoke the model FOR THE CALLBACKS.
|
input_keys
|
"""Return the singular input key.
:meta private:
"""
return [self.input_key]
|
@property
def input_keys(self) ->List[str]:
    """Expose the single expected input key.
    :meta private:
    """
    return list((self.input_key,))
|
Return the singular input key.
:meta private:
|
build_extra
|
"""Build extra kwargs from additional params that were passed in."""
all_required_field_names = get_pydantic_field_names(cls)
extra = values.get('model_kwargs', {})
values['model_kwargs'] = build_extra_kwargs(extra, values,
all_required_field_names)
return values
|
@root_validator(pre=True)
def build_extra(cls, values: Dict[str, Any]) ->Dict[str, Any]:
    """Collect unrecognised init params into ``model_kwargs``."""
    known_fields = get_pydantic_field_names(cls)
    existing_extra = values.get('model_kwargs', {})
    values['model_kwargs'] = build_extra_kwargs(existing_extra, values,
        known_fields)
    return values
|
Build extra kwargs from additional params that were passed in.
|
from_llm
|
"""Construct the chain from an LLM."""
llm_question_chain = LLMChain(llm=llm, prompt=question_prompt)
llm_combine_chain = LLMChain(llm=llm, prompt=combine_prompt)
combine_results_chain = StuffDocumentsChain(llm_chain=llm_combine_chain,
document_prompt=document_prompt, document_variable_name='summaries')
reduce_documents_chain = ReduceDocumentsChain(combine_documents_chain=
combine_results_chain)
combine_documents_chain = MapReduceDocumentsChain(llm_chain=
llm_question_chain, reduce_documents_chain=reduce_documents_chain,
document_variable_name='context')
return cls(combine_documents_chain=combine_documents_chain, **kwargs)
|
@classmethod
def from_llm(cls, llm: BaseLanguageModel, document_prompt:
    BasePromptTemplate=EXAMPLE_PROMPT, question_prompt: BasePromptTemplate=
    QUESTION_PROMPT, combine_prompt: BasePromptTemplate=COMBINE_PROMPT, **
    kwargs: Any) ->BaseQAWithSourcesChain:
    """Construct the chain from an LLM.

    Wires a map step (per-document question answering) into a reduce step
    that combines the per-document summaries with sources.
    """
    question_chain = LLMChain(llm=llm, prompt=question_prompt)
    combine_chain = LLMChain(llm=llm, prompt=combine_prompt)
    stuff_chain = StuffDocumentsChain(llm_chain=combine_chain,
        document_prompt=document_prompt, document_variable_name='summaries')
    reduce_chain = ReduceDocumentsChain(combine_documents_chain=stuff_chain)
    map_reduce_chain = MapReduceDocumentsChain(llm_chain=question_chain,
        reduce_documents_chain=reduce_chain, document_variable_name='context')
    return cls(combine_documents_chain=map_reduce_chain, **kwargs)
|
Construct the chain from an LLM.
|
accept
|
"""Accept a visitor.
Args:
visitor: visitor to accept
Returns:
result of visiting
"""
return getattr(visitor, f'visit_{_to_snake_case(self.__class__.__name__)}')(
self)
|
def accept(self, visitor: Visitor) ->Any:
    """Dispatch to the visitor method named after this class (snake_case).

    Args:
        visitor: visitor to accept
    Returns:
        result of visiting
    """
    method_name = f'visit_{_to_snake_case(self.__class__.__name__)}'
    return getattr(visitor, method_name)(self)
|
Accept a visitor.
Args:
visitor: visitor to accept
Returns:
result of visiting
|
mock_collection
|
from zep_python.document import DocumentCollection
mock_collection: DocumentCollection = mocker.patch(
'zep_python.document.collections.DocumentCollection', autospec=True)
mock_collection.search.return_value = copy.deepcopy(search_results)
mock_collection.asearch.return_value = copy.deepcopy(search_results)
temp_value = copy.deepcopy(search_results_with_query_embedding)
mock_collection.search_return_query_vector.return_value = copy.deepcopy(
temp_value)
mock_collection.asearch_return_query_vector.return_value = copy.deepcopy(
temp_value)
mock_collection.name = mock_collection_config.name
mock_collection.is_auto_embedded = mock_collection_config.is_auto_embedded
mock_collection.embedding_dimensions = (mock_collection_config.
embedding_dimensions)
return mock_collection
|
@pytest.fixture
@pytest.mark.requires('zep_python')
def mock_collection(mocker: MockerFixture, mock_collection_config:
    CollectionConfig, search_results: List[Document],
    search_results_with_query_embedding: Tuple[List[Document], List[float]]
    ) ->'DocumentCollection':
    """Build a mocked Zep DocumentCollection preloaded with canned results."""
    # NOTE(review): pytest marks applied to fixtures are ignored by pytest,
    # so the `requires` mark here likely has no effect — confirm.
    from zep_python.document import DocumentCollection
    collection: DocumentCollection = mocker.patch(
        'zep_python.document.collections.DocumentCollection', autospec=True)
    collection.search.return_value = copy.deepcopy(search_results)
    collection.asearch.return_value = copy.deepcopy(search_results)
    vector_results = copy.deepcopy(search_results_with_query_embedding)
    collection.search_return_query_vector.return_value = copy.deepcopy(
        vector_results)
    collection.asearch_return_query_vector.return_value = copy.deepcopy(
        vector_results)
    collection.name = mock_collection_config.name
    collection.is_auto_embedded = mock_collection_config.is_auto_embedded
    collection.embedding_dimensions = (mock_collection_config.
        embedding_dimensions)
    return collection
| null |
test_parse_with_language_and_spaces
|
llm_output = """I can use the `foo` tool to achieve the goal.
Action:
```json
{
"action": "foo",
"action_input": "bar"
}
```
"""
action, action_input = get_action_and_input(llm_output)
assert action == 'foo'
assert action_input == 'bar'
|
def test_parse_with_language_and_spaces() ->None:
    # The parser should extract (action, action_input) from a ```json fenced
    # block even when prose precedes it.
    llm_output = """I can use the `foo` tool to achieve the goal.
    Action:
    ```json
    {
    "action": "foo",
    "action_input": "bar"
    }
    ```
    """
    action, action_input = get_action_and_input(llm_output)
    assert action == 'foo'
    assert action_input == 'bar'
| null |
test_run_query
|
"""Test that run gives the correct answer."""
search = api_client.run(query='university', sort='relevance', time_filter=
'all', subreddit='funny', limit=5)
assert 'University' in search
|
@pytest.mark.requires('praw')
def test_run_query(api_client: RedditSearchAPIWrapper) ->None:
    """Searching r/funny for 'university' should surface matching posts."""
    result = api_client.run(query='university', sort='relevance',
        time_filter='all', subreddit='funny', limit=5)
    assert 'University' in result
|
Test that run gives the correct answer.
|
verify_version
|
"""
Check if the connected Neo4j database version supports vector indexing.
Queries the Neo4j database to retrieve its version and compares it
against a target version (5.11.0) that is known to support vector
indexing. Raises a ValueError if the connected Neo4j version is
not supported.
"""
version = self.query('CALL dbms.components()')[0]['versions'][0]
if 'aura' in version:
version_tuple = tuple(map(int, version.split('-')[0].split('.'))) + (0,)
else:
version_tuple = tuple(map(int, version.split('.')))
target_version = 5, 11, 0
if version_tuple < target_version:
raise ValueError(
'Version index is only supported in Neo4j version 5.11 or greater')
|
def verify_version(self) ->None:
    """Ensure the connected Neo4j server supports vector indexing (>= 5.11).

    Fetches the server version via ``dbms.components()`` and raises a
    ValueError when it is older than 5.11.0.  Aura versions (e.g.
    ``5.11-aura``) carry no patch component, so a zero patch is appended
    before comparing.
    """
    version = self.query('CALL dbms.components()')[0]['versions'][0]
    if 'aura' in version:
        numeric_part = version.split('-')[0]
        version_tuple = tuple(int(piece) for piece in numeric_part.split('.')
            ) + (0,)
    else:
        version_tuple = tuple(int(piece) for piece in version.split('.'))
    if version_tuple < (5, 11, 0):
        raise ValueError(
            'Version index is only supported in Neo4j version 5.11 or greater')
|
Check if the connected Neo4j database version supports vector indexing.
Queries the Neo4j database to retrieve its version and compares it
against a target version (5.11.0) that is known to support vector
indexing. Raises a ValueError if the connected Neo4j version is
not supported.
|
test_using_custom_config_specs
|
"""Test that we can configure which keys should be passed to the session factory."""
def _fake_llm(input: Dict[str, Any]) ->List[BaseMessage]:
messages = input['messages']
return [AIMessage(content='you said: ' + '\n'.join(str(m.content) for m in
messages if isinstance(m, HumanMessage)))]
runnable = RunnableLambda(_fake_llm)
store = {}
def get_session_history(user_id: str, conversation_id: str
) ->ChatMessageHistory:
if (user_id, conversation_id) not in store:
store[user_id, conversation_id] = ChatMessageHistory()
return store[user_id, conversation_id]
with_message_history = RunnableWithMessageHistory(runnable,
get_session_history=get_session_history, input_messages_key='messages',
history_messages_key='history', history_factory_config=[
ConfigurableFieldSpec(id='user_id', annotation=str, name='User ID',
description='Unique identifier for the user.', default='', is_shared=
True), ConfigurableFieldSpec(id='conversation_id', annotation=str, name
='Conversation ID', description=
'Unique identifier for the conversation.', default=None, is_shared=True)])
result = with_message_history.invoke({'messages': [HumanMessage(content=
'hello')]}, {'configurable': {'user_id': 'user1', 'conversation_id': '1'}})
assert result == [AIMessage(content='you said: hello')]
assert store == {('user1', '1'): ChatMessageHistory(messages=[HumanMessage(
content='hello'), AIMessage(content='you said: hello')])}
result = with_message_history.invoke({'messages': [HumanMessage(content=
'goodbye')]}, {'configurable': {'user_id': 'user1', 'conversation_id':
'1'}})
assert result == [AIMessage(content='you said: goodbye')]
assert store == {('user1', '1'): ChatMessageHistory(messages=[HumanMessage(
content='hello'), AIMessage(content='you said: hello'), HumanMessage(
content='goodbye'), AIMessage(content='you said: goodbye')])}
result = with_message_history.invoke({'messages': [HumanMessage(content=
'meow')]}, {'configurable': {'user_id': 'user2', 'conversation_id': '1'}})
assert result == [AIMessage(content='you said: meow')]
assert store == {('user1', '1'): ChatMessageHistory(messages=[HumanMessage(
content='hello'), AIMessage(content='you said: hello'), HumanMessage(
content='goodbye'), AIMessage(content='you said: goodbye')]), ('user2',
'1'): ChatMessageHistory(messages=[HumanMessage(content='meow'),
AIMessage(content='you said: meow')])}
|
def test_using_custom_config_specs() ->None:
    """Test that we can configure which keys should be passed to the session factory."""

    def _echo_llm(payload: Dict[str, Any]) ->List[BaseMessage]:
        # Echo back every human message from the *current* input.
        human_text = '\n'.join(str(m.content) for m in payload['messages'] if
            isinstance(m, HumanMessage))
        return [AIMessage(content='you said: ' + human_text)]

    histories = {}

    def _history_for(user_id: str, conversation_id: str) ->ChatMessageHistory:
        key = user_id, conversation_id
        if key not in histories:
            histories[key] = ChatMessageHistory()
        return histories[key]

    chain = RunnableWithMessageHistory(RunnableLambda(_echo_llm),
        get_session_history=_history_for, input_messages_key='messages',
        history_messages_key='history', history_factory_config=[
        ConfigurableFieldSpec(id='user_id', annotation=str, name='User ID',
        description='Unique identifier for the user.', default='',
        is_shared=True), ConfigurableFieldSpec(id='conversation_id',
        annotation=str, name='Conversation ID', description=
        'Unique identifier for the conversation.', default=None,
        is_shared=True)])

    def _send(user: str, conversation: str, text: str) ->List[BaseMessage]:
        return chain.invoke({'messages': [HumanMessage(content=text)]}, {
            'configurable': {'user_id': user, 'conversation_id':
            conversation}})

    assert _send('user1', '1', 'hello') == [AIMessage(content=
        'you said: hello')]
    convo_1 = [HumanMessage(content='hello'), AIMessage(content=
        'you said: hello')]
    assert histories == {('user1', '1'): ChatMessageHistory(messages=list(
        convo_1))}
    assert _send('user1', '1', 'goodbye') == [AIMessage(content=
        'you said: goodbye')]
    convo_1 += [HumanMessage(content='goodbye'), AIMessage(content=
        'you said: goodbye')]
    assert histories == {('user1', '1'): ChatMessageHistory(messages=list(
        convo_1))}
    assert _send('user2', '1', 'meow') == [AIMessage(content='you said: meow')]
    convo_2 = [HumanMessage(content='meow'), AIMessage(content=
        'you said: meow')]
    assert histories == {('user1', '1'): ChatMessageHistory(messages=list(
        convo_1)), ('user2', '1'): ChatMessageHistory(messages=list(convo_2))}
|
Test that we can configure which keys should be passed to the session factory.
|
embeddings
|
return self._embedding_function
|
@property
def embeddings(self) ->Optional[Embeddings]:
    """The embedding function backing this vector store, or ``None``."""
    embedding_function = self._embedding_function
    return embedding_function
| null |
build_extra
|
"""Build extra kwargs from additional params that were passed in."""
all_required_field_names = {field.alias for field in cls.__fields__.values()}
input = values.pop('input', {})
if input:
logger.warning(
'Init param `input` is deprecated, please use `model_kwargs` instead.')
extra = {**values.pop('model_kwargs', {}), **input}
for field_name in list(values):
if field_name not in all_required_field_names:
if field_name in extra:
raise ValueError(f'Found {field_name} supplied twice.')
logger.warning(
f"""{field_name} was transferred to model_kwargs.
Please confirm that {field_name} is what you intended."""
)
extra[field_name] = values.pop(field_name)
values['model_kwargs'] = extra
return values
|
@root_validator(pre=True)
def build_extra(cls, values: Dict[str, Any]) ->Dict[str, Any]:
    """Fold unknown init params (and the deprecated ``input``) into
    ``model_kwargs``, warning about each transfer."""
    known_aliases = {field.alias for field in cls.__fields__.values()}
    input = values.pop('input', {})
    if input:
        logger.warning(
            'Init param `input` is deprecated, please use `model_kwargs` instead.'
            )
    extra = {**values.pop('model_kwargs', {}), **input}
    for field_name in list(values):
        if field_name in known_aliases:
            continue
        if field_name in extra:
            raise ValueError(f'Found {field_name} supplied twice.')
        logger.warning(
            f"""{field_name} was transferred to model_kwargs.
                    Please confirm that {field_name} is what you intended."""
            )
        extra[field_name] = values.pop(field_name)
    values['model_kwargs'] = extra
    return values
|
Build extra kwargs from additional params that were passed in.
|
_process_response
|
text = response['output']['text']
if stop:
text = enforce_stop_tokens(text, stop)
return text
|
@staticmethod
def _process_response(response: Any, stop: Optional[List[str]]) ->str:
    """Extract the generated text, truncating at stop tokens if given."""
    text = response['output']['text']
    return enforce_stop_tokens(text, stop) if stop else text
| null |
test_indexing_same_content
|
"""Indexing some content to confirm it gets added only once."""
loader = ToyLoader(documents=[Document(page_content=
'This is a test document.'), Document(page_content=
'This is another document.')])
assert index(loader, record_manager, vector_store) == {'num_added': 2,
'num_deleted': 0, 'num_skipped': 0, 'num_updated': 0}
assert len(list(vector_store.store)) == 2
for _ in range(2):
assert index(loader, record_manager, vector_store) == {'num_added': 0,
'num_deleted': 0, 'num_skipped': 2, 'num_updated': 0}
|
def test_indexing_same_content(record_manager: SQLRecordManager,
    vector_store: InMemoryVectorStore) ->None:
    """Re-indexing identical documents must skip them instead of re-adding."""
    loader = ToyLoader(documents=[Document(page_content=
        'This is a test document.'), Document(page_content=
        'This is another document.')])
    first_pass = index(loader, record_manager, vector_store)
    assert first_pass == {'num_added': 2, 'num_deleted': 0, 'num_skipped':
        0, 'num_updated': 0}
    assert len(list(vector_store.store)) == 2
    repeat_result = {'num_added': 0, 'num_deleted': 0, 'num_skipped': 2,
        'num_updated': 0}
    for _ in range(2):
        assert index(loader, record_manager, vector_store) == repeat_result
|
Indexing some content to confirm it gets added only once.
|
max_marginal_relevance_search_by_vector
|
"""Return docs selected using the maximal marginal relevance.
Maximal marginal relevance optimizes for similarity to query AND diversity
among selected documents.
Args:
embedding: Embedding to look up documents similar to.
k: Number of Documents to return. Defaults to 4.
fetch_k: Number of Documents to fetch to pass to MMR algorithm.
lambda_mult: Number between 0 and 1 that determines the degree
of diversity among the results with 0 corresponding
to maximum diversity and 1 to minimum diversity.
Defaults to 0.5.
Returns:
List of Documents selected by maximal marginal relevance.
"""
if namespace is None:
namespace = self._namespace
results = self._index.query([embedding], top_k=fetch_k, include_values=True,
include_metadata=True, namespace=namespace, filter=filter)
mmr_selected = maximal_marginal_relevance(np.array([embedding], dtype=np.
float32), [item['values'] for item in results['matches']], k=k,
lambda_mult=lambda_mult)
selected = [results['matches'][i]['metadata'] for i in mmr_selected]
return [Document(page_content=metadata.pop(self._text_key), metadata=
metadata) for metadata in selected]
|
def max_marginal_relevance_search_by_vector(self, embedding: List[float], k:
    int=4, fetch_k: int=20, lambda_mult: float=0.5, filter: Optional[dict]=
    None, namespace: Optional[str]=None, **kwargs: Any) ->List[Document]:
    """Return docs selected using the maximal marginal relevance.

    MMR balances similarity to the query against diversity among the
    selected documents.

    Args:
        embedding: Embedding to look up documents similar to.
        k: Number of Documents to return. Defaults to 4.
        fetch_k: Number of Documents to fetch to pass to MMR algorithm.
        lambda_mult: Number between 0 and 1; 0 favours maximum diversity,
            1 favours minimum diversity. Defaults to 0.5.
        filter: Optional metadata filter passed to the index query.
        namespace: Index namespace; falls back to the store default.
    Returns:
        List of Documents selected by maximal marginal relevance.
    """
    if namespace is None:
        namespace = self._namespace
    query_response = self._index.query([embedding], top_k=fetch_k,
        include_values=True, include_metadata=True, namespace=namespace,
        filter=filter)
    matches = query_response['matches']
    chosen_indices = maximal_marginal_relevance(np.array([embedding],
        dtype=np.float32), [match['values'] for match in matches], k=k,
        lambda_mult=lambda_mult)
    docs = []
    for idx in chosen_indices:
        metadata = matches[idx]['metadata']
        docs.append(Document(page_content=metadata.pop(self._text_key),
            metadata=metadata))
    return docs
|
Return docs selected using the maximal marginal relevance.
Maximal marginal relevance optimizes for similarity to query AND diversity
among selected documents.
Args:
embedding: Embedding to look up documents similar to.
k: Number of Documents to return. Defaults to 4.
fetch_k: Number of Documents to fetch to pass to MMR algorithm.
lambda_mult: Number between 0 and 1 that determines the degree
of diversity among the results with 0 corresponding
to maximum diversity and 1 to minimum diversity.
Defaults to 0.5.
Returns:
List of Documents selected by maximal marginal relevance.
|
test_default_ollama_functions
|
base_model = OllamaFunctions(model='mistral')
self.assertIsInstance(base_model.model, ChatOllama)
model = base_model.bind(functions=[{'name': 'get_current_weather',
'description': 'Get the current weather in a given location',
'parameters': {'type': 'object', 'properties': {'location': {'type':
'string', 'description': 'The city and state, e.g. San Francisco, CA'},
'unit': {'type': 'string', 'enum': ['celsius', 'fahrenheit']}},
'required': ['location']}}], function_call={'name': 'get_current_weather'})
res = model.invoke("What's the weather in San Francisco?")
function_call = res.additional_kwargs.get('function_call')
assert function_call
self.assertEqual(function_call.get('name'), 'get_current_weather')
|
def test_default_ollama_functions(self) ->None:
    """Binding a function schema should yield a matching function_call."""
    base_model = OllamaFunctions(model='mistral')
    self.assertIsInstance(base_model.model, ChatOllama)
    weather_schema = {'name': 'get_current_weather', 'description':
        'Get the current weather in a given location', 'parameters': {
        'type': 'object', 'properties': {'location': {'type': 'string',
        'description': 'The city and state, e.g. San Francisco, CA'},
        'unit': {'type': 'string', 'enum': ['celsius', 'fahrenheit']}},
        'required': ['location']}}
    bound = base_model.bind(functions=[weather_schema], function_call={
        'name': 'get_current_weather'})
    response = bound.invoke("What's the weather in San Francisco?")
    function_call = response.additional_kwargs.get('function_call')
    assert function_call
    self.assertEqual(function_call.get('name'), 'get_current_weather')
| null |
_texts_to_documents
|
"""Return list of Documents from list of texts and metadatas."""
if metadatas is None:
metadatas = repeat({})
docs = [Document(page_content=text, metadata=metadata) for text, metadata in
zip(texts, metadatas)]
return docs
|
@staticmethod
def _texts_to_documents(texts: Iterable[str], metadatas: Optional[Iterable[
    Dict[Any, Any]]]=None) ->List[Document]:
    """Pair each text with its metadata (empty when none) as Documents."""
    metadata_iter = repeat({}) if metadatas is None else metadatas
    return [Document(page_content=text, metadata=metadata) for text,
        metadata in zip(texts, metadata_iter)]
|
Return list of Documents from list of texts and metadatas.
|
completion_with_retry
|
"""Use tenacity to retry the completion call."""
retry_decorator = create_retry_decorator(llm, max_retries=llm.max_retries,
run_manager=run_manager)
@retry_decorator
def _completion_with_retry(prompt: LanguageModelInput, is_gemini: bool,
stream: bool, **kwargs: Any) ->Any:
generation_config = kwargs.get('generation_config', {})
if is_gemini:
return llm.client.generate_content(contents=prompt, stream=stream,
generation_config=generation_config)
return llm.client.generate_text(prompt=prompt, **kwargs)
return _completion_with_retry(prompt=prompt, is_gemini=is_gemini, stream=
stream, **kwargs)
|
def completion_with_retry(llm: GooglePalm, prompt: LanguageModelInput,
    is_gemini: bool=False, stream: bool=False, run_manager: Optional[
    CallbackManagerForLLMRun]=None, **kwargs: Any) ->Any:
    """Use tenacity to retry the completion call.

    Args:
        llm: Wrapper whose ``client`` performs the request and whose
            ``max_retries`` bounds the retry attempts.
        prompt: Input forwarded to the underlying client.
        is_gemini: When True, call ``generate_content`` (Gemini API);
            otherwise call the legacy ``generate_text`` (PaLM API).
        stream: Whether to request a streaming response (Gemini path only).
        run_manager: Optional callback manager passed to the retry decorator.
        **kwargs: Extra client arguments; on the Gemini path only
            ``generation_config`` is read.

    Returns:
        The raw client response (type depends on the API used).
    """
    retry_decorator = create_retry_decorator(llm, max_retries=llm.
        max_retries, run_manager=run_manager)
    @retry_decorator
    def _completion_with_retry(prompt: LanguageModelInput, is_gemini: bool,
        stream: bool, **kwargs: Any) ->Any:
        # Gemini and PaLM expose different client entry points; only the
        # Gemini call supports streaming and a generation_config.
        generation_config = kwargs.get('generation_config', {})
        if is_gemini:
            return llm.client.generate_content(contents=prompt, stream=
                stream, generation_config=generation_config)
        return llm.client.generate_text(prompt=prompt, **kwargs)
    return _completion_with_retry(prompt=prompt, is_gemini=is_gemini,
        stream=stream, **kwargs)
|
Use tenacity to retry the completion call.
|
__iter__
|
logger.debug('Initialising AgentExecutorIterator')
self.reset()
callback_manager = CallbackManager.configure(self.callbacks, self.
agent_executor.callbacks, self.agent_executor.verbose, self.tags, self.
agent_executor.tags, self.metadata, self.agent_executor.metadata)
run_manager = callback_manager.on_chain_start(dumpd(self.agent_executor),
self.inputs, name=self.run_name)
try:
while self.agent_executor._should_continue(self.iterations, self.
time_elapsed):
next_step_seq: NextStepOutput = []
for chunk in self.agent_executor._iter_next_step(self.
name_to_tool_map, self.color_mapping, self.inputs, self.
intermediate_steps, run_manager):
next_step_seq.append(chunk)
if self.yield_actions:
if isinstance(chunk, AgentAction):
yield AddableDict(actions=[chunk], messages=chunk.messages)
elif isinstance(chunk, AgentStep):
yield AddableDict(steps=[chunk], messages=chunk.messages)
next_step = self.agent_executor._consume_next_step(next_step_seq)
self.update_iterations()
output = self._process_next_step_output(next_step, run_manager)
is_final = 'intermediate_step' not in output
if not self.yield_actions or is_final:
yield output
if is_final:
return
except BaseException as e:
run_manager.on_chain_error(e)
raise
yield self._stop(run_manager)
|
def __iter__(self: 'AgentExecutorIterator') ->Iterator[AddableDict]:
    """Drive the wrapped AgentExecutor one step at a time, yielding output.

    Yields:
        AddableDict chunks for individual agent actions/steps (only when
        ``yield_actions`` is set) and for each consolidated step output.
        Iteration ends after the final answer, or via ``_stop`` when the
        executor's iteration/time limits are exhausted.
    """
    logger.debug('Initialising AgentExecutorIterator')
    self.reset()
    # Mirror the executor's callback wiring so nested runs are attributed
    # to the same chain run.
    callback_manager = CallbackManager.configure(self.callbacks, self.
        agent_executor.callbacks, self.agent_executor.verbose, self.tags,
        self.agent_executor.tags, self.metadata, self.agent_executor.metadata)
    run_manager = callback_manager.on_chain_start(dumpd(self.agent_executor
        ), self.inputs, name=self.run_name)
    try:
        # Loop until the executor's own iteration/time limits say stop.
        while self.agent_executor._should_continue(self.iterations, self.
            time_elapsed):
            next_step_seq: NextStepOutput = []
            for chunk in self.agent_executor._iter_next_step(self.
                name_to_tool_map, self.color_mapping, self.inputs, self.
                intermediate_steps, run_manager):
                next_step_seq.append(chunk)
                # Stream actions/steps eagerly when requested.
                if self.yield_actions:
                    if isinstance(chunk, AgentAction):
                        yield AddableDict(actions=[chunk], messages=chunk.
                            messages)
                    elif isinstance(chunk, AgentStep):
                        yield AddableDict(steps=[chunk], messages=chunk.
                            messages)
            # Consolidate the buffered chunks into one step result.
            next_step = self.agent_executor._consume_next_step(next_step_seq)
            self.update_iterations()
            output = self._process_next_step_output(next_step, run_manager)
            # Absence of 'intermediate_step' marks the final answer.
            is_final = 'intermediate_step' not in output
            if not self.yield_actions or is_final:
                yield output
            if is_final:
                return
    except BaseException as e:
        # Report the failure to callbacks, then re-raise unchanged.
        run_manager.on_chain_error(e)
        raise
    # Limits exhausted without a final answer: emit the stop output.
    yield self._stop(run_manager)
| null |
run_rnn
|
AVOID_REPEAT_TOKENS = []
AVOID_REPEAT = ',:?!'
for i in AVOID_REPEAT:
dd = self.pipeline.encode(i)
assert len(dd) == 1
AVOID_REPEAT_TOKENS += dd
tokens = [int(x) for x in _tokens]
self.model_tokens += tokens
out: Any = None
while len(tokens) > 0:
out, self.model_state = self.client.forward(tokens[:self.CHUNK_LEN],
self.model_state)
tokens = tokens[self.CHUNK_LEN:]
END_OF_LINE = 187
out[END_OF_LINE] += newline_adj
if self.model_tokens[-1] in AVOID_REPEAT_TOKENS:
out[self.model_tokens[-1]] = -999999999
return out
|
def run_rnn(self, _tokens: List[str], newline_adj: int=0) ->Any:
    """Feed tokens through the model in chunks and return adjusted logits.

    Appends the tokens to ``self.model_tokens``, threads ``self.model_state``
    through successive forward calls, bumps the end-of-line logit by
    ``newline_adj``, and heavily penalises repeating the last punctuation
    token.
    """
    # Token ids for punctuation whose immediate repetition is suppressed.
    avoid_repeat_tokens: List[int] = []
    for punct in ',:?!':
        encoded = self.pipeline.encode(punct)
        assert len(encoded) == 1
        avoid_repeat_tokens.extend(encoded)
    pending = [int(tok) for tok in _tokens]
    self.model_tokens += pending
    out: Any = None
    # Forward pass in CHUNK_LEN-sized slices, carrying model_state along.
    while pending:
        chunk, pending = pending[:self.CHUNK_LEN], pending[self.CHUNK_LEN:]
        out, self.model_state = self.client.forward(chunk, self.model_state)
    END_OF_LINE = 187
    out[END_OF_LINE] += newline_adj
    last_token = self.model_tokens[-1]
    if last_token in avoid_repeat_tokens:
        out[last_token] = -999999999
    return out
| null |
test_correct_call
|
"""Test correct call of fake chain."""
chain = FakeChain()
output = chain({'foo': 'bar'})
assert output == {'foo': 'bar', 'bar': 'baz'}
|
def test_correct_call() ->None:
    """Invoking FakeChain merges its output key into the returned mapping."""
    chain = FakeChain()
    result = chain({'foo': 'bar'})
    assert result == {'foo': 'bar', 'bar': 'baz'}
|
Test correct call of fake chain.
|
input_variables
|
"""Input variables for this prompt template.
Returns:
List of input variables.
"""
|
@property
@abstractmethod
def input_variables(self) ->List[str]:
    """Input variables for this prompt template.

    Returns:
        List of input variable names; concrete subclasses must implement.
    """
|
Input variables for this prompt template.
Returns:
List of input variables.
|
_import_clickhouse_settings
|
from langchain_community.vectorstores.clickhouse import ClickhouseSettings
return ClickhouseSettings
|
def _import_clickhouse_settings() ->Any:
    """Return the ``ClickhouseSettings`` class.

    The import is local to the function so the clickhouse dependency is only
    required when this accessor is actually called.
    """
    from langchain_community.vectorstores.clickhouse import ClickhouseSettings
    return ClickhouseSettings
| null |
test_runnable_context_deadlock
|
seq: Runnable = {'bar': Context.setter('input') | Context.getter('foo'),
'foo': Context.setter('foo') | Context.getter('input')
} | RunnablePassthrough()
with pytest.raises(ValueError):
seq.invoke('foo')
|
def test_runnable_context_deadlock() ->None:
    """A circular Context setter/getter dependency must raise, not hang."""
    # 'bar' reads context key 'foo' while 'foo' reads 'input': a cycle.
    circular: Runnable = {
        'bar': Context.setter('input') | Context.getter('foo'),
        'foo': Context.setter('foo') | Context.getter('input'),
    } | RunnablePassthrough()
    with pytest.raises(ValueError):
        circular.invoke('foo')
| null |
load
|
"""Load documents."""
if self.chat_entity is not None:
try:
import nest_asyncio
nest_asyncio.apply()
asyncio.run(self.fetch_data_from_telegram())
except ImportError:
raise ImportError(
"""`nest_asyncio` package not found.
please install with `pip install nest_asyncio`
"""
)
p = Path(self.file_path)
with open(p, encoding='utf8') as f:
d = json.load(f)
try:
import pandas as pd
except ImportError:
raise ImportError(
"""`pandas` package not found.
please install with `pip install pandas`
"""
)
normalized_messages = pd.json_normalize(d)
df = pd.DataFrame(normalized_messages)
message_threads = self._get_message_threads(df)
combined_texts = self._combine_message_texts(message_threads, df)
return text_to_docs(combined_texts)
|
def load(self) ->List[Document]:
    """Load Telegram chat history as documents.

    When ``chat_entity`` is set, first fetch fresh data via the Telegram API
    (writing to ``file_path`` — see ``fetch_data_from_telegram``); then parse
    the JSON export on disk into combined message-thread documents.
    """
    if self.chat_entity is not None:
        try:
            import nest_asyncio
            # Patch the running loop so asyncio.run() works even inside an
            # already-running event loop (e.g. Jupyter).
            nest_asyncio.apply()
            asyncio.run(self.fetch_data_from_telegram())
        except ImportError:
            raise ImportError(
                """`nest_asyncio` package not found.
            please install with `pip install nest_asyncio`
            """
                )
    # Parse the (possibly just fetched) JSON export from disk.
    p = Path(self.file_path)
    with open(p, encoding='utf8') as f:
        d = json.load(f)
    try:
        import pandas as pd
    except ImportError:
        raise ImportError(
            """`pandas` package not found.
            please install with `pip install pandas`
            """
            )
    # Flatten the nested JSON structure into a tabular frame, then group
    # messages into reply threads and combine each thread's text.
    normalized_messages = pd.json_normalize(d)
    df = pd.DataFrame(normalized_messages)
    message_threads = self._get_message_threads(df)
    combined_texts = self._combine_message_texts(message_threads, df)
    return text_to_docs(combined_texts)
|
Load documents.
|
_default_params
|
"""Get the default parameters for calling Javelin AI Gateway API."""
params: Dict[str, Any] = {'gateway_uri': self.gateway_uri, 'route': self.
route, 'javelin_api_key': self.javelin_api_key, **self.params.dict() if
self.params else {}}
return params
|
@property
def _default_params(self) ->Dict[str, Any]:
"""Get the default parameters for calling Javelin AI Gateway API."""
params: Dict[str, Any] = {'gateway_uri': self.gateway_uri, 'route':
self.route, 'javelin_api_key': self.javelin_api_key, **self.params.
dict() if self.params else {}}
return params
|
Get the default parameters for calling Javelin AI Gateway API.
|
_parse_response
|
"""Take a dict response and condense it's data in a human readable string"""
pass
|
@abstractmethod
def _parse_response(self, response: Any) ->str:
    """Condense a raw response dict into a human-readable string."""
    pass
|
Take a dict response and condense it's data in a human readable string
|
on_llm_new_token
|
"""Run when LLM generates a new token."""
self.step += 1
self.llm_streams += 1
resp = self._init_resp()
resp.update({'action': 'on_llm_new_token', 'token': token})
resp.update(self.get_custom_callback_meta())
self.on_llm_token_records.append(resp)
self.action_records.append(resp)
if self.stream_logs:
self.logger.report_text(resp)
|
def on_llm_new_token(self, token: str, **kwargs: Any) ->None:
    """Record a streamed LLM token and optionally forward it to the logger."""
    self.step += 1
    self.llm_streams += 1
    record = self._init_resp()
    record['action'] = 'on_llm_new_token'
    record['token'] = token
    record.update(self.get_custom_callback_meta())
    # The same record object is tracked both per-token and globally.
    self.on_llm_token_records.append(record)
    self.action_records.append(record)
    if self.stream_logs:
        self.logger.report_text(record)
|
Run when LLM generates a new token.
|
last_tool
|
"""The last tool executed by this thought"""
return self._last_tool
|
@property
def last_tool(self) ->Optional[ToolRecord]:
    """The last tool executed by this thought, if any."""
    return self._last_tool
|
The last tool executed by this thought
|
_is_gemini_model
|
return 'gemini' in model_name
|
def _is_gemini_model(model_name: str) ->bool:
return 'gemini' in model_name
| null |
test_configurable_fields_example
|
fake_chat = FakeListChatModel(responses=['b']).configurable_fields(responses
=ConfigurableFieldMultiOption(id='chat_responses', name=
'Chat Responses', options={'hello': 'A good morning to you!', 'bye':
'See you later!', 'helpful': 'How can I help you?'}, default=['hello',
'bye']))
fake_llm = FakeListLLM(responses=['a']).configurable_fields(responses=
ConfigurableField(id='llm_responses', name='LLM Responses', description
='A list of fake responses for this LLM')).configurable_alternatives(
ConfigurableField(id='llm', name='LLM'), chat=fake_chat | StrOutputParser()
)
prompt = PromptTemplate.from_template('Hello, {name}!').configurable_fields(
template=ConfigurableFieldSingleOption(id='prompt_template', name=
'Prompt Template', description='The prompt template for this chain',
options={'hello': 'Hello, {name}!', 'good_morning':
'A very good morning to you, {name}!'}, default='hello'))
chain_configurable = prompt | fake_llm | (lambda x: {'name': x}
) | prompt | fake_llm
assert chain_configurable.invoke({'name': 'John'}) == 'a'
assert chain_configurable.config_schema().schema() == {'title':
'RunnableSequenceConfig', 'type': 'object', 'properties': {
'configurable': {'$ref': '#/definitions/Configurable'}}, 'definitions':
{'LLM': {'title': 'LLM', 'description': 'An enumeration.', 'enum': [
'chat', 'default'], 'type': 'string'}, 'Chat_Responses': {'description':
'An enumeration.', 'enum': ['hello', 'bye', 'helpful'], 'title':
'Chat Responses', 'type': 'string'}, 'Prompt_Template': {'description':
'An enumeration.', 'enum': ['hello', 'good_morning'], 'title':
'Prompt Template', 'type': 'string'}, 'Configurable': {'title':
'Configurable', 'type': 'object', 'properties': {'chat_responses': {
'default': ['hello', 'bye'], 'items': {'$ref':
'#/definitions/Chat_Responses'}, 'title': 'Chat Responses', 'type':
'array'}, 'llm': {'title': 'LLM', 'default': 'default', 'allOf': [{
'$ref': '#/definitions/LLM'}]}, 'llm_responses': {'title':
'LLM Responses', 'description': 'A list of fake responses for this LLM',
'default': ['a'], 'type': 'array', 'items': {'type': 'string'}},
'prompt_template': {'title': 'Prompt Template', 'description':
'The prompt template for this chain', 'default': 'hello', 'allOf': [{
'$ref': '#/definitions/Prompt_Template'}]}}}}}
with pytest.raises(ValueError):
chain_configurable.with_config(configurable={'llm123': 'chat'})
assert chain_configurable.with_config(configurable={'llm': 'chat'}).invoke({
'name': 'John'}) == 'A good morning to you!'
assert chain_configurable.with_config(configurable={'llm': 'chat',
'chat_responses': ['helpful']}).invoke({'name': 'John'}
) == 'How can I help you?'
|
def test_configurable_fields_example() ->None:
    """End-to-end check of configurable fields/alternatives on a chain.

    Builds a prompt|llm chain whose responses, LLM implementation, and prompt
    template are all configurable, then verifies the generated config schema
    and the effect of ``with_config`` overrides.
    """
    # Chat model with a multi-option configurable response list.
    fake_chat = FakeListChatModel(responses=['b']).configurable_fields(
        responses=ConfigurableFieldMultiOption(id='chat_responses', name=
        'Chat Responses', options={'hello': 'A good morning to you!', 'bye':
        'See you later!', 'helpful': 'How can I help you?'}, default=[
        'hello', 'bye']))
    # LLM with configurable responses plus an alternative 'chat' backend.
    fake_llm = FakeListLLM(responses=['a']).configurable_fields(responses=
        ConfigurableField(id='llm_responses', name='LLM Responses',
        description='A list of fake responses for this LLM')
        ).configurable_alternatives(ConfigurableField(id='llm', name='LLM'),
        chat=fake_chat | StrOutputParser())
    # Prompt whose template is chosen from a fixed set of options.
    prompt = PromptTemplate.from_template('Hello, {name}!'
        ).configurable_fields(template=ConfigurableFieldSingleOption(id=
        'prompt_template', name='Prompt Template', description=
        'The prompt template for this chain', options={'hello':
        'Hello, {name}!', 'good_morning':
        'A very good morning to you, {name}!'}, default='hello'))
    chain_configurable = prompt | fake_llm | (lambda x: {'name': x}
        ) | prompt | fake_llm
    # Default configuration: the default FakeListLLM answers with 'a'.
    assert chain_configurable.invoke({'name': 'John'}) == 'a'
    # The config schema exposes every configurable field with its options.
    assert chain_configurable.config_schema().schema() == {'title':
        'RunnableSequenceConfig', 'type': 'object', 'properties': {
        'configurable': {'$ref': '#/definitions/Configurable'}},
        'definitions': {'LLM': {'title': 'LLM', 'description':
        'An enumeration.', 'enum': ['chat', 'default'], 'type': 'string'},
        'Chat_Responses': {'description': 'An enumeration.', 'enum': [
        'hello', 'bye', 'helpful'], 'title': 'Chat Responses', 'type':
        'string'}, 'Prompt_Template': {'description': 'An enumeration.',
        'enum': ['hello', 'good_morning'], 'title': 'Prompt Template',
        'type': 'string'}, 'Configurable': {'title': 'Configurable', 'type':
        'object', 'properties': {'chat_responses': {'default': ['hello',
        'bye'], 'items': {'$ref': '#/definitions/Chat_Responses'}, 'title':
        'Chat Responses', 'type': 'array'}, 'llm': {'title': 'LLM',
        'default': 'default', 'allOf': [{'$ref': '#/definitions/LLM'}]},
        'llm_responses': {'title': 'LLM Responses', 'description':
        'A list of fake responses for this LLM', 'default': ['a'], 'type':
        'array', 'items': {'type': 'string'}}, 'prompt_template': {'title':
        'Prompt Template', 'description':
        'The prompt template for this chain', 'default': 'hello', 'allOf':
        [{'$ref': '#/definitions/Prompt_Template'}]}}}}}
    # Unknown configurable keys are rejected.
    with pytest.raises(ValueError):
        chain_configurable.with_config(configurable={'llm123': 'chat'})
    # Switching to the 'chat' alternative changes the answer.
    assert chain_configurable.with_config(configurable={'llm': 'chat'}).invoke(
        {'name': 'John'}) == 'A good morning to you!'
    # Overriding the chat responses selects a different canned reply.
    assert chain_configurable.with_config(configurable={'llm': 'chat',
        'chat_responses': ['helpful']}).invoke({'name': 'John'}
        ) == 'How can I help you?'
| null |
fn
|
return None
|
def fn(self: Any, **kwargs: Any) ->None:
    """No-op stub: accepts arbitrary keyword arguments and returns None."""
    pass
| null |
transform_documents
|
"""Translates text documents using doctran."""
try:
from doctran import Doctran
doctran = Doctran(openai_api_key=self.openai_api_key, openai_model=self
.openai_api_model)
except ImportError:
raise ImportError(
'Install doctran to use this parser. (pip install doctran)')
doctran_docs = [doctran.parse_folder(content=doc.page_content, metadata=doc
.metadata) for doc in documents]
for i, doc in enumerate(doctran_docs):
doctran_docs[i] = doc.translate(language=self.language).execute()
return [Document(page_content=doc.transformed_content, metadata=doc.
metadata) for doc in doctran_docs]
|
def transform_documents(self, documents: Sequence[Document], **kwargs: Any
    ) ->Sequence[Document]:
    """Translate documents' text via doctran, preserving their metadata.

    Raises:
        ImportError: If the ``doctran`` package is not installed.
    """
    try:
        from doctran import Doctran
        doctran = Doctran(openai_api_key=self.openai_api_key, openai_model=
            self.openai_api_model)
    except ImportError:
        raise ImportError(
            'Install doctran to use this parser. (pip install doctran)')
    # NOTE(review): `parse_folder` called with raw content looks suspicious —
    # confirm against the doctran API (typically ``parse``).
    doctran_docs = [doctran.parse_folder(content=doc.page_content, metadata
        =doc.metadata) for doc in documents]
    # Translate each parsed document in place to the configured language.
    for i, doc in enumerate(doctran_docs):
        doctran_docs[i] = doc.translate(language=self.language).execute()
    return [Document(page_content=doc.transformed_content, metadata=doc.
        metadata) for doc in doctran_docs]
|
Translates text documents using doctran.
|
on_chain_end
|
"""Print out that we finished a chain."""
print_text("""
[1m> Finished chain.[0m""", end='\n', file=self.file)
|
def on_chain_end(self, outputs: Dict[str, Any], **kwargs: Any) ->None:
    """Print a bold banner announcing that the chain has finished."""
    banner = '\n\x1b[1m> Finished chain.\x1b[0m'
    print_text(banner, end='\n', file=self.file)
|
Print out that we finished a chain.
|
from_schema
|
"""Recursively populate from an OpenAPI Schema."""
if references_used is None:
references_used = []
schema_type = schema.type
properties: List[APIRequestBodyProperty] = []
if schema_type == 'object' and schema.properties:
schema_type, properties = cls._process_object_schema(schema, spec,
references_used)
elif schema_type == 'array':
schema_type = cls._process_array_schema(schema, name, spec, references_used
)
elif schema_type in PRIMITIVE_TYPES:
pass
elif schema_type is None:
pass
else:
raise ValueError(f'Unsupported type: {schema_type}')
return cls(name=name, required=required, type=schema_type, default=schema.
default, description=schema.description, properties=properties,
references_used=references_used)
|
@classmethod
def from_schema(cls, schema: Schema, name: str, required: bool, spec:
    OpenAPISpec, references_used: Optional[List[str]]=None
    ) ->'APIRequestBodyProperty':
    """Recursively populate a request-body property from an OpenAPI Schema.

    Objects and arrays are expanded via the dedicated processors; primitive
    and untyped schemas pass through unchanged; anything else is rejected.
    """
    if references_used is None:
        references_used = []
    schema_type = schema.type
    properties: List[APIRequestBodyProperty] = []
    if schema_type == 'object' and schema.properties:
        # Expand nested object properties (may recurse via $refs).
        schema_type, properties = cls._process_object_schema(schema, spec,
            references_used)
    elif schema_type == 'array':
        # Resolve the element type of the array.
        schema_type = cls._process_array_schema(schema, name, spec,
            references_used)
    elif schema_type not in PRIMITIVE_TYPES and schema_type is not None:
        # Primitives and untyped schemas are accepted as-is; all else fails.
        raise ValueError(f'Unsupported type: {schema_type}')
    return cls(name=name, required=required, type=schema_type, default=
        schema.default, description=schema.description, properties=
        properties, references_used=references_used)
|
Recursively populate from an OpenAPI Schema.
|
test_dereference_refs_missing_ref
|
schema = {'type': 'object', 'properties': {'first_name': {'$ref':
'#/$defs/name'}}, '$defs': {}}
with pytest.raises(KeyError):
dereference_refs(schema)
|
def test_dereference_refs_missing_ref() ->None:
    """A $ref pointing at an absent definition should raise KeyError."""
    schema = {
        'type': 'object',
        'properties': {'first_name': {'$ref': '#/$defs/name'}},
        '$defs': {},
    }
    with pytest.raises(KeyError):
        dereference_refs(schema)
| null |
from_texts
|
"""Return VectorStore initialized from texts and embeddings."""
|
@classmethod
@abstractmethod
def from_texts(cls: Type[VST], texts: List[str], embedding: Embeddings,
    metadatas: Optional[List[dict]]=None, **kwargs: Any) ->VST:
    """Return VectorStore initialized from texts and embeddings.

    Args:
        texts: Raw strings to embed and index.
        embedding: Embedding function used to vectorize ``texts``.
        metadatas: Optional metadata dicts (conventionally parallel to
            ``texts``).
    """
|
Return VectorStore initialized from texts and embeddings.
|
test_deanonymizer_mapping
|
"""Test if deanonymizer mapping is correctly populated"""
from langchain_experimental.data_anonymizer import PresidioReversibleAnonymizer
anonymizer = PresidioReversibleAnonymizer(analyzed_fields=['PERSON',
'PHONE_NUMBER', 'EMAIL_ADDRESS', 'CREDIT_CARD'])
anonymizer.anonymize(
'Hello, my name is John Doe and my number is 444 555 6666.')
assert len(anonymizer.deanonymizer_mapping.keys()) == 2
assert 'John Doe' in anonymizer.deanonymizer_mapping.get('PERSON', {}).values()
assert '444 555 6666' in anonymizer.deanonymizer_mapping.get('PHONE_NUMBER', {}
).values()
text_to_anonymize = (
'And my name is Jane Doe, my email is jane@gmail.com and my credit card is 4929 5319 6292 5362.'
)
anonymizer.anonymize(text_to_anonymize)
assert len(anonymizer.deanonymizer_mapping.keys()) == 4
assert 'Jane Doe' in anonymizer.deanonymizer_mapping.get('PERSON', {}).values()
assert 'jane@gmail.com' in anonymizer.deanonymizer_mapping.get('EMAIL_ADDRESS',
{}).values()
assert '4929 5319 6292 5362' in anonymizer.deanonymizer_mapping.get(
'CREDIT_CARD', {}).values()
|
@pytest.mark.requires('presidio_analyzer', 'presidio_anonymizer', 'faker')
def test_deanonymizer_mapping() ->None:
    """Test if deanonymizer mapping is correctly populated"""
    from langchain_experimental.data_anonymizer import PresidioReversibleAnonymizer
    anonymizer = PresidioReversibleAnonymizer(analyzed_fields=['PERSON',
        'PHONE_NUMBER', 'EMAIL_ADDRESS', 'CREDIT_CARD'])
    # First pass: two entity types should be recorded in the mapping.
    anonymizer.anonymize(
        'Hello, my name is John Doe and my number is 444 555 6666.')
    assert len(anonymizer.deanonymizer_mapping.keys()) == 2
    assert 'John Doe' in anonymizer.deanonymizer_mapping.get('PERSON', {}
        ).values()
    assert '444 555 6666' in anonymizer.deanonymizer_mapping.get('PHONE_NUMBER'
        , {}).values()
    text_to_anonymize = (
        'And my name is Jane Doe, my email is jane@gmail.com and my credit card is 4929 5319 6292 5362.'
        )
    # Second pass: the mapping accumulates new entity types and values.
    anonymizer.anonymize(text_to_anonymize)
    assert len(anonymizer.deanonymizer_mapping.keys()) == 4
    assert 'Jane Doe' in anonymizer.deanonymizer_mapping.get('PERSON', {}
        ).values()
    assert 'jane@gmail.com' in anonymizer.deanonymizer_mapping.get(
        'EMAIL_ADDRESS', {}).values()
    assert '4929 5319 6292 5362' in anonymizer.deanonymizer_mapping.get(
        'CREDIT_CARD', {}).values()
|
Test if deanonymizer mapping is correctly populated
|
_chebyshev_distance
|
"""Compute the Chebyshev distance between two vectors.
Args:
a (np.ndarray): The first vector.
b (np.ndarray): The second vector.
Returns:
np.floating: The Chebyshev distance.
"""
return np.max(np.abs(a - b))
|
@staticmethod
def _chebyshev_distance(a: np.ndarray, b: np.ndarray) ->np.floating:
"""Compute the Chebyshev distance between two vectors.
Args:
a (np.ndarray): The first vector.
b (np.ndarray): The second vector.
Returns:
np.floating: The Chebyshev distance.
"""
return np.max(np.abs(a - b))
|
Compute the Chebyshev distance between two vectors.
Args:
a (np.ndarray): The first vector.
b (np.ndarray): The second vector.
Returns:
np.floating: The Chebyshev distance.
|
test__collapse_docs_no_metadata
|
"""Test collapse documents functionality when no metadata."""
docs = [Document(page_content='foo'), Document(page_content='bar'),
Document(page_content='baz')]
output = collapse_docs(docs, _fake_combine_docs_func)
expected_output = Document(page_content='foobarbaz')
assert output == expected_output
|
def test__collapse_docs_no_metadata() ->None:
    """Collapsing metadata-free documents concatenates their contents."""
    docs = [Document(page_content=text) for text in ('foo', 'bar', 'baz')]
    collapsed = collapse_docs(docs, _fake_combine_docs_func)
    assert collapsed == Document(page_content='foobarbaz')
|
Test collapse documents functionality when no metadata.
|
filter_func
|
return x in include_types if include_types else x not in exclude_types
|
def filter_func(x: str) ->bool:
    # With an include list, keep only listed types; otherwise drop excluded.
    if include_types:
        return x in include_types
    return x not in exclude_types
| null |
lookup
|
"""Lookup llm generations in cache by prompt and associated model and settings.
Args:
prompt (str): The prompt run through the language model.
llm_string (str): The language model version and settings.
Raises:
SdkException: Momento service or network error
Returns:
Optional[RETURN_VAL_TYPE]: A list of language model generations.
"""
from momento.responses import CacheGet
generations: RETURN_VAL_TYPE = []
get_response = self.cache_client.get(self.cache_name, self.__key(prompt,
llm_string))
if isinstance(get_response, CacheGet.Hit):
value = get_response.value_string
generations = _load_generations_from_json(value)
elif isinstance(get_response, CacheGet.Miss):
pass
elif isinstance(get_response, CacheGet.Error):
raise get_response.inner_exception
return generations if generations else None
|
def lookup(self, prompt: str, llm_string: str) ->Optional[RETURN_VAL_TYPE]:
    """Lookup llm generations in cache by prompt and associated model and settings.

    Args:
        prompt (str): The prompt run through the language model.
        llm_string (str): The language model version and settings.
    Raises:
        SdkException: Momento service or network error
    Returns:
        Optional[RETURN_VAL_TYPE]: A list of language model generations,
            or None on a cache miss.
    """
    from momento.responses import CacheGet
    generations: RETURN_VAL_TYPE = []
    get_response = self.cache_client.get(self.cache_name, self.__key(prompt,
        llm_string))
    if isinstance(get_response, CacheGet.Hit):
        # Cached generations are stored as JSON; deserialize them.
        value = get_response.value_string
        generations = _load_generations_from_json(value)
    elif isinstance(get_response, CacheGet.Miss):
        # A miss is not an error: fall through and return None below.
        pass
    elif isinstance(get_response, CacheGet.Error):
        raise get_response.inner_exception
    return generations if generations else None
|
Lookup llm generations in cache by prompt and associated model and settings.
Args:
prompt (str): The prompt run through the language model.
llm_string (str): The language model version and settings.
Raises:
SdkException: Momento service or network error
Returns:
Optional[RETURN_VAL_TYPE]: A list of language model generations.
|
_import_sql_database_tool_ListSQLDatabaseTool
|
from langchain_community.tools.sql_database.tool import ListSQLDatabaseTool
return ListSQLDatabaseTool
|
def _import_sql_database_tool_ListSQLDatabaseTool() ->Any:
    """Return the ``ListSQLDatabaseTool`` class.

    The import is local to the function so the dependency is only required
    when this accessor is actually called.
    """
    from langchain_community.tools.sql_database.tool import ListSQLDatabaseTool
    return ListSQLDatabaseTool
| null |
_build_insert_sql
|
ks = ','.join(column_names)
embed_tuple_index = tuple(column_names).index(self.config.column_map[
'embedding'])
_data = []
for n in transac:
n = ','.join([(f"'{self.escape_str(str(_n))}'" if idx !=
embed_tuple_index else f'array<float>{str(_n)}') for idx, _n in
enumerate(n)])
_data.append(f'({n})')
i_str = f"""
INSERT INTO
{self.config.database}.{self.config.table}({ks})
VALUES
{','.join(_data)}
"""
return i_str
|
def _build_insert_sql(self, transac: Iterable, column_names: Iterable[str]
) ->str:
ks = ','.join(column_names)
embed_tuple_index = tuple(column_names).index(self.config.column_map[
'embedding'])
_data = []
for n in transac:
n = ','.join([(f"'{self.escape_str(str(_n))}'" if idx !=
embed_tuple_index else f'array<float>{str(_n)}') for idx, _n in
enumerate(n)])
_data.append(f'({n})')
i_str = f"""
INSERT INTO
{self.config.database}.{self.config.table}({ks})
VALUES
{','.join(_data)}
"""
return i_str
| null |
_call
|
"""Run the agent."""
_run_manager = run_manager or CallbackManagerForChainRun.get_noop_manager()
objective = inputs['objective']
first_task = inputs.get('first_task', 'Make a todo list')
self.add_task({'task_id': 1, 'task_name': first_task})
num_iters = 0
while True:
if self.task_list:
self.print_task_list()
task = self.task_list.popleft()
self.print_next_task(task)
result = self.execute_task(objective, task['task_name'], callbacks=
_run_manager.get_child())
this_task_id = int(task['task_id'])
self.print_task_result(result)
result_id = f"result_{task['task_id']}_{num_iters}"
self.vectorstore.add_texts(texts=[result], metadatas=[{'task': task
['task_name']}], ids=[result_id])
new_tasks = self.get_next_task(result, task['task_name'], objective,
callbacks=_run_manager.get_child())
for new_task in new_tasks:
self.task_id_counter += 1
new_task.update({'task_id': self.task_id_counter})
self.add_task(new_task)
self.task_list = deque(self.prioritize_tasks(this_task_id,
objective, callbacks=_run_manager.get_child()))
num_iters += 1
if self.max_iterations is not None and num_iters == self.max_iterations:
print('\x1b[91m\x1b[1m' + '\n*****TASK ENDING*****\n' +
'\x1b[0m\x1b[0m')
break
return {}
|
def _call(self, inputs: Dict[str, Any], run_manager: Optional[
    CallbackManagerForChainRun]=None) ->Dict[str, Any]:
    """Run the agent.

    Repeatedly executes the highest-priority task, stores its result in the
    vector store, generates and prioritizes new tasks, and stops once
    ``max_iterations`` loop passes have completed. Always returns ``{}``.
    """
    _run_manager = run_manager or CallbackManagerForChainRun.get_noop_manager()
    objective = inputs['objective']
    first_task = inputs.get('first_task', 'Make a todo list')
    # Seed the queue with the initial task.
    self.add_task({'task_id': 1, 'task_name': first_task})
    num_iters = 0
    while True:
        if self.task_list:
            self.print_task_list()
            # Step 1: pull and execute the next task.
            task = self.task_list.popleft()
            self.print_next_task(task)
            result = self.execute_task(objective, task['task_name'],
                callbacks=_run_manager.get_child())
            this_task_id = int(task['task_id'])
            self.print_task_result(result)
            # Step 2: persist the result for later retrieval.
            result_id = f"result_{task['task_id']}_{num_iters}"
            self.vectorstore.add_texts(texts=[result], metadatas=[{'task':
                task['task_name']}], ids=[result_id])
            # Step 3: derive follow-up tasks from the result.
            new_tasks = self.get_next_task(result, task['task_name'],
                objective, callbacks=_run_manager.get_child())
            for new_task in new_tasks:
                self.task_id_counter += 1
                new_task.update({'task_id': self.task_id_counter})
                self.add_task(new_task)
            # Step 4: reprioritize the whole queue.
            self.task_list = deque(self.prioritize_tasks(this_task_id,
                objective, callbacks=_run_manager.get_child()))
        # The counter advances every loop pass, even with an empty queue.
        num_iters += 1
        if (self.max_iterations is not None and num_iters == self.
            max_iterations):
            print('\x1b[91m\x1b[1m' + '\n*****TASK ENDING*****\n' +
                '\x1b[0m\x1b[0m')
            break
    return {}
|
Run the agent.
|
evaluate
|
return 'test'
|
def evaluate(self, page: 'Page', browser: 'Browser', response: 'Response'
    ) ->str:
    """Stub evaluator: ignores its arguments and returns the literal 'test'."""
    result = 'test'
    return result
| null |
test_messages
|
from zep_python import Memory, Message, Summary
mock_memory: Memory = Memory(summary=Summary(content='summary'), messages=[
Message(content='message', role='ai', metadata={'key': 'value'}),
Message(content='message2', role='human', metadata={'key2': 'value2'})])
zep_chat.zep_client.memory.get_memory.return_value = mock_memory
result = zep_chat.messages
assert len(result) == 3
assert isinstance(result[0], SystemMessage)
assert isinstance(result[1], AIMessage)
assert isinstance(result[2], HumanMessage)
|
@pytest.mark.requires('zep_python')
def test_messages(mocker: MockerFixture, zep_chat: ZepChatMessageHistory
    ) ->None:
    """Zep memory maps to a SystemMessage summary plus role-typed messages."""
    from zep_python import Memory, Message, Summary
    # One summary plus an 'ai' and a 'human' message from the mocked client.
    mock_memory: Memory = Memory(summary=Summary(content='summary'),
        messages=[Message(content='message', role='ai', metadata={'key':
        'value'}), Message(content='message2', role='human', metadata={
        'key2': 'value2'})])
    zep_chat.zep_client.memory.get_memory.return_value = mock_memory
    result = zep_chat.messages
    # Summary first, then the two chat messages in order.
    assert len(result) == 3
    assert isinstance(result[0], SystemMessage)
    assert isinstance(result[1], AIMessage)
    assert isinstance(result[2], HumanMessage)
| null |
test_pgvector_with_filter_nin_set
|
"""Test end to end construction and search."""
texts = ['foo', 'bar', 'baz']
metadatas = [{'page': str(i)} for i in range(len(texts))]
docsearch = PGVector.from_texts(texts=texts, collection_name=
'test_collection_filter', embedding=FakeEmbeddingsWithAdaDimension(),
metadatas=metadatas, connection_string=CONNECTION_STRING,
pre_delete_collection=True)
output = docsearch.similarity_search_with_score('foo', k=2, filter={'page':
{'NIN': ['1']}})
assert output == [(Document(page_content='foo', metadata={'page': '0'}),
0.0), (Document(page_content='baz', metadata={'page': '2'}),
0.0013003906671379406)]
|
def test_pgvector_with_filter_nin_set() ->None:
    """Test end to end construction and search.

    The NIN filter should exclude the document whose page metadata is '1'.
    """
    texts = ['foo', 'bar', 'baz']
    metadatas = [{'page': str(i)} for i in range(len(texts))]
    docsearch = PGVector.from_texts(texts=texts, collection_name=
        'test_collection_filter', embedding=FakeEmbeddingsWithAdaDimension(
        ), metadatas=metadatas, connection_string=CONNECTION_STRING,
        pre_delete_collection=True)
    output = docsearch.similarity_search_with_score('foo', k=2, filter={
        'page': {'NIN': ['1']}})
    # 'bar' (page '1') is filtered out; remaining docs ranked by distance.
    assert output == [(Document(page_content='foo', metadata={'page': '0'}),
        0.0), (Document(page_content='baz', metadata={'page': '2'}),
        0.0013003906671379406)]
|
Test end to end construction and search.
|
_get_index
|
"""Return the vector index information if it exists"""
from pymilvus import Collection
if isinstance(self.col, Collection):
for x in self.col.indexes:
if x.field_name == self._vector_field:
return x.to_dict()
return None
|
def _get_index(self) ->Optional[dict[str, Any]]:
    """Return the index info dict for the vector field, if one exists."""
    from pymilvus import Collection
    # Only a fully-instantiated Collection exposes its indexes.
    if not isinstance(self.col, Collection):
        return None
    for index in self.col.indexes:
        if index.field_name == self._vector_field:
            return index.to_dict()
    return None
|
Return the vector index information if it exists
|
test_chat_invalid_input_variables_missing
|
messages = [HumanMessagePromptTemplate.from_template('{foo}')]
with pytest.raises(ValueError):
ChatPromptTemplate(messages=messages, input_variables=[],
validate_template=True)
assert ChatPromptTemplate(messages=messages, input_variables=[]
).input_variables == ['foo']
|
def test_chat_invalid_input_variables_missing() ->None:
    """Empty input_variables fails validation but is inferred otherwise."""
    chat_messages = [HumanMessagePromptTemplate.from_template('{foo}')]
    # With validate_template=True the mismatch is an error...
    with pytest.raises(ValueError):
        ChatPromptTemplate(messages=chat_messages, input_variables=[],
            validate_template=True)
    # ...without validation the variables are inferred from the messages.
    inferred = ChatPromptTemplate(messages=chat_messages, input_variables=[])
    assert inferred.input_variables == ['foo']
| null |
test_quip_loader_load_date_invalid_args
|
quip_loader = QuipLoader(self.API_URL, access_token=self.ACCESS_TOKEN,
request_timeout=60)
with pytest.raises(ValueError, match=
'Must specify at least one among `folder_ids`, `thread_ids` or set `include_all`_folders as True'
):
quip_loader.load()
|
def test_quip_loader_load_date_invalid_args(self) ->None:
    """load() without folder_ids/thread_ids/include_all must raise."""
    loader = QuipLoader(self.API_URL, access_token=self.ACCESS_TOKEN,
        request_timeout=60)
    expected = (
        'Must specify at least one among `folder_ids`, `thread_ids` or set `include_all`_folders as True'
        )
    with pytest.raises(ValueError, match=expected):
        loader.load()
| null |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.