method_name
stringlengths 1
78
| method_body
stringlengths 3
9.66k
| full_code
stringlengths 31
10.7k
| docstring
stringlengths 4
4.74k
⌀ |
|---|---|---|---|
_deanonymize
|
"""Abstract method to deanonymize text"""
|
@abstractmethod
def _deanonymize(self, text_to_deanonymize: str,
    deanonymizer_matching_strategy: Callable[[str, MappingDataType], str]
    ) ->str:
    """Abstract method to deanonymize text.

    Args:
        text_to_deanonymize: Text containing anonymized placeholders to be
            restored.
        deanonymizer_matching_strategy: Callable that, given the text and the
            anonymizer's mapping data, returns the deanonymized text.

    Returns:
        The deanonymized text.
    """
|
Abstract method to deanonymize text
|
get_format_instructions
|
return PANDAS_DATAFRAME_FORMAT_INSTRUCTIONS.format(columns=', '.join(self.
dataframe.columns))
|
def get_format_instructions(self) ->str:
    """Return format instructions listing this parser's dataframe columns."""
    column_names = ', '.join(self.dataframe.columns)
    return PANDAS_DATAFRAME_FORMAT_INSTRUCTIONS.format(columns=column_names)
| null |
validate_environment
|
"""Validate that api key and python package exists in environment."""
values['minimax_api_key'] = convert_to_secret_str(get_from_dict_or_env(
values, 'minimax_api_key', 'MINIMAX_API_KEY'))
values['minimax_group_id'] = get_from_dict_or_env(values,
'minimax_group_id', 'MINIMAX_GROUP_ID')
values['minimax_api_host'] = get_from_dict_or_env(values,
'minimax_api_host', 'MINIMAX_API_HOST', default='https://api.minimax.chat')
values['_client'] = _MinimaxEndpointClient(host=values['minimax_api_host'],
api_key=values['minimax_api_key'], group_id=values['minimax_group_id'])
return values
|
@root_validator()
def validate_environment(cls, values: Dict) ->Dict:
    """Validate that api key and python package exists in environment.

    Resolves ``minimax_api_key`` (wrapped as a secret), ``minimax_group_id``
    and ``minimax_api_host`` from the supplied values or the corresponding
    environment variables, then constructs the ``_client`` endpoint wrapper
    from the resolved values.
    """
    # Key may come from `values` or MINIMAX_API_KEY; stored as a SecretStr
    # so it is not exposed in reprs/logs.
    values['minimax_api_key'] = convert_to_secret_str(get_from_dict_or_env(
        values, 'minimax_api_key', 'MINIMAX_API_KEY'))
    values['minimax_group_id'] = get_from_dict_or_env(values,
        'minimax_group_id', 'MINIMAX_GROUP_ID')
    # Host is optional; defaults to the public MiniMax endpoint.
    values['minimax_api_host'] = get_from_dict_or_env(values,
        'minimax_api_host', 'MINIMAX_API_HOST', default=
        'https://api.minimax.chat')
    # Client construction must happen after the three values above resolve.
    values['_client'] = _MinimaxEndpointClient(host=values[
        'minimax_api_host'], api_key=values['minimax_api_key'], group_id=
        values['minimax_group_id'])
    return values
|
Validate that api key and python package exists in environment.
|
test_opensearch_with_custom_field_name
|
"""Test indexing and search using custom vector field and text field name."""
docsearch = OpenSearchVectorSearch.from_texts(texts, FakeEmbeddings(),
opensearch_url=DEFAULT_OPENSEARCH_URL, vector_field='my_vector',
text_field='custom_text')
output = docsearch.similarity_search('foo', k=1, vector_field='my_vector',
text_field='custom_text')
assert output == [Document(page_content='foo')]
text_input = ['test', 'add', 'text', 'method']
OpenSearchVectorSearch.add_texts(docsearch, text_input, vector_field=
'my_vector', text_field='custom_text')
output = docsearch.similarity_search('add', k=1, vector_field='my_vector',
text_field='custom_text')
assert output == [Document(page_content='foo')]
|
def test_opensearch_with_custom_field_name() ->None:
    """Test indexing and search using custom vector field and text field name."""
    store = OpenSearchVectorSearch.from_texts(texts, FakeEmbeddings(),
        opensearch_url=DEFAULT_OPENSEARCH_URL, vector_field='my_vector',
        text_field='custom_text')
    hits = store.similarity_search('foo', k=1, vector_field='my_vector',
        text_field='custom_text')
    assert hits == [Document(page_content='foo')]
    extra_texts = ['test', 'add', 'text', 'method']
    store.add_texts(extra_texts, vector_field='my_vector', text_field=
        'custom_text')
    hits = store.similarity_search('add', k=1, vector_field='my_vector',
        text_field='custom_text')
    assert hits == [Document(page_content='foo')]
|
Test indexing and search using custom vector field and text field name.
|
parse_with_prompt
|
retries = 0
while retries <= self.max_retries:
try:
return self.parser.parse_folder(completion)
except OutputParserException as e:
if retries == self.max_retries:
raise e
else:
retries += 1
completion = self.retry_chain.run(prompt=prompt_value.to_string
(), completion=completion, error=repr(e))
raise OutputParserException('Failed to parse')
|
def parse_with_prompt(self, completion: str, prompt_value: PromptValue) ->T:
    """Parse ``completion``, re-prompting the LLM on parse failures.

    Args:
        completion: Raw LLM output to parse.
        prompt_value: The prompt that produced ``completion``; forwarded to
            the retry chain so the LLM can see the original request.

    Returns:
        The parsed value of type ``T``.

    Raises:
        OutputParserException: If parsing still fails after
            ``self.max_retries`` retries.
    """
    retries = 0
    while retries <= self.max_retries:
        try:
            # Output parsers expose `parse`; `parse_folder` does not exist.
            return self.parser.parse(completion)
        except OutputParserException as e:
            if retries == self.max_retries:
                raise e
            retries += 1
            # Ask the LLM to repair its output, showing it the original
            # prompt, the failing completion, and the parser error.
            completion = self.retry_chain.run(prompt=prompt_value.
                to_string(), completion=completion, error=repr(e))
    # Defensive: the loop always returns or raises before reaching here.
    raise OutputParserException('Failed to parse')
| null |
results
|
"""Run query through BingSearch and return metadata.
Args:
query: The query to search for.
num_results: The number of results to return.
Returns:
A list of dictionaries with the following keys:
snippet - The description of the result.
title - The title of the result.
link - The link to the result.
"""
metadata_results = []
results = self._bing_search_results(query, count=num_results)
if len(results) == 0:
return [{'Result': 'No good Bing Search Result was found'}]
for result in results:
metadata_result = {'snippet': result['snippet'], 'title': result['name'
], 'link': result['url']}
metadata_results.append(metadata_result)
return metadata_results
|
def results(self, query: str, num_results: int) ->List[Dict]:
    """Run query through BingSearch and return metadata.

    Args:
        query: The query to search for.
        num_results: The number of results to return.

    Returns:
        A list of dictionaries with the following keys:
            snippet - The description of the result.
            title - The title of the result.
            link - The link to the result.
    """
    raw_results = self._bing_search_results(query, count=num_results)
    if not raw_results:
        return [{'Result': 'No good Bing Search Result was found'}]
    return [{'snippet': item['snippet'], 'title': item['name'], 'link':
        item['url']} for item in raw_results]
|
Run query through BingSearch and return metadata.
Args:
query: The query to search for.
num_results: The number of results to return.
Returns:
A list of dictionaries with the following keys:
snippet - The description of the result.
title - The title of the result.
link - The link to the result.
|
parse
|
lines = text.strip().split('\n')
return LineList(lines=lines)
|
def parse(self, text: str) ->LineList:
    """Strip surrounding whitespace and split ``text`` into one entry per line."""
    stripped = text.strip()
    return LineList(lines=stripped.split('\n'))
| null |
_validate_uri
|
if self.target_uri == 'databricks':
return
allowed = ['http', 'https', 'databricks']
if urlparse(self.target_uri).scheme not in allowed:
raise ValueError(
f'Invalid target URI: {self.target_uri}. The scheme must be one of {allowed}.'
)
|
def _validate_uri(self) ->None:
    """Raise ValueError unless target_uri is 'databricks' or uses an allowed scheme."""
    if self.target_uri == 'databricks':
        return
    allowed = ['http', 'https', 'databricks']
    scheme = urlparse(self.target_uri).scheme
    if scheme not in allowed:
        raise ValueError(
            f'Invalid target URI: {self.target_uri}. The scheme must be one of {allowed}.'
            )
| null |
_get_index_id
|
"""Gets the correct index id for the endpoint.
Returns:
The index id if found (which should be found) or throws
ValueError otherwise.
"""
for index in self.endpoint.deployed_indexes:
if index.index == self.index.resource_name:
return index.id
raise ValueError(
f'No index with id {self.index.resource_name} deployed on endpoint {self.endpoint.display_name}.'
)
|
def _get_index_id(self) ->str:
    """Gets the correct index id for the endpoint.

    Returns:
        The index id if found (which should be found) or throws
        ValueError otherwise.
    """
    _not_found = object()
    # First deployed index whose backing index matches ours.
    match = next((deployed.id for deployed in self.endpoint.
        deployed_indexes if deployed.index == self.index.resource_name),
        _not_found)
    if match is _not_found:
        raise ValueError(
            f'No index with id {self.index.resource_name} deployed on endpoint {self.endpoint.display_name}.'
            )
    return match
|
Gets the correct index id for the endpoint.
Returns:
The index id if found (which should be found) or throws
ValueError otherwise.
|
test_all_imports
|
assert sorted(EXPECTED_ALL) == sorted(__all__)
|
def test_all_imports() ->None:
    """__all__ must match the expected export list (order-insensitive)."""
    assert sorted(__all__) == sorted(EXPECTED_ALL)
| null |
_stream
|
payload = self._build_payload(messages)
for chunk in self._client.stream(payload):
if chunk.choices:
content = chunk.choices[0].delta.content
yield ChatGenerationChunk(message=AIMessageChunk(content=content))
if run_manager:
run_manager.on_llm_new_token(content)
|
def _stream(self, messages: List[BaseMessage], stop: Optional[List[str]]=
    None, run_manager: Optional[CallbackManagerForLLMRun]=None, **kwargs: Any
    ) ->Iterator[ChatGenerationChunk]:
    """Stream chat completion chunks for ``messages`` as they arrive.

    Yields one ``ChatGenerationChunk`` per streamed delta and forwards each
    token to ``run_manager`` when one is provided. Chunks with no
    ``choices`` are skipped.

    NOTE(review): ``stop`` and ``kwargs`` are not used by this body —
    presumably handled upstream or by ``_build_payload``; confirm. Also
    ``delta.content`` may be ``None`` on some providers — verify the
    callback tolerates that.
    """
    payload = self._build_payload(messages)
    for chunk in self._client.stream(payload):
        if chunk.choices:
            content = chunk.choices[0].delta.content
            yield ChatGenerationChunk(message=AIMessageChunk(content=content))
            if run_manager:
                run_manager.on_llm_new_token(content)
| null |
__init__
|
super().__init__(*args, **kwargs)
try:
import zhipuai
self.zhipuai = zhipuai
self.zhipuai.api_key = self.zhipuai_api_key
except ImportError:
raise RuntimeError(
"Could not import zhipuai package. Please install it via 'pip install zhipuai'"
)
|
def __init__(self, *args, **kwargs):
    """Initialize the wrapper and bind the zhipuai SDK module.

    Raises:
        RuntimeError: If the `zhipuai` package is not installed.
    """
    super().__init__(*args, **kwargs)
    try:
        import zhipuai
        self.zhipuai = zhipuai
        # The zhipuai SDK reads credentials from this module-level attribute.
        self.zhipuai.api_key = self.zhipuai_api_key
    except ImportError:
        raise RuntimeError(
            "Could not import zhipuai package. Please install it via 'pip install zhipuai'"
            )
| null |
validate_environment
|
"""
Validates that the Spacy package and the 'en_core_web_sm' model are installed.
Args:
values (Dict): The values provided to the class constructor.
Returns:
The validated values.
Raises:
ValueError: If the Spacy package or the 'en_core_web_sm'
model are not installed.
"""
if importlib.util.find_spec('spacy') is None:
raise ValueError(
'Spacy package not found. Please install it with `pip install spacy`.')
try:
import spacy
values['nlp'] = spacy.load('en_core_web_sm')
except OSError:
raise ValueError(
"Spacy model 'en_core_web_sm' not found. Please install it with `python -m spacy download en_core_web_sm`."
)
return values
|
@root_validator(pre=True)
def validate_environment(cls, values: Dict) ->Dict:
    """
    Validates that the Spacy package and the 'en_core_web_sm' model are installed.

    Args:
        values (Dict): The values provided to the class constructor.

    Returns:
        The validated values, with the loaded pipeline stored under 'nlp'.

    Raises:
        ValueError: If the Spacy package or the 'en_core_web_sm'
            model are not installed.
    """
    # Cheap presence check first, so a missing package produces the
    # pip-install hint rather than the model-download hint below.
    if importlib.util.find_spec('spacy') is None:
        raise ValueError(
            'Spacy package not found. Please install it with `pip install spacy`.'
            )
    try:
        import spacy
        # spacy.load raises OSError when the model data is not installed.
        values['nlp'] = spacy.load('en_core_web_sm')
    except OSError:
        raise ValueError(
            "Spacy model 'en_core_web_sm' not found. Please install it with `python -m spacy download en_core_web_sm`."
            )
    return values
|
Validates that the Spacy package and the 'en_core_web_sm' model are installed.
Args:
values (Dict): The values provided to the class constructor.
Returns:
The validated values.
Raises:
ValueError: If the Spacy package or the 'en_core_web_sm'
model are not installed.
|
similarity_search_with_score_by_index
|
"""Return docs most similar to query.
Args:
query: Text to look up documents similar to.
k: Number of Documents to return. Defaults to 4.
search_k: inspect up to search_k nodes which defaults
to n_trees * n if not provided
Returns:
List of Documents most similar to the query and score for each
"""
idxs, dists = self.index.get_nns_by_item(docstore_index, k, search_k=
search_k, include_distances=True)
return self.process_index_results(idxs, dists)
|
def similarity_search_with_score_by_index(self, docstore_index: int, k: int
    =4, search_k: int=-1) ->List[Tuple[Document, float]]:
    """Return docs most similar to the item already stored at an index.

    Args:
        docstore_index: Index of the reference item in the Annoy index.
        k: Number of Documents to return. Defaults to 4.
        search_k: inspect up to search_k nodes which defaults
            to n_trees * n if not provided
    Returns:
        List of Documents most similar to the reference item, with a
        distance score for each.
    """
    neighbor_ids, distances = self.index.get_nns_by_item(docstore_index,
        k, search_k=search_k, include_distances=True)
    return self.process_index_results(neighbor_ids, distances)
|
Return docs most similar to query.
Args:
query: Text to look up documents similar to.
k: Number of Documents to return. Defaults to 4.
search_k: inspect up to search_k nodes which defaults
to n_trees * n if not provided
Returns:
List of Documents most similar to the query and score for each
|
get_num_tokens_from_messages
|
"""Calculate num tokens for gpt-3.5-turbo and gpt-4 with tiktoken package.
Official documentation: https://github.com/openai/openai-cookbook/blob/
main/examples/How_to_format_inputs_to_ChatGPT_models.ipynb"""
if sys.version_info[1] <= 7:
return super().get_num_tokens_from_messages(messages)
model, encoding = self._get_encoding_model()
if model.startswith('gpt-3.5-turbo-0301'):
tokens_per_message = 4
tokens_per_name = -1
elif model.startswith('gpt-3.5-turbo') or model.startswith('gpt-4'):
tokens_per_message = 3
tokens_per_name = 1
else:
raise NotImplementedError(
f'get_num_tokens_from_messages() is not presently implemented for model {model}.See https://github.com/openai/openai-python/blob/main/chatml.md for information on how messages are converted to tokens.'
)
num_tokens = 0
messages_dict = [convert_message_to_dict(m) for m in messages]
for message in messages_dict:
num_tokens += tokens_per_message
for key, value in message.items():
num_tokens += len(encoding.encode(str(value)))
if key == 'name':
num_tokens += tokens_per_name
num_tokens += 3
return num_tokens
|
def get_num_tokens_from_messages(self, messages: List[BaseMessage]) ->int:
    """Calculate num tokens for gpt-3.5-turbo and gpt-4 with tiktoken package.

    Official documentation: https://github.com/openai/openai-cookbook/blob/
    main/examples/How_to_format_inputs_to_ChatGPT_models.ipynb
    """
    if sys.version_info[1] <= 7:
        # tiktoken needs Python 3.8+; fall back to the base-class estimate.
        return super().get_num_tokens_from_messages(messages)
    model, encoding = self._get_encoding_model()
    # Per-message/per-name overheads differ by model snapshot.
    if model.startswith('gpt-3.5-turbo-0301'):
        tokens_per_message = 4
        tokens_per_name = -1
    elif model.startswith('gpt-3.5-turbo') or model.startswith('gpt-4'):
        tokens_per_message = 3
        tokens_per_name = 1
    else:
        raise NotImplementedError(
            f'get_num_tokens_from_messages() is not presently implemented for model {model}.See https://github.com/openai/openai-python/blob/main/chatml.md for information on how messages are converted to tokens.'
            )
    total = 0
    for payload in (convert_message_to_dict(m) for m in messages):
        total += tokens_per_message
        for key, value in payload.items():
            total += len(encoding.encode(str(value)))
            if key == 'name':
                total += tokens_per_name
    # Every reply is primed with <|start|>assistant<|message|> (3 tokens).
    return total + 3
|
Calculate num tokens for gpt-3.5-turbo and gpt-4 with tiktoken package.
Official documentation: https://github.com/openai/openai-cookbook/blob/
main/examples/How_to_format_inputs_to_ChatGPT_models.ipynb
|
_call
|
_run_manager = run_manager or CallbackManagerForChainRun.get_noop_manager()
_run_manager.on_text(inputs[self.input_key], verbose=self.verbose)
t = self.llm_chain.predict(question=inputs[self.input_key], callbacks=
_run_manager.get_child())
_run_manager.on_text(t, color='green', verbose=self.verbose)
t = t.strip()
try:
parser = self.llm_chain.prompt.output_parser
command_list = parser.parse_folder(t)
except OutputParserException as e:
_run_manager.on_chain_error(e, verbose=self.verbose)
raise e
if self.verbose:
_run_manager.on_text('\nCode: ', verbose=self.verbose)
_run_manager.on_text(str(command_list), color='yellow', verbose=self.
verbose)
output = self.bash_process.run(command_list)
_run_manager.on_text('\nAnswer: ', verbose=self.verbose)
_run_manager.on_text(output, color='yellow', verbose=self.verbose)
return {self.output_key: output}
|
def _call(self, inputs: Dict[str, Any], run_manager: Optional[
    CallbackManagerForChainRun]=None) ->Dict[str, str]:
    """Ask the LLM for shell commands for the question, run them, return output.

    Args:
        inputs: Chain inputs; the question is read from ``self.input_key``.
        run_manager: Optional callback manager for intermediate events.

    Returns:
        Dict mapping ``self.output_key`` to the command output.

    Raises:
        OutputParserException: If the LLM response cannot be parsed into a
            command list.
    """
    _run_manager = run_manager or CallbackManagerForChainRun.get_noop_manager()
    _run_manager.on_text(inputs[self.input_key], verbose=self.verbose)
    t = self.llm_chain.predict(question=inputs[self.input_key], callbacks=
        _run_manager.get_child())
    _run_manager.on_text(t, color='green', verbose=self.verbose)
    t = t.strip()
    try:
        parser = self.llm_chain.prompt.output_parser
        # Output parsers expose `parse`; `parse_folder` does not exist.
        command_list = parser.parse(t)
    except OutputParserException as e:
        _run_manager.on_chain_error(e, verbose=self.verbose)
        raise e
    if self.verbose:
        _run_manager.on_text('\nCode: ', verbose=self.verbose)
        _run_manager.on_text(str(command_list), color='yellow', verbose=
            self.verbose)
    output = self.bash_process.run(command_list)
    _run_manager.on_text('\nAnswer: ', verbose=self.verbose)
    _run_manager.on_text(output, color='yellow', verbose=self.verbose)
    return {self.output_key: output}
| null |
InputType
|
func = getattr(self, '_transform', None) or getattr(self, '_atransform')
try:
params = inspect.signature(func).parameters
first_param = next(iter(params.values()), None)
if first_param and first_param.annotation != inspect.Parameter.empty:
return getattr(first_param.annotation, '__args__', (Any,))[0]
else:
return Any
except ValueError:
return Any
|
@property
def InputType(self) ->Any:
    """Input type inferred from the transform's first parameter annotation.

    Falls back to ``Any`` whenever the signature is unavailable or the
    first parameter carries no usable annotation.
    """
    transform = getattr(self, '_transform', None) or getattr(self,
        '_atransform')
    try:
        signature = inspect.signature(transform)
    except ValueError:
        # Some callables (e.g. certain builtins) expose no signature.
        return Any
    first = next(iter(signature.parameters.values()), None)
    if first is None or first.annotation == inspect.Parameter.empty:
        return Any
    return getattr(first.annotation, '__args__', (Any,))[0]
| null |
test_multiple_messages
|
chat = AzureMLChatOnlineEndpoint(content_formatter=LlamaContentFormatter())
message = HumanMessage(content='Hi!')
response = chat.generate([[message], [message]])
assert isinstance(response, LLMResult)
assert len(response.generations) == 2
for generations in response.generations:
assert len(generations) == 1
for generation in generations:
assert isinstance(generation, ChatGeneration)
assert isinstance(generation.text, str)
assert generation.text == generation.message.content
|
def test_multiple_messages() ->None:
    """Two identical prompts should each produce exactly one chat generation."""
    chat = AzureMLChatOnlineEndpoint(content_formatter=LlamaContentFormatter())
    prompt = HumanMessage(content='Hi!')
    result = chat.generate([[prompt], [prompt]])
    assert isinstance(result, LLMResult)
    assert len(result.generations) == 2
    for batch in result.generations:
        assert len(batch) == 1
        generation = batch[0]
        assert isinstance(generation, ChatGeneration)
        assert isinstance(generation.text, str)
        assert generation.text == generation.message.content
| null |
similarity_search_by_vector
|
"""
Return docs most similar to embedding vector.
Examples:
>>> # Search using an embedding
>>> data = vector_store.similarity_search_by_vector(
... embedding=<your_embedding>,
... k=<num_items_to_return>,
... exec_option=<preferred_exec_option>,
... )
Args:
embedding (Union[List[float], np.ndarray]):
Embedding to find similar docs.
k (int): Number of Documents to return. Defaults to 4.
**kwargs: Additional keyword arguments including:
filter (Union[Dict, Callable], optional):
Additional filter before embedding search.
- ``Dict`` - Key-value search on tensors of htype json. True
if all key-value filters are satisfied.
Dict = {"tensor_name_1": {"key": value},
"tensor_name_2": {"key": value}}
- ``Function`` - Any function compatible with
`deeplake.filter`.
Defaults to None.
exec_option (str): Options for search execution include
"python", "compute_engine", or "tensor_db". Defaults to
"python".
- "python" - Pure-python implementation running on the client.
Can be used for data stored anywhere. WARNING: using this
option with big datasets is discouraged due to potential
memory issues.
- "compute_engine" - Performant C++ implementation of the Deep
Lake Compute Engine. Runs on the client and can be used for
any data stored in or connected to Deep Lake. It cannot be
used with in-memory or local datasets.
- "tensor_db" - Performant, fully-hosted Managed Tensor Database.
Responsible for storage and query execution. Only available
for data stored in the Deep Lake Managed Database.
To store datasets in this database, specify
`runtime = {"db_engine": True}` during dataset creation.
distance_metric (str): `L2` for Euclidean, `L1` for Nuclear,
`max` for L-infinity distance, `cos` for cosine similarity,
'dot' for dot product. Defaults to `L2`.
deep_memory (bool): Whether to use the Deep Memory model for improving
search results. Defaults to False if deep_memory is not specified
in the Vector Store initialization. If True, the distance metric
is set to "deepmemory_distance", which represents the metric with
which the model was trained. The search is performed using the Deep
Memory model. If False, the distance metric is set to "COS" or
whatever distance metric user specifies.
Returns:
List[Document]: List of Documents most similar to the query vector.
"""
return self._search(embedding=embedding, k=k,
use_maximal_marginal_relevance=False, return_score=False, **kwargs)
|
def similarity_search_by_vector(self, embedding: Union[List[float], np.
    ndarray], k: int=4, **kwargs: Any) ->List[Document]:
    """
    Return docs most similar to embedding vector.

    Examples:
        >>> data = vector_store.similarity_search_by_vector(
        ...     embedding=<your_embedding>,
        ...     k=<num_items_to_return>,
        ...     exec_option=<preferred_exec_option>,
        ... )

    Args:
        embedding (Union[List[float], np.ndarray]): Embedding to find
            similar docs.
        k (int): Number of Documents to return. Defaults to 4.
        **kwargs: Additional keyword arguments, including:
            filter (Union[Dict, Callable], optional): Pre-search filter.
                Either a key-value ``Dict`` on tensors of htype json
                (True if all key-value filters are satisfied, shaped as
                ``{"tensor_name": {"key": value}, ...}``) or any function
                compatible with `deeplake.filter`. Defaults to None.
            exec_option (str): Search execution backend, one of:
                - "python": pure-python, client-side; works with data
                  stored anywhere but discouraged for big datasets due to
                  potential memory issues. This is the default.
                - "compute_engine": performant C++ Deep Lake Compute
                  Engine, client-side; cannot be used with in-memory or
                  local datasets.
                - "tensor_db": fully-hosted Managed Tensor Database for
                  storage and query execution; only for data stored in
                  the Deep Lake Managed Database (created with
                  `runtime = {"db_engine": True}`).
            distance_metric (str): `L2` for Euclidean, `L1` for Nuclear,
                `max` for L-infinity distance, `cos` for cosine
                similarity, 'dot' for dot product. Defaults to `L2`.
            deep_memory (bool): Whether to rank with the Deep Memory
                model; when True the distance metric becomes the model's
                "deepmemory_distance". Defaults to False unless set at
                Vector Store initialization.

    Returns:
        List[Document]: List of Documents most similar to the query vector.
    """
    # Plain similarity ranking: no MMR re-ranking, no scores in the result.
    return self._search(
        embedding=embedding,
        k=k,
        use_maximal_marginal_relevance=False,
        return_score=False,
        **kwargs,
    )
|
Return docs most similar to embedding vector.
Examples:
>>> # Search using an embedding
>>> data = vector_store.similarity_search_by_vector(
... embedding=<your_embedding>,
... k=<num_items_to_return>,
... exec_option=<preferred_exec_option>,
... )
Args:
embedding (Union[List[float], np.ndarray]):
Embedding to find similar docs.
k (int): Number of Documents to return. Defaults to 4.
**kwargs: Additional keyword arguments including:
filter (Union[Dict, Callable], optional):
Additional filter before embedding search.
- ``Dict`` - Key-value search on tensors of htype json. True
if all key-value filters are satisfied.
Dict = {"tensor_name_1": {"key": value},
"tensor_name_2": {"key": value}}
- ``Function`` - Any function compatible with
`deeplake.filter`.
Defaults to None.
exec_option (str): Options for search execution include
"python", "compute_engine", or "tensor_db". Defaults to
"python".
- "python" - Pure-python implementation running on the client.
Can be used for data stored anywhere. WARNING: using this
option with big datasets is discouraged due to potential
memory issues.
- "compute_engine" - Performant C++ implementation of the Deep
Lake Compute Engine. Runs on the client and can be used for
any data stored in or connected to Deep Lake. It cannot be
used with in-memory or local datasets.
- "tensor_db" - Performant, fully-hosted Managed Tensor Database.
Responsible for storage and query execution. Only available
for data stored in the Deep Lake Managed Database.
To store datasets in this database, specify
`runtime = {"db_engine": True}` during dataset creation.
distance_metric (str): `L2` for Euclidean, `L1` for Nuclear,
`max` for L-infinity distance, `cos` for cosine similarity,
'dot' for dot product. Defaults to `L2`.
deep_memory (bool): Whether to use the Deep Memory model for improving
search results. Defaults to False if deep_memory is not specified
in the Vector Store initialization. If True, the distance metric
is set to "deepmemory_distance", which represents the metric with
which the model was trained. The search is performed using the Deep
Memory model. If False, the distance metric is set to "COS" or
whatever distance metric user specifies.
Returns:
List[Document]: List of Documents most similar to the query vector.
|
input_keys
|
"""Expect input key.
:meta private:
"""
return [self.input_key]
|
@property
def input_keys(self) ->List[str]:
    """Single expected input key for this chain.

    :meta private:
    """
    key = self.input_key
    return [key]
|
Expect input key.
:meta private:
|
load
|
"""Transcribes the audio file and loads the transcript into documents.
It uses the Google Cloud Speech-to-Text API to transcribe the audio file
and blocks until the transcription is finished.
"""
try:
from google.cloud.speech_v2 import RecognizeRequest
except ImportError as exc:
raise ImportError(
'Could not import google-cloud-speech python package. Please install it with `pip install google-cloud-speech`.'
) from exc
request = RecognizeRequest(recognizer=self._recognizer_path, config=self.
config, config_mask=self.config_mask)
if 'gs://' in self.file_path:
request.uri = self.file_path
else:
with open(self.file_path, 'rb') as f:
request.content = f.read()
response = self._client.recognize(request=request)
return [Document(page_content=result.alternatives[0].transcript, metadata={
'language_code': result.language_code, 'result_end_offset': result.
result_end_offset}) for result in response.results]
|
def load(self) ->List[Document]:
    """Transcribes the audio file and loads the transcript into documents.

    It uses the Google Cloud Speech-to-Text API to transcribe the audio file
    and blocks until the transcription is finished.
    """
    try:
        from google.cloud.speech_v2 import RecognizeRequest
    except ImportError as exc:
        raise ImportError(
            'Could not import google-cloud-speech python package. Please install it with `pip install google-cloud-speech`.'
            ) from exc
    recognize_request = RecognizeRequest(recognizer=self._recognizer_path,
        config=self.config, config_mask=self.config_mask)
    # GCS objects are referenced by URI; local files are sent inline.
    if 'gs://' in self.file_path:
        recognize_request.uri = self.file_path
    else:
        with open(self.file_path, 'rb') as audio_file:
            recognize_request.content = audio_file.read()
    response = self._client.recognize(request=recognize_request)
    documents = []
    for result in response.results:
        documents.append(Document(page_content=result.alternatives[0].
            transcript, metadata={'language_code': result.language_code,
            'result_end_offset': result.result_end_offset}))
    return documents
|
Transcribes the audio file and loads the transcript into documents.
It uses the Google Cloud Speech-to-Text API to transcribe the audio file
and blocks until the transcription is finished.
|
__init__
|
try:
from astrapy.db import AstraDB
except (ImportError, ModuleNotFoundError):
raise ImportError(
'Could not import a recent astrapy python package. Please install it with `pip install --upgrade astrapy`.'
)
if astra_db_client is not None:
if token is not None or api_endpoint is not None:
raise ValueError(
"You cannot pass 'astra_db_client' to AstraDB if passing 'token' and 'api_endpoint'."
)
self.filter = filter_criteria
self.projection = projection
self.find_options = find_options or {}
self.nb_prefetched = nb_prefetched
self.extraction_function = extraction_function
if astra_db_client is not None:
astra_db = astra_db_client
else:
astra_db = AstraDB(token=token, api_endpoint=api_endpoint, namespace=
namespace)
self.collection = astra_db.collection(collection_name)
|
def __init__(self, collection_name: str, token: Optional[str]=None,
    api_endpoint: Optional[str]=None, astra_db_client: Optional[Any]=None,
    namespace: Optional[str]=None, filter_criteria: Optional[Dict[str, Any]
    ]=None, projection: Optional[Dict[str, Any]]=None, find_options:
    Optional[Dict[str, Any]]=None, nb_prefetched: int=1000,
    extraction_function: Callable[[Dict], str]=json.dumps) ->None:
    """Set up a reader over the Astra DB collection ``collection_name``.

    Credentials come either from ``token``/``api_endpoint`` or from a
    pre-built ``astra_db_client`` — the two options are mutually exclusive.
    """
    try:
        from astrapy.db import AstraDB
    except (ImportError, ModuleNotFoundError):
        raise ImportError(
            'Could not import a recent astrapy python package. Please install it with `pip install --upgrade astrapy`.'
            )
    if astra_db_client is not None and (token is not None or
        api_endpoint is not None):
        raise ValueError(
            "You cannot pass 'astra_db_client' to AstraDB if passing 'token' and 'api_endpoint'."
            )
    self.filter = filter_criteria
    self.projection = projection
    self.find_options = find_options or {}
    self.nb_prefetched = nb_prefetched
    self.extraction_function = extraction_function
    database = (astra_db_client if astra_db_client is not None else
        AstraDB(token=token, api_endpoint=api_endpoint, namespace=namespace))
    self.collection = database.collection(collection_name)
| null |
test_all_imports
|
assert set(__all__) == set(EXPECTED_ALL)
|
def test_all_imports() ->None:
    """__all__ and EXPECTED_ALL must contain exactly the same names."""
    assert set(EXPECTED_ALL) == set(__all__)
| null |
_load_arxiv_from_universal_entry
|
from langchain.agents.load_tools import load_tools
tools = load_tools(['arxiv'], **kwargs)
assert len(tools) == 1, 'loaded more than 1 tool'
return tools[0]
|
def _load_arxiv_from_universal_entry(**kwargs: Any) ->BaseTool:
    """Load the single arxiv tool via the universal `load_tools` entry point."""
    from langchain.agents.load_tools import load_tools
    loaded = load_tools(['arxiv'], **kwargs)
    assert len(loaded) == 1, 'loaded more than 1 tool'
    return loaded[0]
| null |
get_schema
|
"""Returns the schema of the Neptune database"""
return self.schema
|
@property
def get_schema(self) ->str:
    """Cached schema string of the Neptune database."""
    schema_text = self.schema
    return schema_text
|
Returns the schema of the Neptune database
|
refresh_schema
|
"""
Refreshes the HugeGraph schema information.
"""
schema = self.client.schema()
vertex_schema = schema.getVertexLabels()
edge_schema = schema.getEdgeLabels()
relationships = schema.getRelations()
self.schema = f"""Node properties: {vertex_schema}
Edge properties: {edge_schema}
Relationships: {relationships}
"""
|
def refresh_schema(self) ->None:
    """
    Refreshes the HugeGraph schema information.
    """
    graph_schema = self.client.schema()
    vertex_labels = graph_schema.getVertexLabels()
    edge_labels = graph_schema.getEdgeLabels()
    relations = graph_schema.getRelations()
    self.schema = f"""Node properties: {vertex_labels}
    Edge properties: {edge_labels}
    Relationships: {relations}
    """
|
Refreshes the HugeGraph schema information.
|
combine_document_chain
|
"""Kept for backward compatibility."""
if isinstance(self.reduce_documents_chain, ReduceDocumentsChain):
return self.reduce_documents_chain.combine_documents_chain
else:
raise ValueError(
f'`reduce_documents_chain` is of type {type(self.reduce_documents_chain)} so it does not have this attribute.'
)
|
@property
def combine_document_chain(self) ->BaseCombineDocumentsChain:
    """Kept for backward compatibility."""
    reduce_chain = self.reduce_documents_chain
    if not isinstance(reduce_chain, ReduceDocumentsChain):
        raise ValueError(
            f'`reduce_documents_chain` is of type {type(reduce_chain)} so it does not have this attribute.'
            )
    return reduce_chain.combine_documents_chain
|
Kept for backward compatibility.
|
test_all_imports
|
assert set(__all__) == set(EXPECTED_ALL)
|
def test_all_imports() ->None:
    """Verify the module's public ``__all__`` matches the expected export list."""
    exported, expected = set(__all__), set(EXPECTED_ALL)
    assert exported == expected
| null |
from_llm
|
"""Initialize from LLM."""
qa_chain = LLMChain(llm=llm, prompt=qa_prompt)
cypher_generation_chain = LLMChain(llm=llm, prompt=cypher_prompt)
return cls(qa_chain=qa_chain, cypher_generation_chain=
cypher_generation_chain, **kwargs)
|
@classmethod
def from_llm(cls, llm: BaseLanguageModel, *, qa_prompt: BasePromptTemplate=
    CYPHER_QA_PROMPT, cypher_prompt: BasePromptTemplate=
    KUZU_GENERATION_PROMPT, **kwargs: Any) ->KuzuQAChain:
    """Initialize from LLM.

    Builds both internal LLMChains (answer generation and Cypher
    generation) from a single shared language model.

    Args:
        llm: Language model shared by both chains.
        qa_prompt: Prompt used to answer from query results.
        cypher_prompt: Prompt used to generate Kuzu Cypher queries.
        **kwargs: Forwarded to the ``KuzuQAChain`` constructor.
    """
    qa_chain = LLMChain(llm=llm, prompt=qa_prompt)
    cypher_generation_chain = LLMChain(llm=llm, prompt=cypher_prompt)
    return cls(qa_chain=qa_chain, cypher_generation_chain=
        cypher_generation_chain, **kwargs)
|
Initialize from LLM.
|
set_active_branch
|
"""Equivalent to `git checkout branch_name` for this Agent.
Clones formatting from Github.
Returns an Error (as a string) if branch doesn't exist.
"""
curr_branches = [branch.name for branch in self.github_repo_instance.
get_branches()]
if branch_name in curr_branches:
self.active_branch = branch_name
return f'Switched to branch `{branch_name}`'
else:
return (
f'Error {branch_name} does not exist,in repo with current branches: {str(curr_branches)}'
)
|
def set_active_branch(self, branch_name: str) ->str:
    """Equivalent to `git checkout branch_name` for this Agent.
    Clones formatting from Github.

    Args:
        branch_name: Name of an existing branch in the repository.

    Returns:
        A confirmation string on success, or an error string listing the
        repository's current branches if ``branch_name`` does not exist.
    """
    curr_branches = [branch.name for branch in self.github_repo_instance.
        get_branches()]
    if branch_name in curr_branches:
        self.active_branch = branch_name
        return f'Switched to branch `{branch_name}`'
    # Listing the available branches lets the calling agent self-correct.
    # (Fixed missing space after the comma in the original message.)
    return (
        f'Error {branch_name} does not exist, in repo with current branches: {str(curr_branches)}'
        )
|
Equivalent to `git checkout branch_name` for this Agent.
Clones formatting from Github.
Returns an Error (as a string) if branch doesn't exist.
|
_load_single_chat_session
|
"""Load a single chat session from a file.
Args:
file_path (str): Path to the chat file.
Returns:
ChatSession: The loaded chat session.
"""
with open(file_path, 'r', encoding='utf-8') as file:
txt = file.read()
chat_lines: List[str] = []
current_message = ''
for line in txt.split('\n'):
if self._message_line_regex.match(line):
if current_message:
chat_lines.append(current_message)
current_message = line
else:
current_message += ' ' + line.strip()
if current_message:
chat_lines.append(current_message)
results: List[Union[HumanMessage, AIMessage]] = []
for line in chat_lines:
result = self._message_line_regex.match(line.strip())
if result:
timestamp, sender, text = result.groups()
if not self._ignore_lines.match(text.strip()):
results.append(HumanMessage(role=sender, content=text,
additional_kwargs={'sender': sender, 'events': [{
'message_time': timestamp}]}))
else:
logger.debug(f'Could not parse line: {line}')
return ChatSession(messages=results)
|
def _load_single_chat_session(self, file_path: str) ->ChatSession:
    """Load a single chat session from a file.

    Lines are first coalesced into whole messages (a line that does not
    match ``_message_line_regex`` is treated as a continuation of the
    previous message), then each merged message is parsed.

    Args:
        file_path (str): Path to the chat file.
    Returns:
        ChatSession: The loaded chat session.
    """
    with open(file_path, 'r', encoding='utf-8') as file:
        txt = file.read()
    chat_lines: List[str] = []
    current_message = ''
    # Pass 1: merge continuation lines into complete message strings.
    for line in txt.split('\n'):
        if self._message_line_regex.match(line):
            if current_message:
                chat_lines.append(current_message)
            current_message = line
        else:
            current_message += ' ' + line.strip()
    if current_message:
        chat_lines.append(current_message)
    # NOTE(review): the annotation allows AIMessage but only HumanMessage
    # instances are ever appended below.
    results: List[Union[HumanMessage, AIMessage]] = []
    # Pass 2: parse each merged message into (timestamp, sender, text).
    for line in chat_lines:
        result = self._message_line_regex.match(line.strip())
        if result:
            timestamp, sender, text = result.groups()
            # Drop lines matching _ignore_lines (presumably system/noise
            # lines — TODO confirm the pattern's intent).
            if not self._ignore_lines.match(text.strip()):
                results.append(HumanMessage(role=sender, content=text,
                    additional_kwargs={'sender': sender, 'events': [{
                    'message_time': timestamp}]}))
        else:
            logger.debug(f'Could not parse line: {line}')
    return ChatSession(messages=results)
|
Load a single chat session from a file.
Args:
file_path (str): Path to the chat file.
Returns:
ChatSession: The loaded chat session.
|
_create_chat_generation_chunk
|
chunk = _convert_delta_to_message_chunk({'content': data.get('text', '')},
default_chunk_class)
finish_reason = data.get('finish_reason')
generation_info = dict(finish_reason=finish_reason
) if finish_reason is not None else None
default_chunk_class = chunk.__class__
chunk = ChatGenerationChunk(message=chunk, generation_info=generation_info)
return chunk, default_chunk_class
|
def _create_chat_generation_chunk(self, data: Mapping[str, Any],
    default_chunk_class):
    """Build a ChatGenerationChunk from one streamed response payload.

    Returns a ``(chunk, default_chunk_class)`` pair; the class of the
    produced message becomes the default for subsequent chunks.
    """
    chunk = _convert_delta_to_message_chunk({'content': data.get('text', ''
        )}, default_chunk_class)
    finish_reason = data.get('finish_reason')
    # Only attach generation_info when the stream reports a finish reason.
    generation_info = dict(finish_reason=finish_reason
        ) if finish_reason is not None else None
    default_chunk_class = chunk.__class__
    chunk = ChatGenerationChunk(message=chunk, generation_info=generation_info)
    return chunk, default_chunk_class
| null |
_default_params
|
"""Get the default parameters for calling vllm."""
return {'n': self.n, 'best_of': self.best_of, 'max_tokens': self.
max_new_tokens, 'top_k': self.top_k, 'top_p': self.top_p, 'temperature':
self.temperature, 'presence_penalty': self.presence_penalty,
'frequency_penalty': self.frequency_penalty, 'stop': self.stop,
'ignore_eos': self.ignore_eos, 'use_beam_search': self.use_beam_search,
'logprobs': self.logprobs}
|
@property
def _default_params(self) ->Dict[str, Any]:
"""Get the default parameters for calling vllm."""
return {'n': self.n, 'best_of': self.best_of, 'max_tokens': self.
max_new_tokens, 'top_k': self.top_k, 'top_p': self.top_p,
'temperature': self.temperature, 'presence_penalty': self.
presence_penalty, 'frequency_penalty': self.frequency_penalty,
'stop': self.stop, 'ignore_eos': self.ignore_eos, 'use_beam_search':
self.use_beam_search, 'logprobs': self.logprobs}
|
Get the default parameters for calling vllm.
|
test_get_final_answer
|
"""Test getting final answer."""
llm_output = """Thought: I can now answer the question
Final Answer: 1994"""
action, action_input = get_action_and_input(llm_output)
assert action == 'Final Answer'
assert action_input == '1994'
|
def test_get_final_answer() ->None:
    """Test getting final answer."""
    llm_output = (
        'Thought: I can now answer the question\n'
        'Final Answer: 1994')
    action, action_input = get_action_and_input(llm_output)
    assert action == 'Final Answer'
    assert action_input == '1994'
|
Test getting final answer.
|
_default_params
|
"""Get the default parameters for calling OpenAI API."""
if is_openai_v1():
return super()._default_params
else:
return {**super()._default_params, 'engine': self.deployment_name}
|
@property
def _default_params(self) ->Dict[str, Any]:
    """Get the default parameters for calling OpenAI API.

    On the legacy (pre-v1) openai SDK the Azure deployment name must be
    passed as an extra ``engine`` parameter; on v1 the parent's params are
    returned unchanged (deployment presumably configured on the client —
    TODO confirm).
    """
    if is_openai_v1():
        return super()._default_params
    else:
        return {**super()._default_params, 'engine': self.deployment_name}
|
Get the default parameters for calling OpenAI API.
|
from_documents
|
"""Create a Clarifai vectorstore from a list of documents.
Args:
user_id (str): User ID.
app_id (str): App ID.
documents (List[Document]): List of documents to add.
number_of_docs (Optional[int]): Number of documents to return
during vector search. Defaults to None.
Returns:
Clarifai: Clarifai vectorstore.
"""
texts = [doc.page_content for doc in documents]
metadatas = [doc.metadata for doc in documents]
return cls.from_texts(user_id=user_id, app_id=app_id, texts=texts,
number_of_docs=number_of_docs, pat=pat, metadatas=metadatas)
|
@classmethod
def from_documents(cls, documents: List[Document], embedding: Optional[
    Embeddings]=None, user_id: Optional[str]=None, app_id: Optional[str]=
    None, number_of_docs: Optional[int]=None, pat: Optional[str]=None, **
    kwargs: Any) ->Clarifai:
    """Create a Clarifai vectorstore from a list of documents.

    Splits each document into its text and metadata and delegates to
    ``from_texts``.

    Args:
        documents (List[Document]): List of documents to add.
        embedding (Optional[Embeddings]): Accepted for interface
            compatibility but not forwarded — TODO confirm this is
            intentional.
        user_id (str): User ID.
        app_id (str): App ID.
        number_of_docs (Optional[int]): Number of documents to return
            during vector search. Defaults to None.
        pat (Optional[str]): Personal access token.
        **kwargs: NOTE(review): accepted but silently dropped; not
            forwarded to ``from_texts``.
    Returns:
        Clarifai: Clarifai vectorstore.
    """
    texts = [doc.page_content for doc in documents]
    metadatas = [doc.metadata for doc in documents]
    return cls.from_texts(user_id=user_id, app_id=app_id, texts=texts,
        number_of_docs=number_of_docs, pat=pat, metadatas=metadatas)
|
Create a Clarifai vectorstore from a list of documents.
Args:
user_id (str): User ID.
app_id (str): App ID.
documents (List[Document]): List of documents to add.
number_of_docs (Optional[int]): Number of documents to return
during vector search. Defaults to None.
Returns:
Clarifai: Clarifai vectorstore.
|
test_loading_jinja_from_YAML
|
"""Test that loading jinja2 format prompts from YAML raises ValueError."""
prompt_path = EXAMPLE_DIR / 'jinja_injection_prompt.yaml'
with pytest.raises(ValueError, match='.*can lead to arbitrary code execution.*'
):
load_prompt(prompt_path)
|
def test_loading_jinja_from_YAML() ->None:
    """Test that loading jinja2 format prompts from YAML raises ValueError."""
    prompt_path = EXAMPLE_DIR / 'jinja_injection_prompt.yaml'
    # jinja2 templates can execute arbitrary code, so the loader must refuse.
    with pytest.raises(ValueError, match=
        '.*can lead to arbitrary code execution.*'):
        load_prompt(prompt_path)
|
Test that loading jinja2 format prompts from YAML raises ValueError.
|
on_agent_finish
|
self.on_agent_finish_common()
|
def on_agent_finish(self, *args: Any, **kwargs: Any) ->Any:
    """Callback hook: record an agent-finish event via the common handler."""
    self.on_agent_finish_common()
| null |
add_texts
|
"""Add text to the Milvus store
Args:
texts (List[str]): The text
metadatas (List[dict]): Metadata dicts, must line up with existing store
"""
self.store.add_texts(texts, metadatas)
|
def add_texts(self, texts: List[str], metadatas: Optional[List[dict]]=None
    ) ->None:
    """Insert texts into the underlying Milvus store.

    Args:
        texts (List[str]): The text
        metadatas (List[dict]): Metadata dicts, must line up with existing store
    """
    underlying = self.store
    underlying.add_texts(texts, metadatas)
|
Add text to the Milvus store
Args:
texts (List[str]): The text
metadatas (List[dict]): Metadata dicts, must line up with existing store
|
ToSelectFrom
|
if not isinstance(anything, list):
raise ValueError('ToSelectFrom must be a list to select from')
return _ToSelectFrom(anything)
|
def ToSelectFrom(anything: Any) ->_ToSelectFrom:
    """Wrap a list of candidates in a ``_ToSelectFrom`` marker.

    Raises:
        ValueError: if ``anything`` is not a list.
    """
    if isinstance(anything, list):
        return _ToSelectFrom(anything)
    raise ValueError('ToSelectFrom must be a list to select from')
| null |
__getattr__
|
if name == 'AlphaVantageAPIWrapper':
return _import_alpha_vantage()
elif name == 'ApifyWrapper':
return _import_apify()
elif name == 'ArceeWrapper':
return _import_arcee()
elif name == 'ArxivAPIWrapper':
return _import_arxiv()
elif name == 'LambdaWrapper':
return _import_awslambda()
elif name == 'BibtexparserWrapper':
return _import_bibtex()
elif name == 'BingSearchAPIWrapper':
return _import_bing_search()
elif name == 'BraveSearchWrapper':
return _import_brave_search()
elif name == 'DuckDuckGoSearchAPIWrapper':
return _import_duckduckgo_search()
elif name == 'GoogleLensAPIWrapper':
return _import_google_lens()
elif name == 'GoldenQueryAPIWrapper':
return _import_golden_query()
elif name == 'GoogleJobsAPIWrapper':
return _import_google_jobs()
elif name == 'GoogleScholarAPIWrapper':
return _import_google_scholar()
elif name == 'GoogleFinanceAPIWrapper':
return _import_google_finance()
elif name == 'GoogleTrendsAPIWrapper':
return _import_google_trends()
elif name == 'GooglePlacesAPIWrapper':
return _import_google_places_api()
elif name == 'GoogleSearchAPIWrapper':
return _import_google_search()
elif name == 'GoogleSerperAPIWrapper':
return _import_google_serper()
elif name == 'GraphQLAPIWrapper':
return _import_graphql()
elif name == 'JiraAPIWrapper':
return _import_jira()
elif name == 'MaxComputeAPIWrapper':
return _import_max_compute()
elif name == 'MerriamWebsterAPIWrapper':
return _import_merriam_webster()
elif name == 'MetaphorSearchAPIWrapper':
return _import_metaphor_search()
elif name == 'NasaAPIWrapper':
return _import_nasa()
elif name == 'OpenWeatherMapAPIWrapper':
return _import_openweathermap()
elif name == 'OutlineAPIWrapper':
return _import_outline()
elif name == 'Portkey':
return _import_portkey()
elif name == 'PowerBIDataset':
return _import_powerbi()
elif name == 'PubMedAPIWrapper':
return _import_pubmed()
elif name == 'PythonREPL':
return _import_python()
elif name == 'SceneXplainAPIWrapper':
return _import_scenexplain()
elif name == 'SearchApiAPIWrapper':
return _import_searchapi()
elif name == 'SearxSearchWrapper':
return _import_searx_search()
elif name == 'SerpAPIWrapper':
return _import_serpapi()
elif name == 'SparkSQL':
return _import_spark_sql()
elif name == 'StackExchangeAPIWrapper':
return _import_stackexchange()
elif name == 'SQLDatabase':
return _import_sql_database()
elif name == 'SteamWebAPIWrapper':
return _import_steam_webapi()
elif name == 'TensorflowDatasets':
return _import_tensorflow_datasets()
elif name == 'TwilioAPIWrapper':
return _import_twilio()
elif name == 'WikipediaAPIWrapper':
return _import_wikipedia()
elif name == 'WolframAlphaAPIWrapper':
return _import_wolfram_alpha()
elif name == 'ZapierNLAWrapper':
return _import_zapier()
else:
raise AttributeError(f'Could not find: {name}')
|
def __getattr__(name: str) ->Any:
    """Lazily import and return the wrapper class named ``name``.

    Implements PEP 562 module-level ``__getattr__`` so each utility's
    (often heavy or optional) dependencies are imported only when the
    attribute is first accessed.

    Raises:
        AttributeError: if ``name`` is not a known utility.
    """
    # Dispatch table: public attribute name -> lazy importer. This replaces
    # the original 44-branch elif chain with a single O(1) lookup.
    importers = {
        'AlphaVantageAPIWrapper': _import_alpha_vantage,
        'ApifyWrapper': _import_apify,
        'ArceeWrapper': _import_arcee,
        'ArxivAPIWrapper': _import_arxiv,
        'LambdaWrapper': _import_awslambda,
        'BibtexparserWrapper': _import_bibtex,
        'BingSearchAPIWrapper': _import_bing_search,
        'BraveSearchWrapper': _import_brave_search,
        'DuckDuckGoSearchAPIWrapper': _import_duckduckgo_search,
        'GoogleLensAPIWrapper': _import_google_lens,
        'GoldenQueryAPIWrapper': _import_golden_query,
        'GoogleJobsAPIWrapper': _import_google_jobs,
        'GoogleScholarAPIWrapper': _import_google_scholar,
        'GoogleFinanceAPIWrapper': _import_google_finance,
        'GoogleTrendsAPIWrapper': _import_google_trends,
        'GooglePlacesAPIWrapper': _import_google_places_api,
        'GoogleSearchAPIWrapper': _import_google_search,
        'GoogleSerperAPIWrapper': _import_google_serper,
        'GraphQLAPIWrapper': _import_graphql,
        'JiraAPIWrapper': _import_jira,
        'MaxComputeAPIWrapper': _import_max_compute,
        'MerriamWebsterAPIWrapper': _import_merriam_webster,
        'MetaphorSearchAPIWrapper': _import_metaphor_search,
        'NasaAPIWrapper': _import_nasa,
        'OpenWeatherMapAPIWrapper': _import_openweathermap,
        'OutlineAPIWrapper': _import_outline,
        'Portkey': _import_portkey,
        'PowerBIDataset': _import_powerbi,
        'PubMedAPIWrapper': _import_pubmed,
        'PythonREPL': _import_python,
        'SceneXplainAPIWrapper': _import_scenexplain,
        'SearchApiAPIWrapper': _import_searchapi,
        'SearxSearchWrapper': _import_searx_search,
        'SerpAPIWrapper': _import_serpapi,
        'SparkSQL': _import_spark_sql,
        'StackExchangeAPIWrapper': _import_stackexchange,
        'SQLDatabase': _import_sql_database,
        'SteamWebAPIWrapper': _import_steam_webapi,
        'TensorflowDatasets': _import_tensorflow_datasets,
        'TwilioAPIWrapper': _import_twilio,
        'WikipediaAPIWrapper': _import_wikipedia,
        'WolframAlphaAPIWrapper': _import_wolfram_alpha,
        'ZapierNLAWrapper': _import_zapier,
    }
    importer = importers.get(name)
    if importer is None:
        raise AttributeError(f'Could not find: {name}')
    return importer()
| null |
similarity_search_by_vector
|
"""Perform a similarity search with StarRocks by vectors
Args:
query (str): query string
k (int, optional): Top K neighbors to retrieve. Defaults to 4.
where_str (Optional[str], optional): where condition string.
Defaults to None.
NOTE: Please do not let end-user to fill this and always be aware
of SQL injection. When dealing with metadatas, remember to
use `{self.metadata_column}.attribute` instead of `attribute`
alone. The default name for it is `metadata`.
Returns:
List[Document]: List of (Document, similarity)
"""
q_str = self._build_query_sql(embedding, k, where_str)
try:
return [Document(page_content=r[self.config.column_map['document']],
metadata=json.loads(r[self.config.column_map['metadata']])) for r in
get_named_result(self.connection, q_str)]
except Exception as e:
logger.error(f'\x1b[91m\x1b[1m{type(e)}\x1b[0m \x1b[95m{str(e)}\x1b[0m')
return []
|
def similarity_search_by_vector(self, embedding: List[float], k: int=4,
    where_str: Optional[str]=None, **kwargs: Any) ->List[Document]:
    """Perform a similarity search with StarRocks by vectors

    Args:
        embedding (List[float]): query embedding vector
        k (int, optional): Top K neighbors to retrieve. Defaults to 4.
        where_str (Optional[str], optional): where condition string.
            Defaults to None.
            NOTE: Please do not let end-user to fill this and always be aware
            of SQL injection. When dealing with metadatas, remember to
            use `{self.metadata_column}.attribute` instead of `attribute`
            alone. The default name for it is `metadata`.
    Returns:
        List[Document]: Documents most similar to the query vector.
    """
    q_str = self._build_query_sql(embedding, k, where_str)
    try:
        return [Document(page_content=r[self.config.column_map['document']],
            metadata=json.loads(r[self.config.column_map['metadata']])) for
            r in get_named_result(self.connection, q_str)]
    except Exception as e:
        # NOTE(review): broad catch turns any query/JSON failure into an
        # empty result; errors surface only through this (ANSI-colored) log.
        logger.error(f'\x1b[91m\x1b[1m{type(e)}\x1b[0m \x1b[95m{str(e)}\x1b[0m'
            )
        return []
|
Perform a similarity search with StarRocks by vectors
Args:
query (str): query string
k (int, optional): Top K neighbors to retrieve. Defaults to 4.
where_str (Optional[str], optional): where condition string.
Defaults to None.
NOTE: Please do not let end-user to fill this and always be aware
of SQL injection. When dealing with metadatas, remember to
use `{self.metadata_column}.attribute` instead of `attribute`
alone. The default name for it is `metadata`.
Returns:
List[Document]: List of (Document, similarity)
|
test_visit_comparison_range_gte
|
comp = Comparison(comparator=Comparator.GTE, attribute='foo', value=1)
expected = {'range': {'metadata.foo': {'gte': 1}}}
actual = DEFAULT_TRANSLATOR.visit_comparison(comp)
assert expected == actual
|
def test_visit_comparison_range_gte() ->None:
    """A GTE comparison must translate to a `range` filter on `metadata.<attr>`."""
    comp = Comparison(comparator=Comparator.GTE, attribute='foo', value=1)
    expected = {'range': {'metadata.foo': {'gte': 1}}}
    actual = DEFAULT_TRANSLATOR.visit_comparison(comp)
    assert expected == actual
| null |
similarity_search_by_vector
|
"""Return docs most similar to embedding vector.
Args:
embedding: Embedding to look up documents similar to.
k: Number of Documents to return. Defaults to 4.
Returns:
List of Documents most similar to the query vector.
"""
points = self._search_points(embedding, k=k)
return [Document(page_content=p['metadata']['text'], metadata=p['metadata']
) for p in points]
|
def similarity_search_by_vector(self, embedding: List[float], k: int=4, **
    kwargs: Any) ->List[Document]:
    """Return docs most similar to embedding vector.
    Args:
        embedding: Embedding to look up documents similar to.
        k: Number of Documents to return. Defaults to 4.
    Returns:
        List of Documents most similar to the query vector.
    """
    points = self._search_points(embedding, k=k)
    # Each point's original text lives under metadata['text']; the full
    # metadata dict is preserved on the Document.
    return [Document(page_content=p['metadata']['text'], metadata=p[
        'metadata']) for p in points]
|
Return docs most similar to embedding vector.
Args:
embedding: Embedding to look up documents similar to.
k: Number of Documents to return. Defaults to 4.
Returns:
List of Documents most similar to the query vector.
|
_identifying_params
|
"""Get the identifying parameters."""
return {**{'model_name': self.model_name}, **self._default_params}
|
@property
def _identifying_params(self) ->Mapping[str, Any]:
"""Get the identifying parameters."""
return {**{'model_name': self.model_name}, **self._default_params}
|
Get the identifying parameters.
|
on_llm_error
|
"""Run when LLM errors."""
self.step += 1
self.errors += 1
|
def on_llm_error(self, error: BaseException, **kwargs: Any) ->None:
    """Run when LLM errors: advance the step counter and tally the error."""
    self.step, self.errors = self.step + 1, self.errors + 1
|
Run when LLM errors.
|
get_tools
|
"""Get the tools in the toolkit."""
warn_deprecated(since='0.0.319', message=
'This tool will be deprecated on 2023-11-17. See https://nla.zapier.com/sunset/ for details'
)
return self.tools
|
def get_tools(self) ->List[BaseTool]:
    """Get the tools in the toolkit.

    Emits a deprecation warning: the Zapier NLA tools were sunset on
    2023-11-17 (see https://nla.zapier.com/sunset/).
    """
    warn_deprecated(since='0.0.319', message=
        'This tool will be deprecated on 2023-11-17. See https://nla.zapier.com/sunset/ for details'
        )
    return self.tools
|
Get the tools in the toolkit.
|
completion_with_retry
|
"""Use tenacity to retry the completion call."""
import fireworks.client
retry_decorator = _create_retry_decorator(llm, run_manager=run_manager)
@conditional_decorator(use_retry, retry_decorator)
def _completion_with_retry(**kwargs: Any) ->Any:
"""Use tenacity to retry the completion call."""
return fireworks.client.ChatCompletion.create(**kwargs)
return _completion_with_retry(**kwargs)
|
def completion_with_retry(llm: ChatFireworks, use_retry: bool, *,
    run_manager: Optional[CallbackManagerForLLMRun]=None, **kwargs: Any) ->Any:
    """Use tenacity to retry the completion call.

    Args:
        llm: ChatFireworks instance whose retry policy is used.
        use_retry: When False the retry decorator is skipped and the call
            is made exactly once.
        run_manager: Optional callback manager passed to the retry hooks.
        **kwargs: Forwarded verbatim to
            ``fireworks.client.ChatCompletion.create``.
    """
    import fireworks.client
    retry_decorator = _create_retry_decorator(llm, run_manager=run_manager)
    # Decorator applied conditionally so callers can opt out of retries.
    @conditional_decorator(use_retry, retry_decorator)
    def _completion_with_retry(**kwargs: Any) ->Any:
        """Inner call actually hitting the Fireworks chat-completion API."""
        return fireworks.client.ChatCompletion.create(**kwargs)
    return _completion_with_retry(**kwargs)
|
Use tenacity to retry the completion call.
|
test_neo4jvector_missing_keyword
|
"""Test hybrid search with missing keyword_index_search."""
text_embeddings = FakeEmbeddingsWithOsDimension().embed_documents(texts)
text_embedding_pairs = list(zip(texts, text_embeddings))
docsearch = Neo4jVector.from_embeddings(text_embeddings=
text_embedding_pairs, embedding=FakeEmbeddingsWithOsDimension(), url=
url, username=username, password=password, pre_delete_collection=True)
try:
Neo4jVector.from_existing_index(embedding=FakeEmbeddingsWithOsDimension
(), url=url, username=username, password=password, index_name=
'vector', search_type=SearchType.HYBRID)
except ValueError as e:
assert str(e
) == 'keyword_index name has to be specified when using hybrid search option'
drop_vector_indexes(docsearch)
|
def test_neo4jvector_missing_keyword() ->None:
    """Test hybrid search with missing keyword_index_search."""
    text_embeddings = FakeEmbeddingsWithOsDimension().embed_documents(texts)
    text_embedding_pairs = list(zip(texts, text_embeddings))
    docsearch = Neo4jVector.from_embeddings(text_embeddings=
        text_embedding_pairs, embedding=FakeEmbeddingsWithOsDimension(),
        url=url, username=username, password=password,
        pre_delete_collection=True)
    # NOTE(review): if no ValueError is raised the test passes silently;
    # consider pytest.raises to assert the exception actually occurs.
    try:
        Neo4jVector.from_existing_index(embedding=
            FakeEmbeddingsWithOsDimension(), url=url, username=username,
            password=password, index_name='vector', search_type=SearchType.
            HYBRID)
    except ValueError as e:
        assert str(e
            ) == 'keyword_index name has to be specified when using hybrid search option'
    drop_vector_indexes(docsearch)
|
Test hybrid search with missing keyword_index_search.
|
test_redis_as_retriever
|
texts = ['foo', 'foo', 'foo', 'foo', 'bar']
docsearch = Redis.from_texts(texts, ConsistentFakeEmbeddings(), redis_url=
TEST_REDIS_URL)
retriever = docsearch.as_retriever(search_type='similarity', search_kwargs=
{'k': 3})
results = retriever.get_relevant_documents('foo')
assert len(results) == 3
assert all([(d.page_content == 'foo') for d in results])
assert drop(docsearch.index_name)
|
def test_redis_as_retriever() ->None:
    """A Redis-backed retriever should honor search_kwargs['k']."""
    texts = ['foo', 'foo', 'foo', 'foo', 'bar']
    docsearch = Redis.from_texts(texts, ConsistentFakeEmbeddings(),
        redis_url=TEST_REDIS_URL)
    retriever = docsearch.as_retriever(search_type='similarity',
        search_kwargs={'k': 3})
    results = retriever.get_relevant_documents('foo')
    assert len(results) == 3
    assert all([(d.page_content == 'foo') for d in results])
    # Clean up; drop() is expected to be truthy on success.
    assert drop(docsearch.index_name)
| null |
embed_documents
|
"""Call out to Gradient's embedding endpoint.
Args:
texts: The list of texts to embed.
Returns:
List of embeddings, one for each text.
"""
inputs = [{'input': text} for text in texts]
result = self.client.embed(inputs=inputs).embeddings
return [e.embedding for e in result]
|
def embed_documents(self, texts: List[str]) ->List[List[float]]:
    """Call out to Gradient's embedding endpoint.
    Args:
        texts: The list of texts to embed.
    Returns:
        List of embeddings, one for each text.
    """
    payload = [{'input': text} for text in texts]
    response = self.client.embed(inputs=payload)
    return [item.embedding for item in response.embeddings]
|
Call out to Gradient's embedding endpoint.
Args:
texts: The list of texts to embed.
Returns:
List of embeddings, one for each text.
|
InputType
|
"""The type of input this runnable accepts specified as a type annotation."""
for cls in self.__class__.__orig_bases__:
type_args = get_args(cls)
if type_args and len(type_args) == 2:
return type_args[0]
raise TypeError(
f"Runnable {self.get_name()} doesn't have an inferable InputType. Override the InputType property to specify the input type."
)
|
@property
def InputType(self) ->Type[Input]:
    """The type of input this runnable accepts specified as a type annotation.

    Scans ``__orig_bases__`` for a generic base with exactly two type
    arguments (Input, Output) and returns the first one.

    Raises:
        TypeError: if no such base exists.
    """
    for cls in self.__class__.__orig_bases__:
        type_args = get_args(cls)
        if type_args and len(type_args) == 2:
            return type_args[0]
    raise TypeError(
        f"Runnable {self.get_name()} doesn't have an inferable InputType. Override the InputType property to specify the input type."
        )
|
The type of input this runnable accepts specified as a type annotation.
|
test_json_validity_evaluator_evaluation_name
|
assert json_validity_evaluator.evaluation_name == 'json_validity'
|
def test_json_validity_evaluator_evaluation_name(json_validity_evaluator:
    JsonValidityEvaluator) ->None:
    """The evaluator must expose 'json_validity' as its evaluation_name."""
    assert json_validity_evaluator.evaluation_name == 'json_validity'
| null |
load
|
"""Load documents."""
try:
from youtube_transcript_api import NoTranscriptFound, TranscriptsDisabled, YouTubeTranscriptApi
except ImportError:
raise ImportError(
'Could not import youtube_transcript_api python package. Please install it with `pip install youtube-transcript-api`.'
)
metadata = {'source': self.video_id}
if self.add_video_info:
video_info = self._get_video_info()
metadata.update(video_info)
try:
transcript_list = YouTubeTranscriptApi.list_transcripts(self.video_id)
except TranscriptsDisabled:
return []
try:
transcript = transcript_list.find_transcript(self.language)
except NoTranscriptFound:
transcript = transcript_list.find_transcript(['en'])
if self.translation is not None:
transcript = transcript.translate(self.translation)
transcript_pieces = transcript.fetch()
transcript = ' '.join([t['text'].strip(' ') for t in transcript_pieces])
return [Document(page_content=transcript, metadata=metadata)]
|
def load(self) ->List[Document]:
    """Load documents.

    Fetches the YouTube transcript for ``self.video_id`` and returns it as
    a single Document; returns an empty list when transcripts are disabled
    for the video.
    """
    try:
        from youtube_transcript_api import NoTranscriptFound, TranscriptsDisabled, YouTubeTranscriptApi
    except ImportError:
        raise ImportError(
            'Could not import youtube_transcript_api python package. Please install it with `pip install youtube-transcript-api`.'
            )
    metadata = {'source': self.video_id}
    if self.add_video_info:
        # Enrich metadata with whatever _get_video_info returns
        # (keys not visible here — TODO confirm).
        video_info = self._get_video_info()
        metadata.update(video_info)
    try:
        transcript_list = YouTubeTranscriptApi.list_transcripts(self.video_id)
    except TranscriptsDisabled:
        return []
    try:
        transcript = transcript_list.find_transcript(self.language)
    except NoTranscriptFound:
        # Fall back to English when no requested language is available.
        transcript = transcript_list.find_transcript(['en'])
    if self.translation is not None:
        transcript = transcript.translate(self.translation)
    transcript_pieces = transcript.fetch()
    # Flatten all transcript snippets into one space-joined text blob.
    transcript = ' '.join([t['text'].strip(' ') for t in transcript_pieces])
    return [Document(page_content=transcript, metadata=metadata)]
|
Load documents.
|
_get_prompt_input_key
|
"""Get the input key for the prompt."""
if self.input_key is None:
return get_prompt_input_key(inputs, self.memory_variables)
return self.input_key
|
def _get_prompt_input_key(self, inputs: Dict[str, Any]) ->str:
    """Resolve which input key feeds the prompt.

    An explicitly configured ``input_key`` wins; otherwise it is inferred
    from ``inputs`` and the memory variables.
    """
    if self.input_key is not None:
        return self.input_key
    return get_prompt_input_key(inputs, self.memory_variables)
|
Get the input key for the prompt.
|
_run
|
"""Use the tool."""
elevenlabs = _import_elevenlabs()
try:
speech = elevenlabs.generate(text=query, model=self.model)
with tempfile.NamedTemporaryFile(mode='bx', suffix='.wav', delete=False
) as f:
f.write(speech)
return f.name
except Exception as e:
raise RuntimeError(f'Error while running ElevenLabsText2SpeechTool: {e}')
|
def _run(self, query: str, run_manager: Optional[CallbackManagerForToolRun]
    =None) ->str:
    """Use the tool.

    Synthesizes ``query`` to speech with ElevenLabs and writes the audio
    to a temporary file.

    Returns:
        Path of the temporary audio file.

    Raises:
        RuntimeError: wrapping any failure from the ElevenLabs client.
    """
    elevenlabs = _import_elevenlabs()
    try:
        speech = elevenlabs.generate(text=query, model=self.model)
        # NOTE(review): written with a .wav suffix, but the client may emit
        # a different container format — confirm elevenlabs.generate output.
        # delete=False so the caller can consume the file after close.
        with tempfile.NamedTemporaryFile(mode='bx', suffix='.wav', delete=False
            ) as f:
            f.write(speech)
        return f.name
    except Exception as e:
        raise RuntimeError(
            f'Error while running ElevenLabsText2SpeechTool: {e}')
|
Use the tool.
|
emit_warning
|
"""Emit the warning."""
warn_deprecated(since, message=_message, name=_name, alternative=
_alternative, pending=_pending, obj_type=_obj_type, addendum=_addendum,
removal=removal)
|
def emit_warning() ->None:
    """Emit the warning.

    Closure over the surrounding decorator's configuration (``since``,
    ``_message``, ``_name``, ...); forwards it all to ``warn_deprecated``.
    """
    warn_deprecated(since, message=_message, name=_name, alternative=
        _alternative, pending=_pending, obj_type=_obj_type, addendum=
        _addendum, removal=removal)
|
Emit the warning.
|
combined_text
|
"""Combine a ResultItem title and excerpt into a single string.
Args:
item: the ResultItem of a Kendra search.
Returns:
A combined text of the title and excerpt of the given item.
"""
text = ''
title = item.get_title()
if title:
text += f'Document Title: {title}\n'
excerpt = clean_excerpt(item.get_excerpt())
if excerpt:
text += f'Document Excerpt: \n{excerpt}\n'
return text
|
def combined_text(item: 'ResultItem') ->str:
    """Combine a ResultItem title and excerpt into a single string.
    Args:
        item: the ResultItem of a Kendra search.
    Returns:
        A combined text of the title and excerpt of the given item.
    """
    parts = []
    title = item.get_title()
    if title:
        parts.append(f'Document Title: {title}\n')
    excerpt = clean_excerpt(item.get_excerpt())
    if excerpt:
        parts.append(f'Document Excerpt: \n{excerpt}\n')
    return ''.join(parts)
|
Combine a ResultItem title and excerpt into a single string.
Args:
item: the ResultItem of a Kendra search.
Returns:
A combined text of the title and excerpt of the given item.
|
test_blob_from_pure_path
|
"""Test reading blob from a file path."""
content = b'Hello, World!'
with get_temp_file(content, suffix='.html') as temp_path:
assert isinstance(temp_path, Path)
blob = Blob.from_path(temp_path)
assert blob.encoding == 'utf-8'
assert blob.path == temp_path
assert blob.mimetype == 'text/html'
assert blob.source == str(temp_path)
assert blob.data is None
assert blob.as_bytes() == content
assert blob.as_string() == 'Hello, World!'
with blob.as_bytes_io() as bytes_io:
assert bytes_io.read() == content
|
def test_blob_from_pure_path() ->None:
    """Test reading blob from a file path."""
    content = b'Hello, World!'
    # .html suffix lets the mimetype be inferred from the path.
    with get_temp_file(content, suffix='.html') as temp_path:
        assert isinstance(temp_path, Path)
        blob = Blob.from_path(temp_path)
        assert blob.encoding == 'utf-8'
        assert blob.path == temp_path
        assert blob.mimetype == 'text/html'
        assert blob.source == str(temp_path)
        # Contents are read from disk on demand, not stored on the Blob.
        assert blob.data is None
        assert blob.as_bytes() == content
        assert blob.as_string() == 'Hello, World!'
        with blob.as_bytes_io() as bytes_io:
            assert bytes_io.read() == content
|
Test reading blob from a file path.
|
_get_gptcache
|
"""Get a cache object.
When the corresponding llm model cache does not exist, it will be created."""
_gptcache = self.gptcache_dict.get(llm_string, None)
if not _gptcache:
_gptcache = self._new_gptcache(llm_string)
return _gptcache
|
def _get_gptcache(self, llm_string: str) ->Any:
    """Get a cache object.
    When the corresponding llm model cache does not exist, it will be created."""
    # Look up the per-model cache; fall back to creating one on miss.
    existing = self.gptcache_dict.get(llm_string)
    return existing if existing else self._new_gptcache(llm_string)
|
Get a cache object.
When the corresponding llm model cache does not exist, it will be created.
|
parse_obj
|
if isinstance(page_content, object):
return json.dumps(page_content)
return page_content
|
def parse_obj(self, page_content: Union[str, object]) ->str:
    """Serialize ``page_content`` to a string.

    Strings are returned unchanged; any other value is JSON-encoded.

    Note: the previous check ``isinstance(page_content, object)`` was
    always true (every Python value is an instance of ``object``), so
    strings were JSON-quoted and the plain return was unreachable.
    """
    if not isinstance(page_content, str):
        return json.dumps(page_content)
    return page_content
| null |
lookup_tool
|
"""Lookup tool by name."""
return {tool.name: tool for tool in self.tools}[name]
|
def lookup_tool(self, name: str) ->BaseTool:
    """Lookup tool by name."""
    # Build a name->tool registry, then index it; a missing name raises
    # KeyError(name), and duplicate names resolve to the last occurrence.
    registry = {tool.name: tool for tool in self.tools}
    return registry[name]
|
Lookup tool by name.
|
check_bs_import
|
"""Check that the arguments are valid."""
try:
from bs4 import BeautifulSoup
except ImportError:
raise ImportError(
"The 'beautifulsoup4' package is required to use this tool. Please install it with 'pip install beautifulsoup4'."
)
return values
|
@root_validator
def check_bs_import(cls, values: dict) ->dict:
    """Check that the arguments are valid."""
    # Availability check only: BeautifulSoup is not used here; the import
    # just verifies beautifulsoup4 is installed before the tool runs.
    try:
        from bs4 import BeautifulSoup
    except ImportError:
        raise ImportError(
            "The 'beautifulsoup4' package is required to use this tool. Please install it with 'pip install beautifulsoup4'."
            )
    return values
|
Check that the arguments are valid.
|
test_vald_search_with_score_by_vector
|
"""Test end to end construction and search with scores by vector."""
texts = ['foo', 'bar', 'baz']
metadatas = [{'page': i} for i in range(len(texts))]
docsearch = _vald_from_texts(metadatas=metadatas)
time.sleep(WAIT_TIME)
embedding = FakeEmbeddings().embed_query('foo')
output = docsearch.similarity_search_with_score_by_vector(embedding, k=3)
docs = [o[0] for o in output]
scores = [o[1] for o in output]
assert docs == [Document(page_content='foo'), Document(page_content='bar'),
Document(page_content='baz')]
assert scores[0] < scores[1] < scores[2]
|
def test_vald_search_with_score_by_vector() ->None:
    """Test end to end construction and search with scores by vector."""
    texts = ['foo', 'bar', 'baz']
    metadatas = [{'page': i} for i in range(len(texts))]
    docsearch = _vald_from_texts(metadatas=metadatas)
    # Wait for the backend to make the freshly inserted vectors searchable
    # (indexing is asynchronous -- WAIT_TIME is defined elsewhere in the file).
    time.sleep(WAIT_TIME)
    embedding = FakeEmbeddings().embed_query('foo')
    output = docsearch.similarity_search_with_score_by_vector(embedding, k=3)
    docs = [o[0] for o in output]
    scores = [o[1] for o in output]
    assert docs == [Document(page_content='foo'), Document(page_content=
        'bar'), Document(page_content='baz')]
    # Strictly ascending scores: presumably distances (smaller = closer).
    assert scores[0] < scores[1] < scores[2]
|
Test end to end construction and search with scores by vector.
|
_import_vllm
|
from langchain_community.llms.vllm import VLLM
return VLLM
|
def _import_vllm() ->Any:
    """Lazily import and return the ``VLLM`` class (deferred import shim)."""
    from langchain_community.llms.vllm import VLLM
    return VLLM
| null |
delete_keys
|
"""Delete records from the SQLite database."""
with self._make_session() as session:
session.query(UpsertionRecord).filter(and_(UpsertionRecord.key.in_(keys
), UpsertionRecord.namespace == self.namespace)).delete()
session.commit()
|
def delete_keys(self, keys: Sequence[str]) ->None:
    """Delete records from the SQLite database."""
    # Bulk-delete every upsertion record whose key is in `keys`, scoped to
    # this store's namespace so other namespaces are untouched.
    with self._make_session() as session:
        session.query(UpsertionRecord).filter(and_(UpsertionRecord.key.in_(
            keys), UpsertionRecord.namespace == self.namespace)).delete()
        # Explicit commit -- assumes _make_session does not commit on exit;
        # TODO confirm against the session factory.
        session.commit()
|
Delete records from the SQLite database.
|
format_to_openai_function_messages
|
"""Convert (AgentAction, tool output) tuples into FunctionMessages.
Args:
intermediate_steps: Steps the LLM has taken to date, along with observations
Returns:
list of messages to send to the LLM for the next prediction
"""
messages = []
for agent_action, observation in intermediate_steps:
messages.extend(_convert_agent_action_to_messages(agent_action,
observation))
return messages
|
def format_to_openai_function_messages(intermediate_steps: Sequence[Tuple[
    AgentAction, str]]) ->List[BaseMessage]:
    """Convert (AgentAction, tool output) tuples into FunctionMessages.
    Args:
        intermediate_steps: Steps the LLM has taken to date, along with observations
    Returns:
        list of messages to send to the LLM for the next prediction
    """
    # Flatten the per-step message lists into one sequence, in step order.
    return [message for action, observation in intermediate_steps for
        message in _convert_agent_action_to_messages(action, observation)]
|
Convert (AgentAction, tool output) tuples into FunctionMessages.
Args:
intermediate_steps: Steps the LLM has taken to date, along with observations
Returns:
list of messages to send to the LLM for the next prediction
|
_run
|
"""Use the Atlassian Jira API to run an operation."""
return self.api_wrapper.run(self.mode, instructions)
|
def _run(self, instructions: str, run_manager: Optional[
    CallbackManagerForToolRun]=None) ->str:
    """Use the Atlassian Jira API to run an operation."""
    # Delegate to the wrapper; `self.mode` selects which Jira operation the
    # instructions apply to. `run_manager` is accepted for the tool
    # interface but unused here.
    return self.api_wrapper.run(self.mode, instructions)
|
Use the Atlassian Jira API to run an operation.
|
_get_relevant_documents
|
from qdrant_client import QdrantClient, models
client = cast(QdrantClient, self.client)
query_indices, query_values = self.sparse_encoder(query)
results = client.search(self.collection_name, query_filter=self.filter,
query_vector=models.NamedSparseVector(name=self.sparse_vector_name,
vector=models.SparseVector(indices=query_indices, values=query_values)),
limit=self.k, with_vectors=False, **self.search_options)
return [Qdrant._document_from_scored_point(point, self.content_payload_key,
self.metadata_payload_key) for point in results]
|
def _get_relevant_documents(self, query: str, *, run_manager:
    CallbackManagerForRetrieverRun) ->List[Document]:
    """Return documents for `query` via a Qdrant sparse-vector search."""
    from qdrant_client import QdrantClient, models
    client = cast(QdrantClient, self.client)
    # Encode the text query into a sparse (indices, values) representation.
    query_indices, query_values = self.sparse_encoder(query)
    results = client.search(self.collection_name, query_filter=self.filter,
        query_vector=models.NamedSparseVector(name=self.sparse_vector_name,
        vector=models.SparseVector(indices=query_indices, values=
        query_values)), limit=self.k, with_vectors=False, **self.search_options
        )
    # Convert scored points into Documents using the configured payload keys.
    return [Qdrant._document_from_scored_point(point, self.
        content_payload_key, self.metadata_payload_key) for point in results]
| null |
test_placeholder
|
"""Used for compiling integration tests without running any real tests."""
pass
|
@pytest.mark.compile
def test_placeholder() ->None:
    """Used for compiling integration tests without running any real tests."""
    # Intentionally empty: its presence lets the `compile` marker select
    # something collectable.
    pass
|
Used for compiling integration tests without running any real tests.
|
__init__
|
self.history: List[Dict[str, Union[int, float]]] = [{'step': 0, 'score': 0}]
self.step: int = step
self.i: int = 0
self.num: float = 0
self.denom: float = 0
|
def __init__(self, step: int):
    """Initialize the running-score state for a given step size."""
    # Counters and accumulators all start at zero; independent assignments
    # are grouped by purpose.
    self.step: int = step
    self.i: int = 0
    self.num: float = 0
    self.denom: float = 0
    # Seed the history with the origin point (step 0, score 0).
    self.history: List[Dict[str, Union[int, float]]] = [{'step': 0, 'score': 0}
        ]
| null |
test_implements_string_protocol
|
assert issubclass(CriteriaEvalChain, StringEvaluator)
|
def test_implements_string_protocol() ->None:
    """CriteriaEvalChain must satisfy the StringEvaluator interface."""
    assert issubclass(CriteriaEvalChain, StringEvaluator)
| null |
input_keys
|
"""Return the input keys.
:meta private:
"""
|
@property
@abstractmethod
def input_keys(self) ->List[str]:
    """Return the input keys.
    :meta private:
    """
    # Abstract: concrete subclasses declare which input keys they expect.
|
Return the input keys.
:meta private:
|
input_keys
|
"""Return the input keys.
Returns:
List of input keys.
"""
return self._input_keys
|
@property
def input_keys(self) ->List[str]:
    """Return the input keys.
    Returns:
        List of input keys.
    """
    # Backed directly by the `_input_keys` attribute set elsewhere.
    return self._input_keys
|
Return the input keys.
Returns:
List of input keys.
|
_stream
|
"""Stream the chat response in chunks."""
response = self.sse_invoke(prompt)
for r in response.events():
if r.event == 'add':
delta = r.data
yield ChatGenerationChunk(message=AIMessageChunk(content=delta))
if run_manager:
run_manager.on_llm_new_token(delta)
elif r.event == 'error':
raise ValueError(f'Error from ZhipuAI API response: {r.data}')
|
def _stream(self, prompt: List[Dict[str, str]], stop: Optional[List[str]]=
    None, run_manager: Optional[CallbackManagerForLLMRun]=None, **kwargs: Any
    ) ->Iterator[ChatGenerationChunk]:
    """Stream the chat response in chunks."""
    # Server-sent events: 'add' events carry incremental text, 'error'
    # events abort the stream; other event types are ignored.
    # `stop` and extra kwargs are accepted for interface parity but unused.
    response = self.sse_invoke(prompt)
    for r in response.events():
        if r.event == 'add':
            delta = r.data
            yield ChatGenerationChunk(message=AIMessageChunk(content=delta))
            # Callback fires after the chunk has been yielded.
            if run_manager:
                run_manager.on_llm_new_token(delta)
        elif r.event == 'error':
            raise ValueError(f'Error from ZhipuAI API response: {r.data}')
|
Stream the chat response in chunks.
|
test_system_message_single_tool
|
prompt: Any = StructuredChatAgent.create_prompt([Tool(name='foo',
description='Test tool FOO', func=lambda x: x)])
actual = prompt.messages[0].prompt.format()
expected = dedent(
"""
Respond to the human as helpfully and accurately as possible. You have access to the following tools:
foo: Test tool FOO, args: {'tool_input': {'type': 'string'}}
Use a json blob to specify a tool by providing an action key (tool name) and an action_input key (tool input).
Valid "action" values: "Final Answer" or foo
Provide only ONE action per $JSON_BLOB, as shown:
```
{
"action": $TOOL_NAME,
"action_input": $INPUT
}
```
Follow this format:
Question: input question to answer
Thought: consider previous and subsequent steps
Action:
```
$JSON_BLOB
```
Observation: action result
... (repeat Thought/Action/Observation N times)
Thought: I know what to respond
Action:
```
{
"action": "Final Answer",
"action_input": "Final response to human"
}
```
Begin! Reminder to ALWAYS respond with a valid json blob of a single action. Use tools if necessary. Respond directly if appropriate. Format is Action:```$JSON_BLOB```then Observation:.
Thought:
"""
).strip()
assert actual == expected
|
def test_system_message_single_tool(self) ->None:
    """System prompt for a single registered tool must match the template
    byte-for-byte (tool name, args schema, and JSON-blob instructions)."""
    prompt: Any = StructuredChatAgent.create_prompt([Tool(name='foo',
        description='Test tool FOO', func=lambda x: x)])
    actual = prompt.messages[0].prompt.format()
    expected = dedent(
        """
        Respond to the human as helpfully and accurately as possible. You have access to the following tools:
        foo: Test tool FOO, args: {'tool_input': {'type': 'string'}}
        Use a json blob to specify a tool by providing an action key (tool name) and an action_input key (tool input).
        Valid "action" values: "Final Answer" or foo
        Provide only ONE action per $JSON_BLOB, as shown:
        ```
        {
        "action": $TOOL_NAME,
        "action_input": $INPUT
        }
        ```
        Follow this format:
        Question: input question to answer
        Thought: consider previous and subsequent steps
        Action:
        ```
        $JSON_BLOB
        ```
        Observation: action result
        ... (repeat Thought/Action/Observation N times)
        Thought: I know what to respond
        Action:
        ```
        {
        "action": "Final Answer",
        "action_input": "Final response to human"
        }
        ```
        Begin! Reminder to ALWAYS respond with a valid json blob of a single action. Use tools if necessary. Respond directly if appropriate. Format is Action:```$JSON_BLOB```then Observation:.
        Thought:
        """
        ).strip()
    assert actual == expected
| null |
_load_file_from_path
|
"""Load a file from a Dropbox path."""
dbx = self._create_dropbox_client()
try:
from dropbox import exceptions
except ImportError:
raise ImportError('You must run `pip install dropbox')
try:
file_metadata = dbx.files_get_metadata(file_path)
if file_metadata.is_downloadable:
_, response = dbx.files_download(file_path)
elif file_metadata.export_info:
_, response = dbx.files_export(file_path, 'markdown')
except exceptions.ApiError as ex:
raise ValueError(
f'Could not load file: {file_path}. Please verify the file pathand try again.'
) from ex
try:
text = response.content.decode('utf-8')
except UnicodeDecodeError:
file_extension = os.path.splitext(file_path)[1].lower()
if file_extension == '.pdf':
print(f'File {file_path} type detected as .pdf')
from langchain_community.document_loaders import UnstructuredPDFLoader
temp_dir = tempfile.TemporaryDirectory()
temp_pdf = Path(temp_dir.name) / 'tmp.pdf'
with open(temp_pdf, mode='wb') as f:
f.write(response.content)
try:
loader = UnstructuredPDFLoader(str(temp_pdf))
docs = loader.load()
if docs:
return docs[0]
except Exception as pdf_ex:
print(f'Error while trying to parse PDF {file_path}: {pdf_ex}')
return None
else:
print(
f'File {file_path} could not be decoded as pdf or text. Skipping.')
return None
metadata = {'source': f'dropbox://{file_path}', 'title': os.path.basename(
file_path)}
return Document(page_content=text, metadata=metadata)
|
def _load_file_from_path(self, file_path: str) ->Optional[Document]:
    """Load a single file from Dropbox and wrap it in a ``Document``.

    Downloadable files are fetched as-is; exportable entries are exported
    as markdown. Content that is not valid UTF-8 is retried as a PDF;
    anything else is skipped.

    Args:
        file_path: Dropbox path of the file to load.
    Returns:
        A ``Document`` with the file text and source metadata, or ``None``
        when the content could not be parsed.
    Raises:
        ImportError: if the dropbox SDK is not installed.
        ValueError: if the Dropbox API rejects the path or the entry is
            neither downloadable nor exportable.
    """
    dbx = self._create_dropbox_client()
    try:
        from dropbox import exceptions
    except ImportError:
        # fixed: the install hint was missing its closing backtick
        raise ImportError('You must run `pip install dropbox`')
    try:
        file_metadata = dbx.files_get_metadata(file_path)
        if file_metadata.is_downloadable:
            _, response = dbx.files_download(file_path)
        elif file_metadata.export_info:
            _, response = dbx.files_export(file_path, 'markdown')
        else:
            # fixed: previously `response` was left unbound here, which
            # surfaced later as an unrelated UnboundLocalError
            raise ValueError(
                f'File {file_path} is neither downloadable nor exportable.')
    except exceptions.ApiError as ex:
        # fixed: "file pathand" -> "file path and"
        raise ValueError(
            f'Could not load file: {file_path}. Please verify the file path and try again.'
            ) from ex
    try:
        text = response.content.decode('utf-8')
    except UnicodeDecodeError:
        file_extension = os.path.splitext(file_path)[1].lower()
        if file_extension == '.pdf':
            print(f'File {file_path} type detected as .pdf')
            from langchain_community.document_loaders import UnstructuredPDFLoader
            # fixed: use a context manager so the temporary directory is
            # always removed (it previously leaked until GC)
            with tempfile.TemporaryDirectory() as temp_dir:
                temp_pdf = Path(temp_dir) / 'tmp.pdf'
                temp_pdf.write_bytes(response.content)
                try:
                    loader = UnstructuredPDFLoader(str(temp_pdf))
                    docs = loader.load()
                    if docs:
                        return docs[0]
                except Exception as pdf_ex:
                    print(f'Error while trying to parse PDF {file_path}: {pdf_ex}')
            return None
        else:
            print(
                f'File {file_path} could not be decoded as pdf or text. Skipping.'
                )
            return None
    metadata = {'source': f'dropbox://{file_path}', 'title': os.path.
        basename(file_path)}
    return Document(page_content=text, metadata=metadata)
|
Load a file from a Dropbox path.
|
_dont_flip_the_cos_score
|
return distance
|
@staticmethod
def _dont_flip_the_cos_score(distance: float) ->float:
    # Identity mapping: used where the cosine score is already oriented as
    # needed, so no inversion is applied (the name documents the intent).
    return distance
| null |
test_epsilla
|
instance = _test_from_texts()
search = instance.similarity_search(query='bar', k=1)
result_texts = [doc.page_content for doc in search]
assert 'bar' in result_texts
|
def test_epsilla() ->None:
    """Smoke test: a seeded store returns the matching text for its query."""
    instance = _test_from_texts()
    search = instance.similarity_search(query='bar', k=1)
    result_texts = [doc.page_content for doc in search]
    assert 'bar' in result_texts
| null |
_import_edenai_EdenAiExplicitImageTool
|
from langchain_community.tools.edenai import EdenAiExplicitImageTool
return EdenAiExplicitImageTool
|
def _import_edenai_EdenAiExplicitImageTool() ->Any:
    """Lazily import and return ``EdenAiExplicitImageTool`` (deferred import shim)."""
    from langchain_community.tools.edenai import EdenAiExplicitImageTool
    return EdenAiExplicitImageTool
| null |
test_anthropic_generate
|
"""Test generate method of anthropic."""
chat = ChatAnthropic(model='test')
chat_messages: List[List[BaseMessage]] = [[HumanMessage(content=
'How many toes do dogs have?')]]
messages_copy = [messages.copy() for messages in chat_messages]
result: LLMResult = chat.generate(chat_messages)
assert isinstance(result, LLMResult)
for response in result.generations[0]:
assert isinstance(response, ChatGeneration)
assert isinstance(response.text, str)
assert response.text == response.message.content
assert chat_messages == messages_copy
|
@pytest.mark.scheduled
def test_anthropic_generate() ->None:
    """Test generate method of anthropic."""
    chat = ChatAnthropic(model='test')
    chat_messages: List[List[BaseMessage]] = [[HumanMessage(content=
        'How many toes do dogs have?')]]
    # Snapshot the inputs so we can verify generate() does not mutate them.
    messages_copy = [messages.copy() for messages in chat_messages]
    result: LLMResult = chat.generate(chat_messages)
    assert isinstance(result, LLMResult)
    for response in result.generations[0]:
        assert isinstance(response, ChatGeneration)
        assert isinstance(response.text, str)
        # .text must mirror the underlying message content.
        assert response.text == response.message.content
    assert chat_messages == messages_copy
|
Test generate method of anthropic.
|
marqo_bulk_similarity_search
|
"""Return documents from Marqo using a bulk search, exposes Marqo's
output directly
Args:
queries (Iterable[Union[str, Dict[str, float]]]): A list of queries.
k (int, optional): The number of documents to return for each query.
Defaults to 4.
Returns:
Dict[str, Dict[List[Dict[str, Dict[str, Any]]]]]: A bulk search results
object
"""
bulk_results = {'result': [self._client.index(self._index_name).search(q=
query, searchable_attributes=self._searchable_attributes, limit=k) for
query in queries]}
return bulk_results
|
def marqo_bulk_similarity_search(self, queries: Iterable[Union[str, Dict[
    str, float]]], k: int=4) ->Dict[str, List[Dict[str, List[Dict[str, str]]]]
    ]:
    """Return documents from Marqo using a bulk search, exposes Marqo's
    output directly
    Args:
        queries (Iterable[Union[str, Dict[str, float]]]): A list of queries.
        k (int, optional): The number of documents to return for each query.
        Defaults to 4.
    Returns:
        Dict[str, Dict[List[Dict[str, Dict[str, Any]]]]]: A bulk search results
        object
    """
    # Run one Marqo search per query and collect the raw responses in order.
    per_query_results = []
    for query in queries:
        per_query_results.append(self._client.index(self._index_name).
            search(q=query, searchable_attributes=self.
            _searchable_attributes, limit=k))
    return {'result': per_query_results}
|
Return documents from Marqo using a bulk search, exposes Marqo's
output directly
Args:
queries (Iterable[Union[str, Dict[str, float]]]): A list of queries.
k (int, optional): The number of documents to return for each query.
Defaults to 4.
Returns:
Dict[str, Dict[List[Dict[str, Dict[str, Any]]]]]: A bulk search results
object
|
test_disable_collect_metadata
|
"""If collect_metadata is False, no additional metadata should be collected."""
loader_without_metadata = ObsidianLoader(str(OBSIDIAN_EXAMPLE_PATH),
collect_metadata=False)
docs_wo = loader_without_metadata.load()
assert len(docs_wo) == 6
assert all(doc.page_content for doc in docs_wo)
assert all(set(doc.metadata) == STANDARD_METADATA_FIELDS for doc in docs_wo)
|
def test_disable_collect_metadata() ->None:
    """If collect_metadata is False, no additional metadata should be collected."""
    loader_without_metadata = ObsidianLoader(str(OBSIDIAN_EXAMPLE_PATH),
        collect_metadata=False)
    docs_wo = loader_without_metadata.load()
    assert len(docs_wo) == 6
    assert all(doc.page_content for doc in docs_wo)
    # Every document must carry exactly the standard fields -- nothing
    # extracted from front matter or tags.
    assert all(set(doc.metadata) == STANDARD_METADATA_FIELDS for doc in docs_wo
        )
|
If collect_metadata is False, no additional metadata should be collected.
|
__init__
|
self._filter = _filter
self._operator = operator
self._left = left
self._right = right
|
def __init__(self, _filter: Optional[str]=None, operator: Optional[
    RedisFilterOperator]=None, left: Optional['RedisFilterExpression']=None,
    right: Optional['RedisFilterExpression']=None):
    """Build a filter-expression node.

    Either a leaf (`_filter` string) or an internal node combining `left`
    and `right` with `operator` -- exact invariants are enforced by the
    call sites; TODO confirm.
    """
    self._filter = _filter
    self._operator = operator
    self._left = left
    self._right = right
| null |
test_causal_chain
|
"""
Test causal chain returns a DAG as a pydantic object.
"""
causal_chain = CausalChain.from_univariate_prompt(llm=self.fake_llm)
output = causal_chain(
'jan has three times the number of pets as marcia. marcia has two more pets than cindy.'
)
expected_output = {'chain_answer': None, 'chain_data': CausalModel(
attribute='pet_count', entities=[EntityModel(name='cindy', code='pass',
value=0.0, depends_on=[]), EntityModel(name='marcia', code=
'marcia.value = cindy.value + 2', value=0.0, depends_on=['cindy']),
EntityModel(name='jan', code='jan.value = marcia.value * 3', value=0.0,
depends_on=['marcia'])]), 'narrative_input':
'jan has three times the number of pets as marcia. marcia has two more pets than cindy.'
}
assert output == expected_output
|
def test_causal_chain(self) ->None:
    """
    Test causal chain returns a DAG as a pydantic object.
    """
    causal_chain = CausalChain.from_univariate_prompt(llm=self.fake_llm)
    output = causal_chain(
        'jan has three times the number of pets as marcia. marcia has two more pets than cindy.'
        )
    # Expected DAG: cindy is the root, marcia depends on cindy, jan on marcia.
    expected_output = {'chain_answer': None, 'chain_data': CausalModel(
        attribute='pet_count', entities=[EntityModel(name='cindy', code=
        'pass', value=0.0, depends_on=[]), EntityModel(name='marcia', code=
        'marcia.value = cindy.value + 2', value=0.0, depends_on=['cindy']),
        EntityModel(name='jan', code='jan.value = marcia.value * 3', value=
        0.0, depends_on=['marcia'])]), 'narrative_input':
        'jan has three times the number of pets as marcia. marcia has two more pets than cindy.'
        }
    assert output == expected_output
|
Test causal chain returns a DAG as a pydantic object.
|
_convert_messages_to_prompt
|
"""Format a list of messages into a full prompt for the Anthropic model
Args:
messages (List[BaseMessage]): List of BaseMessage to combine.
Returns:
str: Combined string with necessary HUMAN_PROMPT and AI_PROMPT tags.
"""
prompt_params = {}
if self.HUMAN_PROMPT:
prompt_params['human_prompt'] = self.HUMAN_PROMPT
if self.AI_PROMPT:
prompt_params['ai_prompt'] = self.AI_PROMPT
return convert_messages_to_prompt_anthropic(messages=messages, **prompt_params)
|
def _convert_messages_to_prompt(self, messages: List[BaseMessage]) ->str:
    """Format a list of messages into a full prompt for the Anthropic model
    Args:
        messages (List[BaseMessage]): List of BaseMessage to combine.
    Returns:
        str: Combined string with necessary HUMAN_PROMPT and AI_PROMPT tags.
    """
    # Forward the configured prompt tags only when they are set, letting
    # the helper fall back to its own defaults otherwise.
    overrides = {}
    if self.HUMAN_PROMPT:
        overrides['human_prompt'] = self.HUMAN_PROMPT
    if self.AI_PROMPT:
        overrides['ai_prompt'] = self.AI_PROMPT
    return convert_messages_to_prompt_anthropic(messages=messages, **overrides)
|
Format a list of messages into a full prompt for the Anthropic model
Args:
messages (List[BaseMessage]): List of BaseMessage to combine.
Returns:
str: Combined string with necessary HUMAN_PROMPT and AI_PROMPT tags.
|
test_huggingface_instructor_embedding_documents
|
"""Test huggingface embeddings."""
documents = ['foo bar']
model_name = 'hkunlp/instructor-base'
embedding = HuggingFaceInstructEmbeddings(model_name=model_name)
output = embedding.embed_documents(documents)
assert len(output) == 1
assert len(output[0]) == 768
|
def test_huggingface_instructor_embedding_documents() ->None:
    """Test huggingface embeddings."""
    documents = ['foo bar']
    model_name = 'hkunlp/instructor-base'
    embedding = HuggingFaceInstructEmbeddings(model_name=model_name)
    output = embedding.embed_documents(documents)
    # One vector per input document; 768 is the expected dimensionality
    # for this model.
    assert len(output) == 1
    assert len(output[0]) == 768
|
Test huggingface embeddings.
|
__init__
|
logger.warning(
'Using a deprecated class. Please use `from langchain.chains import HypotheticalDocumentEmbedder` instead'
)
from langchain.chains.hyde.base import HypotheticalDocumentEmbedder as H
return H(*args, **kwargs)
|
def __init__(self, *args: Any, **kwargs: Any):
    """Deprecated shim that redirects to ``langchain.chains.HypotheticalDocumentEmbedder``."""
    logger.warning(
        'Using a deprecated class. Please use `from langchain.chains import HypotheticalDocumentEmbedder` instead'
        )
    from langchain.chains.hyde.base import HypotheticalDocumentEmbedder as H
    # NOTE(review): returning a value from __init__ raises TypeError when
    # the class is instantiated normally; a working redirect would need
    # __new__ instead -- confirm intended usage before relying on this.
    return H(*args, **kwargs)
| null |
__init__
|
super().__init__(code)
self.source_lines = self.code.splitlines()
try:
import esprima
except ImportError:
raise ImportError(
'Could not import esprima Python package. Please install it with `pip install esprima`.'
)
|
def __init__(self, code: str):
    """Initialize the segmenter and verify that esprima is importable."""
    super().__init__(code)
    # Keep a per-line view of the source for later segment extraction.
    self.source_lines = self.code.splitlines()
    # Availability check only: esprima is re-imported where actually used.
    try:
        import esprima
    except ImportError:
        raise ImportError(
            'Could not import esprima Python package. Please install it with `pip install esprima`.'
            )
| null |
output_keys
|
"""Output keys."""
if self.return_intermediate_steps:
return ['output', 'fallacy_critiques_and_revisions', 'initial_output']
return ['output']
|
@property
def output_keys(self) ->List[str]:
    """Output keys."""
    # The base output is always present; intermediate-step keys are
    # appended only when the chain is configured to return them.
    base_keys = ['output']
    if self.return_intermediate_steps:
        return base_keys + ['fallacy_critiques_and_revisions', 'initial_output']
    return base_keys
|
Output keys.
|
test_load_returns_list_of_documents
|
loader = PolarsDataFrameLoader(sample_data_frame)
docs = loader.load()
assert isinstance(docs, list)
assert all(isinstance(doc, Document) for doc in docs)
assert len(docs) == 2
|
def test_load_returns_list_of_documents(sample_data_frame: pl.DataFrame
    ) ->None:
    """Loading a DataFrame should yield a list of Documents."""
    loader = PolarsDataFrameLoader(sample_data_frame)
    docs = loader.load()
    assert isinstance(docs, list)
    assert all(isinstance(doc, Document) for doc in docs)
    # Two documents expected -- presumably one per fixture row; confirm
    # against the sample_data_frame fixture.
    assert len(docs) == 2
| null |
_package_dir
|
"""Return the path to the directory containing the documentation."""
if package_name in ('langchain', 'experimental', 'community', 'core', 'cli'):
return ROOT_DIR / 'libs' / package_name / _package_namespace(package_name)
else:
return ROOT_DIR / 'libs' / 'partners' / package_name / _package_namespace(
package_name)
|
def _package_dir(package_name: str='langchain') ->Path:
    """Return the path to the directory containing the documentation."""
    # Monorepo packages live directly under libs/; everything else is a
    # partner package under libs/partners/.
    monorepo_packages = ('langchain', 'experimental', 'community', 'core',
        'cli')
    base = ROOT_DIR / 'libs'
    if package_name not in monorepo_packages:
        base = base / 'partners'
    return base / package_name / _package_namespace(package_name)
|
Return the path to the directory containing the documentation.
|
test_runnable_branch_init_coercion
|
"""Verify that runnable branch gets initialized properly."""
runnable = RunnableBranch[int, int](*branches)
for branch in runnable.branches:
condition, body = branch
assert isinstance(condition, Runnable)
assert isinstance(body, Runnable)
assert isinstance(runnable.default, Runnable)
assert runnable.input_schema.schema() == {'title': 'RunnableBranchInput'}
|
@pytest.mark.parametrize('branches', [[(RunnableLambda(lambda x: x > 0),
    RunnableLambda(lambda x: x + 1)), RunnableLambda(lambda x: x - 1)], [(
    RunnableLambda(lambda x: x > 0), RunnableLambda(lambda x: x + 1)), (
    RunnableLambda(lambda x: x > 5), RunnableLambda(lambda x: x + 1)),
    RunnableLambda(lambda x: x - 1)], [(lambda x: x > 0, lambda x: x + 1),
    (lambda x: x > 5, lambda x: x + 1), lambda x: x - 1]])
def test_runnable_branch_init_coercion(branches: Sequence[Any]) ->None:
    """Verify that runnable branch gets initialized properly."""
    runnable = RunnableBranch[int, int](*branches)
    for branch in runnable.branches:
        condition, body = branch
        # Raw callables passed in (last parametrize case) must have been
        # coerced into Runnables.
        assert isinstance(condition, Runnable)
        assert isinstance(body, Runnable)
    assert isinstance(runnable.default, Runnable)
    assert runnable.input_schema.schema() == {'title': 'RunnableBranchInput'}
|
Verify that runnable branch gets initialized properly.
|
messages
|
"""Retrieve messages from Zep memory"""
zep_memory: Optional[Memory] = self._get_memory()
if not zep_memory:
return []
messages: List[BaseMessage] = []
if zep_memory.summary:
if len(zep_memory.summary.content) > 0:
messages.append(SystemMessage(content=zep_memory.summary.content))
if zep_memory.messages:
msg: Message
for msg in zep_memory.messages:
metadata: Dict = {'uuid': msg.uuid, 'created_at': msg.created_at,
'token_count': msg.token_count, 'metadata': msg.metadata}
if msg.role == 'ai':
messages.append(AIMessage(content=msg.content,
additional_kwargs=metadata))
else:
messages.append(HumanMessage(content=msg.content,
additional_kwargs=metadata))
return messages
|
@property
def messages(self) ->List[BaseMessage]:
    """Retrieve messages from Zep memory"""
    zep_memory: Optional[Memory] = self._get_memory()
    if not zep_memory:
        return []
    messages: List[BaseMessage] = []
    # A non-empty session summary is surfaced first as a SystemMessage so
    # downstream consumers see it before the turn history.
    if zep_memory.summary:
        if len(zep_memory.summary.content) > 0:
            messages.append(SystemMessage(content=zep_memory.summary.content))
    if zep_memory.messages:
        msg: Message
        for msg in zep_memory.messages:
            # Zep bookkeeping (uuid, timestamps, token counts) rides along
            # in additional_kwargs rather than in the message content.
            metadata: Dict = {'uuid': msg.uuid, 'created_at': msg.
                created_at, 'token_count': msg.token_count, 'metadata': msg
                .metadata}
            if msg.role == 'ai':
                messages.append(AIMessage(content=msg.content,
                    additional_kwargs=metadata))
            else:
                # Any non-'ai' role maps to a HumanMessage here.
                messages.append(HumanMessage(content=msg.content,
                    additional_kwargs=metadata))
    return messages
|
Retrieve messages from Zep memory
|
_identifying_params
|
"""Get the identifying parameters."""
return {'model_name': self.model_name, 'temperature': self.temperature,
'top_p': self.top_p, 'top_k': self.top_k, 'n': self.n}
|
@property
def _identifying_params(self) ->Dict[str, Any]:
    """Get the identifying parameters."""
    # Sampling parameters that identify this model configuration.
    return {'model_name': self.model_name, 'temperature': self.temperature,
        'top_p': self.top_p, 'top_k': self.top_k, 'n': self.n}
|
Get the identifying parameters.
|
format_named_docs
|
return '\n\n'.join(f"""Source: {source}
{doc.page_content}""" for source,
doc in named_docs)
|
def format_named_docs(named_docs):
    """Render (source, doc) pairs as blank-line-separated labelled excerpts."""
    rendered = []
    for source, doc in named_docs:
        rendered.append(f'Source: {source}\n\n{doc.page_content}')
    return '\n\n'.join(rendered)
| null |
get_token_ids
|
"""Get the token IDs using the tiktoken package."""
if sys.version_info[1] < 8:
return super().get_num_tokens(text)
try:
import tiktoken
except ImportError:
raise ImportError(
'Could not import tiktoken python package. This is needed in order to calculate get_num_tokens. Please install it with `pip install tiktoken`.'
)
model_name = self.tiktoken_model_name or self.model_name
try:
enc = tiktoken.encoding_for_model(model_name)
except KeyError:
logger.warning('Warning: model not found. Using cl100k_base encoding.')
model = 'cl100k_base'
enc = tiktoken.get_encoding(model)
return enc.encode(text, allowed_special=self.allowed_special,
disallowed_special=self.disallowed_special)
|
def get_token_ids(self, text: str) ->List[int]:
    """Get the token IDs using the tiktoken package.

    Falls back to the superclass tokenizer on Python < 3.8, and to the
    ``cl100k_base`` encoding when tiktoken does not know the model name.
    """
    # Fixed: compare the full version tuple. The old check
    # `sys.version_info[1] < 8` looked only at the minor component, which
    # misfires on any major version other than 3.
    if sys.version_info < (3, 8):
        return super().get_num_tokens(text)
    try:
        import tiktoken
    except ImportError:
        raise ImportError(
            'Could not import tiktoken python package. This is needed in order to calculate get_num_tokens. Please install it with `pip install tiktoken`.'
            )
    model_name = self.tiktoken_model_name or self.model_name
    try:
        enc = tiktoken.encoding_for_model(model_name)
    except KeyError:
        logger.warning('Warning: model not found. Using cl100k_base encoding.')
        enc = tiktoken.get_encoding('cl100k_base')
    return enc.encode(text, allowed_special=self.allowed_special,
        disallowed_special=self.disallowed_special)
|
Get the token IDs using the tiktoken package.
|
_extract_fields
|
"""Grab the existing fields from the Collection"""
from pymilvus import Collection
if isinstance(self.col, Collection):
schema = self.col.schema
for x in schema.fields:
self.fields.append(x.name)
self.fields.remove(self._primary_field)
|
def _extract_fields(self) ->None:
    """Grab the existing fields from the Collection"""
    from pymilvus import Collection
    # Only populate when a real Collection is attached; otherwise the
    # field list is left untouched.
    if isinstance(self.col, Collection):
        schema = self.col.schema
        for x in schema.fields:
            self.fields.append(x.name)
        # The primary key is handled separately, so drop it from the list.
        self.fields.remove(self._primary_field)
|
Grab the existing fields from the Collection
|
_import_openapi_utils_api_models
|
from langchain_community.tools.openapi.utils.api_models import APIOperation
return APIOperation
|
def _import_openapi_utils_api_models() ->Any:
    """Lazily import and return ``APIOperation`` (deferred import shim)."""
    from langchain_community.tools.openapi.utils.api_models import APIOperation
    return APIOperation
| null |
_create_retry_decorator
|
import openai
min_seconds = 4
max_seconds = 10
return retry(reraise=True, stop=stop_after_attempt(embeddings.max_retries),
wait=wait_exponential(multiplier=1, min=min_seconds, max=max_seconds),
retry=retry_if_exception_type(openai.error.Timeout) |
retry_if_exception_type(openai.error.APIError) |
retry_if_exception_type(openai.error.APIConnectionError) |
retry_if_exception_type(openai.error.RateLimitError) |
retry_if_exception_type(openai.error.ServiceUnavailableError),
before_sleep=before_sleep_log(logger, logging.WARNING))
|
def _create_retry_decorator(embeddings: LocalAIEmbeddings) ->Callable[[Any],
    Any]:
    """Build a tenacity retry decorator for transient OpenAI-client errors."""
    import openai
    # Exponential backoff bounded to [4, 10] seconds between attempts, up
    # to `embeddings.max_retries` tries; the last exception is re-raised
    # once retries are exhausted (reraise=True).
    min_seconds = 4
    max_seconds = 10
    return retry(reraise=True, stop=stop_after_attempt(embeddings.
        max_retries), wait=wait_exponential(multiplier=1, min=min_seconds,
        max=max_seconds), retry=retry_if_exception_type(openai.error.
        Timeout) | retry_if_exception_type(openai.error.APIError) |
        retry_if_exception_type(openai.error.APIConnectionError) |
        retry_if_exception_type(openai.error.RateLimitError) |
        retry_if_exception_type(openai.error.ServiceUnavailableError),
        before_sleep=before_sleep_log(logger, logging.WARNING))
| null |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.