method_name
stringlengths 1
78
| method_body
stringlengths 3
9.66k
| full_code
stringlengths 31
10.7k
| docstring
stringlengths 4
4.74k
⌀ |
|---|---|---|---|
__init__
|
"""Initialize a PythonCodeTextSplitter."""
separators = self.get_separators_for_language(Language.PYTHON)
super().__init__(separators=separators, **kwargs)
|
def __init__(self, **kwargs: Any) ->None:
"""Initialize a PythonCodeTextSplitter."""
separators = self.get_separators_for_language(Language.PYTHON)
super().__init__(separators=separators, **kwargs)
|
Initialize a PythonCodeTextSplitter.
|
test_continue_on_failure_false
|
"""Test exception is raised when continue_on_failure=False."""
loader = NewsURLLoader(['badurl.foobar'], continue_on_failure=False)
with pytest.raises(Exception):
loader.load()
|
def test_continue_on_failure_false() ->None:
"""Test exception is raised when continue_on_failure=False."""
loader = NewsURLLoader(['badurl.foobar'], continue_on_failure=False)
with pytest.raises(Exception):
loader.load()
|
Test exception is raised when continue_on_failure=False.
|
headers
|
"""Return headers with API key injected"""
headers_ = self.headers_tmpl.copy()
for header in headers_.values():
if '{nvidia_api_key}' in header['Authorization']:
header['Authorization'] = header['Authorization'].format(nvidia_api_key
=self.nvidia_api_key.get_secret_value())
return headers_
|
@property
def headers(self) ->dict:
"""Return headers with API key injected"""
headers_ = self.headers_tmpl.copy()
for header in headers_.values():
if '{nvidia_api_key}' in header['Authorization']:
header['Authorization'] = header['Authorization'].format(
nvidia_api_key=self.nvidia_api_key.get_secret_value())
return headers_
|
Return headers with API key injected
|
decorator
|
@functools.wraps(func)
def wrapper(*args: Any, **kwargs: Any) ->Any:
"""Validate exactly one arg in each group is not None."""
counts = [sum(1 for arg in arg_group if kwargs.get(arg) is not None) for
arg_group in arg_groups]
invalid_groups = [i for i, count in enumerate(counts) if count != 1]
if invalid_groups:
invalid_group_names = [', '.join(arg_groups[i]) for i in invalid_groups
]
raise ValueError(
f"Exactly one argument in each of the following groups must be defined: {', '.join(invalid_group_names)}"
)
return func(*args, **kwargs)
return wrapper
|
def decorator(func: Callable) ->Callable:
@functools.wraps(func)
def wrapper(*args: Any, **kwargs: Any) ->Any:
"""Validate exactly one arg in each group is not None."""
counts = [sum(1 for arg in arg_group if kwargs.get(arg) is not None
) for arg_group in arg_groups]
invalid_groups = [i for i, count in enumerate(counts) if count != 1]
if invalid_groups:
invalid_group_names = [', '.join(arg_groups[i]) for i in
invalid_groups]
raise ValueError(
f"Exactly one argument in each of the following groups must be defined: {', '.join(invalid_group_names)}"
)
return func(*args, **kwargs)
return wrapper
| null |
__init__
|
try:
from e2b import DataAnalysis
except ImportError as e:
raise ImportError(
'Unable to import e2b, please install with `pip install e2b`.') from e
super().__init__(description=base_description, **kwargs)
self.session = DataAnalysis(api_key=api_key, cwd=cwd, env_vars=env_vars,
on_stdout=on_stdout, on_stderr=on_stderr, on_exit=on_exit, on_artifact=
on_artifact)
|
def __init__(self, api_key: Optional[str]=None, cwd: Optional[str]=None,
env_vars: Optional[EnvVars]=None, on_stdout: Optional[Callable[[str],
Any]]=None, on_stderr: Optional[Callable[[str], Any]]=None, on_artifact:
Optional[Callable[[Artifact], Any]]=None, on_exit: Optional[Callable[[
int], Any]]=None, **kwargs: Any):
try:
from e2b import DataAnalysis
except ImportError as e:
raise ImportError(
'Unable to import e2b, please install with `pip install e2b`.'
) from e
super().__init__(description=base_description, **kwargs)
self.session = DataAnalysis(api_key=api_key, cwd=cwd, env_vars=env_vars,
on_stdout=on_stdout, on_stderr=on_stderr, on_exit=on_exit,
on_artifact=on_artifact)
| null |
ignore_retriever
|
"""Whether to ignore retriever callbacks."""
return self.ignore_retriever_
|
@property
def ignore_retriever(self) ->bool:
"""Whether to ignore retriever callbacks."""
return self.ignore_retriever_
|
Whether to ignore retriever callbacks.
|
_run
|
"""Use the Yahoo Finance News tool."""
try:
import yfinance
except ImportError:
raise ImportError(
'Could not import yfinance python package. Please install it with `pip install yfinance`.'
)
company = yfinance.Ticker(query)
try:
if company.isin is None:
return f'Company ticker {query} not found.'
except (HTTPError, ReadTimeout, ConnectionError):
return f'Company ticker {query} not found.'
links = []
try:
links = [n['link'] for n in company.news if n['type'] == 'STORY']
except (HTTPError, ReadTimeout, ConnectionError):
if not links:
return f'No news found for company that searched with {query} ticker.'
if not links:
return f'No news found for company that searched with {query} ticker.'
loader = WebBaseLoader(web_paths=links)
docs = loader.load()
result = self._format_results(docs, query)
if not result:
return f'No news found for company that searched with {query} ticker.'
return result
|
def _run(self, query: str, run_manager: Optional[CallbackManagerForToolRun]
=None) ->str:
"""Use the Yahoo Finance News tool."""
try:
import yfinance
except ImportError:
raise ImportError(
'Could not import yfinance python package. Please install it with `pip install yfinance`.'
)
company = yfinance.Ticker(query)
try:
if company.isin is None:
return f'Company ticker {query} not found.'
except (HTTPError, ReadTimeout, ConnectionError):
return f'Company ticker {query} not found.'
links = []
try:
links = [n['link'] for n in company.news if n['type'] == 'STORY']
except (HTTPError, ReadTimeout, ConnectionError):
if not links:
return (
f'No news found for company that searched with {query} ticker.'
)
if not links:
return f'No news found for company that searched with {query} ticker.'
loader = WebBaseLoader(web_paths=links)
docs = loader.load()
result = self._format_results(docs, query)
if not result:
return f'No news found for company that searched with {query} ticker.'
return result
|
Use the Yahoo Finance News tool.
|
is_lc_serializable
|
return True
|
@classmethod
def is_lc_serializable(cls) ->bool:
return True
| null |
test_tair
|
"""Test end to end construction and search."""
texts = ['foo', 'bar', 'baz']
docsearch = Tair.from_texts(texts, FakeEmbeddings(), tair_url=
'redis://localhost:6379')
output = docsearch.similarity_search('foo', k=1)
assert output == [Document(page_content='foo')]
|
def test_tair() ->None:
"""Test end to end construction and search."""
texts = ['foo', 'bar', 'baz']
docsearch = Tair.from_texts(texts, FakeEmbeddings(), tair_url=
'redis://localhost:6379')
output = docsearch.similarity_search('foo', k=1)
assert output == [Document(page_content='foo')]
|
Test end to end construction and search.
|
test_parser_unpack_single_arg_operation
|
expected = DEFAULT_PARSER.parse_folder(arg)
actual = DEFAULT_PARSER.parse_folder(f'{op}({arg})')
assert expected == actual
|
@pytest.mark.parametrize('op', ('and', 'or'))
@pytest.mark.parametrize('arg', ('eq("foo", 2)',
'and(eq("foo", 2), lte("bar", 1.1))'))
def test_parser_unpack_single_arg_operation(op: str, arg: str) ->None:
expected = DEFAULT_PARSER.parse_folder(arg)
actual = DEFAULT_PARSER.parse_folder(f'{op}({arg})')
assert expected == actual
| null |
validate_environment
|
"""Validates that the python package exists in environment."""
cls._try_init_vertexai(values)
if values['model_name'] == 'textembedding-gecko-default':
logger.warning(
'Model_name will become a required arg for VertexAIEmbeddings starting from Feb-01-2024. Currently the default is set to textembedding-gecko@001'
)
values['model_name'] = 'textembedding-gecko@001'
try:
from vertexai.language_models import TextEmbeddingModel
except ImportError:
raise_vertex_import_error()
values['client'] = TextEmbeddingModel.from_pretrained(values['model_name'])
return values
|
@root_validator()
def validate_environment(cls, values: Dict) ->Dict:
"""Validates that the python package exists in environment."""
cls._try_init_vertexai(values)
if values['model_name'] == 'textembedding-gecko-default':
logger.warning(
'Model_name will become a required arg for VertexAIEmbeddings starting from Feb-01-2024. Currently the default is set to textembedding-gecko@001'
)
values['model_name'] = 'textembedding-gecko@001'
try:
from vertexai.language_models import TextEmbeddingModel
except ImportError:
raise_vertex_import_error()
values['client'] = TextEmbeddingModel.from_pretrained(values['model_name'])
return values
|
Validates that the python package exists in environment.
|
test_all_imports
|
assert set(__all__) == set(EXPECTED_ALL)
|
def test_all_imports() ->None:
assert set(__all__) == set(EXPECTED_ALL)
| null |
test_vectara_mmr
|
output1 = vectara3.max_marginal_relevance_search('generative AI', k=2,
fetch_k=6, lambda_mult=1.0, n_sentence_context=0)
assert len(output1) == 2
assert 'Generative AI promises to revolutionize how' in output1[0].page_content
assert "This is why today we're adding a fundamental capability" in output1[1
].page_content
output2 = vectara3.max_marginal_relevance_search('generative AI', k=2,
fetch_k=6, lambda_mult=0.0, n_sentence_context=0)
assert len(output2) == 2
assert 'Generative AI promises to revolutionize how' in output2[0].page_content
assert 'Neural LLM systems are excellent at understanding the context' in output2[
1].page_content
|
def test_vectara_mmr(vectara3) ->None:
output1 = vectara3.max_marginal_relevance_search('generative AI', k=2,
fetch_k=6, lambda_mult=1.0, n_sentence_context=0)
assert len(output1) == 2
assert 'Generative AI promises to revolutionize how' in output1[0
].page_content
assert "This is why today we're adding a fundamental capability" in output1[
1].page_content
output2 = vectara3.max_marginal_relevance_search('generative AI', k=2,
fetch_k=6, lambda_mult=0.0, n_sentence_context=0)
assert len(output2) == 2
assert 'Generative AI promises to revolutionize how' in output2[0
].page_content
assert 'Neural LLM systems are excellent at understanding the context' in output2[
1].page_content
| null |
_import_ainetwork_rule
|
from langchain_community.tools.ainetwork.rule import AINRuleOps
return AINRuleOps
|
def _import_ainetwork_rule() ->Any:
from langchain_community.tools.ainetwork.rule import AINRuleOps
return AINRuleOps
| null |
_lazy_import_promptlayer
|
"""Lazy import promptlayer to avoid circular imports."""
try:
import promptlayer
except ImportError:
raise ImportError(
'The PromptLayerCallbackHandler requires the promptlayer package. Please install it with `pip install promptlayer`.'
)
return promptlayer
|
def _lazy_import_promptlayer() ->promptlayer:
"""Lazy import promptlayer to avoid circular imports."""
try:
import promptlayer
except ImportError:
raise ImportError(
'The PromptLayerCallbackHandler requires the promptlayer package. Please install it with `pip install promptlayer`.'
)
return promptlayer
|
Lazy import promptlayer to avoid circular imports.
|
test_get_nfts_polygon
|
contract_address = '0x448676ffCd0aDf2D85C1f0565e8dde6924A9A7D9'
result = BlockchainDocumentLoader(contract_address, BlockchainType.
POLYGON_MAINNET).load()
print('Tokens returned for contract on Polygon: ', len(result))
assert len(result) > 0, 'No NFTs returned'
|
@pytest.mark.skipif(not alchemyKeySet, reason='Alchemy API key not provided.')
def test_get_nfts_polygon() ->None:
contract_address = '0x448676ffCd0aDf2D85C1f0565e8dde6924A9A7D9'
result = BlockchainDocumentLoader(contract_address, BlockchainType.
POLYGON_MAINNET).load()
print('Tokens returned for contract on Polygon: ', len(result))
assert len(result) > 0, 'No NFTs returned'
| null |
get_input_schema
|
return InputType
|
def get_input_schema(self, config: Optional[RunnableConfig]=None) ->Type[
BaseModel]:
return InputType
| null |
_make_spacy_pipeline_for_splitting
|
try:
import spacy
except ImportError:
raise ImportError(
'Spacy is not installed, please install it with `pip install spacy`.')
if pipeline == 'sentencizer':
from spacy.lang.en import English
sentencizer = English()
sentencizer.add_pipe('sentencizer')
else:
sentencizer = spacy.load(pipeline, exclude=['ner', 'tagger'])
sentencizer.max_length = max_length
return sentencizer
|
def _make_spacy_pipeline_for_splitting(pipeline: str, *, max_length: int=
1000000) ->Any:
try:
import spacy
except ImportError:
raise ImportError(
'Spacy is not installed, please install it with `pip install spacy`.'
)
if pipeline == 'sentencizer':
from spacy.lang.en import English
sentencizer = English()
sentencizer.add_pipe('sentencizer')
else:
sentencizer = spacy.load(pipeline, exclude=['ner', 'tagger'])
sentencizer.max_length = max_length
return sentencizer
| null |
vector_store
|
from momento import CredentialProvider, PreviewVectorIndexClient, VectorIndexConfigurations
vector_store = None
try:
client = PreviewVectorIndexClient(VectorIndexConfigurations.Default.
latest(), credential_provider=CredentialProvider.
from_environment_variable(API_KEY_ENV_VAR))
vector_store = MomentoVectorIndex(embedding=embedding_openai, client=
client, index_name=random_index_name)
yield vector_store
finally:
if vector_store is not None:
vector_store._client.delete_index(random_index_name)
|
@pytest.fixture(scope='function')
def vector_store(embedding_openai: OpenAIEmbeddings, random_index_name: str
) ->Iterator[MomentoVectorIndex]:
from momento import CredentialProvider, PreviewVectorIndexClient, VectorIndexConfigurations
vector_store = None
try:
client = PreviewVectorIndexClient(VectorIndexConfigurations.Default
.latest(), credential_provider=CredentialProvider.
from_environment_variable(API_KEY_ENV_VAR))
vector_store = MomentoVectorIndex(embedding=embedding_openai,
client=client, index_name=random_index_name)
yield vector_store
finally:
if vector_store is not None:
vector_store._client.delete_index(random_index_name)
| null |
search_api
|
"""Search the API for the query."""
return 'API result'
|
@tool('search', return_direct=True)
def search_api(query: str, *args: Any) ->str:
"""Search the API for the query."""
return 'API result'
|
Search the API for the query.
|
_get_searx_search_results_json
|
wrapper_kwargs = {k: v for k, v in kwargs.items() if k != 'num_results'}
return SearxSearchResults(wrapper=SearxSearchWrapper(**wrapper_kwargs), **
kwargs)
|
def _get_searx_search_results_json(**kwargs: Any) ->BaseTool:
wrapper_kwargs = {k: v for k, v in kwargs.items() if k != 'num_results'}
return SearxSearchResults(wrapper=SearxSearchWrapper(**wrapper_kwargs),
**kwargs)
| null |
get
|
"""GET the URL and return the text."""
return requests.get(url, headers=self.headers, auth=self.auth, **kwargs)
|
def get(self, url: str, **kwargs: Any) ->requests.Response:
"""GET the URL and return the text."""
return requests.get(url, headers=self.headers, auth=self.auth, **kwargs)
|
GET the URL and return the text.
|
on_chain_start
|
pass
|
def on_chain_start(self, serialized: Dict[str, Any], inputs: Dict[str, Any],
**kwargs: Any) ->None:
pass
| null |
test_cassandra_max_marginal_relevance_search
|
"""
Test end to end construction and MMR search.
The embedding function used here ensures `texts` become
the following vectors on a circle (numbered v0 through v3):
______ v2
/ / | v1
v3 | . | query
| / v0
|______/ (N.B. very crude drawing)
With fetch_k==3 and k==2, when query is at (1, ),
one expects that v2 and v0 are returned (in some order).
"""
texts = ['-0.124', '+0.127', '+0.25', '+1.0']
metadatas = [{'page': i} for i in range(len(texts))]
docsearch = _vectorstore_from_texts(texts, metadatas=metadatas,
embedding_class=AngularTwoDimensionalEmbeddings)
output = docsearch.max_marginal_relevance_search('0.0', k=2, fetch_k=3)
output_set = {(mmr_doc.page_content, mmr_doc.metadata['page']) for mmr_doc in
output}
assert output_set == {('+0.25', '2.0'), ('-0.124', '0.0')}
|
def test_cassandra_max_marginal_relevance_search() ->None:
"""
Test end to end construction and MMR search.
The embedding function used here ensures `texts` become
the following vectors on a circle (numbered v0 through v3):
______ v2
/ / | v1
v3 | . | query
| / v0
|______/ (N.B. very crude drawing)
With fetch_k==3 and k==2, when query is at (1, ),
one expects that v2 and v0 are returned (in some order).
"""
texts = ['-0.124', '+0.127', '+0.25', '+1.0']
metadatas = [{'page': i} for i in range(len(texts))]
docsearch = _vectorstore_from_texts(texts, metadatas=metadatas,
embedding_class=AngularTwoDimensionalEmbeddings)
output = docsearch.max_marginal_relevance_search('0.0', k=2, fetch_k=3)
output_set = {(mmr_doc.page_content, mmr_doc.metadata['page']) for
mmr_doc in output}
assert output_set == {('+0.25', '2.0'), ('-0.124', '0.0')}
|
Test end to end construction and MMR search.
The embedding function used here ensures `texts` become
the following vectors on a circle (numbered v0 through v3):
______ v2
/ / | v1
v3 | . | query
| / v0
|______/ (N.B. very crude drawing)
With fetch_k==3 and k==2, when query is at (1, ),
one expects that v2 and v0 are returned (in some order).
|
clickup_wrapper
|
return ClickupAPIWrapper()
|
@pytest.fixture
def clickup_wrapper() ->ClickupAPIWrapper:
return ClickupAPIWrapper()
| null |
__init__
|
"""Initialize with a path to directory and how to glob over it.
Args:
path: Path to directory.
glob: Glob pattern to use to find files. Defaults to "**/[!.]*"
(all files except hidden).
silent_errors: Whether to silently ignore errors. Defaults to False.
load_hidden: Whether to load hidden files. Defaults to False.
loader_cls: Loader class to use for loading files.
Defaults to UnstructuredFileLoader.
loader_kwargs: Keyword arguments to pass to loader_cls. Defaults to None.
recursive: Whether to recursively search for files. Defaults to False.
show_progress: Whether to show a progress bar. Defaults to False.
use_multithreading: Whether to use multithreading. Defaults to False.
max_concurrency: The maximum number of threads to use. Defaults to 4.
sample_size: The maximum number of files you would like to load from the
directory.
randomize_sample: Shuffle the files to get a random sample.
sample_seed: set the seed of the random shuffle for reproducibility.
"""
if loader_kwargs is None:
loader_kwargs = {}
self.path = path
self.glob = glob
self.load_hidden = load_hidden
self.loader_cls = loader_cls
self.loader_kwargs = loader_kwargs
self.silent_errors = silent_errors
self.recursive = recursive
self.show_progress = show_progress
self.use_multithreading = use_multithreading
self.max_concurrency = max_concurrency
self.sample_size = sample_size
self.randomize_sample = randomize_sample
self.sample_seed = sample_seed
|
def __init__(self, path: str, glob: str='**/[!.]*', silent_errors: bool=
False, load_hidden: bool=False, loader_cls: FILE_LOADER_TYPE=
UnstructuredFileLoader, loader_kwargs: Union[dict, None]=None,
recursive: bool=False, show_progress: bool=False, use_multithreading:
bool=False, max_concurrency: int=4, *, sample_size: int=0,
randomize_sample: bool=False, sample_seed: Union[int, None]=None):
"""Initialize with a path to directory and how to glob over it.
Args:
path: Path to directory.
glob: Glob pattern to use to find files. Defaults to "**/[!.]*"
(all files except hidden).
silent_errors: Whether to silently ignore errors. Defaults to False.
load_hidden: Whether to load hidden files. Defaults to False.
loader_cls: Loader class to use for loading files.
Defaults to UnstructuredFileLoader.
loader_kwargs: Keyword arguments to pass to loader_cls. Defaults to None.
recursive: Whether to recursively search for files. Defaults to False.
show_progress: Whether to show a progress bar. Defaults to False.
use_multithreading: Whether to use multithreading. Defaults to False.
max_concurrency: The maximum number of threads to use. Defaults to 4.
sample_size: The maximum number of files you would like to load from the
directory.
randomize_sample: Shuffle the files to get a random sample.
sample_seed: set the seed of the random shuffle for reproducibility.
"""
if loader_kwargs is None:
loader_kwargs = {}
self.path = path
self.glob = glob
self.load_hidden = load_hidden
self.loader_cls = loader_cls
self.loader_kwargs = loader_kwargs
self.silent_errors = silent_errors
self.recursive = recursive
self.show_progress = show_progress
self.use_multithreading = use_multithreading
self.max_concurrency = max_concurrency
self.sample_size = sample_size
self.randomize_sample = randomize_sample
self.sample_seed = sample_seed
|
Initialize with a path to directory and how to glob over it.
Args:
path: Path to directory.
glob: Glob pattern to use to find files. Defaults to "**/[!.]*"
(all files except hidden).
silent_errors: Whether to silently ignore errors. Defaults to False.
load_hidden: Whether to load hidden files. Defaults to False.
loader_cls: Loader class to use for loading files.
Defaults to UnstructuredFileLoader.
loader_kwargs: Keyword arguments to pass to loader_cls. Defaults to None.
recursive: Whether to recursively search for files. Defaults to False.
show_progress: Whether to show a progress bar. Defaults to False.
use_multithreading: Whether to use multithreading. Defaults to False.
max_concurrency: The maximum number of threads to use. Defaults to 4.
sample_size: The maximum number of files you would like to load from the
directory.
randomize_sample: Shuffle the files to get a random sample.
sample_seed: set the seed of the random shuffle for reproducibility.
|
answers
|
"""Helper accessor on the json result."""
return self.get('answers')
|
@property
def answers(self) ->Any:
"""Helper accessor on the json result."""
return self.get('answers')
|
Helper accessor on the json result.
|
test_visit_operation
|
op = Operation(operator=Operator.AND, arguments=[Comparison(comparator=
Comparator.GTE, attribute='bar', value=5), Comparison(comparator=
Comparator.LT, attribute='bar', value=10), Comparison(comparator=
Comparator.EQ, attribute='baz', value='abcd')])
expected = {'bool': {'must': [{'range': {'metadata.bar': {'gte': 5}}}, {
'range': {'metadata.bar': {'lt': 10}}}, {'term': {
'metadata.baz.keyword': 'abcd'}}]}}
actual = DEFAULT_TRANSLATOR.visit_operation(op)
assert expected == actual
|
def test_visit_operation() ->None:
op = Operation(operator=Operator.AND, arguments=[Comparison(comparator=
Comparator.GTE, attribute='bar', value=5), Comparison(comparator=
Comparator.LT, attribute='bar', value=10), Comparison(comparator=
Comparator.EQ, attribute='baz', value='abcd')])
expected = {'bool': {'must': [{'range': {'metadata.bar': {'gte': 5}}},
{'range': {'metadata.bar': {'lt': 10}}}, {'term': {
'metadata.baz.keyword': 'abcd'}}]}}
actual = DEFAULT_TRANSLATOR.visit_operation(op)
assert expected == actual
| null |
_generate
|
"""Run the LLM on the given prompt and input."""
instances = self._prepare_request(prompts, **kwargs)
response = self.client.predict(endpoint=self.endpoint_path, instances=instances
)
return self._parse_response(response)
|
def _generate(self, prompts: List[str], stop: Optional[List[str]]=None,
run_manager: Optional[CallbackManagerForLLMRun]=None, **kwargs: Any
) ->LLMResult:
"""Run the LLM on the given prompt and input."""
instances = self._prepare_request(prompts, **kwargs)
response = self.client.predict(endpoint=self.endpoint_path, instances=
instances)
return self._parse_response(response)
|
Run the LLM on the given prompt and input.
|
validate_environment
|
"""Validate that api key and python package exists in environment."""
values['openai_api_key'] = values['openai_api_key'] or os.getenv(
'AZURE_OPENAI_API_KEY') or os.getenv('OPENAI_API_KEY')
values['openai_api_base'] = values['openai_api_base'] or os.getenv(
'OPENAI_API_BASE')
values['openai_api_version'] = values['openai_api_version'] or os.getenv(
'OPENAI_API_VERSION', default='2023-05-15')
values['openai_api_type'] = get_from_dict_or_env(values, 'openai_api_type',
'OPENAI_API_TYPE', default='azure')
values['openai_organization'] = values['openai_organization'] or os.getenv(
'OPENAI_ORG_ID') or os.getenv('OPENAI_ORGANIZATION')
values['openai_proxy'] = get_from_dict_or_env(values, 'openai_proxy',
'OPENAI_PROXY', default='')
values['azure_endpoint'] = values['azure_endpoint'] or os.getenv(
'AZURE_OPENAI_ENDPOINT')
values['azure_ad_token'] = values['azure_ad_token'] or os.getenv(
'AZURE_OPENAI_AD_TOKEN')
values['chunk_size'] = min(values['chunk_size'], 16)
try:
import openai
except ImportError:
raise ImportError(
'Could not import openai python package. Please install it with `pip install openai`.'
)
if is_openai_v1():
openai_api_base = values['openai_api_base']
if openai_api_base and values['validate_base_url']:
if '/openai' not in openai_api_base:
values['openai_api_base'] += '/openai'
warnings.warn(
f"As of openai>=1.0.0, Azure endpoints should be specified via the `azure_endpoint` param not `openai_api_base` (or alias `base_url`). Updating `openai_api_base` from {openai_api_base} to {values['openai_api_base']}."
)
if values['deployment']:
warnings.warn(
'As of openai>=1.0.0, if `deployment` (or alias `azure_deployment`) is specified then `openai_api_base` (or alias `base_url`) should not be. Instead use `deployment` (or alias `azure_deployment`) and `azure_endpoint`.'
)
if values['deployment'] not in values['openai_api_base']:
warnings.warn(
f"As of openai>=1.0.0, if `openai_api_base` (or alias `base_url`) is specified it is expected to be of the form https://example-resource.azure.openai.com/openai/deployments/example-deployment. Updating {openai_api_base} to {values['openai_api_base']}."
)
values['openai_api_base'] += '/deployments/' + values[
'deployment']
values['deployment'] = None
client_params = {'api_version': values['openai_api_version'],
'azure_endpoint': values['azure_endpoint'], 'azure_deployment':
values['deployment'], 'api_key': values['openai_api_key'],
'azure_ad_token': values['azure_ad_token'],
'azure_ad_token_provider': values['azure_ad_token_provider'],
'organization': values['openai_organization'], 'base_url': values[
'openai_api_base'], 'timeout': values['request_timeout'],
'max_retries': values['max_retries'], 'default_headers': values[
'default_headers'], 'default_query': values['default_query'],
'http_client': values['http_client']}
values['client'] = openai.AzureOpenAI(**client_params).embeddings
values['async_client'] = openai.AsyncAzureOpenAI(**client_params
).embeddings
else:
values['client'] = openai.Embedding
return values
|
@root_validator()
def validate_environment(cls, values: Dict) ->Dict:
"""Validate that api key and python package exists in environment."""
values['openai_api_key'] = values['openai_api_key'] or os.getenv(
'AZURE_OPENAI_API_KEY') or os.getenv('OPENAI_API_KEY')
values['openai_api_base'] = values['openai_api_base'] or os.getenv(
'OPENAI_API_BASE')
values['openai_api_version'] = values['openai_api_version'] or os.getenv(
'OPENAI_API_VERSION', default='2023-05-15')
values['openai_api_type'] = get_from_dict_or_env(values,
'openai_api_type', 'OPENAI_API_TYPE', default='azure')
values['openai_organization'] = values['openai_organization'] or os.getenv(
'OPENAI_ORG_ID') or os.getenv('OPENAI_ORGANIZATION')
values['openai_proxy'] = get_from_dict_or_env(values, 'openai_proxy',
'OPENAI_PROXY', default='')
values['azure_endpoint'] = values['azure_endpoint'] or os.getenv(
'AZURE_OPENAI_ENDPOINT')
values['azure_ad_token'] = values['azure_ad_token'] or os.getenv(
'AZURE_OPENAI_AD_TOKEN')
values['chunk_size'] = min(values['chunk_size'], 16)
try:
import openai
except ImportError:
raise ImportError(
'Could not import openai python package. Please install it with `pip install openai`.'
)
if is_openai_v1():
openai_api_base = values['openai_api_base']
if openai_api_base and values['validate_base_url']:
if '/openai' not in openai_api_base:
values['openai_api_base'] += '/openai'
warnings.warn(
f"As of openai>=1.0.0, Azure endpoints should be specified via the `azure_endpoint` param not `openai_api_base` (or alias `base_url`). Updating `openai_api_base` from {openai_api_base} to {values['openai_api_base']}."
)
if values['deployment']:
warnings.warn(
'As of openai>=1.0.0, if `deployment` (or alias `azure_deployment`) is specified then `openai_api_base` (or alias `base_url`) should not be. Instead use `deployment` (or alias `azure_deployment`) and `azure_endpoint`.'
)
if values['deployment'] not in values['openai_api_base']:
warnings.warn(
f"As of openai>=1.0.0, if `openai_api_base` (or alias `base_url`) is specified it is expected to be of the form https://example-resource.azure.openai.com/openai/deployments/example-deployment. Updating {openai_api_base} to {values['openai_api_base']}."
)
values['openai_api_base'] += '/deployments/' + values[
'deployment']
values['deployment'] = None
client_params = {'api_version': values['openai_api_version'],
'azure_endpoint': values['azure_endpoint'], 'azure_deployment':
values['deployment'], 'api_key': values['openai_api_key'],
'azure_ad_token': values['azure_ad_token'],
'azure_ad_token_provider': values['azure_ad_token_provider'],
'organization': values['openai_organization'], 'base_url':
values['openai_api_base'], 'timeout': values['request_timeout'],
'max_retries': values['max_retries'], 'default_headers': values
['default_headers'], 'default_query': values['default_query'],
'http_client': values['http_client']}
values['client'] = openai.AzureOpenAI(**client_params).embeddings
values['async_client'] = openai.AsyncAzureOpenAI(**client_params
).embeddings
else:
values['client'] = openai.Embedding
return values
|
Validate that api key and python package exists in environment.
|
ignore_chain
|
"""Whether to ignore chain callbacks."""
return self.ignore_chain_
|
@property
def ignore_chain(self) ->bool:
"""Whether to ignore chain callbacks."""
return self.ignore_chain_
|
Whether to ignore chain callbacks.
|
test_prompt_jinja2_wrong_input_variables
|
"""Test error is raised when name of input variable is wrong."""
template = 'This is a {{ foo }} test.'
input_variables = ['bar']
with pytest.warns(UserWarning):
PromptTemplate(input_variables=input_variables, template=template,
template_format='jinja2', validate_template=True)
assert PromptTemplate(input_variables=input_variables, template=template,
template_format='jinja2').input_variables == ['foo']
|
@pytest.mark.requires('jinja2')
def test_prompt_jinja2_wrong_input_variables() ->None:
"""Test error is raised when name of input variable is wrong."""
template = 'This is a {{ foo }} test.'
input_variables = ['bar']
with pytest.warns(UserWarning):
PromptTemplate(input_variables=input_variables, template=template,
template_format='jinja2', validate_template=True)
assert PromptTemplate(input_variables=input_variables, template=
template, template_format='jinja2').input_variables == ['foo']
|
Test error is raised when name of input variable is wrong.
|
get_lc_namespace
|
"""Get the namespace of the langchain object."""
return ['langchain', 'chat_models', 'bedrock']
|
@classmethod
def get_lc_namespace(cls) ->List[str]:
"""Get the namespace of the langchain object."""
return ['langchain', 'chat_models', 'bedrock']
|
Get the namespace of the langchain object.
|
_identifying_params
|
"""Get the identifying params."""
return {'model': self.model, 'instruction': self.instruction}
|
@property
def _identifying_params(self) ->Mapping[str, Any]:
"""Get the identifying params."""
return {'model': self.model, 'instruction': self.instruction}
|
Get the identifying params.
|
on_llm_end
|
"""End a trace for an LLM run."""
llm_run = self._get_run(run_id, run_type='llm')
llm_run.outputs = response.dict()
for i, generations in enumerate(response.generations):
for j, generation in enumerate(generations):
output_generation = llm_run.outputs['generations'][i][j]
if 'message' in output_generation:
output_generation['message'] = dumpd(cast(ChatGeneration,
generation).message)
llm_run.end_time = datetime.now(timezone.utc)
llm_run.events.append({'name': 'end', 'time': llm_run.end_time})
self._end_trace(llm_run)
self._on_llm_end(llm_run)
return llm_run
|
def on_llm_end(self, response: LLMResult, *, run_id: UUID, **kwargs: Any
    ) ->Run:
    """End a trace for an LLM run.

    Args:
        response: Final LLMResult returned by the model.
        run_id: Identifier of the LLM run being closed.

    Returns:
        The completed Run object.
    """
    llm_run = self._get_run(run_id, run_type='llm')
    llm_run.outputs = response.dict()
    # Replace any chat messages inside the serialized generations with
    # fully dumped message payloads so the stored outputs are complete.
    for i, generations in enumerate(response.generations):
        for j, generation in enumerate(generations):
            output_generation = llm_run.outputs['generations'][i][j]
            if 'message' in output_generation:
                output_generation['message'] = dumpd(cast(ChatGeneration,
                    generation).message)
    # Stamp a timezone-aware end time and record the matching event.
    llm_run.end_time = datetime.now(timezone.utc)
    llm_run.events.append({'name': 'end', 'time': llm_run.end_time})
    self._end_trace(llm_run)
    self._on_llm_end(llm_run)
    return llm_run
|
End a trace for an LLM run.
|
__init__
|
self.step = 0
self.starts = 0
self.ends = 0
self.errors = 0
self.text_ctr = 0
self.ignore_llm_ = False
self.ignore_chain_ = False
self.ignore_agent_ = False
self.ignore_retriever_ = False
self.always_verbose_ = False
self.chain_starts = 0
self.chain_ends = 0
self.llm_starts = 0
self.llm_ends = 0
self.llm_streams = 0
self.tool_starts = 0
self.tool_ends = 0
self.agent_ends = 0
self.on_llm_start_records: list = []
self.on_llm_token_records: list = []
self.on_llm_end_records: list = []
self.on_chain_start_records: list = []
self.on_chain_end_records: list = []
self.on_tool_start_records: list = []
self.on_tool_end_records: list = []
self.on_text_records: list = []
self.on_agent_finish_records: list = []
self.on_agent_action_records: list = []
|
def __init__(self) ->None:
    """Reset every counter, flag, and record list to its initial state."""
    # Numeric counters for each callback type start at zero.
    for counter in ('step', 'starts', 'ends', 'errors', 'text_ctr',
        'chain_starts', 'chain_ends', 'llm_starts', 'llm_ends',
        'llm_streams', 'tool_starts', 'tool_ends', 'agent_ends'):
        setattr(self, counter, 0)
    # Ignore/verbose flags all default to False.
    for flag in ('ignore_llm_', 'ignore_chain_', 'ignore_agent_',
        'ignore_retriever_', 'always_verbose_'):
        setattr(self, flag, False)
    # Per-callback capture lists start empty.
    for record in ('on_llm_start_records', 'on_llm_token_records',
        'on_llm_end_records', 'on_chain_start_records',
        'on_chain_end_records', 'on_tool_start_records',
        'on_tool_end_records', 'on_text_records',
        'on_agent_finish_records', 'on_agent_action_records'):
        setattr(self, record, [])
| null |
list_lists
|
return self.lists
|
def list_lists(self) ->list:
    """Return the stored lists collection (no copy is made)."""
    stored = self.lists
    return stored
| null |
_import_zep
|
from langchain_community.vectorstores.zep import ZepVectorStore
return ZepVectorStore
|
def _import_zep() ->Any:
    """Lazily import ZepVectorStore to avoid a hard import-time dependency."""
    from langchain_community.vectorstores.zep import ZepVectorStore as _cls
    return _cls
| null |
test_openai_incorrect_field
|
with pytest.warns(match='not default parameter'):
llm = OpenAIEmbeddings(foo='bar')
assert llm.model_kwargs == {'foo': 'bar'}
|
@pytest.mark.requires('openai')
def test_openai_incorrect_field() ->None:
    """Unknown constructor fields warn and are routed into model_kwargs."""
    with pytest.warns(match='not default parameter'):
        embedder = OpenAIEmbeddings(foo='bar')
    assert embedder.model_kwargs == {'foo': 'bar'}
| null |
resolve_prompt
|
return ChatPromptTemplate.from_strings(self.get_prompt_strings('resolve'))
|
def resolve_prompt(self) ->ChatPromptTemplate:
    """Build the chat prompt template for the 'resolve' step."""
    return ChatPromptTemplate.from_strings(self.get_prompt_strings('resolve'))
| null |
_validate_ttl
|
if ttl is not None and ttl <= timedelta(seconds=0):
raise ValueError(f'ttl must be positive but was {ttl}.')
|
def _validate_ttl(ttl: Optional[timedelta]) ->None:
    """Reject non-positive TTLs; None means "no TTL" and is allowed.

    Raises:
        ValueError: If *ttl* is zero or negative.
    """
    if ttl is None:
        return
    if ttl <= timedelta(seconds=0):
        raise ValueError(f'ttl must be positive but was {ttl}.')
| null |
on_tool_end
|
"""End a trace for a tool run."""
tool_run = self._get_run(run_id, run_type='tool')
tool_run.outputs = {'output': output}
tool_run.end_time = datetime.now(timezone.utc)
tool_run.events.append({'name': 'end', 'time': tool_run.end_time})
self._end_trace(tool_run)
self._on_tool_end(tool_run)
return tool_run
|
def on_tool_end(self, output: str, *, run_id: UUID, **kwargs: Any) ->Run:
    """End a trace for a tool run.

    Args:
        output: Final string output produced by the tool.
        run_id: Identifier of the tool run being closed.

    Returns:
        The completed Run object.
    """
    tool_run = self._get_run(run_id, run_type='tool')
    # Record the tool's output and stamp a timezone-aware end time.
    tool_run.outputs = {'output': output}
    tool_run.end_time = datetime.now(timezone.utc)
    tool_run.events.append({'name': 'end', 'time': tool_run.end_time})
    # Notify the generic and tool-specific end hooks.
    self._end_trace(tool_run)
    self._on_tool_end(tool_run)
    return tool_run
|
End a trace for a tool run.
|
_import_azuresearch
|
from langchain_community.vectorstores.azuresearch import AzureSearch
return AzureSearch
|
def _import_azuresearch() ->Any:
    """Lazily import AzureSearch to avoid a hard import-time dependency."""
    from langchain_community.vectorstores.azuresearch import AzureSearch as _cls
    return _cls
| null |
prefix_config_spec
|
"""Prefix the id of a ConfigurableFieldSpec.
This is useful when a RunnableConfigurableAlternatives is used as a
ConfigurableField of another RunnableConfigurableAlternatives.
Args:
spec: The ConfigurableFieldSpec to prefix.
prefix: The prefix to add.
Returns:
"""
return ConfigurableFieldSpec(id=f'{prefix}/{spec.id}', name=spec.name,
description=spec.description, annotation=spec.annotation, default=spec.
default, is_shared=spec.is_shared) if not spec.is_shared else spec
|
def prefix_config_spec(spec: ConfigurableFieldSpec, prefix: str
    ) ->ConfigurableFieldSpec:
    """Prefix the id of a ConfigurableFieldSpec.
    This is useful when a RunnableConfigurableAlternatives is used as a
    ConfigurableField of another RunnableConfigurableAlternatives.
    Args:
        spec: The ConfigurableFieldSpec to prefix.
        prefix: The prefix to add.
    Returns:
        A copy of *spec* with ``prefix/`` prepended to its id, or *spec*
        itself unchanged when the spec is shared.
    """
    return ConfigurableFieldSpec(id=f'{prefix}/{spec.id}', name=spec.name,
        description=spec.description, annotation=spec.annotation, default=
        spec.default, is_shared=spec.is_shared) if not spec.is_shared else spec
|
Prefix the id of a ConfigurableFieldSpec.
This is useful when a RunnableConfigurableAlternatives is used as a
ConfigurableField of another RunnableConfigurableAlternatives.
Args:
spec: The ConfigurableFieldSpec to prefix.
prefix: The prefix to add.
Returns:
|
__init__
|
self.task = task
self.id = id
self.dep = dep
self.args = args
self.tool = tool
self.status = 'pending'
self.message = ''
self.result = ''
|
def __init__(self, task: str, id: int, dep: List[int], args: Dict, tool:
    BaseTool):
    """Create a task in the 'pending' state.

    Args:
        task: Human-readable description of the task.
        id: Numeric identifier of this task.
        dep: Ids of tasks this one depends on.
        args: Arguments to pass to the tool when the task runs.
        tool: Tool that will execute the task.
    """
    self.task = task
    self.id = id
    self.dep = dep
    self.args = args
    self.tool = tool
    # Execution state: starts pending with empty message/result.
    self.status = 'pending'
    self.message = ''
    self.result = ''
| null |
_identifying_params
|
return {**{'endpoint': self.endpoint, 'model': self.model}, **super().
_identifying_params}
|
@property
def _identifying_params(self) ->Dict[str, Any]:
    """Identifying parameters: endpoint and model merged over the base class's."""
    # Base-class params are spread last; NOTE(review): that means they
    # would override 'endpoint'/'model' on a key collision — confirm intended.
    return {**{'endpoint': self.endpoint, 'model': self.model}, **super().
        _identifying_params}
| null |
load
|
"""Load documents."""
max_num = 1000
mark = None
docs = []
while True:
resp = self.client.listObjects(self.bucket, prefix=self.prefix, marker=
mark, max_keys=max_num)
if resp.status < 300:
for content in resp.body.contents:
loader = OBSFileLoader(self.bucket, content.key, client=self.client
)
docs.extend(loader.load())
if resp.body.is_truncated is True:
mark = resp.body.next_marker
else:
break
return docs
|
def load(self) ->List[Document]:
    """Load documents.

    Pages through all objects in the OBS bucket under ``self.prefix``
    (1000 keys per request) and loads each object via OBSFileLoader.
    """
    max_num = 1000
    mark = None
    docs = []
    while True:
        resp = self.client.listObjects(self.bucket, prefix=self.prefix,
            marker=mark, max_keys=max_num)
        if resp.status < 300:
            for content in resp.body.contents:
                loader = OBSFileLoader(self.bucket, content.key, client=
                    self.client)
                docs.extend(loader.load())
        # NOTE(review): on a non-2xx status the page is skipped but
        # resp.body.is_truncated is still consulted — if error responses
        # lack a body this may raise, and if is_truncated stays true the
        # loop may never terminate. Confirm against the OBS SDK.
        if resp.body.is_truncated is True:
            mark = resp.body.next_marker
        else:
            break
    return docs
|
Load documents.
|
post
|
"""POST to the URL and return the text."""
return requests.post(url, json=data, headers=self.headers, auth=self.auth,
**kwargs)
|
def post(self, url: str, data: Dict[str, Any], **kwargs: Any
    ) ->requests.Response:
    """POST *data* as a JSON body to *url* and return the raw Response.

    Uses the instance's configured headers and auth; extra kwargs are
    forwarded to ``requests.post``.
    """
    return requests.post(url, json=data, headers=self.headers, auth=self.
        auth, **kwargs)
|
POST to the URL and return the text.
|
add_texts
|
raise NotImplementedError(
'Annoy does not allow to add new data once the index is build.')
|
def add_texts(self, texts: Iterable[str], metadatas: Optional[List[dict]]=
    None, **kwargs: Any) ->List[str]:
    """Unsupported for Annoy: the index is immutable once built.

    Raises:
        NotImplementedError: Always.
    """
    raise NotImplementedError(
        'Annoy does not allow to add new data once the index is build.')
| null |
embeddings
|
return self._embeddings
|
@property
def embeddings(self) ->Optional[Embeddings]:
    """Access the underlying embeddings object, if one was provided."""
    return self._embeddings
| null |
test_conversation_memory
|
"""Test basic conversation memory functionality."""
good_inputs = {'foo': 'bar', 'baz': 'foo'}
good_outputs = {'bar': 'foo'}
memory.save_context(good_inputs, good_outputs)
bad_inputs = {'foo': 'bar', 'foo1': 'bar'}
with pytest.raises(ValueError):
memory.save_context(bad_inputs, good_outputs)
bad_inputs = {'baz': 'bar'}
with pytest.raises(ValueError):
memory.save_context(bad_inputs, good_outputs)
with pytest.raises(ValueError):
memory.save_context(good_inputs, {})
bad_outputs = {'foo': 'bar', 'foo1': 'bar'}
with pytest.raises(ValueError):
memory.save_context(good_inputs, bad_outputs)
|
@pytest.mark.parametrize('memory', [ConversationBufferMemory(memory_key=
    'baz'), ConversationBufferWindowMemory(memory_key='baz'),
    ConversationSummaryMemory(llm=FakeLLM(), memory_key='baz')])
def test_conversation_memory(memory: BaseMemory) ->None:
    """Test basic conversation memory functionality."""
    valid_inputs = {'foo': 'bar', 'baz': 'foo'}
    valid_outputs = {'bar': 'foo'}
    # A well-formed exchange saves without error.
    memory.save_context(valid_inputs, valid_outputs)
    # Two non-memory input keys are ambiguous.
    with pytest.raises(ValueError):
        memory.save_context({'foo': 'bar', 'foo1': 'bar'}, valid_outputs)
    # Only the memory key itself leaves no input to record.
    with pytest.raises(ValueError):
        memory.save_context({'baz': 'bar'}, valid_outputs)
    # Empty outputs are rejected.
    with pytest.raises(ValueError):
        memory.save_context(valid_inputs, {})
    # Multiple output keys are ambiguous.
    with pytest.raises(ValueError):
        memory.save_context(valid_inputs, {'foo': 'bar', 'foo1': 'bar'})
|
Test basic conversation memory functionality.
|
get_relative_path
|
"""Get the relative path, returning an error if unsupported."""
if self.root_dir is None:
return Path(file_path)
return get_validated_relative_path(Path(self.root_dir), file_path)
|
def get_relative_path(self, file_path: str) ->Path:
    """Resolve *file_path* relative to the configured root directory.

    When no root directory is configured, the path is returned as-is;
    otherwise it is validated against the root before being returned.
    """
    root = self.root_dir
    if root is None:
        return Path(file_path)
    return get_validated_relative_path(Path(root), file_path)
|
Get the relative path, returning an error if unsupported.
|
__eq__
|
return isinstance(other, RunLogPatch) and self.ops == other.ops
|
def __eq__(self, other: object) ->bool:
    """Two patches are equal when the other is a RunLogPatch with equal ops."""
    if not isinstance(other, RunLogPatch):
        return False
    return self.ops == other.ops
| null |
add_tool
|
self.commands.append(tool)
|
def add_tool(self, tool: BaseTool) ->None:
    """Register *tool* on the internal command list."""
    self.commands.append(tool)
| null |
load
|
"""Load all chat messages."""
result = []
for _, row in self.chat_log.iterrows():
user_id = row[self.user_id_col]
metadata = row.to_dict()
metadata.pop(self.user_id_col)
result.append(Document(page_content=user_id, metadata=metadata))
return result
|
def load(self) ->List[Document]:
    """Load all chat messages.

    Each row of the chat log becomes one Document: the user-id column is
    the page content and every remaining column is carried as metadata.
    """
    documents = []
    id_col = self.user_id_col
    for _, record in self.chat_log.iterrows():
        meta = record.to_dict()
        uid = meta.pop(id_col)
        documents.append(Document(page_content=uid, metadata=meta))
    return documents
|
Load all chat messages.
|
add_message
|
"""Append the message to the record in DynamoDB"""
try:
from botocore.exceptions import ClientError
except ImportError as e:
raise ImportError(
'Unable to import botocore, please install with `pip install botocore`.'
) from e
messages = messages_to_dict(self.messages)
_message = message_to_dict(message)
messages.append(_message)
try:
self.table.put_item(Item={**self.key, 'History': messages})
except ClientError as err:
logger.error(err)
|
def add_message(self, message: BaseMessage) ->None:
    """Append the message to the record in DynamoDB"""
    try:
        from botocore.exceptions import ClientError
    except ImportError as e:
        raise ImportError(
            'Unable to import botocore, please install with `pip install botocore`.'
            ) from e
    # Re-serialize the full history plus the new message, then overwrite
    # the item stored under this session's key.
    messages = messages_to_dict(self.messages)
    _message = message_to_dict(message)
    messages.append(_message)
    try:
        self.table.put_item(Item={**self.key, 'History': messages})
    except ClientError as err:
        # Best-effort write: DynamoDB failures are logged, not raised.
        logger.error(err)
|
Append the message to the record in DynamoDB
|
test_faiss_add_texts_not_supported
|
"""Test adding of texts to a docstore that doesn't support it."""
docsearch = FAISS(FakeEmbeddings(), None, FakeDocstore(), {})
with pytest.raises(ValueError):
docsearch.add_texts(['foo'])
|
@pytest.mark.requires('faiss')
def test_faiss_add_texts_not_supported() ->None:
    """Test adding of texts to a docstore that doesn't support it."""
    store = FAISS(FakeEmbeddings(), None, FakeDocstore(), {})
    with pytest.raises(ValueError):
        store.add_texts(['foo'])
|
Test adding of texts to a docstore that doesn't support it.
|
test_load_pupmed_from_universal_entry_with_params
|
params = {'top_k_results': 1}
pubmed_tool = _load_pubmed_from_universal_entry(**params)
assert isinstance(pubmed_tool, PubmedQueryRun)
wp = pubmed_tool.api_wrapper
assert wp.top_k_results == 1, 'failed to assert top_k_results'
|
def test_load_pupmed_from_universal_entry_with_params() ->None:
    """Loading the PubMed tool should forward wrapper parameters."""
    pubmed_tool = _load_pubmed_from_universal_entry(top_k_results=1)
    assert isinstance(pubmed_tool, PubmedQueryRun)
    wrapper = pubmed_tool.api_wrapper
    assert wrapper.top_k_results == 1, 'failed to assert top_k_results'
| null |
connection_string_from_db_params
|
"""Return connection string from database parameters."""
return f'postgresql+{driver}://{user}:{password}@{host}:{port}/{database}'
|
@classmethod
def connection_string_from_db_params(cls, driver: str, host: str, port: int,
    database: str, user: str, password: str) ->str:
    """Build a SQLAlchemy-style Postgres connection URI from its parts."""
    template = 'postgresql+{d}://{u}:{pw}@{h}:{p}/{db}'
    return template.format(d=driver, u=user, pw=password, h=host, p=port,
        db=database)
|
Return connection string from database parameters.
|
_import_jira_tool
|
from langchain_community.tools.jira.tool import JiraAction
return JiraAction
|
def _import_jira_tool() ->Any:
    """Lazily import JiraAction to avoid a hard import-time dependency."""
    from langchain_community.tools.jira.tool import JiraAction as _cls
    return _cls
| null |
results
|
"""Run query through Searx API and returns the results with metadata.
Args:
query: The query to search for.
query_suffix: Extra suffix appended to the query.
num_results: Limit the number of results to return.
engines: List of engines to use for the query.
categories: List of categories to use for the query.
**kwargs: extra parameters to pass to the searx API.
Returns:
Dict with the following keys:
{
snippet: The description of the result.
title: The title of the result.
link: The link to the result.
engines: The engines used for the result.
category: Searx category of the result.
}
"""
_params = {'q': query}
params = {**self.params, **_params, **kwargs}
if self.query_suffix and len(self.query_suffix) > 0:
params['q'] += ' ' + self.query_suffix
if isinstance(query_suffix, str) and len(query_suffix) > 0:
params['q'] += ' ' + query_suffix
if isinstance(engines, list) and len(engines) > 0:
params['engines'] = ','.join(engines)
if isinstance(categories, list) and len(categories) > 0:
params['categories'] = ','.join(categories)
results = self._searx_api_query(params).results[:num_results]
if len(results) == 0:
return [{'Result': 'No good Search Result was found'}]
return [{'snippet': result.get('content', ''), 'title': result['title'],
'link': result['url'], 'engines': result['engines'], 'category': result
['category']} for result in results]
|
def results(self, query: str, num_results: int, engines: Optional[List[str]
    ]=None, categories: Optional[List[str]]=None, query_suffix: Optional[
    str]='', **kwargs: Any) ->List[Dict]:
    """Run query through Searx API and returns the results with metadata.
    Args:
        query: The query to search for.
        query_suffix: Extra suffix appended to the query.
        num_results: Limit the number of results to return.
        engines: List of engines to use for the query.
        categories: List of categories to use for the query.
        **kwargs: extra parameters to pass to the searx API.
    Returns:
        Dict with the following keys:
        {
            snippet: The description of the result.
            title: The title of the result.
            link: The link to the result.
            engines: The engines used for the result.
            category: Searx category of the result.
        }
        When no results are found a single sentinel dict with key
        'Result' is returned instead.
    """
    _params = {'q': query}
    # Call-specific kwargs override the instance-level default params.
    params = {**self.params, **_params, **kwargs}
    # Instance-level suffix is appended first, then any per-call suffix.
    if self.query_suffix and len(self.query_suffix) > 0:
        params['q'] += ' ' + self.query_suffix
    if isinstance(query_suffix, str) and len(query_suffix) > 0:
        params['q'] += ' ' + query_suffix
    if isinstance(engines, list) and len(engines) > 0:
        params['engines'] = ','.join(engines)
    if isinstance(categories, list) and len(categories) > 0:
        params['categories'] = ','.join(categories)
    results = self._searx_api_query(params).results[:num_results]
    if len(results) == 0:
        return [{'Result': 'No good Search Result was found'}]
    return [{'snippet': result.get('content', ''), 'title': result['title'],
        'link': result['url'], 'engines': result['engines'], 'category':
        result['category']} for result in results]
|
Run query through Searx API and returns the results with metadata.
Args:
query: The query to search for.
query_suffix: Extra suffix appended to the query.
num_results: Limit the number of results to return.
engines: List of engines to use for the query.
categories: List of categories to use for the query.
**kwargs: extra parameters to pass to the searx API.
Returns:
Dict with the following keys:
{
snippet: The description of the result.
title: The title of the result.
link: The link to the result.
engines: The engines used for the result.
category: Searx category of the result.
}
|
lazy_parse
|
"""Extract the second character of a blob."""
yield Document(page_content=blob.as_string()[1])
|
def lazy_parse(self, blob: Blob) ->Iterator[Document]:
    """Extract the second character of a blob.

    Yields a single Document whose content is the character at index 1 of
    the blob's string form; raises IndexError for shorter inputs.
    """
    yield Document(page_content=blob.as_string()[1])
|
Extract the second character of a blob.
|
_get_eleven_labs_text2speech
|
return ElevenLabsText2SpeechTool(**kwargs)
|
def _get_eleven_labs_text2speech(**kwargs: Any) ->BaseTool:
    """Factory: build an ElevenLabs text-to-speech tool from keyword args."""
    return ElevenLabsText2SpeechTool(**kwargs)
| null |
from_llm
|
llm_chain = LLMChain(llm=llm, prompt=prompt)
return cls(llm_chain=llm_chain, **kwargs)
|
@classmethod
def from_llm(cls, llm: BaseLanguageModel, prompt: BasePromptTemplate=PROMPT,
    **kwargs: Any) ->LLMSymbolicMathChain:
    """Alternate constructor: wrap *llm* and *prompt* in an LLMChain.

    Args:
        llm: Language model to drive the chain.
        prompt: Prompt template; defaults to the module-level PROMPT.
        **kwargs: Forwarded to the chain constructor.
    """
    llm_chain = LLMChain(llm=llm, prompt=prompt)
    return cls(llm_chain=llm_chain, **kwargs)
| null |
_start_trace
|
"""Start a trace for a run."""
current_dotted_order = run.start_time.strftime('%Y%m%dT%H%M%S%fZ') + str(run.id
)
if run.parent_run_id:
parent_run = self.run_map.get(str(run.parent_run_id))
if parent_run:
self._add_child_run(parent_run, run)
parent_run.child_execution_order = max(parent_run.
child_execution_order, run.child_execution_order)
run.trace_id = parent_run.trace_id
if parent_run.dotted_order:
run.dotted_order = (parent_run.dotted_order + '.' +
current_dotted_order)
else:
logger.debug(
f'Parent run with UUID {run.parent_run_id} has no dotted_order.'
)
else:
logger.debug(f'Parent run with UUID {run.parent_run_id} not found.')
else:
run.trace_id = run.id
run.dotted_order = current_dotted_order
self.run_map[str(run.id)] = run
self._on_run_create(run)
|
def _start_trace(self, run: Run) ->None:
    """Start a trace for a run.

    Links *run* to its parent (if any), assigns trace_id/dotted_order,
    registers it in the run map, and fires the creation hook.
    """
    # Dotted-order component: UTC-style timestamp followed by the run id.
    current_dotted_order = run.start_time.strftime('%Y%m%dT%H%M%S%fZ') + str(
        run.id)
    if run.parent_run_id:
        parent_run = self.run_map.get(str(run.parent_run_id))
        if parent_run:
            self._add_child_run(parent_run, run)
            # Keep the parent's child_execution_order monotonically
            # increasing across all of its children.
            parent_run.child_execution_order = max(parent_run.
                child_execution_order, run.child_execution_order)
            run.trace_id = parent_run.trace_id
            if parent_run.dotted_order:
                run.dotted_order = (parent_run.dotted_order + '.' +
                    current_dotted_order)
            else:
                logger.debug(
                    f'Parent run with UUID {run.parent_run_id} has no dotted_order.'
                    )
        else:
            # Missing parent is tolerated: the run is still registered
            # below, just without trace linkage.
            logger.debug(f'Parent run with UUID {run.parent_run_id} not found.'
                )
    else:
        # Root run: it is its own trace.
        run.trace_id = run.id
        run.dotted_order = current_dotted_order
    self.run_map[str(run.id)] = run
    self._on_run_create(run)
|
Start a trace for a run.
|
get_pydantic_major_version
|
"""Get the major version of Pydantic."""
try:
import pydantic
return int(pydantic.__version__.split('.')[0])
except ImportError:
return 0
|
def get_pydantic_major_version() ->int:
    """Get the major version of Pydantic.

    Returns 0 when pydantic is not installed.
    """
    try:
        import pydantic
    except ImportError:
        return 0
    major, _, _ = pydantic.__version__.partition('.')
    return int(major)
|
Get the major version of Pydantic.
|
api_passed_via_environment_fixture
|
"""Fixture to create an AzureMLChatOnlineEndpoint instance
with API key passed from environment"""
os.environ['AZUREML_ENDPOINT_API_KEY'] = 'my-api-key'
azure_chat = AzureMLChatOnlineEndpoint(endpoint_url=
'https://<your-endpoint>.<your_region>.inference.ml.azure.com/score')
del os.environ['AZUREML_ENDPOINT_API_KEY']
return azure_chat
|
@pytest.fixture(scope='class')
def api_passed_via_environment_fixture() ->AzureMLChatOnlineEndpoint:
    """Fixture to create an AzureMLChatOnlineEndpoint instance
    with API key passed from environment.

    The previous value of AZUREML_ENDPOINT_API_KEY (if any) is restored
    afterwards, and the fake key is cleaned up even if construction fails —
    the original unconditionally deleted the variable, clobbering any
    pre-existing value.
    """
    previous = os.environ.get('AZUREML_ENDPOINT_API_KEY')
    os.environ['AZUREML_ENDPOINT_API_KEY'] = 'my-api-key'
    try:
        azure_chat = AzureMLChatOnlineEndpoint(endpoint_url=
            'https://<your-endpoint>.<your_region>.inference.ml.azure.com/score')
    finally:
        if previous is None:
            del os.environ['AZUREML_ENDPOINT_API_KEY']
        else:
            os.environ['AZUREML_ENDPOINT_API_KEY'] = previous
    return azure_chat
|
Fixture to create an AzureMLChatOnlineEndpoint instance
with API key passed from environment
|
llm
|
return self.task in ('llm/v1/chat', 'llm/v1/completions', 'llama2/chat')
|
@property
def llm(self) ->bool:
    """Whether the configured task is an LLM-style (chat/completions) task."""
    llm_tasks = {'llm/v1/chat', 'llm/v1/completions', 'llama2/chat'}
    return self.task in llm_tasks
| null |
predict
|
if stop is None:
_stop = None
else:
_stop = list(stop)
return self(text, stop=_stop, **kwargs)
|
def predict(self, text: str, *, stop: Optional[Sequence[str]]=None, **
    kwargs: Any) ->str:
    """Call the model on *text*, normalizing *stop* to a list first."""
    _stop = None if stop is None else list(stop)
    return self(text, stop=_stop, **kwargs)
| null |
_toxicity_init_validate
|
"""
Validate and initialize toxicity processing configuration.
Args:
max_size (int): Maximum sentence size defined in the
configuration object.
Raises:
Exception: If the maximum sentence size exceeds the 5KB limit.
Note:
This function ensures that the NLTK punkt tokenizer is downloaded
if not already present.
Returns:
None
"""
if max_size > 1024 * 5:
raise Exception('The sentence length should not exceed 5KB.')
try:
nltk = importlib.import_module('nltk')
nltk.data.find('tokenizers/punkt')
return nltk
except ImportError:
raise ModuleNotFoundError(
'Could not import nltk python package. Please install it with `pip install nltk`.'
)
except LookupError:
nltk.download('punkt')
|
def _toxicity_init_validate(self, max_size: int) ->Any:
    """
    Validate and initialize toxicity processing configuration.
    Args:
        max_size (int): Maximum sentence size defined in the
        configuration object.
    Raises:
        Exception: If the maximum sentence size exceeds the 5KB limit.
    Note:
        This function ensures that the NLTK punkt tokenizer is downloaded
        if not already present.
    Returns:
        The imported ``nltk`` module, ready for tokenization.
    """
    if max_size > 1024 * 5:
        raise Exception('The sentence length should not exceed 5KB.')
    try:
        nltk = importlib.import_module('nltk')
        nltk.data.find('tokenizers/punkt')
    except ImportError:
        raise ModuleNotFoundError(
            'Could not import nltk python package. Please install it with `pip install nltk`.'
            )
    except LookupError:
        # First run on this machine: fetch the tokenizer data. The original
        # implicitly returned None here, so callers got no module on first
        # use; fall through to return nltk in every successful path.
        nltk.download('punkt')
    return nltk
|
Validate and initialize toxicity processing configuration.
Args:
max_size (int): Maximum sentence size defined in the
configuration object.
Raises:
Exception: If the maximum sentence size exceeds the 5KB limit.
Note:
This function ensures that the NLTK punkt tokenizer is downloaded
if not already present.
Returns:
None
|
test_split_by_punctuation
|
parts = VertexAIEmbeddings._split_by_punctuation(
"""Hello, my friend!
How are you?
I have 2 news:
- Good,
- Bad.""")
assert parts == ['Hello', ',', ' ', 'my', ' ', 'friend', '!', '\n', 'How',
' ', 'are', ' ', 'you', '?', '\n', 'I', ' ', 'have', ' ', '2', ' ',
'news', ':', '\n', '\n', '\t', '-', ' ', 'Good', ',', '\n', '\t', '-',
' ', 'Bad', '.']
|
def test_split_by_punctuation() ->None:
parts = VertexAIEmbeddings._split_by_punctuation(
"""Hello, my friend!
How are you?
I have 2 news:
- Good,
- Bad.""")
assert parts == ['Hello', ',', ' ', 'my', ' ', 'friend', '!', '\n',
'How', ' ', 'are', ' ', 'you', '?', '\n', 'I', ' ', 'have', ' ',
'2', ' ', 'news', ':', '\n', '\n', '\t', '-', ' ', 'Good', ',',
'\n', '\t', '-', ' ', 'Bad', '.']
| null |
from_texts
|
"""Add texts to the vectorstore index.
Args:
texts: Iterable of strings to add to the vectorstore.
metadatas: Optional list of metadatas associated with the texts.
connection_string: URI to Yellowbrick instance
embedding: Embedding function
table: table to store embeddings
kwargs: vectorstore specific parameters
"""
if connection_string is None:
raise ValueError('connection_string must be provided')
vss = cls(embedding=embedding, connection_string=connection_string, table=table
)
vss.add_texts(texts=texts, metadatas=metadatas)
return vss
|
@classmethod
def from_texts(cls: Type[Yellowbrick], texts: List[str], embedding:
    Embeddings, metadatas: Optional[List[dict]]=None, connection_string:
    str='', table: str='langchain', **kwargs: Any) ->Yellowbrick:
    """Add texts to the vectorstore index.
    Args:
        texts: Iterable of strings to add to the vectorstore.
        metadatas: Optional list of metadatas associated with the texts.
        connection_string: URI to Yellowbrick instance
        embedding: Embedding function
        table: table to store embeddings
        kwargs: vectorstore specific parameters
    Raises:
        ValueError: If no connection string is provided.
    """
    # The parameter defaults to the empty string, so the original
    # `is None` test could never fire; reject any falsy value so both
    # None and '' produce a clear error instead of a downstream failure.
    if not connection_string:
        raise ValueError('connection_string must be provided')
    vss = cls(embedding=embedding, connection_string=connection_string,
        table=table)
    vss.add_texts(texts=texts, metadatas=metadatas)
    return vss
|
Add texts to the vectorstore index.
Args:
texts: Iterable of strings to add to the vectorstore.
metadatas: Optional list of metadatas associated with the texts.
connection_string: URI to Yellowbrick instance
embedding: Embedding function
table: table to store embeddings
kwargs: vectorstore specific parameters
|
_subreddit_posts_loader
|
subreddit = reddit.subreddit(search_query)
method = getattr(subreddit, category)
cat_posts = method(limit=self.number_posts)
"""Format reddit posts into a string."""
for post in cat_posts:
metadata = {'post_subreddit': post.subreddit_name_prefixed,
'post_category': category, 'post_title': post.title, 'post_score':
post.score, 'post_id': post.id, 'post_url': post.url, 'post_author':
post.author}
yield Document(page_content=post.selftext, metadata=metadata)
|
def _subreddit_posts_loader(self, search_query: str, category: str, reddit:
    praw.reddit.Reddit) ->Iterable[Document]:
    """Lazily yield one Document per post in a subreddit listing.

    Args:
        search_query: Name of the subreddit to load posts from.
        category: Listing attribute on the subreddit (e.g. 'hot', 'new').
        reddit: Authenticated praw Reddit client.

    Yields:
        Documents whose page_content is the post selftext and whose
        metadata captures the post's identifying fields.
    """
    subreddit = reddit.subreddit(search_query)
    # Resolve the listing method (hot/new/...) by name; cap at the
    # configured number of posts. (The original had a stray no-op string
    # literal here masquerading as a docstring — removed.)
    method = getattr(subreddit, category)
    cat_posts = method(limit=self.number_posts)
    for post in cat_posts:
        metadata = {'post_subreddit': post.subreddit_name_prefixed,
            'post_category': category, 'post_title': post.title,
            'post_score': post.score, 'post_id': post.id, 'post_url':
            post.url, 'post_author': post.author}
        yield Document(page_content=post.selftext, metadata=metadata)
| null |
add_message
|
"""Append the message to the record in MongoDB"""
from pymongo import errors
try:
self.collection.insert_one({'SessionId': self.session_id, 'History':
json.dumps(message_to_dict(message))})
except errors.WriteError as err:
logger.error(err)
|
def add_message(self, message: BaseMessage) ->None:
    """Append the message to the record in MongoDB"""
    from pymongo import errors
    # Each message is stored as its own document keyed by session id,
    # with the serialized payload in the 'History' field.
    try:
        self.collection.insert_one({'SessionId': self.session_id, 'History':
            json.dumps(message_to_dict(message))})
    except errors.WriteError as err:
        # Best-effort write: failures are logged rather than raised.
        logger.error(err)
|
Append the message to the record in MongoDB
|
_process_response
|
text = response.generations[0].text
if stop:
text = enforce_stop_tokens(text, stop)
return text
|
def _process_response(self, response: Any, stop: Optional[List[str]]) ->str:
    """Pull the first generation's text, trimming at any stop token."""
    text = response.generations[0].text
    return enforce_stop_tokens(text, stop) if stop else text
| null |
poetry_conf
|
"""Load the pyproject.toml file."""
with open(PYPROJECT_TOML) as f:
return toml.load(f)['tool']['poetry']
|
@pytest.fixture()
def poetry_conf() ->Dict[str, Any]:
    """Load the pyproject.toml file.

    Returns only the ``[tool.poetry]`` table.
    """
    with open(PYPROJECT_TOML) as f:
        return toml.load(f)['tool']['poetry']
|
Load the pyproject.toml file.
|
__init__
|
"""
Initialize the ElasticsearchEmbeddings instance.
Args:
client (MlClient): An Elasticsearch ML client object.
model_id (str): The model_id of the model deployed in the Elasticsearch
cluster.
input_field (str): The name of the key for the input text field in the
document. Defaults to 'text_field'.
"""
self.client = client
self.model_id = model_id
self.input_field = input_field
|
def __init__(self, client: MlClient, model_id: str, *, input_field: str=
    'text_field'):
    """
    Initialize the ElasticsearchEmbeddings instance.
    Args:
        client (MlClient): An Elasticsearch ML client object.
        model_id (str): The model_id of the model deployed in the Elasticsearch
        cluster.
        input_field (str): The name of the key for the input text field in the
        document. Defaults to 'text_field'.
    """
    # Plain attribute storage; no connection is made here.
    self.client = client
    self.model_id = model_id
    self.input_field = input_field
|
Initialize the ElasticsearchEmbeddings instance.
Args:
client (MlClient): An Elasticsearch ML client object.
model_id (str): The model_id of the model deployed in the Elasticsearch
cluster.
input_field (str): The name of the key for the input text field in the
document. Defaults to 'text_field'.
|
test_relevance_score_bound
|
"""Ensures all relevance scores are between 0 and 1."""
texts = ['foo', 'bar', 'baz']
metadatas = [{'page': i} for i in range(len(texts))]
docsearch = Pinecone.from_texts(texts, embedding_openai, index_name=
index_name, metadatas=metadatas)
time.sleep(20)
output = docsearch.similarity_search_with_relevance_scores('foo', k=3)
assert all((1 >= score or np.isclose(score, 1)) and score >= 0 for _, score in
output)
|
@pytest.mark.vcr()
def test_relevance_score_bound(self, embedding_openai: OpenAIEmbeddings
    ) ->None:
    """Ensures all relevance scores are between 0 and 1."""
    texts = ['foo', 'bar', 'baz']
    metadatas = [{'page': i} for i in range(len(texts))]
    docsearch = Pinecone.from_texts(texts, embedding_openai, index_name=
        index_name, metadatas=metadatas)
    # Pinecone indexing is eventually consistent; wait before querying.
    time.sleep(20)
    output = docsearch.similarity_search_with_relevance_scores('foo', k=3)
    # isclose tolerates tiny float error just above 1.0.
    assert all((1 >= score or np.isclose(score, 1)) and score >= 0 for _,
        score in output)
|
Ensures all relevance scores are between 0 and 1.
|
_has_env_vars
|
return all(['ASTRA_DB_APPLICATION_TOKEN' in os.environ,
'ASTRA_DB_API_ENDPOINT' in os.environ])
|
def _has_env_vars() ->bool:
    """True when both Astra DB credential environment variables are set."""
    required = ('ASTRA_DB_APPLICATION_TOKEN', 'ASTRA_DB_API_ENDPOINT')
    return all(name in os.environ for name in required)
| null |
log
|
if self.path:
with open(self.path, 'a') as f:
f.write(f'{vw_ex}\n\n')
|
def log(self, vw_ex: str) ->None:
    """Append the example to the log file, if a path is configured."""
    if not self.path:
        return
    with open(self.path, 'a') as sink:
        sink.write(vw_ex + '\n\n')
| null |
test_api_key_masked_when_passed_via_constructor
|
llm = Nebula(nebula_api_key='secret-api-key')
print(llm.nebula_api_key, end='')
captured = capsys.readouterr()
assert captured.out == '**********'
|
def test_api_key_masked_when_passed_via_constructor(capsys: CaptureFixture
    ) ->None:
    """The API key should print masked, never in plain text."""
    llm = Nebula(nebula_api_key='secret-api-key')
    print(llm.nebula_api_key, end='')
    assert capsys.readouterr().out == '**********'
| null |
test_query_chain
|
"""Test QueryChain translates a question into a query expression."""
llm = OpenAI(temperature=0, max_tokens=512)
query_chain = QueryChain.from_univariate_prompt(llm)
narrative_question = 'How many pets will Marcia end up with? '
data = query_chain(narrative_question)[Constant.chain_data.value]
self.assertEqual(type(data), QueryModel)
|
def test_query_chain(self) ->None:
    """Test QueryChain translates a question into a query expression."""
    # Deterministic generation for a stable assertion.
    llm = OpenAI(temperature=0, max_tokens=512)
    query_chain = QueryChain.from_univariate_prompt(llm)
    narrative_question = 'How many pets will Marcia end up with? '
    data = query_chain(narrative_question)[Constant.chain_data.value]
    self.assertEqual(type(data), QueryModel)
|
Test QueryChain translates a question into a query expression.
|
__repr__
|
return '\n| '.join(repr(s) if i == 0 else indent_lines_after_first(repr(s),
'| ') for i, s in enumerate(self.steps))
|
def __repr__(self) ->str:
    """Render the steps one per line, later steps indented under a '| ' gutter."""
    rendered = []
    for position, step in enumerate(self.steps):
        text = repr(step)
        if position > 0:
            text = indent_lines_after_first(text, '| ')
        rendered.append(text)
    return '\n| '.join(rendered)
| null |
_import_slack_get_channel
|
from langchain_community.tools.slack.get_channel import SlackGetChannel
return SlackGetChannel
|
def _import_slack_get_channel() ->Any:
    """Deferred import of the Slack get-channel tool class."""
    from langchain_community.tools.slack.get_channel import SlackGetChannel as tool_cls
    return tool_cls
| null |
clear
|
def batched(iterable: Iterable[Any], batch_size: int) ->Iterable[Any]:
iterator = iter(iterable)
while (batch := list(islice(iterator, batch_size))):
yield batch
for keybatch in batched(self.redis_client.scan_iter(
f'{self.full_key_prefix}:*'), 500):
self.redis_client.delete(*keybatch)
|
def clear(self) ->None:
    """Delete every Redis key under this session's full key prefix.

    Keys are removed in chunks of 500 so a large history does not turn
    into one enormous DELETE command.
    """
    chunk_size = 500
    pending = []
    for key in self.redis_client.scan_iter(f'{self.full_key_prefix}:*'):
        pending.append(key)
        if len(pending) == chunk_size:
            # Flush a full chunk in a single DELETE round-trip.
            self.redis_client.delete(*pending)
            pending = []
    if pending:
        self.redis_client.delete(*pending)
| null |
test_max_marginal_relevance_search
|
"""Test end to end construction and MRR search."""
texts = ['foo', 'bar', 'baz']
metadatas = [{'page': i} for i in range(len(texts))]
docsearch = Weaviate.from_texts(texts, embedding_openai, metadatas=
metadatas, weaviate_url=weaviate_url)
standard_ranking = docsearch.similarity_search('foo', k=2)
output = docsearch.max_marginal_relevance_search('foo', k=2, fetch_k=3,
lambda_mult=1.0)
assert output == standard_ranking
output = docsearch.max_marginal_relevance_search('foo', k=2, fetch_k=3,
lambda_mult=0.0)
assert output == [Document(page_content='foo', metadata={'page': 0}),
Document(page_content='bar', metadata={'page': 1})]
|
@pytest.mark.vcr(ignore_localhost=True)
def test_max_marginal_relevance_search(self, weaviate_url: str,
    embedding_openai: OpenAIEmbeddings) ->None:
    """Test end to end construction and MRR search."""
    texts = ['foo', 'bar', 'baz']
    metadatas = [{'page': i} for i in range(len(texts))]
    store = Weaviate.from_texts(texts, embedding_openai, metadatas=
        metadatas, weaviate_url=weaviate_url)
    standard_ranking = store.similarity_search('foo', k=2)
    # lambda_mult=1.0 -> pure relevance, identical to plain similarity search.
    results = store.max_marginal_relevance_search('foo', k=2, fetch_k=3,
        lambda_mult=1.0)
    assert results == standard_ranking
    # lambda_mult=0.0 -> pure diversity.
    results = store.max_marginal_relevance_search('foo', k=2, fetch_k=3,
        lambda_mult=0.0)
    expected = [Document(page_content='foo', metadata={'page': 0}),
        Document(page_content='bar', metadata={'page': 1})]
    assert results == expected
|
Test end to end construction and MRR search.
|
_load_single_page_from_dump
|
"""Parse a single page."""
try:
import mwparserfromhell
except ImportError as e:
raise ImportError(
"Unable to import 'mwparserfromhell'. Please install with `pip install mwparserfromhell`."
) from e
for revision in page:
code = mwparserfromhell.parse_folder(revision.text)
text = code.strip_code(normalize=True, collapse=True,
keep_template_params=False)
metadata = {'source': page.title}
return Document(page_content=text, metadata=metadata)
|
def _load_single_page_from_dump(self, page) ->Document:
    """Parse a single page.

    Every revision's wikitext is stripped of markup; because the loop
    overwrites ``text`` each iteration, the last revision's text wins.
    The page title becomes the Document's ``source`` metadata.

    Args:
        page: A page object from the dump iterator, yielding revisions
            with a ``text`` attribute.

    Returns:
        Document with the stripped wikitext (empty string for a page
        with no revisions) and ``{'source': page.title}`` metadata.

    Raises:
        ImportError: If mwparserfromhell is not installed.
    """
    try:
        import mwparserfromhell
    except ImportError as e:
        raise ImportError(
            "Unable to import 'mwparserfromhell'. Please install with `pip install mwparserfromhell`."
            ) from e
    text = ''
    for revision in page:
        # Fix: the library's entry point is parse(); parse_folder() does
        # not exist and raised AttributeError on every call.
        code = mwparserfromhell.parse(revision.text)
        text = code.strip_code(normalize=True, collapse=True,
            keep_template_params=False)
    metadata = {'source': page.title}
    return Document(page_content=text, metadata=metadata)
|
Parse a single page.
|
query
|
"""Query Neo4j database."""
from neo4j.exceptions import CypherSyntaxError
with self._driver.session(database=self._database) as session:
try:
data = session.run(query, params)
return [r.data() for r in data]
except CypherSyntaxError as e:
raise ValueError(f'Generated Cypher Statement is not valid\n{e}')
|
def query(self, query: str, params: dict={}) ->List[Dict[str, Any]]:
    """Query Neo4j database.

    Args:
        query: Cypher statement to execute.
        params: Query parameters forwarded to the driver. The shared {}
            default is benign here — it is only read, never mutated.

    Returns:
        One dict per result record, as produced by ``Record.data()``.

    Raises:
        ValueError: If Neo4j rejects the statement as invalid Cypher.
    """
    from neo4j.exceptions import CypherSyntaxError
    with self._driver.session(database=self._database) as session:
        try:
            data = session.run(query, params)
            # Results are consumed inside the try: the driver may surface
            # CypherSyntaxError lazily when records are iterated, not on
            # run() itself — presumably why the comprehension is in here.
            return [r.data() for r in data]
        except CypherSyntaxError as e:
            raise ValueError(f'Generated Cypher Statement is not valid\n{e}')
|
Query Neo4j database.
|
similarity_search
|
"""Return docs most similar to query.
Args:
query: Text to look up documents similar to.
k: Number of Documents to return. Defaults to 4.
filters: Filters to apply to the query. Defaults to None.
Returns:
List of Documents most similar to the embedding.
"""
docs_with_score = self.similarity_search_with_score(query=query, k=k,
filters=filters, **kwargs)
return [doc for doc, _ in docs_with_score]
|
def similarity_search(self, query: str, k: int=4, filters: Optional[Any]=
    None, **kwargs: Any) ->List[Document]:
    """Return docs most similar to query.
    Args:
        query: Text to look up documents similar to.
        k: Number of Documents to return. Defaults to 4.
        filters: Filters to apply to the query. Defaults to None.
    Returns:
        List of Documents most similar to the embedding.
    """
    # Delegate to the scored variant and discard the scores.
    scored = self.similarity_search_with_score(query=query, k=k, filters=
        filters, **kwargs)
    return [document for document, _score in scored]
|
Return docs most similar to query.
Args:
query: Text to look up documents similar to.
k: Number of Documents to return. Defaults to 4.
filters: Filters to apply to the query. Defaults to None.
Returns:
List of Documents most similar to the embedding.
|
_replace_uuid
|
if uuid not in self.uuids_map:
self.uuids_map[uuid] = next(self.uuids_generator)
return self.uuids_map[uuid]
|
def _replace_uuid(self, uuid: UUID) ->UUID:
if uuid not in self.uuids_map:
self.uuids_map[uuid] = next(self.uuids_generator)
return self.uuids_map[uuid]
| null |
_image_analysis
|
try:
import azure.ai.vision as sdk
except ImportError:
pass
image_src_type = detect_file_src_type(image_path)
if image_src_type == 'local':
vision_source = sdk.VisionSource(filename=image_path)
elif image_src_type == 'remote':
vision_source = sdk.VisionSource(url=image_path)
else:
raise ValueError(f'Invalid image path: {image_path}')
image_analyzer = sdk.ImageAnalyzer(self.vision_service, vision_source, self
.analysis_options)
result = image_analyzer.analyze()
res_dict = {}
if result.reason == sdk.ImageAnalysisResultReason.ANALYZED:
if result.caption is not None:
res_dict['caption'] = result.caption.content
if result.objects is not None:
res_dict['objects'] = [obj.name for obj in result.objects]
if result.tags is not None:
res_dict['tags'] = [tag.name for tag in result.tags]
if result.text is not None:
res_dict['text'] = [line.content for line in result.text.lines]
else:
error_details = sdk.ImageAnalysisErrorDetails.from_result(result)
raise RuntimeError(
f"""Image analysis failed.
Reason: {error_details.reason}
Details: {error_details.message}"""
)
return res_dict
|
def _image_analysis(self, image_path: str) ->Dict:
    """Analyze one image (local path or URL) with Azure AI Vision.

    Args:
        image_path: Local file path or remote URL of the image.

    Returns:
        Dict with any of the keys 'caption', 'objects', 'tags' and
        'text', depending on what the analysis produced.

    Raises:
        ImportError: If the azure-ai-vision SDK is not installed.
        ValueError: If the image path is neither local nor remote.
        RuntimeError: If the analysis request fails.
    """
    try:
        import azure.ai.vision as sdk
    except ImportError as e:
        # Fix: the error was silently swallowed before, which led to a
        # confusing NameError on the first `sdk.` reference below.
        raise ImportError(
            'Unable to import azure.ai.vision. Please install with `pip install azure-ai-vision`.'
            ) from e
    image_src_type = detect_file_src_type(image_path)
    if image_src_type == 'local':
        vision_source = sdk.VisionSource(filename=image_path)
    elif image_src_type == 'remote':
        vision_source = sdk.VisionSource(url=image_path)
    else:
        raise ValueError(f'Invalid image path: {image_path}')
    image_analyzer = sdk.ImageAnalyzer(self.vision_service, vision_source,
        self.analysis_options)
    result = image_analyzer.analyze()
    res_dict = {}
    if result.reason == sdk.ImageAnalysisResultReason.ANALYZED:
        # Only include the result fields the service actually returned.
        if result.caption is not None:
            res_dict['caption'] = result.caption.content
        if result.objects is not None:
            res_dict['objects'] = [obj.name for obj in result.objects]
        if result.tags is not None:
            res_dict['tags'] = [tag.name for tag in result.tags]
        if result.text is not None:
            res_dict['text'] = [line.content for line in result.text.lines]
    else:
        error_details = sdk.ImageAnalysisErrorDetails.from_result(result)
        raise RuntimeError(
            f"""Image analysis failed.
Reason: {error_details.reason}
Details: {error_details.message}"""
            )
    return res_dict
| null |
get_input_schema
|
return create_model('CombineDocumentsInput', **{self.input_key: (List[
Document], None)})
|
def get_input_schema(self, config: Optional[RunnableConfig]=None) ->Type[
    BaseModel]:
    """Build a pydantic input schema with one document-list field, keyed by
    this chain's input key."""
    field_definition = List[Document], None
    return create_model('CombineDocumentsInput', **{self.input_key:
        field_definition})
| null |
test_marqo_bulk
|
marqo_search = Marqo(client=client, index_name=INDEX_NAME)
input_documents = ['This is document 1', '2', '3']
ids = marqo_search.add_texts(input_documents)
bulk_results = marqo_search.bulk_similarity_search([
'What is the first document?', '2', '3'], k=3)
assert len(ids) == len(input_documents)
assert bulk_results[0][0].page_content == input_documents[0]
assert bulk_results[1][0].page_content == input_documents[1]
assert bulk_results[2][0].page_content == input_documents[2]
|
def test_marqo_bulk(client: Marqo) ->None:
    """Bulk similarity search should surface each source document first."""
    vectorstore = Marqo(client=client, index_name=INDEX_NAME)
    docs = ['This is document 1', '2', '3']
    added_ids = vectorstore.add_texts(docs)
    results = vectorstore.bulk_similarity_search([
        'What is the first document?', '2', '3'], k=3)
    assert len(added_ids) == len(docs)
    for position, doc in enumerate(docs):
        assert results[position][0].page_content == doc
| null |
dialect
|
"""Return string representation of dialect to use."""
return self._engine.dialect.name
|
@property
def dialect(self) ->str:
    """Return string representation of dialect to use."""
    engine_dialect = self._engine.dialect
    return engine_dialect.name
|
Return string representation of dialect to use.
|
test_get_action_and_input_newline
|
"""Test getting an action from text where Action Input is a code snippet."""
llm_output = """Now I need to write a unittest for the function.
Action: Python
Action Input:
```
import unittest
unittest.main()
```"""
action, action_input = get_action_and_input(llm_output)
assert action == 'Python'
assert action_input == """```
import unittest
unittest.main()
```"""
|
def test_get_action_and_input_newline() ->None:
    """Test getting an action from text where Action Input is a code snippet."""
    llm_output = """Now I need to write a unittest for the function.
Action: Python
Action Input:
```
import unittest
unittest.main()
```"""
    # NOTE(review): the expected value below contains a blank line between
    # the import and the call ('\n\n'), but the literal above shows none —
    # confirm a blank line was not stripped from this source.
    action, action_input = get_action_and_input(llm_output)
    assert action == 'Python'
    assert action_input == '```\nimport unittest\n\nunittest.main()\n```'
|
Test getting an action from text where Action Input is a code snippet.
|
stream_speech
|
"""Stream the text as speech as it is generated.
Play the text in your speakers."""
elevenlabs = _import_elevenlabs()
speech_stream = elevenlabs.generate(text=query, model=self.model, stream=True)
elevenlabs.stream(speech_stream)
|
def stream_speech(self, query: str) ->None:
    """Stream the text as speech as it is generated.
    Play the text in your speakers."""
    client = _import_elevenlabs()
    audio_stream = client.generate(text=query, model=self.model, stream=True)
    client.stream(audio_stream)
|
Stream the text as speech as it is generated.
Play the text in your speakers.
|
test_seq_batch_return_exceptions
|
class ControlledExceptionRunnable(Runnable[str, str]):
def __init__(self, fail_starts_with: str) ->None:
self.fail_starts_with = fail_starts_with
def invoke(self, input: Any, config: Optional[RunnableConfig]=None) ->Any:
raise NotImplementedError()
def _batch(self, inputs: List[str]) ->List:
outputs: List[Any] = []
for input in inputs:
if input.startswith(self.fail_starts_with):
outputs.append(ValueError())
else:
outputs.append(input + 'a')
return outputs
def batch(self, inputs: List[str], config: Optional[Union[
RunnableConfig, List[RunnableConfig]]]=None, *, return_exceptions:
bool=False, **kwargs: Any) ->List[str]:
return self._batch_with_config(self._batch, inputs, config,
return_exceptions=return_exceptions, **kwargs)
chain = ControlledExceptionRunnable('bux') | ControlledExceptionRunnable('bar'
) | ControlledExceptionRunnable('baz') | ControlledExceptionRunnable('foo')
assert isinstance(chain, RunnableSequence)
with pytest.raises(ValueError):
chain.batch(['foo', 'bar', 'baz', 'qux'])
spy = mocker.spy(ControlledExceptionRunnable, 'batch')
tracer = FakeTracer()
inputs = ['foo', 'bar', 'baz', 'qux']
outputs = chain.batch(inputs, dict(callbacks=[tracer]), return_exceptions=True)
assert len(outputs) == 4
assert isinstance(outputs[0], ValueError)
assert isinstance(outputs[1], ValueError)
assert isinstance(outputs[2], ValueError)
assert outputs[3] == 'quxaaaa'
assert spy.call_count == 4
inputs_to_batch = [c[0][1] for c in spy.call_args_list]
assert inputs_to_batch == [['foo', 'bar', 'baz', 'qux'], ['fooa', 'bara',
'baza', 'quxa'], ['fooaa', 'bazaa', 'quxaa'], ['fooaaa', 'quxaaa']]
parent_runs = sorted((r for r in tracer.runs if r.parent_run_id is None),
key=lambda run: inputs.index(run.inputs['input']))
assert len(parent_runs) == 4
parent_run_foo = parent_runs[0]
assert parent_run_foo.inputs['input'] == 'foo'
assert repr(ValueError()) in str(parent_run_foo.error)
assert len(parent_run_foo.child_runs) == 4
assert [r.error for r in parent_run_foo.child_runs[:-1]] == [None, None, None]
assert repr(ValueError()) in str(parent_run_foo.child_runs[-1].error)
parent_run_bar = parent_runs[1]
assert parent_run_bar.inputs['input'] == 'bar'
assert repr(ValueError()) in str(parent_run_bar.error)
assert len(parent_run_bar.child_runs) == 2
assert parent_run_bar.child_runs[0].error is None
assert repr(ValueError()) in str(parent_run_bar.child_runs[1].error)
parent_run_baz = parent_runs[2]
assert parent_run_baz.inputs['input'] == 'baz'
assert repr(ValueError()) in str(parent_run_baz.error)
assert len(parent_run_baz.child_runs) == 3
assert [r.error for r in parent_run_baz.child_runs[:-1]] == [None, None]
assert repr(ValueError()) in str(parent_run_baz.child_runs[-1].error)
parent_run_qux = parent_runs[3]
assert parent_run_qux.inputs['input'] == 'qux'
assert parent_run_qux.error is None
assert parent_run_qux.outputs is not None
assert parent_run_qux.outputs['output'] == 'quxaaaa'
assert len(parent_run_qux.child_runs) == 4
assert [r.error for r in parent_run_qux.child_runs] == [None, None, None, None]
|
@freeze_time('2023-01-01')
def test_seq_batch_return_exceptions(mocker: MockerFixture) ->None:
    """batch() with return_exceptions=True must return per-input errors in
    order, keep batching only the surviving inputs, and trace accurately."""
    class ControlledExceptionRunnable(Runnable[str, str]):
        # Appends 'a' to each input, but any input starting with the
        # configured prefix gets a ValueError in its output slot instead.
        def __init__(self, fail_starts_with: str) ->None:
            self.fail_starts_with = fail_starts_with
        def invoke(self, input: Any, config: Optional[RunnableConfig]=None
            ) ->Any:
            raise NotImplementedError()
        def _batch(self, inputs: List[str]) ->List:
            outputs: List[Any] = []
            for input in inputs:
                if input.startswith(self.fail_starts_with):
                    outputs.append(ValueError())
                else:
                    outputs.append(input + 'a')
            return outputs
        def batch(self, inputs: List[str], config: Optional[Union[
            RunnableConfig, List[RunnableConfig]]]=None, *,
            return_exceptions: bool=False, **kwargs: Any) ->List[str]:
            return self._batch_with_config(self._batch, inputs, config,
                return_exceptions=return_exceptions, **kwargs)
    # Four stages; each input fails at a different stage, 'qux' survives all.
    chain = ControlledExceptionRunnable('bux') | ControlledExceptionRunnable(
        'bar') | ControlledExceptionRunnable('baz'
        ) | ControlledExceptionRunnable('foo')
    assert isinstance(chain, RunnableSequence)
    # Without return_exceptions, the first failure propagates to the caller.
    with pytest.raises(ValueError):
        chain.batch(['foo', 'bar', 'baz', 'qux'])
    spy = mocker.spy(ControlledExceptionRunnable, 'batch')
    tracer = FakeTracer()
    inputs = ['foo', 'bar', 'baz', 'qux']
    outputs = chain.batch(inputs, dict(callbacks=[tracer]),
        return_exceptions=True)
    # One output slot per input: failures stay in place, the survivor is
    # transformed by all four stages ('qux' + 'a' * 4).
    assert len(outputs) == 4
    assert isinstance(outputs[0], ValueError)
    assert isinstance(outputs[1], ValueError)
    assert isinstance(outputs[2], ValueError)
    assert outputs[3] == 'quxaaaa'
    assert spy.call_count == 4
    # Inputs that failed at an earlier stage are dropped from later batches.
    inputs_to_batch = [c[0][1] for c in spy.call_args_list]
    assert inputs_to_batch == [['foo', 'bar', 'baz', 'qux'], ['fooa',
        'bara', 'baza', 'quxa'], ['fooaa', 'bazaa', 'quxaa'], ['fooaaa',
        'quxaaa']]
    # Traces: each parent run records where (if anywhere) its input failed,
    # and has one child run per stage its input actually reached.
    parent_runs = sorted((r for r in tracer.runs if r.parent_run_id is None
        ), key=lambda run: inputs.index(run.inputs['input']))
    assert len(parent_runs) == 4
    parent_run_foo = parent_runs[0]
    assert parent_run_foo.inputs['input'] == 'foo'
    assert repr(ValueError()) in str(parent_run_foo.error)
    assert len(parent_run_foo.child_runs) == 4
    assert [r.error for r in parent_run_foo.child_runs[:-1]] == [None, None,
        None]
    assert repr(ValueError()) in str(parent_run_foo.child_runs[-1].error)
    parent_run_bar = parent_runs[1]
    assert parent_run_bar.inputs['input'] == 'bar'
    assert repr(ValueError()) in str(parent_run_bar.error)
    assert len(parent_run_bar.child_runs) == 2
    assert parent_run_bar.child_runs[0].error is None
    assert repr(ValueError()) in str(parent_run_bar.child_runs[1].error)
    parent_run_baz = parent_runs[2]
    assert parent_run_baz.inputs['input'] == 'baz'
    assert repr(ValueError()) in str(parent_run_baz.error)
    assert len(parent_run_baz.child_runs) == 3
    assert [r.error for r in parent_run_baz.child_runs[:-1]] == [None, None]
    assert repr(ValueError()) in str(parent_run_baz.child_runs[-1].error)
    parent_run_qux = parent_runs[3]
    assert parent_run_qux.inputs['input'] == 'qux'
    assert parent_run_qux.error is None
    assert parent_run_qux.outputs is not None
    assert parent_run_qux.outputs['output'] == 'quxaaaa'
    assert len(parent_run_qux.child_runs) == 4
    assert [r.error for r in parent_run_qux.child_runs] == [None, None,
        None, None]
| null |
_get_relevant_documents
|
"""Get documents relevant for a query.
Args:
query: string to find relevant documents for
Returns:
List of relevant documents
"""
query_emb = np.array(self.embeddings.embed_query(query))
if self.search_type == SearchType.similarity:
results = self._similarity_search(query_emb)
elif self.search_type == SearchType.mmr:
results = self._mmr_search(query_emb)
else:
raise ValueError(
f"Search type {self.search_type} does not exist. Choose either 'similarity' or 'mmr'."
)
return results
|
def _get_relevant_documents(self, query: str, *, run_manager:
    CallbackManagerForRetrieverRun) ->List[Document]:
    """Get documents relevant for a query.
    Args:
        query: string to find relevant documents for
    Returns:
        List of relevant documents
    """
    embedded_query = np.array(self.embeddings.embed_query(query))
    # Dispatch on the configured search strategy via early returns.
    if self.search_type == SearchType.similarity:
        return self._similarity_search(embedded_query)
    if self.search_type == SearchType.mmr:
        return self._mmr_search(embedded_query)
    raise ValueError(
        f"Search type {self.search_type} does not exist. Choose either 'similarity' or 'mmr'."
        )
|
Get documents relevant for a query.
Args:
query: string to find relevant documents for
Returns:
List of relevant documents
|
test_all_imports
|
assert set(__all__) == set(EXPECTED_ALL)
|
def test_all_imports() ->None:
    """The module's __all__ must match the expected public API exactly."""
    assert set(EXPECTED_ALL) == set(__all__)
| null |
add_embeddings
|
with Session(self._conn) as session:
collection = self.get_collection(session)
if not collection:
raise ValueError('Collection not found')
for text, metadata, embedding, id in zip(texts, metadatas, embeddings, ids
):
embedding_store = EmbeddingStore(embedding=embedding, document=text,
cmetadata=metadata, custom_id=id)
collection.embeddings.append(embedding_store)
session.add(embedding_store)
session.commit()
|
def add_embeddings(self, texts: List[str], embeddings: List[List[float]],
    metadatas: List[dict], ids: List[str], **kwargs: Any) ->None:
    """Store texts with their precomputed embeddings in this collection,
    committing all rows in a single transaction."""
    with Session(self._conn) as session:
        collection = self.get_collection(session)
        if not collection:
            raise ValueError('Collection not found')
        rows = zip(texts, metadatas, embeddings, ids)
        for text, metadata, vector, custom_id in rows:
            record = EmbeddingStore(embedding=vector, document=text,
                cmetadata=metadata, custom_id=custom_id)
            collection.embeddings.append(record)
            session.add(record)
        session.commit()
| null |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.