method_name
stringlengths 1
78
| method_body
stringlengths 3
9.66k
| full_code
stringlengths 31
10.7k
| docstring
stringlengths 4
4.74k
⌀ |
|---|---|---|---|
test_create_bash_persistent
|
"""Test the pexpect persistent bash terminal"""
session = BashProcess(persistent=True)
response = session.run('echo hello')
response += session.run('echo world')
assert 'hello' in response
assert 'world' in response
|
@pytest.mark.skip(reason='flaky on GHA, TODO to fix')
@pytest.mark.skipif(sys.platform.startswith('win'), reason=
'Test not supported on Windows')
def test_create_bash_persistent() ->None:
"""Test the pexpect persistent bash terminal"""
session = BashProcess(persistent=True)
response = session.run('echo hello')
response += session.run('echo world')
assert 'hello' in response
assert 'world' in response
|
Test the pexpect persistent bash terminal
|
visit_structured_query
|
if structured_query.filter is None:
kwargs = {}
else:
kwargs = {'filter': structured_query.filter.accept(self)}
return structured_query.query, kwargs
|
def visit_structured_query(self, structured_query: StructuredQuery) ->Tuple[
str, dict]:
if structured_query.filter is None:
kwargs = {}
else:
kwargs = {'filter': structured_query.filter.accept(self)}
return structured_query.query, kwargs
| null |
validate_environment
|
"""Validate that api key and endpoint exists in environment."""
metaphor_api_key = get_from_dict_or_env(values, 'metaphor_api_key',
'METAPHOR_API_KEY')
values['metaphor_api_key'] = metaphor_api_key
return values
|
@root_validator(pre=True)
def validate_environment(cls, values: Dict) ->Dict:
"""Validate that api key and endpoint exists in environment."""
metaphor_api_key = get_from_dict_or_env(values, 'metaphor_api_key',
'METAPHOR_API_KEY')
values['metaphor_api_key'] = metaphor_api_key
return values
|
Validate that api key and endpoint exists in environment.
|
embed_query
|
"""Call out to DashScope's embedding endpoint for embedding query text.
Args:
text: The text to embed.
Returns:
Embedding for the text.
"""
embedding = embed_with_retry(self, input=text, text_type='query', model=
self.model)[0]['embedding']
return embedding
|
def embed_query(self, text: str) ->List[float]:
"""Call out to DashScope's embedding endpoint for embedding query text.
Args:
text: The text to embed.
Returns:
Embedding for the text.
"""
embedding = embed_with_retry(self, input=text, text_type='query', model
=self.model)[0]['embedding']
return embedding
|
Call out to DashScope's embedding endpoint for embedding query text.
Args:
text: The text to embed.
Returns:
Embedding for the text.
|
import_installed_app_flow
|
"""Import InstalledAppFlow class.
Returns:
InstalledAppFlow: InstalledAppFlow class.
"""
try:
from google_auth_oauthlib.flow import InstalledAppFlow
except ImportError:
raise ImportError(
'You need to install google-auth-oauthlib to use this toolkit. Try running pip install --upgrade google-auth-oauthlib'
)
return InstalledAppFlow
|
def import_installed_app_flow() ->InstalledAppFlow:
"""Import InstalledAppFlow class.
Returns:
InstalledAppFlow: InstalledAppFlow class.
"""
try:
from google_auth_oauthlib.flow import InstalledAppFlow
except ImportError:
raise ImportError(
'You need to install google-auth-oauthlib to use this toolkit. Try running pip install --upgrade google-auth-oauthlib'
)
return InstalledAppFlow
|
Import InstalledAppFlow class.
Returns:
InstalledAppFlow: InstalledAppFlow class.
|
text
|
"""Print a text on ASCII canvas.
Args:
x (int): x coordinate where the text should start.
y (int): y coordinate where the text should start.
text (str): string that should be printed.
"""
for i, char in enumerate(text):
self.point(x + i, y, char)
|
def text(self, x: int, y: int, text: str) ->None:
"""Print a text on ASCII canvas.
Args:
x (int): x coordinate where the text should start.
y (int): y coordinate where the text should start.
text (str): string that should be printed.
"""
for i, char in enumerate(text):
self.point(x + i, y, char)
|
Print a text on ASCII canvas.
Args:
x (int): x coordinate where the text should start.
y (int): y coordinate where the text should start.
text (str): string that should be printed.
|
_run
|
"""Use the WolframAlpha tool."""
return self.api_wrapper.run(query)
|
def _run(self, query: str, run_manager: Optional[CallbackManagerForToolRun]
=None) ->str:
"""Use the WolframAlpha tool."""
return self.api_wrapper.run(query)
|
Use the WolframAlpha tool.
|
invoke
|
if self.func is not None:
call_func_with_variable_args(self.func, input, ensure_config(config),
**kwargs)
return self._call_with_config(identity, input, config)
|
def invoke(self, input: Other, config: Optional[RunnableConfig]=None, **
kwargs: Any) ->Other:
if self.func is not None:
call_func_with_variable_args(self.func, input, ensure_config(config
), **kwargs)
return self._call_with_config(identity, input, config)
| null |
gen
|
yield fake.invoke(input)
yield fake.invoke(input * 2)
yield fake.invoke(input * 3)
|
@chain
def gen(input: str) ->Iterator[int]:
yield fake.invoke(input)
yield fake.invoke(input * 2)
yield fake.invoke(input * 3)
| null |
on_chain_start
|
self.on_chain_start_common()
|
def on_chain_start(self, *args: Any, **kwargs: Any) ->Any:
self.on_chain_start_common()
| null |
clear
|
"""Remove the session's messages from the cache.
Raises:
SdkException: Momento service or network error.
Exception: Unexpected response.
"""
from momento.responses import CacheDelete
delete_response = self.cache_client.delete(self.cache_name, self.key)
if isinstance(delete_response, CacheDelete.Success):
return None
elif isinstance(delete_response, CacheDelete.Error):
raise delete_response.inner_exception
else:
raise Exception(f'Unexpected response: {delete_response}')
|
def clear(self) ->None:
"""Remove the session's messages from the cache.
Raises:
SdkException: Momento service or network error.
Exception: Unexpected response.
"""
from momento.responses import CacheDelete
delete_response = self.cache_client.delete(self.cache_name, self.key)
if isinstance(delete_response, CacheDelete.Success):
return None
elif isinstance(delete_response, CacheDelete.Error):
raise delete_response.inner_exception
else:
raise Exception(f'Unexpected response: {delete_response}')
|
Remove the session's messages from the cache.
Raises:
SdkException: Momento service or network error.
Exception: Unexpected response.
|
_prepare_eval_run
|
wrapped_model = _wrap_in_chain_factory(llm_or_chain_factory, dataset_name)
dataset = client.read_dataset(dataset_name=dataset_name)
examples = list(client.list_examples(dataset_id=dataset.id))
if not examples:
raise ValueError(f'Dataset {dataset_name} has no example rows.')
try:
git_info = get_git_info()
if git_info:
project_metadata = project_metadata or {}
project_metadata = {**project_metadata, 'git': git_info}
project = client.create_project(project_name, reference_dataset_id=
dataset.id, project_extra={'tags': tags} if tags else {}, metadata=
project_metadata)
except (HTTPError, ValueError, LangSmithError) as e:
if 'already exists ' not in str(e):
raise e
uid = uuid.uuid4()
example_msg = f"""
run_on_dataset(
...
project_name="{project_name} - {uid}", # Update since {project_name} already exists
)
"""
raise ValueError(
f"""Test project {project_name} already exists. Please use a different name:
{example_msg}"""
)
comparison_url = dataset.url + f'/compare?selectedSessions={project.id}'
print(
f"""View the evaluation results for project '{project_name}' at:
{comparison_url}
View all tests for Dataset {dataset_name} at:
{dataset.url}"""
, flush=True)
return wrapped_model, project, dataset, examples
|
def _prepare_eval_run(client: Client, dataset_name: str,
llm_or_chain_factory: MODEL_OR_CHAIN_FACTORY, project_name: str,
project_metadata: Optional[Dict[str, Any]]=None, tags: Optional[List[
str]]=None) ->Tuple[MCF, TracerSession, Dataset, List[Example]]:
wrapped_model = _wrap_in_chain_factory(llm_or_chain_factory, dataset_name)
dataset = client.read_dataset(dataset_name=dataset_name)
examples = list(client.list_examples(dataset_id=dataset.id))
if not examples:
raise ValueError(f'Dataset {dataset_name} has no example rows.')
try:
git_info = get_git_info()
if git_info:
project_metadata = project_metadata or {}
project_metadata = {**project_metadata, 'git': git_info}
project = client.create_project(project_name, reference_dataset_id=
dataset.id, project_extra={'tags': tags} if tags else {},
metadata=project_metadata)
except (HTTPError, ValueError, LangSmithError) as e:
if 'already exists ' not in str(e):
raise e
uid = uuid.uuid4()
example_msg = f"""
run_on_dataset(
...
project_name="{project_name} - {uid}", # Update since {project_name} already exists
)
"""
raise ValueError(
f"""Test project {project_name} already exists. Please use a different name:
{example_msg}"""
)
comparison_url = dataset.url + f'/compare?selectedSessions={project.id}'
print(
f"""View the evaluation results for project '{project_name}' at:
{comparison_url}
View all tests for Dataset {dataset_name} at:
{dataset.url}"""
, flush=True)
return wrapped_model, project, dataset, examples
| null |
test_multiple_output_keys_error
|
"""Test run with multiple output keys errors as expected."""
chain = FakeChain(the_output_keys=['foo', 'bar'])
with pytest.raises(ValueError):
chain.run('bar')
|
def test_multiple_output_keys_error() ->None:
"""Test run with multiple output keys errors as expected."""
chain = FakeChain(the_output_keys=['foo', 'bar'])
with pytest.raises(ValueError):
chain.run('bar')
|
Test run with multiple output keys errors as expected.
|
_load_graph_cypher_chain
|
if 'graph' in kwargs:
graph = kwargs.pop('graph')
else:
raise ValueError('`graph` must be present.')
if 'cypher_generation_chain' in config:
cypher_generation_chain_config = config.pop('cypher_generation_chain')
cypher_generation_chain = load_chain_from_config(
cypher_generation_chain_config)
else:
raise ValueError('`cypher_generation_chain` must be present.')
if 'qa_chain' in config:
qa_chain_config = config.pop('qa_chain')
qa_chain = load_chain_from_config(qa_chain_config)
else:
raise ValueError('`qa_chain` must be present.')
return GraphCypherQAChain(graph=graph, cypher_generation_chain=
cypher_generation_chain, qa_chain=qa_chain, **config)
|
def _load_graph_cypher_chain(config: dict, **kwargs: Any) ->GraphCypherQAChain:
if 'graph' in kwargs:
graph = kwargs.pop('graph')
else:
raise ValueError('`graph` must be present.')
if 'cypher_generation_chain' in config:
cypher_generation_chain_config = config.pop('cypher_generation_chain')
cypher_generation_chain = load_chain_from_config(
cypher_generation_chain_config)
else:
raise ValueError('`cypher_generation_chain` must be present.')
if 'qa_chain' in config:
qa_chain_config = config.pop('qa_chain')
qa_chain = load_chain_from_config(qa_chain_config)
else:
raise ValueError('`qa_chain` must be present.')
return GraphCypherQAChain(graph=graph, cypher_generation_chain=
cypher_generation_chain, qa_chain=qa_chain, **config)
| null |
fetch_space_id
|
"""Fetch the space id."""
url = f'{DEFAULT_URL}/team/{team_id}/space'
data = fetch_data(url, access_token, query={'archived': 'false'})
return fetch_first_id(data, 'spaces')
|
def fetch_space_id(team_id: int, access_token: str) ->Optional[int]:
"""Fetch the space id."""
url = f'{DEFAULT_URL}/team/{team_id}/space'
data = fetch_data(url, access_token, query={'archived': 'false'})
return fetch_first_id(data, 'spaces')
|
Fetch the space id.
|
run
|
"""Run query through GoogleSearch and parse result."""
snippets = []
results = self._google_search_results(query, num=self.k)
if len(results) == 0:
return 'No good Google Search Result was found'
for result in results:
if 'snippet' in result:
snippets.append(result['snippet'])
return ' '.join(snippets)
|
def run(self, query: str) ->str:
"""Run query through GoogleSearch and parse result."""
snippets = []
results = self._google_search_results(query, num=self.k)
if len(results) == 0:
return 'No good Google Search Result was found'
for result in results:
if 'snippet' in result:
snippets.append(result['snippet'])
return ' '.join(snippets)
|
Run query through GoogleSearch and parse result.
|
test_few_shot_functionality
|
"""Test that few shot works with examples."""
prefix = 'This is a test about {content}.'
suffix = 'Now you try to talk about {new_content}.'
examples = [{'question': 'foo', 'answer': 'bar'}, {'question': 'baz',
'answer': 'foo'}]
prompt = FewShotPromptTemplate(suffix=suffix, prefix=prefix,
input_variables=['content', 'new_content'], examples=examples,
example_prompt=EXAMPLE_PROMPT, example_separator='\n')
output = prompt.format(content='animals', new_content='party')
expected_output = """This is a test about animals.
foo: bar
baz: foo
Now you try to talk about party."""
assert output == expected_output
|
def test_few_shot_functionality() ->None:
"""Test that few shot works with examples."""
prefix = 'This is a test about {content}.'
suffix = 'Now you try to talk about {new_content}.'
examples = [{'question': 'foo', 'answer': 'bar'}, {'question': 'baz',
'answer': 'foo'}]
prompt = FewShotPromptTemplate(suffix=suffix, prefix=prefix,
input_variables=['content', 'new_content'], examples=examples,
example_prompt=EXAMPLE_PROMPT, example_separator='\n')
output = prompt.format(content='animals', new_content='party')
expected_output = """This is a test about animals.
foo: bar
baz: foo
Now you try to talk about party."""
assert output == expected_output
|
Test that few shot works with examples.
|
_get_prompt_and_tools
|
try:
import pandas as pd
pd.set_option('display.max_columns', None)
except ImportError:
raise ImportError(
'pandas package not found, please install with `pip install pandas`')
if include_df_in_prompt is not None and suffix is not None:
raise ValueError(
'If suffix is specified, include_df_in_prompt should not be.')
if isinstance(df, list):
for item in df:
if not isinstance(item, pd.DataFrame):
raise ValueError(f'Expected pandas object, got {type(df)}')
return _get_multi_prompt(df, prefix=prefix, suffix=suffix,
input_variables=input_variables, include_df_in_prompt=
include_df_in_prompt, number_of_head_rows=number_of_head_rows,
extra_tools=extra_tools)
else:
if not isinstance(df, pd.DataFrame):
raise ValueError(f'Expected pandas object, got {type(df)}')
return _get_single_prompt(df, prefix=prefix, suffix=suffix,
input_variables=input_variables, include_df_in_prompt=
include_df_in_prompt, number_of_head_rows=number_of_head_rows,
extra_tools=extra_tools)
|
def _get_prompt_and_tools(df: Any, prefix: Optional[str]=None, suffix:
Optional[str]=None, input_variables: Optional[List[str]]=None,
include_df_in_prompt: Optional[bool]=True, number_of_head_rows: int=5,
extra_tools: Sequence[BaseTool]=()) ->Tuple[BasePromptTemplate, List[
BaseTool]]:
try:
import pandas as pd
pd.set_option('display.max_columns', None)
except ImportError:
raise ImportError(
'pandas package not found, please install with `pip install pandas`'
)
if include_df_in_prompt is not None and suffix is not None:
raise ValueError(
'If suffix is specified, include_df_in_prompt should not be.')
if isinstance(df, list):
for item in df:
if not isinstance(item, pd.DataFrame):
raise ValueError(f'Expected pandas object, got {type(df)}')
return _get_multi_prompt(df, prefix=prefix, suffix=suffix,
input_variables=input_variables, include_df_in_prompt=
include_df_in_prompt, number_of_head_rows=number_of_head_rows,
extra_tools=extra_tools)
else:
if not isinstance(df, pd.DataFrame):
raise ValueError(f'Expected pandas object, got {type(df)}')
return _get_single_prompt(df, prefix=prefix, suffix=suffix,
input_variables=input_variables, include_df_in_prompt=
include_df_in_prompt, number_of_head_rows=number_of_head_rows,
extra_tools=extra_tools)
| null |
test_chat_ernie_bot
|
chat = ErnieBotChat()
message = HumanMessage(content='Hello')
response = chat([message])
assert isinstance(response, AIMessage)
assert isinstance(response.content, str)
|
def test_chat_ernie_bot() ->None:
chat = ErnieBotChat()
message = HumanMessage(content='Hello')
response = chat([message])
assert isinstance(response, AIMessage)
assert isinstance(response.content, str)
| null |
test_configure_mappers
|
sqlalchemy.orm.configure_mappers()
|
def test_configure_mappers() ->None:
sqlalchemy.orm.configure_mappers()
| null |
_check_all_feedback_passed
|
runs = list(client.list_runs(project_name=_project_name, execution_order=1))
if not runs:
return
feedback = list(client.list_feedback(run_ids=[run.id for run in runs]))
if not feedback:
return
assert all([(f.score == 1) for f in feedback])
|
def _check_all_feedback_passed(_project_name: str, client: Client) ->None:
runs = list(client.list_runs(project_name=_project_name, execution_order=1)
)
if not runs:
return
feedback = list(client.list_feedback(run_ids=[run.id for run in runs]))
if not feedback:
return
assert all([(f.score == 1) for f in feedback])
| null |
test_sitemap_metadata_default
|
"""Test sitemap loader."""
loader = SitemapLoader('https://api.python.langchain.com/sitemap.xml')
documents = loader.load()
assert len(documents) > 1
assert 'source' in documents[0].metadata
assert 'loc' in documents[0].metadata
|
def test_sitemap_metadata_default() ->None:
"""Test sitemap loader."""
loader = SitemapLoader('https://api.python.langchain.com/sitemap.xml')
documents = loader.load()
assert len(documents) > 1
assert 'source' in documents[0].metadata
assert 'loc' in documents[0].metadata
|
Test sitemap loader.
|
test_from_texts
|
input_texts = ['I have a pen.', 'Do you have a pen?', 'I have a bag.']
bm25_retriever = BM25Retriever.from_texts(texts=input_texts)
assert len(bm25_retriever.docs) == 3
assert bm25_retriever.vectorizer.doc_len == [4, 5, 4]
|
@pytest.mark.requires('rank_bm25')
def test_from_texts() ->None:
input_texts = ['I have a pen.', 'Do you have a pen?', 'I have a bag.']
bm25_retriever = BM25Retriever.from_texts(texts=input_texts)
assert len(bm25_retriever.docs) == 3
assert bm25_retriever.vectorizer.doc_len == [4, 5, 4]
| null |
_import_pinecone
|
from langchain_community.vectorstores.pinecone import Pinecone
return Pinecone
|
def _import_pinecone() ->Any:
from langchain_community.vectorstores.pinecone import Pinecone
return Pinecone
| null |
_import_playwright_ExtractHyperlinksTool
|
from langchain_community.tools.playwright import ExtractHyperlinksTool
return ExtractHyperlinksTool
|
def _import_playwright_ExtractHyperlinksTool() ->Any:
from langchain_community.tools.playwright import ExtractHyperlinksTool
return ExtractHyperlinksTool
| null |
test_parse_string_value
|
parsed = cast(Comparison, DEFAULT_PARSER.parse_folder(f'eq("x", {x})'))
actual = parsed.value
assert actual == x[1:-1]
|
@pytest.mark.parametrize('x', ('""', '" "', '"foo"', "'foo'"))
def test_parse_string_value(x: str) ->None:
parsed = cast(Comparison, DEFAULT_PARSER.parse_folder(f'eq("x", {x})'))
actual = parsed.value
assert actual == x[1:-1]
| null |
load_local
|
"""Load FAISS index, docstore, and index_to_docstore_id from disk.
Args:
folder_path: folder path to load index, docstore,
and index_to_docstore_id from.
embeddings: Embeddings to use when generating queries
index_name: for saving with a specific index file name
asynchronous: whether to use async version or not
"""
path = Path(folder_path)
faiss = dependable_faiss_import()
index = faiss.read_index(str(path / '{index_name}.faiss'.format(index_name=
index_name)))
with open(path / '{index_name}.pkl'.format(index_name=index_name), 'rb') as f:
docstore, index_to_docstore_id = pickle.load(f)
return cls(embeddings, index, docstore, index_to_docstore_id, **kwargs)
|
@classmethod
def load_local(cls, folder_path: str, embeddings: Embeddings, index_name:
str='index', **kwargs: Any) ->FAISS:
"""Load FAISS index, docstore, and index_to_docstore_id from disk.
Args:
folder_path: folder path to load index, docstore,
and index_to_docstore_id from.
embeddings: Embeddings to use when generating queries
index_name: for saving with a specific index file name
asynchronous: whether to use async version or not
"""
path = Path(folder_path)
faiss = dependable_faiss_import()
index = faiss.read_index(str(path / '{index_name}.faiss'.format(
index_name=index_name)))
with open(path / '{index_name}.pkl'.format(index_name=index_name), 'rb'
) as f:
docstore, index_to_docstore_id = pickle.load(f)
return cls(embeddings, index, docstore, index_to_docstore_id, **kwargs)
|
Load FAISS index, docstore, and index_to_docstore_id from disk.
Args:
folder_path: folder path to load index, docstore,
and index_to_docstore_id from.
embeddings: Embeddings to use when generating queries
index_name: for saving with a specific index file name
asynchronous: whether to use async version or not
|
test_sequential_bad_outputs
|
"""Test error is raised when bad outputs are specified."""
chain_1 = FakeChain(input_variables=['foo'], output_variables=['bar'])
chain_2 = FakeChain(input_variables=['bar'], output_variables=['baz'])
with pytest.raises(ValueError):
SequentialChain(chains=[chain_1, chain_2], input_variables=['foo'],
output_variables=['test'])
|
def test_sequential_bad_outputs() ->None:
"""Test error is raised when bad outputs are specified."""
chain_1 = FakeChain(input_variables=['foo'], output_variables=['bar'])
chain_2 = FakeChain(input_variables=['bar'], output_variables=['baz'])
with pytest.raises(ValueError):
SequentialChain(chains=[chain_1, chain_2], input_variables=['foo'],
output_variables=['test'])
|
Test error is raised when bad outputs are specified.
|
test_single_input_correct
|
"""Test passing single input works."""
chain = FakeChain()
output = chain('bar')
assert output == {'foo': 'bar', 'bar': 'baz'}
|
def test_single_input_correct() ->None:
"""Test passing single input works."""
chain = FakeChain()
output = chain('bar')
assert output == {'foo': 'bar', 'bar': 'baz'}
|
Test passing single input works.
|
get_default_api_token
|
"""Gets the default Databricks personal access token.
Raises an error if the token cannot be automatically determined.
"""
if (api_token := os.getenv('DATABRICKS_TOKEN')):
return api_token
try:
api_token = get_repl_context().apiToken
if not api_token:
raise ValueError("context doesn't contain apiToken.")
except Exception as e:
raise ValueError(
f"api_token was not set and cannot be automatically inferred. Set environment variable 'DATABRICKS_TOKEN'. Received error: {e}"
)
return api_token
|
def get_default_api_token() ->str:
"""Gets the default Databricks personal access token.
Raises an error if the token cannot be automatically determined.
"""
if (api_token := os.getenv('DATABRICKS_TOKEN')):
return api_token
try:
api_token = get_repl_context().apiToken
if not api_token:
raise ValueError("context doesn't contain apiToken.")
except Exception as e:
raise ValueError(
f"api_token was not set and cannot be automatically inferred. Set environment variable 'DATABRICKS_TOKEN'. Received error: {e}"
)
return api_token
|
Gets the default Databricks personal access token.
Raises an error if the token cannot be automatically determined.
|
_similarity_search_with_relevance_scores
|
return self.similarity_search_with_score(query, k, **kwargs)
|
def _similarity_search_with_relevance_scores(self, query: str, k: int=4, **
kwargs: Any) ->List[Tuple[Document, float]]:
return self.similarity_search_with_score(query, k, **kwargs)
| null |
test_generativeai_stream
|
llm = GoogleGenerativeAI(temperature=0, model='gemini-pro')
outputs = list(llm.stream('Please say foo:'))
assert isinstance(outputs[0], str)
|
def test_generativeai_stream() ->None:
llm = GoogleGenerativeAI(temperature=0, model='gemini-pro')
outputs = list(llm.stream('Please say foo:'))
assert isinstance(outputs[0], str)
| null |
lazy_load
|
"""Load documents lazily with concurrent parsing."""
with concurrent.futures.ThreadPoolExecutor(max_workers=self.num_workers
) as executor:
futures = {executor.submit(self.blob_parser.lazy_parse, blob) for blob in
self.blob_loader.yield_blobs()}
for future in concurrent.futures.as_completed(futures):
yield from future.result()
|
def lazy_load(self) ->Iterator[Document]:
"""Load documents lazily with concurrent parsing."""
with concurrent.futures.ThreadPoolExecutor(max_workers=self.num_workers
) as executor:
futures = {executor.submit(self.blob_parser.lazy_parse, blob) for
blob in self.blob_loader.yield_blobs()}
for future in concurrent.futures.as_completed(futures):
yield from future.result()
|
Load documents lazily with concurrent parsing.
|
add_embeddings
|
"""Add the given texts and embeddings to the vectorstore.
Args:
text_embeddings: Iterable pairs of string and embedding to
add to the vectorstore.
metadatas: Optional list of metadatas associated with the texts.
ids: Optional list of unique IDs.
Returns:
List of ids from adding the texts into the vectorstore.
"""
texts, embeddings = zip(*text_embeddings)
return self.__add(texts, embeddings, metadatas=metadatas, ids=ids)
|
def add_embeddings(self, text_embeddings: Iterable[Tuple[str, List[float]]],
metadatas: Optional[List[dict]]=None, ids: Optional[List[str]]=None, **
kwargs: Any) ->List[str]:
"""Add the given texts and embeddings to the vectorstore.
Args:
text_embeddings: Iterable pairs of string and embedding to
add to the vectorstore.
metadatas: Optional list of metadatas associated with the texts.
ids: Optional list of unique IDs.
Returns:
List of ids from adding the texts into the vectorstore.
"""
texts, embeddings = zip(*text_embeddings)
return self.__add(texts, embeddings, metadatas=metadatas, ids=ids)
|
Add the given texts and embeddings to the vectorstore.
Args:
text_embeddings: Iterable pairs of string and embedding to
add to the vectorstore.
metadatas: Optional list of metadatas associated with the texts.
ids: Optional list of unique IDs.
Returns:
List of ids from adding the texts into the vectorstore.
|
_on_retriever_start
|
"""Process the Retriever Run upon start."""
|
def _on_retriever_start(self, run: Run) ->None:
"""Process the Retriever Run upon start."""
|
Process the Retriever Run upon start.
|
pending
|
return [item for idx, item in enumerate(iterable) if idx not in results_map]
|
def pending(iterable: List[U]) ->List[U]:
return [item for idx, item in enumerate(iterable) if idx not in results_map
]
| null |
_init_resp
|
return {k: None for k in self.callback_columns}
|
def _init_resp(self) ->Dict:
return {k: None for k in self.callback_columns}
| null |
embed
|
payload = {'model': self.model, 'type': embed_type, 'texts': texts}
headers = {'Authorization':
f'Bearer {self.minimax_api_key.get_secret_value()}', 'Content-Type':
'application/json'}
params = {'GroupId': self.minimax_group_id}
response = requests.post(self.endpoint_url, params=params, headers=headers,
json=payload)
parsed_response = response.json()
if parsed_response['base_resp']['status_code'] != 0:
raise ValueError(
f"MiniMax API returned an error: {parsed_response['base_resp']}")
embeddings = parsed_response['vectors']
return embeddings
|
def embed(self, texts: List[str], embed_type: str) ->List[List[float]]:
payload = {'model': self.model, 'type': embed_type, 'texts': texts}
headers = {'Authorization':
f'Bearer {self.minimax_api_key.get_secret_value()}', 'Content-Type':
'application/json'}
params = {'GroupId': self.minimax_group_id}
response = requests.post(self.endpoint_url, params=params, headers=
headers, json=payload)
parsed_response = response.json()
if parsed_response['base_resp']['status_code'] != 0:
raise ValueError(
f"MiniMax API returned an error: {parsed_response['base_resp']}")
embeddings = parsed_response['vectors']
return embeddings
| null |
_import_metaphor_search
|
from langchain_community.utilities.metaphor_search import MetaphorSearchAPIWrapper
return MetaphorSearchAPIWrapper
|
def _import_metaphor_search() ->Any:
from langchain_community.utilities.metaphor_search import MetaphorSearchAPIWrapper
return MetaphorSearchAPIWrapper
| null |
model
|
return Orca(llm=FakeLLM())
|
@pytest.fixture
def model() ->Orca:
return Orca(llm=FakeLLM())
| null |
_run
|
"""Use the tool."""
if action == 'push':
self._check_params(path, text)
if path:
return self._pushFile(id, path)
if text:
return self._pushText(id, text)
elif action == 'pull':
return self._pull(id)
return ''
|
def _run(self, action: str, id: str, path: Optional[str], text: Optional[
str], run_manager: Optional[CallbackManagerForToolRun]=None) ->str:
"""Use the tool."""
if action == 'push':
self._check_params(path, text)
if path:
return self._pushFile(id, path)
if text:
return self._pushText(id, text)
elif action == 'pull':
return self._pull(id)
return ''
|
Use the tool.
|
test__convert_dict_to_message_system
|
message = {'role': 'system', 'content': 'foo'}
result = convert_dict_to_message(message)
expected_output = SystemMessage(content='foo')
assert result == expected_output
|
def test__convert_dict_to_message_system() ->None:
message = {'role': 'system', 'content': 'foo'}
result = convert_dict_to_message(message)
expected_output = SystemMessage(content='foo')
assert result == expected_output
| null |
test_redis_from_texts_return_keys
|
"""Test from_texts_return_keys constructor."""
docsearch, keys = Redis.from_texts_return_keys(texts, FakeEmbeddings(),
redis_url=TEST_REDIS_URL)
output = docsearch.similarity_search('foo', k=1, return_metadata=False)
assert output == TEST_SINGLE_RESULT
assert len(keys) == len(texts)
assert drop(docsearch.index_name)
|
def test_redis_from_texts_return_keys(texts: List[str]) ->None:
"""Test from_texts_return_keys constructor."""
docsearch, keys = Redis.from_texts_return_keys(texts, FakeEmbeddings(),
redis_url=TEST_REDIS_URL)
output = docsearch.similarity_search('foo', k=1, return_metadata=False)
assert output == TEST_SINGLE_RESULT
assert len(keys) == len(texts)
assert drop(docsearch.index_name)
|
Test from_texts_return_keys constructor.
|
get_folders
|
"""
Get all the folders for the team.
"""
url = f'{DEFAULT_URL}/team/' + str(self.team_id) + '/space'
params = self.get_default_params()
response = requests.get(url, headers=self.get_headers(), params=params)
return {'response': response}
|
def get_folders(self) ->Dict:
"""
Get all the folders for the team.
"""
url = f'{DEFAULT_URL}/team/' + str(self.team_id) + '/space'
params = self.get_default_params()
response = requests.get(url, headers=self.get_headers(), params=params)
return {'response': response}
|
Get all the folders for the team.
|
__init__
|
"""Initialize with file path."""
self.file = file
super().__init__(mode=mode, **unstructured_kwargs)
|
def __init__(self, file: Union[IO, Sequence[IO]], mode: str='single', **
unstructured_kwargs: Any):
"""Initialize with file path."""
self.file = file
super().__init__(mode=mode, **unstructured_kwargs)
|
Initialize with file path.
|
test__get_prompts_invalid
|
with pytest.raises(InputFormatError):
_get_prompt(inputs)
|
@pytest.mark.parametrize('inputs', _INVALID_PROMPTS)
def test__get_prompts_invalid(inputs: Dict[str, Any]) ->None:
with pytest.raises(InputFormatError):
_get_prompt(inputs)
| null |
test_add_texts
|
"""Test add_texts dataset."""
texts = ['foo', 'bar', 'baz']
metadatas = [{'page': str(i)} for i in range(len(texts))]
deeplake_datastore.add_texts(texts=texts, metadatas=metadatas)
with pytest.raises(TypeError):
deeplake_datastore.add_texts(texts=texts, metada=metadatas)
|
def test_add_texts(deeplake_datastore: DeepLake) ->None:
"""Test add_texts dataset."""
texts = ['foo', 'bar', 'baz']
metadatas = [{'page': str(i)} for i in range(len(texts))]
deeplake_datastore.add_texts(texts=texts, metadatas=metadatas)
with pytest.raises(TypeError):
deeplake_datastore.add_texts(texts=texts, metada=metadatas)
|
Test add_texts dataset.
|
test_json_distance_evaluator_parse_json
|
string = '{"a": 1}'
result = json_distance_evaluator._parse_json(string)
assert result == {'a': 1}
|
@pytest.mark.requires('rapidfuzz')
def test_json_distance_evaluator_parse_json(json_distance_evaluator:
JsonEditDistanceEvaluator) ->None:
string = '{"a": 1}'
result = json_distance_evaluator._parse_json(string)
assert result == {'a': 1}
| null |
max_marginal_relevance_search
|
raise NotImplementedError
|
def max_marginal_relevance_search(self, query: str, k: int=4, fetch_k: int=
    20, lambda_mult: float=0.5, **kwargs: Any) ->List[Document]:
    """MMR search is not supported by this implementation."""
    raise NotImplementedError
| null |
_get_docs
|
"""Get docs to run questioning over."""
|
@abstractmethod
def _get_docs(self, inputs: Dict[str, Any], *, run_manager:
    CallbackManagerForChainRun) ->List[Document]:
    """Get docs to run questioning over.

    Args:
        inputs: The chain's input mapping.
        run_manager: Callback manager for this chain run.

    Returns:
        The documents to answer the question over.
    """
|
Get docs to run questioning over.
|
test_similarity_search_with_score
|
"""Test similarity search with score using Approximate Search."""
metadatas = [{'page': i} for i in range(len(texts))]
docsearch = OpenSearchVectorSearch.from_texts(texts, FakeEmbeddings(),
metadatas=metadatas, opensearch_url=DEFAULT_OPENSEARCH_URL)
output = docsearch.similarity_search_with_score('foo', k=2)
assert output == [(Document(page_content='foo', metadata={'page': 0}), 1.0),
(Document(page_content='bar', metadata={'page': 1}), 0.5)]
|
def test_similarity_search_with_score() ->None:
    """Test similarity search with score using Approximate Search."""
    metadatas = [{'page': i} for i in range(len(texts))]
    docsearch = OpenSearchVectorSearch.from_texts(texts, FakeEmbeddings(),
        metadatas=metadatas, opensearch_url=DEFAULT_OPENSEARCH_URL)
    output = docsearch.similarity_search_with_score('foo', k=2)
    # Exact match scores 1.0; the next-best neighbour scores 0.5.
    assert output == [(Document(page_content='foo', metadata={'page': 0}),
        1.0), (Document(page_content='bar', metadata={'page': 1}), 0.5)]
|
Test similarity search with score using Approximate Search.
|
__init__
|
pass
|
def __init__(self, **kwargs: Any):
    """No-op initializer: accepts and ignores any keyword arguments."""
    pass
| null |
_run
|
return query
|
def _run(self, query: str, run_manager: Optional[CallbackManagerForToolRun]
    =None) ->str:
    """Echo the query back unchanged."""
    return query
| null |
on_tool_start
|
self._require_current_thought().on_tool_start(serialized, input_str, **kwargs)
self._prune_old_thought_containers()
|
def on_tool_start(self, serialized: Dict[str, Any], input_str: str, **
    kwargs: Any) ->None:
    """Forward the tool-start event to the current thought, then prune
    stale thought containers."""
    self._require_current_thought().on_tool_start(serialized, input_str, **
        kwargs)
    self._prune_old_thought_containers()
| null |
on_llm_start
|
"""Save the prompts in memory when an LLM starts."""
if self.input_type != 'Text':
raise ValueError(
f"""
Label Studio project "{self.project_name}" has an input type <{self.input_type}>. To make it work with the mode="chat", the input type should be <Text>.
Read more here https://labelstud.io/tags/text"""
)
run_id = str(kwargs['run_id'])
self.payload[run_id] = {'prompts': prompts, 'kwargs': kwargs}
|
def on_llm_start(self, serialized: Dict[str, Any], prompts: List[str], **
    kwargs: Any) ->None:
    """Save the prompts in memory when an LLM starts."""
    # Chat mode requires the Label Studio project to use the <Text> input tag.
    if self.input_type != 'Text':
        raise ValueError(
            f"""
 Label Studio project "{self.project_name}" has an input type <{self.input_type}>. To make it work with the mode="chat", the input type should be <Text>.
Read more here https://labelstud.io/tags/text"""
            )
    # Key the stored payload by run id so a later callback can pair the
    # LLM's responses with these prompts.
    run_id = str(kwargs['run_id'])
    self.payload[run_id] = {'prompts': prompts, 'kwargs': kwargs}
|
Save the prompts in memory when an LLM starts.
|
on_agent_finish_common
|
self.agent_ends += 1
self.ends += 1
|
def on_agent_finish_common(self) ->None:
    """Bump the agent-finish counter and the overall end counter."""
    self.agent_ends = self.agent_ends + 1
    self.ends = self.ends + 1
| null |
sorted_values
|
"""Return a list of values in dict sorted by key."""
return [values[val] for val in sorted(values)]
|
def sorted_values(values: Dict[str, str]) ->List[Any]:
    """Return the dict's values, ordered by their sorted keys."""
    ordered_keys = sorted(values.keys())
    return [values[key] for key in ordered_keys]
|
Return a list of values in dict sorted by key.
|
test_create_documents_with_metadata
|
"""Test create documents with metadata method."""
texts = ['foo bar', 'baz']
splitter = CharacterTextSplitter(separator=' ', chunk_size=3, chunk_overlap=0)
docs = splitter.create_documents(texts, [{'source': '1'}, {'source': '2'}])
expected_docs = [Document(page_content='foo', metadata={'source': '1'}),
Document(page_content='bar', metadata={'source': '1'}), Document(
page_content='baz', metadata={'source': '2'})]
assert docs == expected_docs
|
def test_create_documents_with_metadata() ->None:
    """Test create documents with metadata method."""
    texts = ['foo bar', 'baz']
    splitter = CharacterTextSplitter(separator=' ', chunk_size=3,
        chunk_overlap=0)
    docs = splitter.create_documents(texts, [{'source': '1'}, {'source': '2'}])
    # Each chunk must inherit the metadata of the text it was split from.
    expected_docs = [Document(page_content='foo', metadata={'source': '1'}),
        Document(page_content='bar', metadata={'source': '1'}), Document(
        page_content='baz', metadata={'source': '2'})]
    assert docs == expected_docs
|
Test create documents with metadata method.
|
test_functionality
|
"""Test correct functionality."""
chain = PythonREPL()
code = 'print(1 + 1)'
output = chain.run(code)
assert output == '2\n'
|
def test_functionality() ->None:
    """Test correct functionality."""
    chain = PythonREPL()
    code = 'print(1 + 1)'
    # The REPL captures stdout, so the trailing newline is part of the output.
    output = chain.run(code)
    assert output == '2\n'
|
Test correct functionality.
|
get_images
|
"""
Extract images.
:param img_path: A string representing the path to the images.
"""
pil_images = [Image.open(os.path.join(img_path, image_name)) for image_name in
os.listdir(img_path) if image_name.endswith('.jpg')]
return pil_images
|
def get_images(img_path):
    """Load every ``.jpg`` file in *img_path* as a PIL image.

    :param img_path: Directory containing the image files.
    """
    images = []
    for file_name in os.listdir(img_path):
        if file_name.endswith('.jpg'):
            images.append(Image.open(os.path.join(img_path, file_name)))
    return images
|
Extract images.
:param img_path: A string representing the path to the images.
|
add_texts
|
"""Run more texts through the embeddings and add to the vectorstore.
Args:
texts: Iterable of strings to add to the vectorstore.
metadatas: Optional list of metadatas associated with the texts.
ids: Optional list of ids to associate with the texts.
Returns:
List of ids from adding the texts into the vectorstore.
"""
ids = ids or [str(uuid.uuid1().int)[:13] for _ in texts]
metadatas_list = []
texts = list(texts)
embeds = self._embedding.embed_documents(texts)
for i, text in enumerate(texts):
metadata = metadatas[i] if metadatas else {}
metadata[self._text_key] = text
metadatas_list.append(metadata)
for i in range(0, len(list(texts)), batch_size):
j = i + batch_size
add_res = self._client.vector_add(self._index_name, metadatas_list[i:j],
embeds[i:j], ids[i:j])
if not add_res:
raise Exception('vector add fail')
return ids
|
def add_texts(self, texts: Iterable[str], metadatas: Optional[List[dict]]=
    None, ids: Optional[List[str]]=None, text_key: str='text', batch_size:
    int=500, **kwargs: Any) ->List[str]:
    """Run more texts through the embeddings and add to the vectorstore.
    Args:
        texts: Iterable of strings to add to the vectorstore.
        metadatas: Optional list of metadatas associated with the texts.
        ids: Optional list of ids to associate with the texts.
    Returns:
        List of ids from adding the texts into the vectorstore.
    """
    # Derive 13-digit ids from uuid1 when the caller supplies none.
    ids = ids or [str(uuid.uuid1().int)[:13] for _ in texts]
    metadatas_list = []
    texts = list(texts)
    embeds = self._embedding.embed_documents(texts)
    # Store the raw text in each metadata record under the instance's key.
    # NOTE(review): the `text_key` parameter appears unused here — the
    # instance attribute self._text_key is used instead; confirm intended.
    for i, text in enumerate(texts):
        metadata = metadatas[i] if metadatas else {}
        metadata[self._text_key] = text
        metadatas_list.append(metadata)
    # Upload vectors in slices of `batch_size`; abort on the first failure.
    for i in range(0, len(list(texts)), batch_size):
        j = i + batch_size
        add_res = self._client.vector_add(self._index_name, metadatas_list[
            i:j], embeds[i:j], ids[i:j])
        if not add_res:
            raise Exception('vector add fail')
    return ids
|
Run more texts through the embeddings and add to the vectorstore.
Args:
texts: Iterable of strings to add to the vectorstore.
metadatas: Optional list of metadatas associated with the texts.
ids: Optional list of ids to associate with the texts.
Returns:
List of ids from adding the texts into the vectorstore.
|
_get_next_response_in_sequence
|
queries = cast(Mapping, self.queries)
response = queries[list(queries.keys())[self.response_index]]
self.response_index = self.response_index + 1
return response
|
@property
def _get_next_response_in_sequence(self) ->str:
    """Return the next canned response, advancing the internal cursor."""
    queries = cast(Mapping, self.queries)
    # Pick the value whose key sits at the current cursor position.
    response = queries[list(queries.keys())[self.response_index]]
    self.response_index = self.response_index + 1
    return response
| null |
_get
|
"""Method for getting from the AI Foundation Model Function API."""
last_inputs = {'url': invoke_url, 'headers': self.headers['call'], 'json':
payload, 'stream': False}
session = self.get_session_fn()
last_response = session.get(**last_inputs)
self._try_raise(last_response)
return last_response, session
|
def _get(self, invoke_url: str, payload: dict={}) ->Tuple[Response, Any]:
    """Method for getting from the AI Foundation Model Function API.

    Returns the response together with the session that produced it.
    """
    # NOTE(review): mutable default argument; safe only because `payload`
    # is never mutated in this method.
    last_inputs = {'url': invoke_url, 'headers': self.headers['call'],
        'json': payload, 'stream': False}
    session = self.get_session_fn()
    last_response = session.get(**last_inputs)
    # Raise early on error statuses before handing the response back.
    self._try_raise(last_response)
    return last_response, session
|
Method for getting from the AI Foundation Model Function API.
|
_generate_documents_to_add
|
from zep_python.document import Document as ZepDocument
embeddings = None
if self._collection and self._collection.is_auto_embedded:
if self._embedding is not None:
warnings.warn(
"""The collection is set to auto-embed and an embedding
function is present. Ignoring the embedding function."""
, stacklevel=2)
elif self._embedding is not None:
embeddings = self._embedding.embed_documents(list(texts))
if self._collection and self._collection.embedding_dimensions != len(
embeddings[0]):
raise ValueError(
f'The embedding dimensions of the collection and the embedding function do not match. Collection dimensions: {self._collection.embedding_dimensions}, Embedding dimensions: {len(embeddings[0])}'
)
else:
pass
documents: List[ZepDocument] = []
for i, d in enumerate(texts):
documents.append(ZepDocument(content=d, metadata=metadatas[i] if
metadatas else None, document_id=document_ids[i] if document_ids else
None, embedding=embeddings[i] if embeddings else None))
return documents
|
def _generate_documents_to_add(self, texts: Iterable[str], metadatas:
    Optional[List[Dict[Any, Any]]]=None, document_ids: Optional[List[str]]=None
    ) ->List[ZepDocument]:
    """Build ZepDocument objects for the given texts.

    Embeddings are computed locally only when the collection is not
    auto-embedded and a local embedding function is configured; otherwise
    documents are sent without embeddings.
    """
    from zep_python.document import Document as ZepDocument
    embeddings = None
    if self._collection and self._collection.is_auto_embedded:
        # Server-side embedding wins; a local embedder is only warned about.
        if self._embedding is not None:
            warnings.warn(
                """The collection is set to auto-embed and an embedding
                function is present. Ignoring the embedding function."""
                , stacklevel=2)
    elif self._embedding is not None:
        embeddings = self._embedding.embed_documents(list(texts))
        # Sanity-check local embedding width against the collection's.
        if self._collection and self._collection.embedding_dimensions != len(
            embeddings[0]):
            raise ValueError(
                f'The embedding dimensions of the collection and the embedding function do not match. Collection dimensions: {self._collection.embedding_dimensions}, Embedding dimensions: {len(embeddings[0])}'
                )
    else:
        pass
    documents: List[ZepDocument] = []
    for i, d in enumerate(texts):
        documents.append(ZepDocument(content=d, metadata=metadatas[i] if
            metadatas else None, document_id=document_ids[i] if
            document_ids else None, embedding=embeddings[i] if embeddings else
            None))
    return documents
| null |
test_prompt_empty_input_variable
|
"""Test error is raised when empty string input variable."""
with pytest.raises(ValueError):
PromptTemplate(input_variables=[''], template='{}', validate_template=True)
|
def test_prompt_empty_input_variable() ->None:
    """Test error is raised when empty string input variable."""
    with pytest.raises(ValueError):
        PromptTemplate(input_variables=[''], template='{}',
            validate_template=True)
|
Test error is raised when empty string input variable.
|
lower_case_name
|
v = v.lower()
return v
|
@validator('name')
def lower_case_name(cls, v: str) ->str:
    """Normalize the ``name`` field to lowercase."""
    return v.lower()
| null |
test_candidates
|
model = ChatVertexAI(model_name='chat-bison@001', temperature=0.3, n=2)
message = HumanMessage(content='Hello')
response = model.generate(messages=[[message]])
assert isinstance(response, LLMResult)
assert len(response.generations) == 1
assert len(response.generations[0]) == 2
|
@pytest.mark.xfail
@pytest.mark.scheduled
def test_candidates() ->None:
    """With n=2, a single prompt should yield two candidate generations."""
    model = ChatVertexAI(model_name='chat-bison@001', temperature=0.3, n=2)
    message = HumanMessage(content='Hello')
    response = model.generate(messages=[[message]])
    assert isinstance(response, LLMResult)
    assert len(response.generations) == 1
    assert len(response.generations[0]) == 2
| null |
_import_annoy
|
from langchain_community.vectorstores.annoy import Annoy
return Annoy
|
def _import_annoy() ->Any:
    """Lazily import the Annoy vector store to avoid a hard dependency."""
    from langchain_community.vectorstores.annoy import Annoy
    return Annoy
| null |
_get_tools_requests_post
|
return RequestsPostTool(requests_wrapper=TextRequestsWrapper())
|
def _get_tools_requests_post() ->BaseTool:
    """Build a POST-request tool backed by a plain text requests wrapper."""
    return RequestsPostTool(requests_wrapper=TextRequestsWrapper())
| null |
_identifying_params
|
"""Get the identifying parameters."""
_model_kwargs = self.model_kwargs or {}
return {**{'gradient_api_url': self.gradient_api_url}, **{'model_kwargs':
_model_kwargs}}
|
@property
def _identifying_params(self) ->Mapping[str, Any]:
    """Get the identifying parameters."""
    kwargs = self.model_kwargs or {}
    return {'gradient_api_url': self.gradient_api_url, 'model_kwargs': kwargs}
|
Get the identifying parameters.
|
test_scann_with_metadatas_and_list_filter
|
texts = ['foo', 'bar', 'baz', 'foo', 'qux']
metadatas = [({'page': i} if i <= 3 else {'page': 3}) for i in range(len(
texts))]
docsearch = ScaNN.from_texts(texts, FakeEmbeddings(), metadatas=metadatas)
expected_docstore = InMemoryDocstore({docsearch.index_to_docstore_id[0]:
Document(page_content='foo', metadata={'page': 0}), docsearch.
index_to_docstore_id[1]: Document(page_content='bar', metadata={'page':
1}), docsearch.index_to_docstore_id[2]: Document(page_content='baz',
metadata={'page': 2}), docsearch.index_to_docstore_id[3]: Document(
page_content='foo', metadata={'page': 3}), docsearch.
index_to_docstore_id[4]: Document(page_content='qux', metadata={'page':
3})})
assert docsearch.docstore.__dict__ == expected_docstore.__dict__
output = docsearch.similarity_search('foor', k=1, filter={'page': [0, 1, 2]})
assert output == [Document(page_content='foo', metadata={'page': 0})]
|
def test_scann_with_metadatas_and_list_filter() ->None:
    """A list-valued filter should match any of the listed page values."""
    texts = ['foo', 'bar', 'baz', 'foo', 'qux']
    # Pages 0..3, with the final text clamped to page 3.
    metadatas = [({'page': i} if i <= 3 else {'page': 3}) for i in range(
        len(texts))]
    docsearch = ScaNN.from_texts(texts, FakeEmbeddings(), metadatas=metadatas)
    expected_docstore = InMemoryDocstore({docsearch.index_to_docstore_id[0]:
        Document(page_content='foo', metadata={'page': 0}), docsearch.
        index_to_docstore_id[1]: Document(page_content='bar', metadata={
        'page': 1}), docsearch.index_to_docstore_id[2]: Document(
        page_content='baz', metadata={'page': 2}), docsearch.
        index_to_docstore_id[3]: Document(page_content='foo', metadata={
        'page': 3}), docsearch.index_to_docstore_id[4]: Document(
        page_content='qux', metadata={'page': 3})})
    assert docsearch.docstore.__dict__ == expected_docstore.__dict__
    output = docsearch.similarity_search('foor', k=1, filter={'page': [0, 1,
        2]})
    assert output == [Document(page_content='foo', metadata={'page': 0})]
| null |
_validate_tools
|
validate_tools_single_input(cls.__name__, tools)
super()._validate_tools(tools)
if len(tools) != 1:
raise ValueError(f'Exactly one tool must be specified, but got {tools}')
tool_names = {tool.name for tool in tools}
if tool_names != {'Intermediate Answer'}:
raise ValueError(
f'Tool name should be Intermediate Answer, got {tool_names}')
|
@classmethod
def _validate_tools(cls, tools: Sequence[BaseTool]) ->None:
    """Require exactly one single-input tool named 'Intermediate Answer'."""
    validate_tools_single_input(cls.__name__, tools)
    super()._validate_tools(tools)
    if len(tools) != 1:
        raise ValueError(f'Exactly one tool must be specified, but got {tools}'
            )
    tool_names = {tool.name for tool in tools}
    if tool_names != {'Intermediate Answer'}:
        raise ValueError(
            f'Tool name should be Intermediate Answer, got {tool_names}')
| null |
_import_hologres
|
from langchain_community.vectorstores.hologres import Hologres
return Hologres
|
def _import_hologres() ->Any:
    """Lazily import the Hologres vector store to avoid a hard dependency."""
    from langchain_community.vectorstores.hologres import Hologres
    return Hologres
| null |
on_chain_end_common
|
self.chain_ends += 1
self.ends += 1
|
def on_chain_end_common(self) ->None:
    """Bump the chain-end counter and the overall end counter."""
    self.chain_ends = self.chain_ends + 1
    self.ends = self.ends + 1
| null |
__del__
|
try:
self.session_pool.close()
except Exception as e:
logger.warning(f'Could not close session pool. Error: {e}')
|
def __del__(self) ->None:
    """Best-effort cleanup: close the session pool, logging any failure."""
    try:
        self.session_pool.close()
    except Exception as e:
        # Never raise from a finalizer; just record the problem.
        logger.warning(f'Could not close session pool. Error: {e}')
| null |
test_l2
|
"""Test Flat L2 distance."""
texts = ['foo', 'bar', 'baz']
docsearch = USearch.from_texts(texts, FakeEmbeddings(), metric='l2_sq')
output = docsearch.similarity_search_with_score('far', k=2)
_, score = output[1]
assert score == 1.0
|
def test_l2() ->None:
    """Test Flat L2 distance."""
    texts = ['foo', 'bar', 'baz']
    docsearch = USearch.from_texts(texts, FakeEmbeddings(), metric='l2_sq')
    output = docsearch.similarity_search_with_score('far', k=2)
    # Check the score of the second-nearest result.
    _, score = output[1]
    assert score == 1.0
|
Test Flat L2 distance.
|
preview_as_str
|
"""Same as preview, but returns a stringified version of the JSON for
insertting back into an LLM."""
data = self.preview(*args, **kwargs)
return json.dumps(data)
|
def preview_as_str(self, *args, **kwargs) ->str:
    """Like ``preview``, but JSON-encodes the result so it can be inserted
    back into an LLM prompt."""
    return json.dumps(self.preview(*args, **kwargs))
|
Same as preview, but returns a stringified version of the JSON for
insertting back into an LLM.
|
_run
|
"""Get the schema for tables in a comma-separated list."""
return self.powerbi.get_table_info(tool_input.split(', '))
|
def _run(self, tool_input: str, run_manager: Optional[
    CallbackManagerForToolRun]=None) ->str:
    """Get the schema for tables in a comma-separated list."""
    # Input is expected as 'table1, table2, ...' (comma followed by a space).
    return self.powerbi.get_table_info(tool_input.split(', '))
|
Get the schema for tables in a comma-separated list.
|
_create_table
|
"""
Create VectorStore Table
Args:
dim:dimension of vector
fields_list: the field you want to store
Return:
code,0 for success,1 for failed
"""
type_dict = {'int': vearch.dataType.INT, 'str': vearch.dataType.STRING}
engine_info = {'index_size': 10000, 'retrieval_type': 'IVFPQ',
'retrieval_param': {'ncentroids': 2048, 'nsubvector': 32}}
fields = [vearch.GammaFieldInfo(fi['field'], type_dict[fi['type']]) for fi in
field_list]
vector_field = vearch.GammaVectorInfo(name='text_embedding', type=vearch.
dataType.VECTOR, is_index=True, dimension=dim, model_id='', store_type=
'MemoryOnly', store_param={'cache_size': 10000}, has_source=False)
response_code = self.vearch.create_table(engine_info, name=self.
using_table_name, fields=fields, vector_field=vector_field)
return response_code
|
def _create_table(self, dim: int=1024, field_list: List[dict]=[{'field':
    'text', 'type': 'str'}, {'field': 'metadata', 'type': 'str'}]) ->int:
    """
    Create VectorStore Table
    Args:
        dim:dimension of vector
        fields_list: the field you want to store
    Return:
        code,0 for success,1 for failed
    """
    # Map the declared field types onto vearch's data types.
    type_dict = {'int': vearch.dataType.INT, 'str': vearch.dataType.STRING}
    # Fixed IVFPQ index configuration for the engine.
    engine_info = {'index_size': 10000, 'retrieval_type': 'IVFPQ',
        'retrieval_param': {'ncentroids': 2048, 'nsubvector': 32}}
    fields = [vearch.GammaFieldInfo(fi['field'], type_dict[fi['type']]) for
        fi in field_list]
    # One indexed vector column holding the text embedding, kept in memory.
    vector_field = vearch.GammaVectorInfo(name='text_embedding', type=
        vearch.dataType.VECTOR, is_index=True, dimension=dim, model_id='',
        store_type='MemoryOnly', store_param={'cache_size': 10000},
        has_source=False)
    response_code = self.vearch.create_table(engine_info, name=self.
        using_table_name, fields=fields, vector_field=vector_field)
    return response_code
|
Create VectorStore Table
Args:
dim:dimension of vector
fields_list: the field you want to store
Return:
code,0 for success,1 for failed
|
_import_timescalevector
|
from langchain_community.vectorstores.timescalevector import TimescaleVector
return TimescaleVector
|
def _import_timescalevector() ->Any:
    """Lazily import TimescaleVector to avoid a hard dependency."""
    from langchain_community.vectorstores.timescalevector import TimescaleVector
    return TimescaleVector
| null |
on_retriever_start
|
"""Run when retriever starts running."""
if run_id is None:
run_id = uuid.uuid4()
handle_event(self.handlers, 'on_retriever_start', 'ignore_retriever',
serialized, query, run_id=run_id, parent_run_id=self.parent_run_id,
tags=self.tags, metadata=self.metadata, **kwargs)
return CallbackManagerForRetrieverRun(run_id=run_id, handlers=self.handlers,
inheritable_handlers=self.inheritable_handlers, parent_run_id=self.
parent_run_id, tags=self.tags, inheritable_tags=self.inheritable_tags,
metadata=self.metadata, inheritable_metadata=self.inheritable_metadata)
|
def on_retriever_start(self, serialized: Dict[str, Any], query: str, run_id:
    Optional[UUID]=None, parent_run_id: Optional[UUID]=None, **kwargs: Any
    ) ->CallbackManagerForRetrieverRun:
    """Run when retriever starts running.

    Notifies all handlers and returns a child callback manager scoped to
    this retriever run.
    """
    # Generate a fresh run id when the caller did not supply one.
    if run_id is None:
        run_id = uuid.uuid4()
    handle_event(self.handlers, 'on_retriever_start', 'ignore_retriever',
        serialized, query, run_id=run_id, parent_run_id=self.parent_run_id,
        tags=self.tags, metadata=self.metadata, **kwargs)
    # The child manager inherits this manager's handlers, tags and metadata.
    return CallbackManagerForRetrieverRun(run_id=run_id, handlers=self.
        handlers, inheritable_handlers=self.inheritable_handlers,
        parent_run_id=self.parent_run_id, tags=self.tags, inheritable_tags=
        self.inheritable_tags, metadata=self.metadata, inheritable_metadata
        =self.inheritable_metadata)
|
Run when retriever starts running.
|
test_add_messages
|
file_chat_message_history.add_user_message('Hello!')
file_chat_message_history.add_ai_message('Hi there!')
messages = file_chat_message_history.messages
assert len(messages) == 2
assert isinstance(messages[0], HumanMessage)
assert isinstance(messages[1], AIMessage)
assert messages[0].content == 'Hello!'
assert messages[1].content == 'Hi there!'
|
def test_add_messages(file_chat_message_history: FileChatMessageHistory
    ) ->None:
    """Messages added to the history come back in order with correct types."""
    file_chat_message_history.add_user_message('Hello!')
    file_chat_message_history.add_ai_message('Hi there!')
    messages = file_chat_message_history.messages
    assert len(messages) == 2
    assert isinstance(messages[0], HumanMessage)
    assert isinstance(messages[1], AIMessage)
    assert messages[0].content == 'Hello!'
    assert messages[1].content == 'Hi there!'
| null |
_filter_to_metadata
|
if filter_dict is None:
return {}
else:
return {f'metadata.{mdk}': mdv for mdk, mdv in filter_dict.items()}
|
@staticmethod
def _filter_to_metadata(filter_dict: Optional[Dict[str, str]]) ->Dict[str, Any
    ]:
    """Prefix every filter key with ``metadata.`` for the backend query.

    A ``None`` filter maps to an empty dict (no filtering).
    """
    if filter_dict is None:
        return {}
    return {f'metadata.{key}': value for key, value in filter_dict.items()}
| null |
test_awadb_with_metadatas_with_scores
|
"""Test end to end construction and scored search."""
texts = ['foo', 'bar', 'baz']
metadatas = [{'page': str(i)} for i in range(len(texts))]
docsearch = AwaDB.from_texts(table_name='test_awadb', texts=texts,
embedding=FakeEmbeddings(), metadatas=metadatas)
output = docsearch.similarity_search_with_score('foo', k=1)
assert output == [(Document(page_content='foo', metadata={'page': '0'}), 0.0)]
|
def test_awadb_with_metadatas_with_scores() ->None:
    """Test end to end construction and scored search."""
    texts = ['foo', 'bar', 'baz']
    metadatas = [{'page': str(i)} for i in range(len(texts))]
    docsearch = AwaDB.from_texts(table_name='test_awadb', texts=texts,
        embedding=FakeEmbeddings(), metadatas=metadatas)
    # An exact match should come back with distance 0.0.
    output = docsearch.similarity_search_with_score('foo', k=1)
    assert output == [(Document(page_content='foo', metadata={'page': '0'}),
        0.0)]
|
Test end to end construction and scored search.
|
next_thought
|
if thoughts_path not in self.tot_memory or not self.tot_memory[thoughts_path]:
new_thoughts = self.predict_and_parse(problem_description=
problem_description, thoughts=thoughts_path, n=self.c, **kwargs)
if not new_thoughts:
return ''
if isinstance(new_thoughts, list):
self.tot_memory[thoughts_path] = new_thoughts[::-1]
else:
return ''
return self.tot_memory[thoughts_path].pop()
|
def next_thought(self, problem_description: str, thoughts_path: Tuple[str,
    ...]=(), **kwargs: Any) ->str:
    """Return the next candidate thought for the given thoughts path.

    Up to ``self.c`` candidates are generated lazily per path and cached in
    ``self.tot_memory``; each call pops one. Returns '' when generation
    yields nothing or the parsed result is not a list.
    """
    if thoughts_path not in self.tot_memory or not self.tot_memory[
        thoughts_path]:
        new_thoughts = self.predict_and_parse(problem_description=
            problem_description, thoughts=thoughts_path, n=self.c, **kwargs)
        if not new_thoughts:
            return ''
        if isinstance(new_thoughts, list):
            # Store reversed so pop() yields thoughts in generation order.
            self.tot_memory[thoughts_path] = new_thoughts[::-1]
        else:
            return ''
    return self.tot_memory[thoughts_path].pop()
| null |
parse
|
last_line = text.split('\n')[-1]
if not any([(follow in last_line) for follow in self.followups]):
if self.finish_string not in last_line:
raise OutputParserException(f'Could not parse output: {text}')
return AgentFinish({'output': last_line[len(self.finish_string):]}, text)
after_colon = text.split(':')[-1].strip()
return AgentAction('Intermediate Answer', after_colon, text)
|
def parse(self, text: str) ->Union[AgentAction, AgentFinish]:
    """Parse LLM output into an agent action or a finish.

    The last line decides: with no follow-up marker it must contain the
    finish string (otherwise a parse error); with a marker, the text after
    the final ':' becomes the 'Intermediate Answer' tool input.
    """
    last_line = text.split('\n')[-1]
    if not any([(follow in last_line) for follow in self.followups]):
        if self.finish_string not in last_line:
            raise OutputParserException(f'Could not parse output: {text}')
        # Return everything after the finish string as the final answer.
        return AgentFinish({'output': last_line[len(self.finish_string):]},
            text)
    after_colon = text.split(':')[-1].strip()
    return AgentAction('Intermediate Answer', after_colon, text)
| null |
from_documents
|
"""Construct BESVectorStore wrapper from documents.
Args:
documents: List of documents to add to the Elasticsearch index.
embedding: Embedding function to use to embed the texts.
Do not provide if using a strategy
that doesn't require inference.
kwargs: create index key words arguments
"""
vectorStore = BESVectorStore._bes_vector_store(embedding=embedding, **kwargs)
vectorStore.add_documents(documents)
return vectorStore
|
@classmethod
def from_documents(cls, documents: List[Document], embedding: Optional[
    Embeddings]=None, **kwargs: Any) ->'BESVectorStore':
    """Construct BESVectorStore wrapper from documents.
    Args:
        documents: List of documents to add to the Elasticsearch index.
        embedding: Embedding function to use to embed the texts.
            Do not provide if using a strategy
            that doesn't require inference.
        kwargs: create index key words arguments
    """
    vectorStore = BESVectorStore._bes_vector_store(embedding=embedding, **
        kwargs)
    vectorStore.add_documents(documents)
    return vectorStore
|
Construct BESVectorStore wrapper from documents.
Args:
documents: List of documents to add to the Elasticsearch index.
embedding: Embedding function to use to embed the texts.
Do not provide if using a strategy
that doesn't require inference.
kwargs: create index key words arguments
|
_embed_documents
|
"""Inference function to send to the remote hardware.
Accepts a sentence_transformer model_id and
returns a list of embeddings for each document in the batch.
"""
return client.encode(*args, **kwargs)
|
def _embed_documents(client: Any, *args: Any, **kwargs: Any) ->List[List[float]
    ]:
    """Inference function to send to the remote hardware.
    Accepts a sentence_transformer model_id and
    returns a list of embeddings for each document in the batch.
    """
    # All positional/keyword arguments are forwarded to the model's encode().
    return client.encode(*args, **kwargs)
|
Inference function to send to the remote hardware.
Accepts a sentence_transformer model_id and
returns a list of embeddings for each document in the batch.
|
test_multi_vector_retriever_initialization
|
vectorstore = InMemoryVectorstoreWithSearch()
retriever = MultiVectorRetriever(vectorstore=vectorstore, docstore=
InMemoryStore(), doc_id='doc_id')
documents = [Document(page_content='test document', metadata={'doc_id': '1'})]
retriever.vectorstore.add_documents(documents, ids=['1'])
retriever.docstore.mset(list(zip(['1'], documents)))
results = retriever.invoke('1')
assert len(results) > 0
assert results[0].page_content == 'test document'
|
def test_multi_vector_retriever_initialization() ->None:
    """Retriever resolves a vectorstore hit to its docstore document."""
    vectorstore = InMemoryVectorstoreWithSearch()
    retriever = MultiVectorRetriever(vectorstore=vectorstore, docstore=
        InMemoryStore(), doc_id='doc_id')
    documents = [Document(page_content='test document', metadata={'doc_id':
        '1'})]
    retriever.vectorstore.add_documents(documents, ids=['1'])
    retriever.docstore.mset(list(zip(['1'], documents)))
    results = retriever.invoke('1')
    assert len(results) > 0
    assert results[0].page_content == 'test document'
| null |
test_person
|
p = Person(secret='hello')
assert dumps(p, pretty=True) == snapshot
sp = SpecialPerson(another_secret='Wooo', secret='Hmm')
assert dumps(sp, pretty=True) == snapshot
assert Person.lc_id() == ['tests', 'unit_tests', 'load', 'test_dump', 'Person']
|
def test_person(snapshot: Any) ->None:
    """Serialized Person/SpecialPerson output matches the stored snapshots."""
    p = Person(secret='hello')
    assert dumps(p, pretty=True) == snapshot
    sp = SpecialPerson(another_secret='Wooo', secret='Hmm')
    assert dumps(sp, pretty=True) == snapshot
    assert Person.lc_id() == ['tests', 'unit_tests', 'load', 'test_dump',
        'Person']
| null |
_message_from_dict
|
_type = message['type']
if _type == 'human':
return HumanMessage(**message['data'])
elif _type == 'ai':
return AIMessage(**message['data'])
elif _type == 'system':
return SystemMessage(**message['data'])
elif _type == 'chat':
return ChatMessage(**message['data'])
elif _type == 'function':
return FunctionMessage(**message['data'])
elif _type == 'tool':
return ToolMessage(**message['data'])
elif _type == 'AIMessageChunk':
return AIMessageChunk(**message['data'])
elif _type == 'HumanMessageChunk':
return HumanMessageChunk(**message['data'])
elif _type == 'FunctionMessageChunk':
return FunctionMessageChunk(**message['data'])
elif _type == 'ToolMessageChunk':
return ToolMessageChunk(**message['data'])
elif _type == 'SystemMessageChunk':
return SystemMessageChunk(**message['data'])
else:
raise ValueError(f'Got unexpected message type: {_type}')
|
def _message_from_dict(message: dict) ->BaseMessage:
    """Rebuild a message object from its serialized dict form.

    The ``type`` field selects the concrete message class and ``data``
    supplies its constructor keyword arguments.
    """
    constructors = {'human': HumanMessage, 'ai': AIMessage, 'system':
        SystemMessage, 'chat': ChatMessage, 'function': FunctionMessage,
        'tool': ToolMessage, 'AIMessageChunk': AIMessageChunk,
        'HumanMessageChunk': HumanMessageChunk, 'FunctionMessageChunk':
        FunctionMessageChunk, 'ToolMessageChunk': ToolMessageChunk,
        'SystemMessageChunk': SystemMessageChunk}
    _type = message['type']
    message_class = constructors.get(_type)
    if message_class is None:
        raise ValueError(f'Got unexpected message type: {_type}')
    return message_class(**message['data'])
| null |
lazy_load
|
"""Lazy load records from dataframe."""
crs_str = self.data_frame.crs.to_string() if self.data_frame.crs else None
geometry_type = self.data_frame.geometry.geom_type.iloc[0]
for _, row in self.data_frame.iterrows():
geom = row[self.page_content_column]
xmin, ymin, xmax, ymax = geom.bounds
metadata = row.to_dict()
metadata['crs'] = crs_str
metadata['geometry_type'] = geometry_type
metadata['xmin'] = xmin
metadata['ymin'] = ymin
metadata['xmax'] = xmax
metadata['ymax'] = ymax
metadata.pop(self.page_content_column)
yield Document(page_content=geom.wkt, metadata=metadata)
|
def lazy_load(self) ->Iterator[Document]:
    """Lazy load records from dataframe.

    Each row yields one Document whose page content is the geometry's WKT
    text, with CRS, geometry type and bounding box in metadata.
    """
    crs_str = self.data_frame.crs.to_string() if self.data_frame.crs else None
    # NOTE(review): the geometry type of the FIRST row is applied to every
    # row — confirm that mixed-geometry frames are not expected here.
    geometry_type = self.data_frame.geometry.geom_type.iloc[0]
    for _, row in self.data_frame.iterrows():
        geom = row[self.page_content_column]
        xmin, ymin, xmax, ymax = geom.bounds
        metadata = row.to_dict()
        metadata['crs'] = crs_str
        metadata['geometry_type'] = geometry_type
        metadata['xmin'] = xmin
        metadata['ymin'] = ymin
        metadata['xmax'] = xmax
        metadata['ymax'] = ymax
        # The geometry itself becomes the page content, not metadata.
        metadata.pop(self.page_content_column)
        yield Document(page_content=geom.wkt, metadata=metadata)
|
Lazy load records from dataframe.
|
test_pai_eas_v2_streaming
|
llm = PaiEasEndpoint(eas_service_url=os.getenv('EAS_SERVICE_URL'),
eas_service_token=os.getenv('EAS_SERVICE_TOKEN'), version='2.0')
generator = llm.stream("Q: How do you say 'hello' in German? A:'", stop=['.'])
stream_results_string = ''
assert isinstance(generator, Generator)
for chunk in generator:
assert isinstance(chunk, str)
stream_results_string = chunk
assert len(stream_results_string.strip()) > 1
|
def test_pai_eas_v2_streaming() ->None:
    """Streaming smoke test against a version-2.0 PAI-EAS endpoint.

    Streams a completion, checks each chunk is a string, and checks the
    accumulated output is non-trivial.
    """
    llm = PaiEasEndpoint(eas_service_url=os.getenv('EAS_SERVICE_URL'),
        eas_service_token=os.getenv('EAS_SERVICE_TOKEN'), version='2.0')
    generator = llm.stream("Q: How do you say 'hello' in German? A:'", stop
        =['.'])
    stream_results_string = ''
    assert isinstance(generator, Generator)
    for chunk in generator:
        assert isinstance(chunk, str)
        # Accumulate (not overwrite) so the final assertion checks the
        # whole streamed output rather than just the last chunk, which
        # may legitimately be very short.
        stream_results_string += chunk
    assert len(stream_results_string.strip()) > 1
| null |
default_loader_func
|
return UnstructuredFileLoader(file_path)
|
def default_loader_func(file_path: str) ->BaseLoader:
    """Return the fallback document loader for *file_path*.

    Wraps the path in an UnstructuredFileLoader.
    """
    loader = UnstructuredFileLoader(file_path)
    return loader
| null |
test_get_layer_properties_with_description
|
loader = ArcGISLoader(layer=mock_feature_layer, gis=mock_gis, lyr_desc=
'Custom Description')
props = loader._get_layer_properties('Custom Description')
assert props['layer_description'] == 'Custom Description'
|
def test_get_layer_properties_with_description(arcgis_mocks,
    mock_feature_layer, mock_gis):
    """A user-supplied lyr_desc must be echoed back in the layer properties."""
    custom_description = 'Custom Description'
    loader = ArcGISLoader(layer=mock_feature_layer, gis=mock_gis, lyr_desc
        =custom_description)
    properties = loader._get_layer_properties(custom_description)
    assert properties['layer_description'] == custom_description
| null |
encode_strip_start_and_stop_token_ids
|
return self._encode(text)[1:-1]
|
def encode_strip_start_and_stop_token_ids(text: str) ->List[int]:
    """Encode *text* and drop the first (start) and last (stop) token ids.

    NOTE(review): relies on `self._encode` from the enclosing scope and
    assumes it always emits both a start and a stop token.
    """
    token_ids = self._encode(text)
    return token_ids[1:-1]
| null |
max_marginal_relevance_search_with_score_by_vector
|
"""Return docs and their similarity scores selected using the maximal marginal
relevance.
Maximal marginal relevance optimizes for similarity to query AND diversity
among selected documents.
Args:
embedding: Embedding to look up documents similar to.
k: Number of Documents to return. Defaults to 4.
fetch_k: Number of Documents to fetch before filtering to
pass to MMR algorithm.
lambda_mult: Number between 0 and 1 that determines the degree
of diversity among the results with 0 corresponding
to maximum diversity and 1 to minimum diversity.
Defaults to 0.5.
Returns:
List of Documents and similarity scores selected by maximal marginal
relevance and score for each.
"""
if 'score_threshold' in kwargs:
score_threshold = kwargs.pop('score_threshold')
else:
score_threshold = MAX_FLOAT
scores, indices = self.vector_index.query(np.array([np.array(embedding).
astype(np.float32)]).astype(np.float32), k=fetch_k if filter is None else
fetch_k * 2, **kwargs)
results = self.process_index_results(ids=indices[0], scores=scores[0],
filter=filter, k=fetch_k if filter is None else fetch_k * 2,
score_threshold=score_threshold)
embeddings = [self.embedding.embed_documents([doc.page_content])[0] for doc,
_ in results]
mmr_selected = maximal_marginal_relevance(np.array([embedding], dtype=np.
float32), embeddings, k=k, lambda_mult=lambda_mult)
docs_and_scores = []
for i in mmr_selected:
docs_and_scores.append(results[i])
return docs_and_scores
|
def max_marginal_relevance_search_with_score_by_vector(self, embedding:
    List[float], *, k: int=4, fetch_k: int=20, lambda_mult: float=0.5,
    filter: Optional[Dict[str, Any]]=None, **kwargs: Any) ->List[Tuple[
    Document, float]]:
    """Return docs and their similarity scores selected using the maximal marginal
    relevance.
    Maximal marginal relevance optimizes for similarity to query AND diversity
    among selected documents.
    Args:
        embedding: Embedding to look up documents similar to.
        k: Number of Documents to return. Defaults to 4.
        fetch_k: Number of Documents to fetch before filtering to
            pass to MMR algorithm.
        lambda_mult: Number between 0 and 1 that determines the degree
            of diversity among the results with 0 corresponding
            to maximum diversity and 1 to minimum diversity.
            Defaults to 0.5.
    Returns:
        List of Documents and similarity scores selected by maximal marginal
        relevance and score for each.
    """
    # No threshold supplied -> accept everything.
    score_threshold = kwargs.pop('score_threshold', MAX_FLOAT)
    # When a metadata filter is active, over-fetch so enough candidates
    # survive the filtering step before MMR re-ranking.
    candidate_count = fetch_k if filter is None else fetch_k * 2
    query_vector = np.array([np.array(embedding).astype(np.float32)]).astype(
        np.float32)
    scores, indices = self.vector_index.query(query_vector, k=
        candidate_count, **kwargs)
    results = self.process_index_results(ids=indices[0], scores=scores[0],
        filter=filter, k=candidate_count, score_threshold=score_threshold)
    # Re-embed the surviving candidates for the MMR diversity computation.
    candidate_embeddings = [self.embedding.embed_documents([doc.
        page_content])[0] for doc, _ in results]
    selected = maximal_marginal_relevance(np.array([embedding], dtype=np.
        float32), candidate_embeddings, k=k, lambda_mult=lambda_mult)
    return [results[i] for i in selected]
|
Return docs and their similarity scores selected using the maximal marginal
relevance.
Maximal marginal relevance optimizes for similarity to query AND diversity
among selected documents.
Args:
embedding: Embedding to look up documents similar to.
k: Number of Documents to return. Defaults to 4.
fetch_k: Number of Documents to fetch before filtering to
pass to MMR algorithm.
lambda_mult: Number between 0 and 1 that determines the degree
of diversity among the results with 0 corresponding
to maximum diversity and 1 to minimum diversity.
Defaults to 0.5.
Returns:
List of Documents and similarity scores selected by maximal marginal
relevance and score for each.
|
add_texts
|
"""Run more texts through the embeddings and add to the vectorstore.
Args:
texts (Iterable[str]): Texts to add to the vectorstore.
metadatas (Optional[List[dict]], optional): Optional list of metadatas.
ids (Optional[List[str]], optional): Optional list of IDs.
batch_size (int): Number of concurrent requests to send to the server.
ttl_seconds (Optional[int], optional): Optional time-to-live
for the added texts.
Returns:
List[str]: List of IDs of the added texts.
"""
_texts = list(texts)
if ids is None:
ids = [uuid.uuid4().hex for _ in _texts]
if metadatas is None:
metadatas = [{} for _ in _texts]
ttl_seconds = ttl_seconds or self.ttl_seconds
embedding_vectors = self.embedding.embed_documents(_texts)
for i in range(0, len(_texts), batch_size):
batch_texts = _texts[i:i + batch_size]
batch_embedding_vectors = embedding_vectors[i:i + batch_size]
batch_ids = ids[i:i + batch_size]
batch_metadatas = metadatas[i:i + batch_size]
futures = [self.table.put_async(text, embedding_vector, text_id,
metadata, ttl_seconds) for text, embedding_vector, text_id,
metadata in zip(batch_texts, batch_embedding_vectors, batch_ids,
batch_metadatas)]
for future in futures:
future.result()
return ids
|
def add_texts(self, texts: Iterable[str], metadatas: Optional[List[dict]]=
    None, ids: Optional[List[str]]=None, batch_size: int=16, ttl_seconds:
    Optional[int]=None, **kwargs: Any) ->List[str]:
    """Run more texts through the embeddings and add to the vectorstore.
    Args:
        texts (Iterable[str]): Texts to add to the vectorstore.
        metadatas (Optional[List[dict]], optional): Optional list of metadatas.
        ids (Optional[List[str]], optional): Optional list of IDs.
        batch_size (int): Number of concurrent requests to send to the server.
        ttl_seconds (Optional[int], optional): Optional time-to-live
            for the added texts.
    Returns:
        List[str]: List of IDs of the added texts.
    """
    text_list = list(texts)
    if ids is None:
        # No IDs supplied: mint a random hex ID per text.
        ids = [uuid.uuid4().hex for _ in text_list]
    if metadatas is None:
        metadatas = [{} for _ in text_list]
    ttl_seconds = ttl_seconds or self.ttl_seconds
    vectors = self.embedding.embed_documents(text_list)
    for start in range(0, len(text_list), batch_size):
        stop = start + batch_size
        rows = zip(text_list[start:stop], vectors[start:stop], ids[start:
            stop], metadatas[start:stop])
        # Fire off the whole batch concurrently, then wait for every
        # write to complete before starting the next batch.
        pending = [self.table.put_async(text, vector, text_id, metadata,
            ttl_seconds) for text, vector, text_id, metadata in rows]
        for future in pending:
            future.result()
    return ids
|
Run more texts through the embeddings and add to the vectorstore.
Args:
texts (Iterable[str]): Texts to add to the vectorstore.
metadatas (Optional[List[dict]], optional): Optional list of metadatas.
ids (Optional[List[str]], optional): Optional list of IDs.
batch_size (int): Number of concurrent requests to send to the server.
ttl_seconds (Optional[int], optional): Optional time-to-live
for the added texts.
Returns:
List[str]: List of IDs of the added texts.
|
lc_secrets
|
"""A map of constructor argument names to secret ids.
For example,
{"openai_api_key": "OPENAI_API_KEY"}
"""
return dict()
|
@property
def lc_secrets(self) ->Dict[str, str]:
    """Map of constructor argument names to secret ids.

    For example, {"openai_api_key": "OPENAI_API_KEY"}. This base
    implementation exposes no secrets.
    """
    return {}
|
A map of constructor argument names to secret ids.
For example,
{"openai_api_key": "OPENAI_API_KEY"}
|
json
|
return self.json_data
|
def json(self) ->Dict:
    """Return the stored `json_data` mapping.

    NOTE(review): presumably a test double mimicking a response object's
    `.json()` method — confirm against the callers.
    """
    payload = self.json_data
    return payload
| null |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.