method_name
stringlengths 1
78
| method_body
stringlengths 3
9.66k
| full_code
stringlengths 31
10.7k
| docstring
stringlengths 4
4.74k
⌀ |
|---|---|---|---|
get
|
return self.store.get(key, default)
|
def get(self, key: str, default: Optional[str]=None) ->Optional[str]:
return self.store.get(key, default)
| null |
_run
|
return 'foo'
|
def _run(self, some_arg: str, run_manager: Optional[
CallbackManagerForToolRun]=None, **kwargs: Any) ->str:
return 'foo'
| null |
_initialize_table
|
"""Validates or creates the BigQuery table."""
from google.cloud import bigquery
table_ref = bigquery.TableReference.from_string(self._full_table_id)
table = self.bq_client.create_table(table_ref, exists_ok=True)
changed_schema = False
schema = table.schema.copy()
columns = {c.name: c for c in schema}
if self.doc_id_field not in columns:
changed_schema = True
schema.append(bigquery.SchemaField(name=self.doc_id_field, field_type=
'STRING'))
elif columns[self.doc_id_field].field_type != 'STRING' or columns[self.
doc_id_field].mode == 'REPEATED':
raise ValueError(f'Column {self.doc_id_field} must be of STRING type')
if self.metadata_field not in columns:
changed_schema = True
schema.append(bigquery.SchemaField(name=self.metadata_field, field_type
='JSON'))
elif columns[self.metadata_field].field_type not in ['JSON', 'STRING'
] or columns[self.metadata_field].mode == 'REPEATED':
raise ValueError(
f'Column {self.metadata_field} must be of STRING or JSON type')
if self.content_field not in columns:
changed_schema = True
schema.append(bigquery.SchemaField(name=self.content_field, field_type=
'STRING'))
elif columns[self.content_field].field_type != 'STRING' or columns[self.
content_field].mode == 'REPEATED':
raise ValueError(f'Column {self.content_field} must be of STRING type')
if self.text_embedding_field not in columns:
changed_schema = True
schema.append(bigquery.SchemaField(name=self.text_embedding_field,
field_type='FLOAT64', mode='REPEATED'))
elif columns[self.text_embedding_field].field_type not in ('FLOAT', 'FLOAT64'
) or columns[self.text_embedding_field].mode != 'REPEATED':
raise ValueError(
f'Column {self.text_embedding_field} must be of ARRAY<FLOAT64> type')
if changed_schema:
self._logger.debug('Updated table `%s` schema.', self.full_table_id)
table.schema = schema
table = self.bq_client.update_table(table, fields=['schema'])
return table
|
def _initialize_table(self) ->Any:
"""Validates or creates the BigQuery table."""
from google.cloud import bigquery
table_ref = bigquery.TableReference.from_string(self._full_table_id)
table = self.bq_client.create_table(table_ref, exists_ok=True)
changed_schema = False
schema = table.schema.copy()
columns = {c.name: c for c in schema}
if self.doc_id_field not in columns:
changed_schema = True
schema.append(bigquery.SchemaField(name=self.doc_id_field,
field_type='STRING'))
elif columns[self.doc_id_field].field_type != 'STRING' or columns[self.
doc_id_field].mode == 'REPEATED':
raise ValueError(f'Column {self.doc_id_field} must be of STRING type')
if self.metadata_field not in columns:
changed_schema = True
schema.append(bigquery.SchemaField(name=self.metadata_field,
field_type='JSON'))
elif columns[self.metadata_field].field_type not in ['JSON', 'STRING'
] or columns[self.metadata_field].mode == 'REPEATED':
raise ValueError(
f'Column {self.metadata_field} must be of STRING or JSON type')
if self.content_field not in columns:
changed_schema = True
schema.append(bigquery.SchemaField(name=self.content_field,
field_type='STRING'))
elif columns[self.content_field].field_type != 'STRING' or columns[self
.content_field].mode == 'REPEATED':
raise ValueError(f'Column {self.content_field} must be of STRING type')
if self.text_embedding_field not in columns:
changed_schema = True
schema.append(bigquery.SchemaField(name=self.text_embedding_field,
field_type='FLOAT64', mode='REPEATED'))
elif columns[self.text_embedding_field].field_type not in ('FLOAT',
'FLOAT64') or columns[self.text_embedding_field].mode != 'REPEATED':
raise ValueError(
f'Column {self.text_embedding_field} must be of ARRAY<FLOAT64> type'
)
if changed_schema:
self._logger.debug('Updated table `%s` schema.', self.full_table_id)
table.schema = schema
table = self.bq_client.update_table(table, fields=['schema'])
return table
|
Validates or creates the BigQuery table.
|
on_agent_finish
|
"""Run when agent ends running."""
self.metrics['step'] += 1
self.metrics['agent_ends'] += 1
self.metrics['ends'] += 1
agent_ends = self.metrics['agent_ends']
resp: Dict[str, Any] = {}
resp.update({'action': 'on_agent_finish', 'output': finish.return_values[
'output'], 'log': finish.log})
resp.update(self.metrics)
self.mlflg.metrics(self.metrics, step=self.metrics['step'])
self.records['on_agent_finish_records'].append(resp)
self.records['action_records'].append(resp)
self.mlflg.jsonf(resp, f'agent_finish_{agent_ends}')
|
def on_agent_finish(self, finish: AgentFinish, **kwargs: Any) ->None:
"""Run when agent ends running."""
self.metrics['step'] += 1
self.metrics['agent_ends'] += 1
self.metrics['ends'] += 1
agent_ends = self.metrics['agent_ends']
resp: Dict[str, Any] = {}
resp.update({'action': 'on_agent_finish', 'output': finish.
return_values['output'], 'log': finish.log})
resp.update(self.metrics)
self.mlflg.metrics(self.metrics, step=self.metrics['step'])
self.records['on_agent_finish_records'].append(resp)
self.records['action_records'].append(resp)
self.mlflg.jsonf(resp, f'agent_finish_{agent_ends}')
|
Run when agent ends running.
|
test_valid_formatting
|
"""Test formatting works as expected."""
template = 'This is a {foo} test.'
output = formatter.format(template, foo='good')
expected_output = 'This is a good test.'
assert output == expected_output
|
def test_valid_formatting() ->None:
"""Test formatting works as expected."""
template = 'This is a {foo} test.'
output = formatter.format(template, foo='good')
expected_output = 'This is a good test.'
assert output == expected_output
|
Test formatting works as expected.
|
stream
|
if type(self)._stream == BaseLLM._stream:
yield self.invoke(input, config=config, stop=stop, **kwargs)
else:
prompt = self._convert_input(input).to_string()
config = ensure_config(config)
params = self.dict()
params['stop'] = stop
params = {**params, **kwargs}
options = {'stop': stop}
callback_manager = CallbackManager.configure(config.get('callbacks'),
self.callbacks, self.verbose, config.get('tags'), self.tags, config
.get('metadata'), self.metadata)
run_manager, = callback_manager.on_llm_start(dumpd(self), [prompt],
invocation_params=params, options=options, name=config.get(
'run_name'), batch_size=1)
generation: Optional[GenerationChunk] = None
try:
for chunk in self._stream(prompt, stop=stop, run_manager=
run_manager, **kwargs):
yield chunk.text
if generation is None:
generation = chunk
else:
generation += chunk
assert generation is not None
except BaseException as e:
run_manager.on_llm_error(e, response=LLMResult(generations=[[
generation]] if generation else []))
raise e
else:
run_manager.on_llm_end(LLMResult(generations=[[generation]]))
|
def stream(self, input: LanguageModelInput, config: Optional[RunnableConfig
]=None, *, stop: Optional[List[str]]=None, **kwargs: Any) ->Iterator[str]:
if type(self)._stream == BaseLLM._stream:
yield self.invoke(input, config=config, stop=stop, **kwargs)
else:
prompt = self._convert_input(input).to_string()
config = ensure_config(config)
params = self.dict()
params['stop'] = stop
params = {**params, **kwargs}
options = {'stop': stop}
callback_manager = CallbackManager.configure(config.get('callbacks'
), self.callbacks, self.verbose, config.get('tags'), self.tags,
config.get('metadata'), self.metadata)
run_manager, = callback_manager.on_llm_start(dumpd(self), [prompt],
invocation_params=params, options=options, name=config.get(
'run_name'), batch_size=1)
generation: Optional[GenerationChunk] = None
try:
for chunk in self._stream(prompt, stop=stop, run_manager=
run_manager, **kwargs):
yield chunk.text
if generation is None:
generation = chunk
else:
generation += chunk
assert generation is not None
except BaseException as e:
run_manager.on_llm_error(e, response=LLMResult(generations=[[
generation]] if generation else []))
raise e
else:
run_manager.on_llm_end(LLMResult(generations=[[generation]]))
| null |
get_tool_label
|
"""Return the label for an LLMThought that has an associated
tool.
Parameters
----------
tool
The tool's ToolRecord
is_complete
True if the thought is complete; False if the thought
is still receiving input.
Returns
-------
The markdown label for the thought's container.
"""
input = tool.input_str
name = tool.name
emoji = CHECKMARK_EMOJI if is_complete else THINKING_EMOJI
if name == '_Exception':
emoji = EXCEPTION_EMOJI
name = 'Parsing error'
idx = min([60, len(input)])
input = input[0:idx]
if len(tool.input_str) > idx:
input = input + '...'
input = input.replace('\n', ' ')
label = f'{emoji} **{name}:** {input}'
return label
|
def get_tool_label(self, tool: ToolRecord, is_complete: bool) ->str:
"""Return the label for an LLMThought that has an associated
tool.
Parameters
----------
tool
The tool's ToolRecord
is_complete
True if the thought is complete; False if the thought
is still receiving input.
Returns
-------
The markdown label for the thought's container.
"""
input = tool.input_str
name = tool.name
emoji = CHECKMARK_EMOJI if is_complete else THINKING_EMOJI
if name == '_Exception':
emoji = EXCEPTION_EMOJI
name = 'Parsing error'
idx = min([60, len(input)])
input = input[0:idx]
if len(tool.input_str) > idx:
input = input + '...'
input = input.replace('\n', ' ')
label = f'{emoji} **{name}:** {input}'
return label
|
Return the label for an LLMThought that has an associated
tool.
Parameters
----------
tool
The tool's ToolRecord
is_complete
True if the thought is complete; False if the thought
is still receiving input.
Returns
-------
The markdown label for the thought's container.
|
_get_llm_cache
|
index_name = self._index_name(llm_string)
if index_name in self._cache_dict:
return self._cache_dict[index_name]
try:
self._cache_dict[index_name] = RedisVectorstore.from_existing_index(
embedding=self.embedding, index_name=index_name, redis_url=self.
redis_url, schema=cast(Dict, self.DEFAULT_SCHEMA))
except ValueError:
redis = RedisVectorstore(embedding=self.embedding, index_name=
index_name, redis_url=self.redis_url, index_schema=cast(Dict, self.
DEFAULT_SCHEMA))
_embedding = self.embedding.embed_query(text='test')
redis._create_index_if_not_exist(dim=len(_embedding))
self._cache_dict[index_name] = redis
return self._cache_dict[index_name]
|
def _get_llm_cache(self, llm_string: str) ->RedisVectorstore:
index_name = self._index_name(llm_string)
if index_name in self._cache_dict:
return self._cache_dict[index_name]
try:
self._cache_dict[index_name] = RedisVectorstore.from_existing_index(
embedding=self.embedding, index_name=index_name, redis_url=self
.redis_url, schema=cast(Dict, self.DEFAULT_SCHEMA))
except ValueError:
redis = RedisVectorstore(embedding=self.embedding, index_name=
index_name, redis_url=self.redis_url, index_schema=cast(Dict,
self.DEFAULT_SCHEMA))
_embedding = self.embedding.embed_query(text='test')
redis._create_index_if_not_exist(dim=len(_embedding))
self._cache_dict[index_name] = redis
return self._cache_dict[index_name]
| null |
_stream
|
params = self._format_params(messages=messages, stop=stop, **kwargs)
with self._client.beta.messages.stream(**params) as stream:
for text in stream.text_stream:
yield ChatGenerationChunk(message=AIMessageChunk(content=text))
|
def _stream(self, messages: List[BaseMessage], stop: Optional[List[str]]=
None, run_manager: Optional[CallbackManagerForLLMRun]=None, **kwargs: Any
) ->Iterator[ChatGenerationChunk]:
params = self._format_params(messages=messages, stop=stop, **kwargs)
with self._client.beta.messages.stream(**params) as stream:
for text in stream.text_stream:
yield ChatGenerationChunk(message=AIMessageChunk(content=text))
| null |
validate_environment
|
"""Validate that api key and python package exists in environment."""
steamship_api_key = get_from_dict_or_env(values, 'steamship_api_key',
'STEAMSHIP_API_KEY')
try:
from steamship import Steamship
except ImportError:
raise ImportError(
'steamship is not installed. Please install it with `pip install steamship`'
)
steamship = Steamship(api_key=steamship_api_key)
values['steamship'] = steamship
if 'steamship_api_key' in values:
del values['steamship_api_key']
return values
|
@root_validator(pre=True)
def validate_environment(cls, values: Dict) ->Dict:
"""Validate that api key and python package exists in environment."""
steamship_api_key = get_from_dict_or_env(values, 'steamship_api_key',
'STEAMSHIP_API_KEY')
try:
from steamship import Steamship
except ImportError:
raise ImportError(
'steamship is not installed. Please install it with `pip install steamship`'
)
steamship = Steamship(api_key=steamship_api_key)
values['steamship'] = steamship
if 'steamship_api_key' in values:
del values['steamship_api_key']
return values
|
Validate that api key and python package exist in environment.
|
_get_tags
|
"""Get combined tags for a run."""
tags = set(run.tags or [])
tags.update(self.tags or [])
return list(tags)
|
def _get_tags(self, run: Run) ->List[str]:
"""Get combined tags for a run."""
tags = set(run.tags or [])
tags.update(self.tags or [])
return list(tags)
|
Get combined tags for a run.
|
ignore_chain
|
"""Whether to ignore chain callbacks."""
return self.ignore_chain_
|
@property
def ignore_chain(self) ->bool:
"""Whether to ignore chain callbacks."""
return self.ignore_chain_
|
Whether to ignore chain callbacks.
|
prompt_length
|
"""Return the prompt length given the documents passed in.
This can be used by a caller to determine whether passing in a list
of documents would exceed a certain prompt length. This is useful when
trying to ensure that the size of a prompt remains below a certain
context limit.
Args:
docs: List[Document], a list of documents to use to calculate the
total prompt length.
Returns:
Returns None if the method does not depend on the prompt length,
otherwise the length of the prompt in tokens.
"""
return None
|
def prompt_length(self, docs: List[Document], **kwargs: Any) ->Optional[int]:
"""Return the prompt length given the documents passed in.
This can be used by a caller to determine whether passing in a list
    of documents would exceed a certain prompt length. This is useful when
trying to ensure that the size of a prompt remains below a certain
context limit.
Args:
docs: List[Document], a list of documents to use to calculate the
total prompt length.
Returns:
Returns None if the method does not depend on the prompt length,
otherwise the length of the prompt in tokens.
"""
return None
|
Return the prompt length given the documents passed in.
This can be used by a caller to determine whether passing in a list
of documents would exceed a certain prompt length. This is useful when
trying to ensure that the size of a prompt remains below a certain
context limit.
Args:
docs: List[Document], a list of documents to use to calculate the
total prompt length.
Returns:
Returns None if the method does not depend on the prompt length,
otherwise the length of the prompt in tokens.
|
test_load_returns_no_result
|
"""Test that returns no docs"""
docs = api_client.load_docs('1605.08386WWW')
assert len(docs) == 0
|
def test_load_returns_no_result(api_client: PubMedAPIWrapper) ->None:
"""Test that returns no docs"""
docs = api_client.load_docs('1605.08386WWW')
assert len(docs) == 0
|
Test that returns no docs
|
_default_parsing_function
|
return str(content.get_text())
|
def _default_parsing_function(content: Any) ->str:
return str(content.get_text())
| null |
test_initialization
|
loader = GoogleSpeechToTextLoader(project_id='test_project_id', file_path=
'./testfile.mp3')
assert loader.project_id == 'test_project_id'
assert loader.file_path == './testfile.mp3'
assert loader.location == 'us-central1'
assert loader.recognizer_id == '_'
|
@pytest.mark.requires('google.api_core')
def test_initialization() ->None:
loader = GoogleSpeechToTextLoader(project_id='test_project_id',
file_path='./testfile.mp3')
assert loader.project_id == 'test_project_id'
assert loader.file_path == './testfile.mp3'
assert loader.location == 'us-central1'
assert loader.recognizer_id == '_'
| null |
create_kv_docstore
|
"""Create a store for langchain Document objects from a bytes store.
This store does run time type checking to ensure that the values are
Document objects.
Args:
store: A bytes store to use as the underlying store.
key_encoder: A function to encode keys; if None uses identity function.
Returns:
A key-value store for documents.
"""
return EncoderBackedStore(store, key_encoder or _identity,
_dump_document_as_bytes, _load_document_from_bytes)
|
def create_kv_docstore(store: ByteStore, *, key_encoder: Optional[Callable[
[str], str]]=None) ->BaseStore[str, Document]:
"""Create a store for langchain Document objects from a bytes store.
This store does run time type checking to ensure that the values are
Document objects.
Args:
store: A bytes store to use as the underlying store.
key_encoder: A function to encode keys; if None uses identity function.
Returns:
A key-value store for documents.
"""
return EncoderBackedStore(store, key_encoder or _identity,
_dump_document_as_bytes, _load_document_from_bytes)
|
Create a store for langchain Document objects from a bytes store.
This store does run time type checking to ensure that the values are
Document objects.
Args:
store: A bytes store to use as the underlying store.
key_encoder: A function to encode keys; if None uses identity function.
Returns:
A key-value store for documents.
|
_import_ddg_search_tool_DuckDuckGoSearchResults
|
from langchain_community.tools.ddg_search.tool import DuckDuckGoSearchResults
return DuckDuckGoSearchResults
|
def _import_ddg_search_tool_DuckDuckGoSearchResults() ->Any:
from langchain_community.tools.ddg_search.tool import DuckDuckGoSearchResults
return DuckDuckGoSearchResults
| null |
_llm_type
|
"""Return type of llm."""
return 'deepinfra'
|
@property
def _llm_type(self) ->str:
"""Return type of llm."""
return 'deepinfra'
|
Return type of llm.
|
teardown_class
|
collection = prepare_collection()
collection.delete_many({})
|
@classmethod
def teardown_class(cls) ->None:
collection = prepare_collection()
collection.delete_many({})
| null |
test_parse_disallowed_comparator
|
parser = get_parser(allowed_comparators=[Comparator.EQ])
with pytest.raises(ValueError):
parser.parse_folder('gt("a", 2)')
|
def test_parse_disallowed_comparator() ->None:
parser = get_parser(allowed_comparators=[Comparator.EQ])
with pytest.raises(ValueError):
parser.parse_folder('gt("a", 2)')
| null |
validate_translator
|
"""Validate translator."""
if 'structured_query_translator' not in values:
values['structured_query_translator'] = _get_builtin_translator(values[
'vectorstore'])
return values
|
@root_validator(pre=True)
def validate_translator(cls, values: Dict) ->Dict:
"""Validate translator."""
if 'structured_query_translator' not in values:
values['structured_query_translator'] = _get_builtin_translator(values
['vectorstore'])
return values
|
Validate translator.
|
_get_link_ratio
|
links = section.find_all('a')
total_text = ''.join(str(s) for s in section.stripped_strings)
if len(total_text) == 0:
return 0
link_text = ''.join(str(string.string.strip()) for link in links for string in
link.strings if string)
return len(link_text) / len(total_text)
|
def _get_link_ratio(section: Tag) ->float:
links = section.find_all('a')
total_text = ''.join(str(s) for s in section.stripped_strings)
if len(total_text) == 0:
return 0
link_text = ''.join(str(string.string.strip()) for link in links for
string in link.strings if string)
return len(link_text) / len(total_text)
| null |
retrieve_existing_index
|
"""
Check if the vector index exists in the Neo4j database
and returns its embedding dimension.
This method queries the Neo4j database for existing indexes
and attempts to retrieve the dimension of the vector index
with the specified name. If the index exists, its dimension is returned.
If the index doesn't exist, `None` is returned.
Returns:
int or None: The embedding dimension of the existing index if found.
"""
index_information = self.query(
"SHOW INDEXES YIELD name, type, labelsOrTypes, properties, options WHERE type = 'VECTOR' AND (name = $index_name OR (labelsOrTypes[0] = $node_label AND properties[0] = $embedding_node_property)) RETURN name, labelsOrTypes, properties, options "
, params={'index_name': self.index_name, 'node_label': self.node_label,
'embedding_node_property': self.embedding_node_property})
index_information = sort_by_index_name(index_information, self.index_name)
try:
self.index_name = index_information[0]['name']
self.node_label = index_information[0]['labelsOrTypes'][0]
self.embedding_node_property = index_information[0]['properties'][0]
embedding_dimension = index_information[0]['options']['indexConfig'][
'vector.dimensions']
return embedding_dimension
except IndexError:
return None
|
def retrieve_existing_index(self) ->Optional[int]:
"""
Check if the vector index exists in the Neo4j database
and returns its embedding dimension.
This method queries the Neo4j database for existing indexes
and attempts to retrieve the dimension of the vector index
with the specified name. If the index exists, its dimension is returned.
If the index doesn't exist, `None` is returned.
Returns:
int or None: The embedding dimension of the existing index if found.
"""
index_information = self.query(
"SHOW INDEXES YIELD name, type, labelsOrTypes, properties, options WHERE type = 'VECTOR' AND (name = $index_name OR (labelsOrTypes[0] = $node_label AND properties[0] = $embedding_node_property)) RETURN name, labelsOrTypes, properties, options "
, params={'index_name': self.index_name, 'node_label': self.
node_label, 'embedding_node_property': self.embedding_node_property})
index_information = sort_by_index_name(index_information, self.index_name)
try:
self.index_name = index_information[0]['name']
self.node_label = index_information[0]['labelsOrTypes'][0]
self.embedding_node_property = index_information[0]['properties'][0]
embedding_dimension = index_information[0]['options']['indexConfig'][
'vector.dimensions']
return embedding_dimension
except IndexError:
return None
|
Check if the vector index exists in the Neo4j database
and returns its embedding dimension.
This method queries the Neo4j database for existing indexes
and attempts to retrieve the dimension of the vector index
with the specified name. If the index exists, its dimension is returned.
If the index doesn't exist, `None` is returned.
Returns:
int or None: The embedding dimension of the existing index if found.
|
__init__
|
self.file_path = file_path if isinstance(file_path, str) else str(file_path)
self.encoding = encoding
self.namespaces = namespaces
self.skip_redirects = skip_redirects
self.stop_on_error = stop_on_error
|
def __init__(self, file_path: Union[str, Path], encoding: Optional[str]=
'utf8', namespaces: Optional[Sequence[int]]=None, skip_redirects:
Optional[bool]=False, stop_on_error: Optional[bool]=True):
self.file_path = file_path if isinstance(file_path, str) else str(file_path
)
self.encoding = encoding
self.namespaces = namespaces
self.skip_redirects = skip_redirects
self.stop_on_error = stop_on_error
| null |
embed_documents
|
"""Compute doc embeddings using Cloudflare Workers AI.
Args:
texts: The list of texts to embed.
Returns:
List of embeddings, one for each text.
"""
if self.strip_new_lines:
texts = [text.replace('\n', ' ') for text in texts]
batches = [texts[i:i + self.batch_size] for i in range(0, len(texts), self.
batch_size)]
embeddings = []
for batch in batches:
response = requests.post(
f'{self.api_base_url}/{self.account_id}/ai/run/{self.model_name}',
headers=self.headers, json={'text': batch})
embeddings.extend(response.json()['result']['data'])
return embeddings
|
def embed_documents(self, texts: List[str]) ->List[List[float]]:
"""Compute doc embeddings using Cloudflare Workers AI.
Args:
texts: The list of texts to embed.
Returns:
List of embeddings, one for each text.
"""
if self.strip_new_lines:
texts = [text.replace('\n', ' ') for text in texts]
batches = [texts[i:i + self.batch_size] for i in range(0, len(texts),
self.batch_size)]
embeddings = []
for batch in batches:
response = requests.post(
f'{self.api_base_url}/{self.account_id}/ai/run/{self.model_name}',
headers=self.headers, json={'text': batch})
embeddings.extend(response.json()['result']['data'])
return embeddings
|
Compute doc embeddings using Cloudflare Workers AI.
Args:
texts: The list of texts to embed.
Returns:
List of embeddings, one for each text.
|
_import_elastic_knn_search
|
from langchain_community.vectorstores.elastic_vector_search import ElasticKnnSearch
return ElasticKnnSearch
|
def _import_elastic_knn_search() ->Any:
from langchain_community.vectorstores.elastic_vector_search import ElasticKnnSearch
return ElasticKnnSearch
| null |
_import_cohere
|
from langchain_community.llms.cohere import Cohere
return Cohere
|
def _import_cohere() ->Any:
from langchain_community.llms.cohere import Cohere
return Cohere
| null |
pending
|
return any(task.pending() for task in self.tasks)
|
def pending(self) ->bool:
return any(task.pending() for task in self.tasks)
| null |
_prepare
|
...
|
@abstractmethod
def _prepare(self, config: Optional[RunnableConfig]=None) ->Tuple[Runnable[
Input, Output], RunnableConfig]:
...
| null |
load
|
"""Load and return all documents."""
return list(self.lazy_load())
|
def load(self) ->List[Document]:
"""Load and return all documents."""
return list(self.lazy_load())
|
Load and return all documents.
|
weaviate_url
|
"""Return the weaviate url."""
from weaviate import Client
url = 'http://localhost:8080'
yield url
client = Client(url)
client.schema.delete_all()
|
@pytest.fixture(scope='class', autouse=True)
def weaviate_url(self) ->Union[str, Generator[str, None, None]]:
"""Return the weaviate url."""
from weaviate import Client
url = 'http://localhost:8080'
yield url
client = Client(url)
client.schema.delete_all()
|
Return the weaviate url.
|
delete_triple
|
"""Delete a triple from the graph."""
if self._graph.has_edge(knowledge_triple.subject, knowledge_triple.object_):
self._graph.remove_edge(knowledge_triple.subject, knowledge_triple.object_)
|
def delete_triple(self, knowledge_triple: KnowledgeTriple) ->None:
"""Delete a triple from the graph."""
if self._graph.has_edge(knowledge_triple.subject, knowledge_triple.object_
):
self._graph.remove_edge(knowledge_triple.subject, knowledge_triple.
object_)
|
Delete a triple from the graph.
|
get_pseudoanonymizer_mapping
|
try:
from faker import Faker
except ImportError as e:
raise ImportError(
'Could not import faker, please install it with `pip install Faker`.'
) from e
fake = Faker()
fake.seed_instance(seed)
return {'PERSON': lambda _: fake.name(), 'EMAIL_ADDRESS': lambda _: fake.
email(), 'PHONE_NUMBER': lambda _: fake.phone_number(), 'IBAN_CODE': lambda
_: fake.iban(), 'CREDIT_CARD': lambda _: fake.credit_card_number(),
'CRYPTO': lambda _: 'bc1' + ''.join(fake.random_choices(string.
ascii_lowercase + string.digits, length=26)), 'IP_ADDRESS': lambda _:
fake.ipv4_public(), 'LOCATION': lambda _: fake.city(), 'DATE_TIME': lambda
_: fake.date(), 'NRP': lambda _: str(fake.random_number(digits=8,
fix_len=True)), 'MEDICAL_LICENSE': lambda _: fake.bothify(text=
'??######').upper(), 'URL': lambda _: fake.url(), 'US_BANK_NUMBER': lambda
_: fake.bban(), 'US_DRIVER_LICENSE': lambda _: str(fake.random_number(
digits=9, fix_len=True)), 'US_ITIN': lambda _: fake.bothify(text=
'9##-7#-####'), 'US_PASSPORT': lambda _: fake.bothify(text='#####??').
upper(), 'US_SSN': lambda _: fake.ssn(), 'UK_NHS': lambda _: str(fake.
random_number(digits=10, fix_len=True)), 'ES_NIF': lambda _: fake.
bothify(text='########?').upper(), 'IT_FISCAL_CODE': lambda _: fake.
bothify(text='??????##?##?###?').upper(), 'IT_DRIVER_LICENSE': lambda _:
fake.bothify(text='?A#######?').upper(), 'IT_VAT_CODE': lambda _: fake.
bothify(text='IT???????????'), 'IT_PASSPORT': lambda _: str(fake.
random_number(digits=9, fix_len=True)), 'IT_IDENTITY_CARD': lambda _:
lambda _: str(fake.random_number(digits=7, fix_len=True)),
'SG_NRIC_FIN': lambda _: fake.bothify(text='????####?').upper(),
'AU_ABN': lambda _: str(fake.random_number(digits=11, fix_len=True)),
'AU_ACN': lambda _: str(fake.random_number(digits=9, fix_len=True)),
'AU_TFN': lambda _: str(fake.random_number(digits=9, fix_len=True)),
'AU_MEDICARE': lambda _: str(fake.random_number(digits=10, fix_len=True))}
|
def get_pseudoanonymizer_mapping(seed: Optional[int]=None) ->Dict[str, Callable
]:
try:
from faker import Faker
except ImportError as e:
raise ImportError(
'Could not import faker, please install it with `pip install Faker`.'
) from e
fake = Faker()
fake.seed_instance(seed)
return {'PERSON': lambda _: fake.name(), 'EMAIL_ADDRESS': lambda _:
fake.email(), 'PHONE_NUMBER': lambda _: fake.phone_number(),
'IBAN_CODE': lambda _: fake.iban(), 'CREDIT_CARD': lambda _: fake.
credit_card_number(), 'CRYPTO': lambda _: 'bc1' + ''.join(fake.
random_choices(string.ascii_lowercase + string.digits, length=26)),
'IP_ADDRESS': lambda _: fake.ipv4_public(), 'LOCATION': lambda _:
fake.city(), 'DATE_TIME': lambda _: fake.date(), 'NRP': lambda _:
str(fake.random_number(digits=8, fix_len=True)), 'MEDICAL_LICENSE':
lambda _: fake.bothify(text='??######').upper(), 'URL': lambda _:
fake.url(), 'US_BANK_NUMBER': lambda _: fake.bban(),
'US_DRIVER_LICENSE': lambda _: str(fake.random_number(digits=9,
fix_len=True)), 'US_ITIN': lambda _: fake.bothify(text=
'9##-7#-####'), 'US_PASSPORT': lambda _: fake.bothify(text=
'#####??').upper(), 'US_SSN': lambda _: fake.ssn(), 'UK_NHS': lambda
_: str(fake.random_number(digits=10, fix_len=True)), 'ES_NIF': lambda
_: fake.bothify(text='########?').upper(), 'IT_FISCAL_CODE': lambda
_: fake.bothify(text='??????##?##?###?').upper(),
'IT_DRIVER_LICENSE': lambda _: fake.bothify(text='?A#######?').
upper(), 'IT_VAT_CODE': lambda _: fake.bothify(text='IT???????????'
), 'IT_PASSPORT': lambda _: str(fake.random_number(digits=9,
fix_len=True)), 'IT_IDENTITY_CARD': lambda _: lambda _: str(fake.
random_number(digits=7, fix_len=True)), 'SG_NRIC_FIN': lambda _:
fake.bothify(text='????####?').upper(), 'AU_ABN': lambda _: str(
fake.random_number(digits=11, fix_len=True)), 'AU_ACN': lambda _:
str(fake.random_number(digits=9, fix_len=True)), 'AU_TFN': lambda _:
str(fake.random_number(digits=9, fix_len=True)), 'AU_MEDICARE': lambda
_: str(fake.random_number(digits=10, fix_len=True))}
| null |
add_content_field
|
if self.text is None:
self.text = []
for field in self.text:
if field.name == self.content_key:
return
self.text.append(TextFieldSchema(name=self.content_key))
|
def add_content_field(self) ->None:
if self.text is None:
self.text = []
for field in self.text:
if field.name == self.content_key:
return
self.text.append(TextFieldSchema(name=self.content_key))
| null |
test_bad_action_input_line
|
"""Test handling when no action input found."""
llm_output = """Thought: I need to search for NBA
Action: Search
Thought: NBA"""
with pytest.raises(OutputParserException) as e_info:
get_action_and_input(llm_output)
assert e_info.value.observation is not None
|
def test_bad_action_input_line() ->None:
"""Test handling when no action input found."""
llm_output = (
'Thought: I need to search for NBA\nAction: Search\nThought: NBA')
with pytest.raises(OutputParserException) as e_info:
get_action_and_input(llm_output)
assert e_info.value.observation is not None
|
Test handling when no action input found.
|
test_list_output_parser
|
parser = QuestionListOutputParser()
result = parser.parse_folder(text)
assert result.lines == expected
|
# Cases cover 1-3 numbered lines, with and without a trailing newline.
@pytest.mark.parametrize('text,expected', (('1. Line one.\n', [
    '1. Line one.\n']), ('1. Line one.', ['1. Line one.']), (
    """1. Line one.
2. Line two.
""", ['1. Line one.\n', '2. Line two.\n']),
    ("""1. Line one.
2. Line two.""", ['1. Line one.\n', '2. Line two.']),
    ("""1. Line one.
2. Line two.
3. Line three.""", ['1. Line one.\n',
    '2. Line two.\n', '3. Line three.'])))
def test_list_output_parser(text: str, expected: List[str]) ->None:
    """QuestionListOutputParser should split numbered lines as expected."""
    parser = QuestionListOutputParser()
    # NOTE(review): ``parse_folder`` is an unusual name for a text parser
    # method — confirm it is not meant to be ``parse``.
    result = parser.parse_folder(text)
    assert result.lines == expected
| null |
test_cohere_call
|
"""Test valid call to cohere."""
llm = Cohere(max_tokens=10)
output = llm('Say foo:')
assert isinstance(output, str)
|
def test_cohere_call() ->None:
    """Test valid call to cohere."""
    # Small max_tokens keeps the live integration call cheap.
    llm = Cohere(max_tokens=10)
    output = llm('Say foo:')
    assert isinstance(output, str)
|
Test valid call to cohere.
|
similarity_search_by_vector
|
"""Run similarity search on query embedding
Args:
embedding (List[float]): Query embedding
k (int): Number of results to return. Defaults to 4.
Returns:
List of Documents most similar to the query
"""
async def _similarity_search_by_vector() ->List[Document]:
await self.initialize()
return await self.asimilarity_search_by_vector(embedding, k, **kwargs)
return asyncio.run(_similarity_search_by_vector())
|
def similarity_search_by_vector(self, embedding: List[float], k: int=4, **
    kwargs: Any) ->List[Document]:
    """Run similarity search on query embedding
    Args:
        embedding (List[float]): Query embedding
        k (int): Number of results to return. Defaults to 4.
    Returns:
        List of Documents most similar to the query
    """
    # Synchronous facade: initialize the store, then delegate to the
    # async implementation.
    async def _similarity_search_by_vector() ->List[Document]:
        await self.initialize()
        return await self.asimilarity_search_by_vector(embedding, k, **kwargs)
    # NOTE(review): ``asyncio.run`` raises if a loop is already running —
    # confirm this is only ever called from synchronous code.
    return asyncio.run(_similarity_search_by_vector())
|
Run similarity search on query embedding
Args:
embedding (List[float]): Query embedding
k (int): Number of results to return. Defaults to 4.
Returns:
List of Documents most similar to the query
|
test_prompt_from_examples_valid
|
"""Test prompt can be successfully constructed from examples."""
template = """Test Prompt:
Question: who are you?
Answer: foo
Question: what are you?
Answer: bar
Question: {question}
Answer:"""
input_variables = ['question']
example_separator = '\n\n'
prefix = 'Test Prompt:'
suffix = """Question: {question}
Answer:"""
examples = ["""Question: who are you?
Answer: foo""",
"""Question: what are you?
Answer: bar"""]
prompt_from_examples = PromptTemplate.from_examples(examples, suffix,
input_variables, example_separator=example_separator, prefix=prefix)
prompt_from_template = PromptTemplate(input_variables=input_variables,
template=template)
assert prompt_from_examples.template == prompt_from_template.template
assert prompt_from_examples.input_variables == prompt_from_template.input_variables
|
def test_prompt_from_examples_valid() ->None:
    """Test prompt can be successfully constructed from examples."""
    # Expected final template, as assembled from prefix + examples + suffix.
    template = """Test Prompt:
Question: who are you?
Answer: foo
Question: what are you?
Answer: bar
Question: {question}
Answer:"""
    input_variables = ['question']
    example_separator = '\n\n'
    prefix = 'Test Prompt:'
    suffix = 'Question: {question}\nAnswer:'
    examples = ['Question: who are you?\nAnswer: foo',
        'Question: what are you?\nAnswer: bar']
    prompt_from_examples = PromptTemplate.from_examples(examples, suffix,
        input_variables, example_separator=example_separator, prefix=prefix)
    prompt_from_template = PromptTemplate(input_variables=input_variables,
        template=template)
    # Both construction paths must produce the identical prompt.
    assert prompt_from_examples.template == prompt_from_template.template
    assert prompt_from_examples.input_variables == prompt_from_template.input_variables
|
Test prompt can be successfully constructed from examples.
|
__init__
|
"""Initialize with API token and the URLs to scrape"""
self.api_token = api_token
"""Browserless API token."""
self.urls = urls
"""List of URLs to scrape."""
self.text_content = text_content
|
def __init__(self, api_token: str, urls: Union[str, List[str]],
    text_content: bool=True):
    """Initialize with API token and the URLs to scrape.

    Args:
        api_token: Browserless API token.
        urls: A single URL or a list of URLs to scrape.
        text_content: Whether to request text content (True) or raw
            output (False).
    """
    # Attribute docs moved into real comments: the original placed bare
    # string literals after each assignment, which are no-op statements,
    # not docstrings.
    self.api_token = api_token  # Browserless API token.
    self.urls = urls  # URL(s) to scrape.
    self.text_content = text_content
|
Initialize with API token and the URLs to scrape
|
_identifying_params
|
return self._default_params
|
@property
def _identifying_params(self) ->Dict[str, Any]:
    """Parameters identifying this model instance (the default params)."""
    return self._default_params
| null |
lazy_parse
|
"""Lazily parse the blob."""
with blob.as_bytes_io() as file_obj:
poller = self.client.begin_analyze_document(self.api_model, file_obj,
content_type='application/octet-stream', output_content_format=
'markdown' if self.mode == 'markdown' else 'text')
result = poller.result()
if self.mode in ['single', 'markdown']:
yield from self._generate_docs_single(result)
elif self.mode == ['page']:
yield from self._generate_docs_page(result)
else:
yield from self._generate_docs_object(result)
|
def lazy_parse(self, blob: Blob) ->Iterator[Document]:
    """Lazily parse the blob.

    Analyzes the blob's bytes with the configured client and yields
    Documents according to ``self.mode``: ``single``/``markdown`` yield a
    single document, ``page`` yields one document per page, anything else
    yields object-level documents.
    """
    with blob.as_bytes_io() as file_obj:
        poller = self.client.begin_analyze_document(self.api_model,
            file_obj, content_type='application/octet-stream',
            output_content_format='markdown' if self.mode == 'markdown' else
            'text')
        result = poller.result()
        if self.mode in ['single', 'markdown']:
            yield from self._generate_docs_single(result)
        elif self.mode == 'page':
            # BUG FIX: previously compared ``self.mode == ['page']`` — a
            # string against a one-element list, which is always False, so
            # 'page' mode silently fell through to the object branch.
            yield from self._generate_docs_page(result)
        else:
            yield from self._generate_docs_object(result)
|
Lazily parse the blob.
|
_embed_with_retry
|
return embeddings.client.generate_embeddings(*args, **kwargs)
|
@retry_decorator
def _embed_with_retry(*args: Any, **kwargs: Any) ->Any:
    """Call the embeddings client; failures are retried by the decorator."""
    return embeddings.client.generate_embeddings(*args, **kwargs)
| null |
load
|
"""Load text from the url(s) in web_path."""
try:
asyncio.get_running_loop()
with ThreadPoolExecutor(max_workers=1) as executor:
future = executor.submit(asyncio.run, self.fetch_all(self.web_paths))
results = future.result()
except RuntimeError:
results = asyncio.run(self.fetch_all(self.web_paths))
docs = []
for i, text in enumerate(cast(List[str], results)):
soup = self._scrape(self.web_paths[i])
if not soup:
continue
metadata = _build_metadata(soup, self.web_paths[i])
docs.append(Document(page_content=text, metadata=metadata))
return docs
|
def load(self) ->List[Document]:
    """Load text from the url(s) in web_path."""
    try:
        # If an event loop is already running (e.g. inside Jupyter),
        # ``asyncio.run`` would raise, so drive the coroutine from a
        # dedicated worker thread instead.
        asyncio.get_running_loop()
        with ThreadPoolExecutor(max_workers=1) as executor:
            future = executor.submit(asyncio.run, self.fetch_all(self.
                web_paths))
            results = future.result()
    except RuntimeError:
        # No running loop: safe to call asyncio.run directly.
        results = asyncio.run(self.fetch_all(self.web_paths))
    docs = []
    for i, text in enumerate(cast(List[str], results)):
        # Scrape each page again synchronously to build soup-based metadata.
        soup = self._scrape(self.web_paths[i])
        if not soup:
            continue
        metadata = _build_metadata(soup, self.web_paths[i])
        docs.append(Document(page_content=text, metadata=metadata))
    return docs
|
Load text from the url(s) in web_path.
|
_import_faiss
|
from langchain_community.vectorstores.faiss import FAISS
return FAISS
|
def _import_faiss() ->Any:
    """Import and return the FAISS vector store class lazily."""
    from langchain_community.vectorstores.faiss import FAISS
    return FAISS
| null |
test_parse_chat_history_correct
|
from vertexai.language_models import ChatMessage
text_context = (
'My name is Ned. You are my personal assistant. My favorite movies are Lord of the Rings and Hobbit.'
)
context = SystemMessage(content=text_context)
text_question = (
'Hello, could you recommend a good movie for me to watch this evening, please?'
)
question = HumanMessage(content=text_question)
text_answer = (
'Sure, You might enjoy The Lord of the Rings: The Fellowship of the Ring (2001): This is the first movie in the Lord of the Rings trilogy.'
)
answer = AIMessage(content=text_answer)
history = _parse_chat_history([context, question, answer, question, answer])
assert history.context == context.content
assert len(history.history) == 4
assert history.history == [ChatMessage(content=text_question, author='user'
), ChatMessage(content=text_answer, author='bot'), ChatMessage(content=
text_question, author='user'), ChatMessage(content=text_answer, author=
'bot')]
|
def test_parse_chat_history_correct() ->None:
    """System message becomes the context; turns alternate user/bot."""
    from vertexai.language_models import ChatMessage
    text_context = (
        'My name is Ned. You are my personal assistant. My favorite movies are Lord of the Rings and Hobbit.'
        )
    context = SystemMessage(content=text_context)
    text_question = (
        'Hello, could you recommend a good movie for me to watch this evening, please?'
        )
    question = HumanMessage(content=text_question)
    text_answer = (
        'Sure, You might enjoy The Lord of the Rings: The Fellowship of the Ring (2001): This is the first movie in the Lord of the Rings trilogy.'
        )
    answer = AIMessage(content=text_answer)
    history = _parse_chat_history([context, question, answer, question, answer]
        )
    # The SystemMessage is pulled out as context, not counted as a turn.
    assert history.context == context.content
    assert len(history.history) == 4
    assert history.history == [ChatMessage(content=text_question, author=
        'user'), ChatMessage(content=text_answer, author='bot'),
        ChatMessage(content=text_question, author='user'), ChatMessage(
        content=text_answer, author='bot')]
| null |
output_keys
|
"""Expect output key.
:meta private:
"""
return [self.output_key]
|
@property
def output_keys(self) ->List[str]:
    """Expect output key.

    :meta private:
    """
    return [self.output_key]
|
Expect output key.
:meta private:
|
input_iter
|
for token in STREAMED_TOKENS:
yield AIMessageChunk(content='', additional_kwargs={'function_call': {
'arguments': token}})
|
def input_iter(_: Any) ->Iterator[AIMessageChunk]:
    """Yield one AIMessageChunk per streamed token as function-call args."""
    for token in STREAMED_TOKENS:
        yield AIMessageChunk(content='', additional_kwargs={'function_call':
            {'arguments': token}})
| null |
test_hippo_add_extra
|
"""Test end to end construction and MRR search."""
texts = ['foo', 'bar', 'baz']
metadatas = [{'page': i} for i in range(len(texts))]
docsearch = _hippo_from_texts(metadatas=metadatas)
docsearch.add_texts(texts, metadatas)
output = docsearch.similarity_search('foo', k=1)
print(output)
assert len(output) == 1
|
def test_hippo_add_extra() ->None:
    """Test end to end construction and MRR search."""
    texts = ['foo', 'bar', 'baz']
    metadatas = [{'page': i} for i in range(len(texts))]
    docsearch = _hippo_from_texts(metadatas=metadatas)
    # Add the same texts on top of the initially-constructed store.
    docsearch.add_texts(texts, metadatas)
    output = docsearch.similarity_search('foo', k=1)
    # Removed stray debug ``print(output)`` left over from development.
    assert len(output) == 1
|
Test end to end construction and MRR search.
|
load_agent
|
"""Unified method for loading an agent from LangChainHub or local fs.
Args:
path: Path to the agent file.
**kwargs: Additional keyword arguments passed to the agent executor.
Returns:
An agent executor.
"""
valid_suffixes = {'json', 'yaml'}
if (hub_result := try_load_from_hub(path, _load_agent_from_file, 'agents',
valid_suffixes)):
return hub_result
else:
return _load_agent_from_file(path, **kwargs)
|
def load_agent(path: Union[str, Path], **kwargs: Any) ->Union[
    BaseSingleActionAgent, BaseMultiActionAgent]:
    """Unified method for loading an agent from LangChainHub or local fs.

    Args:
        path: Path to the agent file.
        **kwargs: Additional keyword arguments passed to the agent executor.

    Returns:
        An agent executor.
    """
    valid_suffixes = {'json', 'yaml'}
    # Try the hub first; fall back to the local filesystem when the path
    # is not a hub reference.
    if (hub_result := try_load_from_hub(path, _load_agent_from_file,
        'agents', valid_suffixes)):
        return hub_result
    else:
        return _load_agent_from_file(path, **kwargs)
|
Unified method for loading an agent from LangChainHub or local fs.
Args:
path: Path to the agent file.
**kwargs: Additional keyword arguments passed to the agent executor.
Returns:
An agent executor.
|
_import_vectorstore_tool_VectorStoreQAWithSourcesTool
|
from langchain_community.tools.vectorstore.tool import VectorStoreQAWithSourcesTool
return VectorStoreQAWithSourcesTool
|
def _import_vectorstore_tool_VectorStoreQAWithSourcesTool() ->Any:
    """Import and return VectorStoreQAWithSourcesTool lazily."""
    from langchain_community.tools.vectorstore.tool import VectorStoreQAWithSourcesTool
    return VectorStoreQAWithSourcesTool
| null |
load_memory_variables
|
"""Return key-value pairs given the text input to the chain."""
|
@abstractmethod
def load_memory_variables(self, inputs: Dict[str, Any]) ->Dict[str, Any]:
    """Return key-value pairs given the text input to the chain.

    Args:
        inputs: The inputs to the chain.

    Returns:
        A dictionary of key-value pairs.
    """
|
Return key-value pairs given the text input to the chain.
|
add_texts
|
"""Run more texts through the embeddings and add to the vectorstore.
Args:
texts: Iterable of strings to add to the vectorstore.
metadatas: Optional list of metadatas associated with the texts.
ids: Optional list of ids to associate with the texts.
refresh_indices: Whether to refresh the Elasticsearch indices
after adding the texts.
create_index_if_not_exists: Whether to create the Elasticsearch
index if it doesn't already exist.
*bulk_kwargs: Additional arguments to pass to Elasticsearch bulk.
- chunk_size: Optional. Number of texts to add to the
index at a time. Defaults to 500.
Returns:
List of ids from adding the texts into the vectorstore.
"""
if self.embedding is not None:
embeddings = self.embedding.embed_documents(list(texts))
else:
embeddings = None
return self.__add(texts, embeddings, metadatas=metadatas, ids=ids,
refresh_indices=refresh_indices, create_index_if_not_exists=
create_index_if_not_exists, bulk_kwargs=bulk_kwargs, kwargs=kwargs)
|
def add_texts(self, texts: Iterable[str], metadatas: Optional[List[Dict[Any,
    Any]]]=None, ids: Optional[List[str]]=None, refresh_indices: bool=True,
    create_index_if_not_exists: bool=True, bulk_kwargs: Optional[Dict]=None,
    **kwargs: Any) ->List[str]:
    """Run more texts through the embeddings and add to the vectorstore.

    Args:
        texts: Iterable of strings to add to the vectorstore.
        metadatas: Optional list of metadatas associated with the texts.
        ids: Optional list of ids to associate with the texts.
        refresh_indices: Whether to refresh the Elasticsearch indices
            after adding the texts.
        create_index_if_not_exists: Whether to create the Elasticsearch
            index if it doesn't already exist.
        bulk_kwargs: Additional arguments to pass to Elasticsearch bulk.
            - chunk_size: Optional. Number of texts to add to the
                index at a time. Defaults to 500.

    Returns:
        List of ids from adding the texts into the vectorstore.
    """
    if self.embedding is not None:
        embeddings = self.embedding.embed_documents(list(texts))
    else:
        # No embedding object configured — presumably embeddings are
        # produced downstream; verify against ``__add``.
        embeddings = None
    # NOTE(review): extra kwargs are forwarded as a single ``kwargs=``
    # named argument — confirm ``__add`` expects a parameter literally
    # named ``kwargs`` rather than ``**kwargs``.
    return self.__add(texts, embeddings, metadatas=metadatas, ids=ids,
        refresh_indices=refresh_indices, create_index_if_not_exists=
        create_index_if_not_exists, bulk_kwargs=bulk_kwargs, kwargs=kwargs)
|
Run more texts through the embeddings and add to the vectorstore.
Args:
texts: Iterable of strings to add to the vectorstore.
metadatas: Optional list of metadatas associated with the texts.
ids: Optional list of ids to associate with the texts.
refresh_indices: Whether to refresh the Elasticsearch indices
after adding the texts.
create_index_if_not_exists: Whether to create the Elasticsearch
index if it doesn't already exist.
*bulk_kwargs: Additional arguments to pass to Elasticsearch bulk.
- chunk_size: Optional. Number of texts to add to the
index at a time. Defaults to 500.
Returns:
List of ids from adding the texts into the vectorstore.
|
test_similarity_search_with_score_by_vector
|
"""Test vector similarity with score by vector."""
texts = ['foo', 'bar', 'baz']
docsearch = TileDB.from_texts(texts=texts, embedding=
ConsistentFakeEmbeddings(), index_uri=f'{str(tmp_path)}/flat',
index_type='FLAT')
query_vec = FakeEmbeddings().embed_query(text='foo')
output = docsearch.similarity_search_with_score_by_vector(query_vec, k=1)
assert len(output) == 1
assert output[0][0] == Document(page_content='foo')
docsearch = TileDB.from_texts(texts=texts, embedding=
ConsistentFakeEmbeddings(), index_uri=f'{str(tmp_path)}/ivf_flat',
index_type='IVF_FLAT')
query_vec = FakeEmbeddings().embed_query(text='foo')
output = docsearch.similarity_search_with_score_by_vector(query_vec, k=1,
nprobe=docsearch.vector_index.partitions)
assert len(output) == 1
assert output[0][0] == Document(page_content='foo')
|
@pytest.mark.requires('tiledb-vector-search')
def test_similarity_search_with_score_by_vector(tmp_path: Path) ->None:
    """Test vector similarity with score by vector."""
    texts = ['foo', 'bar', 'baz']
    # Exercise the FLAT index type first.
    docsearch = TileDB.from_texts(texts=texts, embedding=
        ConsistentFakeEmbeddings(), index_uri=f'{str(tmp_path)}/flat',
        index_type='FLAT')
    query_vec = FakeEmbeddings().embed_query(text='foo')
    output = docsearch.similarity_search_with_score_by_vector(query_vec, k=1)
    assert len(output) == 1
    assert output[0][0] == Document(page_content='foo')
    # Repeat with IVF_FLAT, which additionally takes an nprobe argument.
    docsearch = TileDB.from_texts(texts=texts, embedding=
        ConsistentFakeEmbeddings(), index_uri=f'{str(tmp_path)}/ivf_flat',
        index_type='IVF_FLAT')
    query_vec = FakeEmbeddings().embed_query(text='foo')
    output = docsearch.similarity_search_with_score_by_vector(query_vec, k=
        1, nprobe=docsearch.vector_index.partitions)
    assert len(output) == 1
    assert output[0][0] == Document(page_content='foo')
|
Test vector similarity with score by vector.
|
get_lc_namespace
|
"""Get the namespace of the langchain object."""
return ['langchain', 'schema', 'messages']
|
@classmethod
def get_lc_namespace(cls) ->List[str]:
    """Get the namespace of the langchain object."""
    # Serialized objects live under the langchain.schema.messages namespace.
    return ['langchain', 'schema', 'messages']
|
Get the namespace of the langchain object.
|
test_api_key_is_string
|
chat = ChatGoogleGenerativeAI(model='gemini-nano', google_api_key=
'secret-api-key')
assert isinstance(chat.google_api_key, SecretStr)
|
def test_api_key_is_string() ->None:
    """A plain-string API key should be wrapped in SecretStr on the model."""
    chat = ChatGoogleGenerativeAI(model='gemini-nano', google_api_key=
        'secret-api-key')
    assert isinstance(chat.google_api_key, SecretStr)
| null |
test_sklearn_with_metadatas
|
"""Test end to end construction and search."""
texts = ['foo', 'bar', 'baz']
metadatas = [{'page': str(i)} for i in range(len(texts))]
docsearch = SKLearnVectorStore.from_texts(texts, FakeEmbeddings(),
metadatas=metadatas)
output = docsearch.similarity_search('foo', k=1)
assert output[0].metadata['page'] == '0'
|
@pytest.mark.requires('numpy', 'sklearn')
def test_sklearn_with_metadatas() ->None:
    """Test end to end construction and search."""
    texts = ['foo', 'bar', 'baz']
    # One metadata dict per text; the page number identifies the source text.
    metadatas = [{'page': str(i)} for i in range(len(texts))]
    docsearch = SKLearnVectorStore.from_texts(texts, FakeEmbeddings(),
        metadatas=metadatas)
    output = docsearch.similarity_search('foo', k=1)
    # Nearest neighbour of 'foo' should be 'foo' itself (page '0').
    assert output[0].metadata['page'] == '0'
|
Test end to end construction and search.
|
test_gradient_wrong_setup3
|
with pytest.raises(Exception):
GradientEmbeddings(gradient_api_url='-', gradient_access_token=
_GRADIENT_SECRET, gradient_workspace_id=_GRADIENT_WORKSPACE_ID,
model=_MODEL_ID)
|
def test_gradient_wrong_setup3() ->None:
    """An invalid gradient_api_url ('-') should make construction fail."""
    # NOTE(review): ``Exception`` is broad — narrowing to the specific
    # error type raised by GradientEmbeddings would make this stricter.
    with pytest.raises(Exception):
        GradientEmbeddings(gradient_api_url='-', gradient_access_token=
            _GRADIENT_SECRET, gradient_workspace_id=_GRADIENT_WORKSPACE_ID,
            model=_MODEL_ID)
| null |
get_task
|
"""
Retrieve a specific task.
"""
params, error = load_query(query, fault_tolerant=True)
if params is None:
return {'Error': error}
url = f"{DEFAULT_URL}/task/{params['task_id']}"
params = {'custom_task_ids': 'true', 'team_id': self.team_id,
'include_subtasks': 'true'}
response = requests.get(url, headers=self.get_headers(), params=params)
data = response.json()
parsed_task = parse_dict_through_component(data, Task, fault_tolerant=
fault_tolerant)
return parsed_task
|
def get_task(self, query: str, fault_tolerant: bool=True) ->Dict:
    """
    Retrieve a specific task.

    Args:
        query: JSON string containing at least a ``task_id`` key.
        fault_tolerant: Whether parsing errors are tolerated and reported
            instead of raised.

    Returns:
        The parsed task, or ``{'Error': ...}`` when the query could not
        be loaded.
    """
    # BUG FIX: ``fault_tolerant`` was previously hard-coded to True here,
    # so callers could never disable fault tolerance for query parsing.
    params, error = load_query(query, fault_tolerant=fault_tolerant)
    if params is None:
        return {'Error': error}
    url = f"{DEFAULT_URL}/task/{params['task_id']}"
    # Renamed: ``params`` was reused for both the parsed query and the
    # HTTP query string, shadowing the parsed task data.
    query_params = {'custom_task_ids': 'true', 'team_id': self.team_id,
        'include_subtasks': 'true'}
    response = requests.get(url, headers=self.get_headers(), params=
        query_params)
    data = response.json()
    parsed_task = parse_dict_through_component(data, Task, fault_tolerant=
        fault_tolerant)
    return parsed_task
|
Retrieve a specific task.
|
_chat
|
base_url = f'{self.ernie_api_base}/rpc/2.0/ai_custom/v1/wenxinworkshop/chat'
model_paths = {'ERNIE-Bot-turbo': 'eb-instant', 'ERNIE-Bot': 'completions',
'ERNIE-Bot-8K': 'ernie_bot_8k', 'ERNIE-Bot-4': 'completions_pro',
'ERNIE-Bot-turbo-AI': 'ai_apaas', 'BLOOMZ-7B': 'bloomz_7b1',
'Llama-2-7b-chat': 'llama_2_7b', 'Llama-2-13b-chat': 'llama_2_13b',
'Llama-2-70b-chat': 'llama_2_70b'}
if self.model_name in model_paths:
url = f'{base_url}/{model_paths[self.model_name]}'
else:
raise ValueError(f'Got unknown model_name {self.model_name}')
resp = requests.post(url, timeout=self.request_timeout, headers={
'Content-Type': 'application/json'}, params={'access_token': self.
access_token}, json=payload)
return resp.json()
|
def _chat(self, payload: object) ->dict:
    """POST ``payload`` to the chat endpoint for the configured model and
    return the decoded JSON response.

    Raises:
        ValueError: If ``self.model_name`` is not a recognized model.
    """
    base_url = (
        f'{self.ernie_api_base}/rpc/2.0/ai_custom/v1/wenxinworkshop/chat')
    # Map of public model names to their endpoint path segments.
    model_paths = {'ERNIE-Bot-turbo': 'eb-instant', 'ERNIE-Bot':
        'completions', 'ERNIE-Bot-8K': 'ernie_bot_8k', 'ERNIE-Bot-4':
        'completions_pro', 'ERNIE-Bot-turbo-AI': 'ai_apaas', 'BLOOMZ-7B':
        'bloomz_7b1', 'Llama-2-7b-chat': 'llama_2_7b', 'Llama-2-13b-chat':
        'llama_2_13b', 'Llama-2-70b-chat': 'llama_2_70b'}
    path = model_paths.get(self.model_name)
    if path is None:
        raise ValueError(f'Got unknown model_name {self.model_name}')
    endpoint = f'{base_url}/{path}'
    response = requests.post(endpoint, timeout=self.request_timeout,
        headers={'Content-Type': 'application/json'}, params={
        'access_token': self.access_token}, json=payload)
    return response.json()
| null |
get_num_tokens
|
"""Get the number of tokens present in the text.
Useful for checking if an input will fit in a model's context window.
Args:
text: The string input to tokenize.
Returns:
The integer number of tokens in the text.
"""
if self.is_gemini:
raise ValueError('Counting tokens is not yet supported!')
result = self.client.count_text_tokens(model=self.model, prompt=text)
return result['token_count']
|
def get_num_tokens(self, text: str) ->int:
    """Count the tokens in ``text`` with the model client.

    Useful for checking if an input will fit in a model's context window.

    Args:
        text: The string input to tokenize.

    Returns:
        The integer number of tokens in the text.

    Raises:
        ValueError: For Gemini models, where counting is unsupported.
    """
    if self.is_gemini:
        raise ValueError('Counting tokens is not yet supported!')
    token_info = self.client.count_text_tokens(model=self.model, prompt=text)
    return token_info['token_count']
|
Get the number of tokens present in the text.
Useful for checking if an input will fit in a model's context window.
Args:
text: The string input to tokenize.
Returns:
The integer number of tokens in the text.
|
find_and_replace
|
rtn = source
finds = sorted(replacements.keys())
for find in finds:
replace = replacements[find]
rtn = rtn.replace(find, replace)
return rtn
|
def find_and_replace(source: str, replacements: Dict[str, str]) ->str:
    """Apply every replacement in ``replacements`` to ``source``.

    Keys are processed in sorted order, so the result is deterministic
    regardless of dict insertion order; later replacements see the output
    of earlier ones.
    """
    result = source
    for needle in sorted(replacements):
        result = result.replace(needle, replacements[needle])
    return result
| null |
_llm_type
|
"""Return type of llm."""
return 'Anyscale LLM'
|
@property
def _llm_type(self) ->str:
    """Return type of llm."""
    # Stable identifier string for this LLM implementation.
    return 'Anyscale LLM'
|
Return type of llm.
|
logout
|
"""
Logout to cleanup resources
Args: no args
Returns: None
"""
self._jag.logout(self._token)
|
def logout(self) ->None:
    """Logout to clean up server-side resources.

    Args: no args
    Returns: None
    """
    self._jag.logout(self._token)
|
Logout to cleanup resources
Args: no args
Returns: None
|
_get_docs
|
question = inputs[self.question_key]
docs = self.vectorstore.similarity_search(question, k=self.k, **self.
search_kwargs)
return self._reduce_tokens_below_limit(docs)
|
def _get_docs(self, inputs: Dict[str, Any], *, run_manager:
    CallbackManagerForChainRun) ->List[Document]:
    """Retrieve the k most similar docs for the question, trimmed to the
    token limit."""
    question = inputs[self.question_key]
    docs = self.vectorstore.similarity_search(question, k=self.k, **self.
        search_kwargs)
    # Presumably truncates the doc list to the configured token budget —
    # see the helper for the exact policy.
    return self._reduce_tokens_below_limit(docs)
| null |
test_sanitize_input
|
query = """
```
p = 5
```
"""
expected = 'p = 5'
actual = sanitize_input(query)
assert expected == actual
query = """
```python
p = 5
```
"""
expected = 'p = 5'
actual = sanitize_input(query)
assert expected == actual
query = '\n p = 5\n '
expected = 'p = 5'
actual = sanitize_input(query)
assert expected == actual
|
def test_sanitize_input() ->None:
    """sanitize_input should strip code fences and surrounding whitespace."""
    # Bare triple-backtick fence.
    query = """
          ```
          p = 5
          ```
    """
    expected = 'p = 5'
    actual = sanitize_input(query)
    assert expected == actual
    # Fence with a language tag.
    query = """
          ```python
          p = 5
          ```
    """
    expected = 'p = 5'
    actual = sanitize_input(query)
    assert expected == actual
    # Plain leading/trailing whitespace, no fence.
    query = '\n    p = 5\n    '
    expected = 'p = 5'
    actual = sanitize_input(query)
    assert expected == actual
| null |
get_req_generation
|
"""Method for an end-to-end post query with NVE post-processing."""
response = self.get_req(model_name, payload, invoke_url)
output, _ = self.postprocess(response, stop=stop)
return output
|
def get_req_generation(self, model_name: Optional[str]=None, payload:
    Optional[dict]=None, invoke_url: Optional[str]=None, stop: Optional[
    Sequence[str]]=None) ->dict:
    """Method for an end-to-end post query with NVE post-processing.

    Args:
        model_name: Optional model to invoke.
        payload: Request body; defaults to an empty dict.
        invoke_url: Optional explicit endpoint URL.
        stop: Optional stop sequences applied during post-processing.

    Returns:
        The post-processed output.
    """
    # BUG FIX: ``payload`` previously used a mutable default (``{}``),
    # which is shared across all calls and can leak state between them.
    # Use None and allocate a fresh dict per call instead.
    if payload is None:
        payload = {}
    response = self.get_req(model_name, payload, invoke_url)
    output, _ = self.postprocess(response, stop=stop)
    return output
|
Method for an end-to-end post query with NVE post-processing.
|
format_property_key
|
words = s.split()
if not words:
return s
first_word = words[0].lower()
capitalized_words = [word.capitalize() for word in words[1:]]
return ''.join([first_word] + capitalized_words)
|
def format_property_key(s: str) ->str:
    """Convert a whitespace-separated string to camelCase.

    Returns ``s`` unchanged when it contains no words.
    """
    words = s.split()
    if not words:
        return s
    head, *tail = words
    return head.lower() + ''.join(word.capitalize() for word in tail)
| null |
_get_tags
|
req_tag = urllib.request.Request(self._get_tag_url.format(id=note_id))
with urllib.request.urlopen(req_tag) as response:
json_data = json.loads(response.read().decode())
return [tag['title'] for tag in json_data['items']]
|
def _get_tags(self, note_id: str) ->List[str]:
    """Fetch the tag titles attached to the note with id ``note_id``."""
    request = urllib.request.Request(self._get_tag_url.format(id=note_id))
    with urllib.request.urlopen(request) as response:
        payload = json.loads(response.read().decode())
    return [item['title'] for item in payload['items']]
| null |
test_hologres_with_metadatas_with_scores
|
"""Test end to end construction and search."""
texts = ['foo', 'bar', 'baz']
metadatas = [{'page': str(i)} for i in range(len(texts))]
docsearch = Hologres.from_texts(texts=texts, table_name='test_table',
embedding=FakeEmbeddingsWithAdaDimension(), metadatas=metadatas,
connection_string=CONNECTION_STRING, pre_delete_table=True)
output = docsearch.similarity_search_with_score('foo', k=1)
assert output == [(Document(page_content='foo', metadata={'page': '0'}), 0.0)]
|
def test_hologres_with_metadatas_with_scores() ->None:
    """Test end to end construction and search."""
    texts = ['foo', 'bar', 'baz']
    metadatas = [{'page': str(i)} for i in range(len(texts))]
    docsearch = Hologres.from_texts(texts=texts, table_name='test_table',
        embedding=FakeEmbeddingsWithAdaDimension(), metadatas=metadatas,
        connection_string=CONNECTION_STRING, pre_delete_table=True)
    output = docsearch.similarity_search_with_score('foo', k=1)
    # An exact match should score distance 0.0 with the fake embeddings.
    assert output == [(Document(page_content='foo', metadata={'page': '0'}),
        0.0)]
|
Test end to end construction and search.
|
lazy_load
|
"""Lazy loads the chat data from the folder.
Yields:
ChatSession: A chat session containing the loaded messages.
"""
inbox_path = self.directory_path / 'inbox'
for _dir in inbox_path.iterdir():
if _dir.is_dir():
for _file in _dir.iterdir():
if _file.suffix.lower() == '.json':
file_loader = SingleFileFacebookMessengerChatLoader(path=_file)
for result in file_loader.lazy_load():
yield result
|
def lazy_load(self) ->Iterator[ChatSession]:
    """Lazy loads the chat data from the folder.

    Yields:
        ChatSession: A chat session containing the loaded messages.
    """
    inbox = self.directory_path / 'inbox'
    for conversation_dir in inbox.iterdir():
        if not conversation_dir.is_dir():
            continue
        for candidate in conversation_dir.iterdir():
            if candidate.suffix.lower() != '.json':
                continue
            loader = SingleFileFacebookMessengerChatLoader(path=candidate)
            yield from loader.lazy_load()
|
Lazy loads the chat data from the folder.
Yields:
ChatSession: A chat session containing the loaded messages.
|
_create_index
|
"""Create a index on the collection"""
from transwarp_hippo_api.hippo_client import HippoTable
from transwarp_hippo_api.hippo_type import IndexType, MetricType
if isinstance(self.col, HippoTable) and self._get_index() is None:
if self._get_index() is None:
if self.index_params is None:
self.index_params = {'index_name': 'langchain_auto_create',
'metric_type': MetricType.L2, 'index_type': IndexType.
IVF_FLAT, 'nlist': 10}
self.col.create_index(self._vector_field, self.index_params[
'index_name'], self.index_params['index_type'], self.
index_params['metric_type'], nlist=self.index_params['nlist'])
logger.debug(self.col.activate_index(self.index_params[
'index_name']))
logger.info('create index successfully')
else:
index_dict = {'IVF_FLAT': IndexType.IVF_FLAT, 'FLAT': IndexType
.FLAT, 'IVF_SQ': IndexType.IVF_SQ, 'IVF_PQ': IndexType.
IVF_PQ, 'HNSW': IndexType.HNSW}
metric_dict = {'ip': MetricType.IP, 'IP': MetricType.IP, 'l2':
MetricType.L2, 'L2': MetricType.L2}
self.index_params['metric_type'] = metric_dict[self.
index_params['metric_type']]
if self.index_params['index_type'] == 'FLAT':
self.index_params['index_type'] = index_dict[self.
index_params['index_type']]
self.col.create_index(self._vector_field, self.index_params
['index_name'], self.index_params['index_type'], self.
index_params['metric_type'])
logger.debug(self.col.activate_index(self.index_params[
'index_name']))
elif self.index_params['index_type'
] == 'IVF_FLAT' or self.index_params['index_type'] == 'IVF_SQ':
self.index_params['index_type'] = index_dict[self.
index_params['index_type']]
self.col.create_index(self._vector_field, self.index_params
['index_name'], self.index_params['index_type'], self.
index_params['metric_type'], nlist=self.index_params.
get('nlist', 10), nprobe=self.index_params.get('nprobe',
10))
logger.debug(self.col.activate_index(self.index_params[
'index_name']))
elif self.index_params['index_type'] == 'IVF_PQ':
self.index_params['index_type'] = index_dict[self.
index_params['index_type']]
self.col.create_index(self._vector_field, self.index_params
['index_name'], self.index_params['index_type'], self.
index_params['metric_type'], nlist=self.index_params.
get('nlist', 10), nprobe=self.index_params.get('nprobe',
10), nbits=self.index_params.get('nbits', 8), m=self.
index_params.get('m'))
logger.debug(self.col.activate_index(self.index_params[
'index_name']))
elif self.index_params['index_type'] == 'HNSW':
self.index_params['index_type'] = index_dict[self.
index_params['index_type']]
self.col.create_index(self._vector_field, self.index_params
['index_name'], self.index_params['index_type'], self.
index_params['metric_type'], M=self.index_params.get(
'M'), ef_construction=self.index_params.get(
'ef_construction'), ef_search=self.index_params.get(
'ef_search'))
logger.debug(self.col.activate_index(self.index_params[
'index_name']))
else:
raise ValueError(
'Index name does not match, please enter the correct index name. (FLAT, IVF_FLAT, IVF_PQ,IVF_SQ, HNSW)'
)
|
def _create_index(self) ->None:
    """Create a vector index on the collection if none exists yet.

    With no ``index_params`` configured, a default IVF_FLAT / L2 index is
    created. Otherwise the string ``index_type`` and ``metric_type`` values
    are mapped to Hippo enums and ``create_index`` is called with the
    tuning arguments appropriate for that index type.

    Raises:
        ValueError: if ``index_params['index_type']`` is not one of
            FLAT, IVF_FLAT, IVF_PQ, IVF_SQ, HNSW.
        KeyError: if ``index_params['metric_type']`` is not ip/IP/l2/L2.
    """
    from transwarp_hippo_api.hippo_client import HippoTable
    from transwarp_hippo_api.hippo_type import IndexType, MetricType
    # Nothing to do unless we hold a real table with no existing index.
    # (The original checked _get_index() twice; once is enough.)
    if not isinstance(self.col, HippoTable) or self._get_index() is not None:
        return
    if self.index_params is None:
        # No user-supplied parameters: create a sensible default index.
        self.index_params = {'index_name': 'langchain_auto_create',
            'metric_type': MetricType.L2, 'index_type': IndexType.IVF_FLAT,
            'nlist': 10}
        self.col.create_index(self._vector_field, self.index_params[
            'index_name'], self.index_params['index_type'], self.
            index_params['metric_type'], nlist=self.index_params['nlist'])
        logger.debug(self.col.activate_index(self.index_params['index_name']))
        logger.info('create index successfully')
        return
    index_dict = {'IVF_FLAT': IndexType.IVF_FLAT, 'FLAT': IndexType.FLAT,
        'IVF_SQ': IndexType.IVF_SQ, 'IVF_PQ': IndexType.IVF_PQ, 'HNSW':
        IndexType.HNSW}
    metric_dict = {'ip': MetricType.IP, 'IP': MetricType.IP, 'l2':
        MetricType.L2, 'L2': MetricType.L2}
    self.index_params['metric_type'] = metric_dict[self.index_params[
        'metric_type']]
    index_type = self.index_params['index_type']
    if index_type not in index_dict:
        raise ValueError(
            'Index name does not match, please enter the correct index name. (FLAT, IVF_FLAT, IVF_PQ,IVF_SQ, HNSW)'
            )
    self.index_params['index_type'] = index_dict[index_type]
    # Tuning keyword arguments vary by index type; FLAT takes none.
    extra_kwargs = {}
    if index_type in ('IVF_FLAT', 'IVF_SQ', 'IVF_PQ'):
        extra_kwargs['nlist'] = self.index_params.get('nlist', 10)
        extra_kwargs['nprobe'] = self.index_params.get('nprobe', 10)
    if index_type == 'IVF_PQ':
        extra_kwargs['nbits'] = self.index_params.get('nbits', 8)
        extra_kwargs['m'] = self.index_params.get('m')
    elif index_type == 'HNSW':
        extra_kwargs['M'] = self.index_params.get('M')
        extra_kwargs['ef_construction'] = self.index_params.get(
            'ef_construction')
        extra_kwargs['ef_search'] = self.index_params.get('ef_search')
    self.col.create_index(self._vector_field, self.index_params[
        'index_name'], self.index_params['index_type'], self.index_params[
        'metric_type'], **extra_kwargs)
    logger.debug(self.col.activate_index(self.index_params['index_name']))
|
Create a index on the collection
|
predict
|
"""Pass a single string input to the model and return a string prediction.
Use this method when passing in raw text. If you want to pass in specific
types of chat messages, use predict_messages.
Args:
text: String input to pass to the model.
stop: Stop words to use when generating. Model output is cut off at the
first occurrence of any of these substrings.
**kwargs: Arbitrary additional keyword arguments. These are usually passed
to the model provider API call.
Returns:
Top model prediction as a string.
"""
|
@abstractmethod
def predict(self, text: str, *, stop: Optional[Sequence[str]]=None, **
    kwargs: Any) ->str:
    """Generate a string completion for a single raw-text input.

    Prefer this method for plain text; use ``predict_messages`` when
    passing structured chat messages instead.

    Args:
        text: Raw string prompt passed to the model.
        stop: Optional stop sequences; the model output is truncated at
            the first occurrence of any of them.
        **kwargs: Extra provider-specific keyword arguments, typically
            forwarded to the underlying API call.

    Returns:
        The model's top prediction as a string.
    """
|
Pass a single string input to the model and return a string prediction.
Use this method when passing in raw text. If you want to pass in specific
types of chat messages, use predict_messages.
Args:
text: String input to pass to the model.
stop: Stop words to use when generating. Model output is cut off at the
first occurrence of any of these substrings.
**kwargs: Arbitrary additional keyword arguments. These are usually passed
to the model provider API call.
Returns:
Top model prediction as a string.
|
test_visit_structured_query_no_filter
|
query = 'What is the capital of France?'
structured_query = StructuredQuery(query=query, filter=None)
expected: Tuple[str, Dict] = (query, {})
actual = translator.visit_structured_query(structured_query)
assert expected == actual
|
def test_visit_structured_query_no_filter(translator: RedisTranslator) ->None:
    """A structured query without a filter translates to (query, {})."""
    question = 'What is the capital of France?'
    sq = StructuredQuery(query=question, filter=None)
    translated: Tuple[str, Dict] = (question, {})
    assert translator.visit_structured_query(sq) == translated
| null |
evaluate_strings
|
"""Evaluate Chain or LLM output, based on optional input and label.
Args:
prediction (str): The LLM or chain prediction to evaluate.
reference (Optional[str], optional): The reference label to evaluate against.
input (Optional[str], optional): The input to consider during evaluation.
**kwargs: Additional keyword arguments, including callbacks, tags, etc.
Returns:
dict: The evaluation results containing the score or value.
"""
self._check_evaluation_args(reference=reference, input=input)
return self._evaluate_strings(prediction=prediction, reference=reference,
input=input, **kwargs)
|
def evaluate_strings(self, *, prediction: str, reference: Optional[str]=
    None, input: Optional[str]=None, **kwargs: Any) ->dict:
    """Score a prediction string, optionally against a reference and input.

    Args:
        prediction (str): The LLM or chain prediction to evaluate.
        reference (Optional[str], optional): Ground-truth label to
            evaluate against, if any.
        input (Optional[str], optional): Original input to consider
            during evaluation.
        **kwargs: Additional keyword arguments (callbacks, tags, etc.).

    Returns:
        dict: The evaluation results containing the score or value.
    """
    # Fail fast if this evaluator requires a reference/input it didn't get.
    self._check_evaluation_args(reference=reference, input=input)
    return self._evaluate_strings(prediction=prediction, reference=
        reference, input=input, **kwargs)
|
Evaluate Chain or LLM output, based on optional input and label.
Args:
prediction (str): The LLM or chain prediction to evaluate.
reference (Optional[str], optional): The reference label to evaluate against.
input (Optional[str], optional): The input to consider during evaluation.
**kwargs: Additional keyword arguments, including callbacks, tags, etc.
Returns:
dict: The evaluation results containing the score or value.
|
test_init
|
mock_client.return_value = MagicMock()
FalkorDBGraph(database=self.graph, host=self.host, port=self.port)
|
@patch('redis.Redis')
def test_init(self, mock_client: Any) ->None:
    """Smoke-test FalkorDBGraph construction with redis.Redis mocked out."""
    mock_client.return_value = MagicMock()
    # Constructor should not raise when the redis client is stubbed.
    FalkorDBGraph(database=self.graph, host=self.host, port=self.port)
| null |
mock_trello_client
|
"""Fixture that creates a mock for trello.TrelloClient."""
with patch('trello.TrelloClient') as mock_trello_client:
list_objs = list_to_objects(TRELLO_LISTS)
cards_qa_objs = card_list_to_objects(TRELLO_CARDS_QA)
boards = [MockBoard('5555eaafea917522902a2a2c', 'Research', [],
list_objs), MockBoard('55559f6002dd973ad8cdbfb7', 'QA',
cards_qa_objs, list_objs)]
mock_trello_client.return_value.list_boards.return_value = boards
yield mock_trello_client.return_value
|
@pytest.fixture
def mock_trello_client() ->Any:
    """Fixture that creates a mock for trello.TrelloClient."""
    with patch('trello.TrelloClient') as client_mock:
        lists = list_to_objects(TRELLO_LISTS)
        qa_cards = card_list_to_objects(TRELLO_CARDS_QA)
        research_board = MockBoard('5555eaafea917522902a2a2c', 'Research',
            [], lists)
        qa_board = MockBoard('55559f6002dd973ad8cdbfb7', 'QA', qa_cards, lists)
        # Every board listing returns the same two fixed boards.
        client_mock.return_value.list_boards.return_value = [research_board,
            qa_board]
        yield client_mock.return_value
|
Fixture that creates a mock for trello.TrelloClient.
|
__init__
|
self._embedding = embedding
self.target = host + ':' + str(port)
self.grpc_options = grpc_options
self.grpc_use_secure = grpc_use_secure
self.grpc_credentials = grpc_credentials
|
def __init__(self, embedding: Embeddings, host: str='localhost', port: int=
    8080, grpc_options: Tuple=(('grpc.keepalive_time_ms', 1000 * 10), (
    'grpc.keepalive_timeout_ms', 1000 * 10)), grpc_use_secure: bool=False,
    grpc_credentials: Optional[Any]=None):
    """Store the embedding function and gRPC connection settings."""
    self._embedding = embedding
    # "host:port" target string used when opening the gRPC channel.
    self.target = f'{host}:{port}'
    self.grpc_options = grpc_options
    self.grpc_use_secure = grpc_use_secure
    self.grpc_credentials = grpc_credentials
| null |
_import_milvus
|
from langchain_community.vectorstores.milvus import Milvus
return Milvus
|
def _import_milvus() ->Any:
    """Return the Milvus vector store class, importing it lazily.

    The import is deferred to call time so langchain_community is only
    loaded when this class is actually requested.
    """
    from langchain_community.vectorstores.milvus import Milvus
    return Milvus
| null |
validate_prompt
|
prompt: BasePromptTemplate = values['prompt']
if 'agent_scratchpad' not in prompt.input_variables:
raise ValueError(
f'`agent_scratchpad` should be one of the variables in the prompt, got {prompt.input_variables}'
)
return values
|
@root_validator
def validate_prompt(cls, values: dict) ->dict:
    """Ensure the configured prompt exposes an `agent_scratchpad` variable."""
    prompt: BasePromptTemplate = values['prompt']
    if 'agent_scratchpad' in prompt.input_variables:
        return values
    raise ValueError(
        f'`agent_scratchpad` should be one of the variables in the prompt, got {prompt.input_variables}'
        )
| null |
clear
|
"""Remove the container and its contents entirely. A cleared container can't
be reused.
"""
self._container = self._parent_cursor.empty()
self._child_records.clear()
|
def clear(self) ->None:
    """Remove the container and its contents entirely. A cleared container can't
    be reused.
    """
    # NOTE(review): assumes _parent_cursor.empty() detaches/replaces this
    # subtree in the parent — confirm against the cursor implementation.
    self._container = self._parent_cursor.empty()
    # Forget all child records tracked for the removed container.
    self._child_records.clear()
|
Remove the container and its contents entirely. A cleared container can't
be reused.
|
stop_stream
|
"""Close the streaming connection."""
if signal:
self._send_stop_signals(model_name, request_id)
self.client.stop_stream()
|
def stop_stream(self, model_name: str, request_id: str, signal: bool=True
    ) ->None:
    """Close the streaming connection.

    Args:
        model_name: Name of the model whose stream is being stopped.
        request_id: Identifier of the streaming request.
        signal: When True, send stop signals for the request before
            closing the underlying client stream.
    """
    if signal:
        self._send_stop_signals(model_name, request_id)
    self.client.stop_stream()
|
Close the streaming connection.
|
_create_collection
|
fields = [{'name': 'vec', 'type': 'float[]', 'num_dim': num_dim}, {'name':
f'{self._text_key}', 'type': 'string'}, {'name': '.*', 'type': 'auto'}]
self._typesense_client.collections.create({'name': self.
_typesense_collection_name, 'fields': fields})
|
def _create_collection(self, num_dim: int) ->None:
    """Create the Typesense collection with a `num_dim`-dimensional vector field."""
    schema_fields = [
        {'name': 'vec', 'type': 'float[]', 'num_dim': num_dim},
        {'name': f'{self._text_key}', 'type': 'string'},
        # Catch-all: auto-type any other document field.
        {'name': '.*', 'type': 'auto'},
    ]
    collection_schema = {'name': self._typesense_collection_name, 'fields':
        schema_fields}
    self._typesense_client.collections.create(collection_schema)
| null |
embed_query
|
return self.embed_documents([text])[0]
|
def embed_query(self, text: str) ->List[float]:
    """Embed a single query string via the batch document-embedding path."""
    results = self.embed_documents([text])
    return results[0]
| null |
test_api_key_masked_when_passed_via_constructor
|
"""Test initialization with an API key provided via the initializer"""
llm = Minimax(minimax_api_key='secret-api-key', minimax_group_id='group_id')
print(llm.minimax_api_key, end='')
captured = capsys.readouterr()
assert captured.out == '**********'
|
def test_api_key_masked_when_passed_via_constructor(capsys: CaptureFixture
    ) ->None:
    """Test initialization with an API key provided via the initializer"""
    llm = Minimax(minimax_api_key='secret-api-key', minimax_group_id='group_id'
        )
    # Printing the secret must show only the mask, never the raw key.
    print(llm.minimax_api_key, end='')
    assert capsys.readouterr().out == '**********'
|
Test initialization with an API key provided via the initializer
|
test_md_header_text_splitter_1
|
"""Test markdown splitter by header: Case 1."""
markdown_document = """# Foo
## Bar
Hi this is Jim
Hi this is Joe
## Baz
Hi this is Molly"""
headers_to_split_on = [('#', 'Header 1'), ('##', 'Header 2')]
markdown_splitter = MarkdownHeaderTextSplitter(headers_to_split_on=
headers_to_split_on)
output = markdown_splitter.split_text(markdown_document)
expected_output = [Document(page_content=
"""Hi this is Jim
Hi this is Joe""", metadata={'Header 1': 'Foo',
'Header 2': 'Bar'}), Document(page_content='Hi this is Molly', metadata
={'Header 1': 'Foo', 'Header 2': 'Baz'})]
assert output == expected_output
|
def test_md_header_text_splitter_1() ->None:
    """Test markdown splitter by header: Case 1."""
    markdown_document = """# Foo
## Bar
Hi this is Jim
Hi this is Joe
## Baz
Hi this is Molly"""
    headers_to_split_on = [('#', 'Header 1'), ('##', 'Header 2')]
    splitter = MarkdownHeaderTextSplitter(headers_to_split_on=
        headers_to_split_on)
    actual = splitter.split_text(markdown_document)
    # Each chunk carries the header hierarchy it appeared under.
    expected = [
        Document(page_content='Hi this is Jim \nHi this is Joe', metadata=
            {'Header 1': 'Foo', 'Header 2': 'Bar'}),
        Document(page_content='Hi this is Molly', metadata={'Header 1':
            'Foo', 'Header 2': 'Baz'}),
    ]
    assert actual == expected
|
Test markdown splitter by header: Case 1.
|
_parse_chat_history
|
"""Parse a sequence of messages into history."""
chat_history = []
for message in history:
content = cast(str, message.content)
if isinstance(message, HumanMessage):
chat_history.append(_parse_message('USER', content))
if isinstance(message, AIMessage):
chat_history.append(_parse_message('BOT', content))
return chat_history
|
def _parse_chat_history(history: List[BaseMessage]) ->List:
    """Convert LangChain messages into role-tagged history entries."""
    parsed = []
    for msg in history:
        text = cast(str, msg.content)
        # Human turns become USER entries, AI turns become BOT entries;
        # any other message type is silently dropped.
        if isinstance(msg, HumanMessage):
            parsed.append(_parse_message('USER', text))
        if isinstance(msg, AIMessage):
            parsed.append(_parse_message('BOT', text))
    return parsed
|
Parse a sequence of messages into history.
|
from_documents
|
"""Return VectorStore initialized from documents and embeddings."""
texts = [d.page_content for d in documents]
metadatas = [d.metadata for d in documents]
return cls.from_texts(texts, embedding, metadatas=metadatas, **kwargs)
|
@classmethod
def from_documents(cls: Type[VST], documents: List[Document], embedding:
    Embeddings, **kwargs: Any) ->VST:
    """Return VectorStore initialized from documents and embeddings."""
    # Unzip each document into parallel text/metadata lists, then
    # delegate to the text-based constructor.
    texts, metadatas = [], []
    for doc in documents:
        texts.append(doc.page_content)
        metadatas.append(doc.metadata)
    return cls.from_texts(texts, embedding, metadatas=metadatas, **kwargs)
|
Return VectorStore initialized from documents and embeddings.
|
delete
|
"""Delete by vector ID.
Args:
ids (List[str]): List of ids to delete.
kwargs (Any): Other optional parameters (unused)
Returns:
Optional[bool]: True if deletion is successful,
False otherwise, None if not implemented.
"""
from momento.responses.vector_index import DeleteItemBatch
if ids is None:
return True
response = self._client.delete_item_batch(self.index_name, ids)
return isinstance(response, DeleteItemBatch.Success)
|
def delete(self, ids: Optional[List[str]]=None, **kwargs: Any) ->Optional[bool
    ]:
    """Delete by vector ID.

    Args:
        ids (List[str]): List of ids to delete.
        kwargs (Any): Other optional parameters (unused)

    Returns:
        Optional[bool]: True if deletion is successful,
            False otherwise, None if not implemented.
    """
    from momento.responses.vector_index import DeleteItemBatch
    # Nothing requested: treat as a trivially successful delete.
    if ids is None:
        return True
    outcome = self._client.delete_item_batch(self.index_name, ids)
    return isinstance(outcome, DeleteItemBatch.Success)
|
Delete by vector ID.
Args:
ids (List[str]): List of ids to delete.
kwargs (Any): Other optional parameters (unused)
Returns:
Optional[bool]: True if deletion is successful,
False otherwise, None if not implemented.
|
_get_index
|
"""Return the vector index information if it exists"""
from transwarp_hippo_api.hippo_client import HippoTable
if isinstance(self.col, HippoTable):
table_info = self.hc.get_table_info(self.table_name, self.database_name
).get(self.table_name, {})
embedding_indexes = table_info.get('embedding_indexes', None)
if embedding_indexes is None:
return None
else:
for x in self.hc.get_table_info(self.table_name, self.database_name)[
self.table_name]['embedding_indexes']:
logger.debug(f'[_get_index] embedding_indexes {embedding_indexes}')
if x['column'] == self._vector_field:
return x
return None
|
def _get_index(self) ->Optional[Dict[str, Any]]:
    """Return the vector index information if it exists.

    Fetches this table's metadata once and scans its embedding indexes
    for one built on ``self._vector_field``.

    Returns:
        The matching index description dict, or ``None`` when no index
        exists (or ``self.col`` is not a HippoTable).
    """
    from transwarp_hippo_api.hippo_client import HippoTable
    if isinstance(self.col, HippoTable):
        table_info = self.hc.get_table_info(self.table_name, self.
            database_name).get(self.table_name, {})
        embedding_indexes = table_info.get('embedding_indexes', None)
        if embedding_indexes is None:
            return None
        logger.debug(f'[_get_index] embedding_indexes {embedding_indexes}')
        # Reuse the metadata fetched above instead of issuing a second
        # get_table_info call (and re-logging) on every loop iteration.
        for index_info in embedding_indexes:
            if index_info['column'] == self._vector_field:
                return index_info
    return None
|
Return the vector index information if it exists
|
construct_url_and_params
|
params = self.get_params(query)
params['source'] = 'python'
if self.serpapi_api_key:
params['serp_api_key'] = self.serpapi_api_key
params['output'] = 'json'
url = 'https://serpapi.com/search'
return url, params
|
def construct_url_and_params() ->Tuple[str, Dict[str, str]]:
    """Build the SerpAPI endpoint and query params (closure over self/query)."""
    params = self.get_params(query)
    # Identify the client library to SerpAPI.
    params['source'] = 'python'
    if self.serpapi_api_key:
        params['serp_api_key'] = self.serpapi_api_key
    # Request JSON output format.
    params['output'] = 'json'
    return 'https://serpapi.com/search', params
| null |
_import_arxiv
|
from langchain_community.utilities.arxiv import ArxivAPIWrapper
return ArxivAPIWrapper
|
def _import_arxiv() ->Any:
    """Return the ArxivAPIWrapper class, importing it lazily.

    The import is deferred to call time so langchain_community is only
    loaded when this wrapper is actually requested.
    """
    from langchain_community.utilities.arxiv import ArxivAPIWrapper
    return ArxivAPIWrapper
| null |
full_form
|
return type_mapping[x] if x in type_mapping else x
|
def full_form(x: str) ->str:
    """Expand abbreviation `x` via `type_mapping`, or return it unchanged."""
    # Single dict lookup instead of membership test plus subscript.
    return type_mapping.get(x, x)
| null |
llm_prefix
|
"""Prefix to append the llm call with."""
return 'Thought:'
|
@property
def llm_prefix(self) ->str:
    """Prefix to append the llm call with."""
    # Fixed marker introducing the model's reasoning turn; no per-call state.
    return 'Thought:'
|
Prefix to append the llm call with.
|
_identifying_params
|
"""Get the identifying parameters."""
return {}
|
@property
def _identifying_params(self) ->Dict[str, Any]:
    """Get the identifying parameters."""
    # This implementation exposes no distinguishing parameters.
    return {}
|
Get the identifying parameters.
|
get_input_schema
|
return create_model('ChainInput', **{k: (Any, None) for k in self.input_keys})
|
def get_input_schema(self, config: Optional[RunnableConfig]=None) ->Type[
    BaseModel]:
    """Build a dynamic pydantic model whose fields mirror this chain's input keys."""
    # Each input key becomes an optional field of unconstrained type.
    field_definitions = {key: (Any, None) for key in self.input_keys}
    return create_model('ChainInput', **field_definitions)
| null |
to_string
|
"""Return prompt value as string."""
|
@abstractmethod
def to_string(self) ->str:
    """Return prompt value as string.

    Implementations must render the full prompt content as a single
    plain string.
    """
|
Return prompt value as string.
|
requests
|
return Requests(headers=self.headers, aiosession=self.aiosession, auth=self
.auth)
|
@property
def requests(self) ->Requests:
    """Requests helper configured with this wrapper's headers, session and auth."""
    # Rebuilt on each access so it always reflects current attribute values.
    return Requests(headers=self.headers, aiosession=self.aiosession, auth=
        self.auth)
| null |
test_search_similarity
|
r = zep_vectorstore.search(query='Test Document', search_type='similarity', k=2
)
assert len(r) == 2
assert r[0].page_content == 'Test Document'
assert r[0].metadata == {'key': 'value'}
|
@pytest.mark.requires('zep_python')
def test_search_similarity(zep_vectorstore: ZepVectorStore) ->None:
    """Similarity search returns the seeded document with its metadata."""
    results = zep_vectorstore.search(query='Test Document', search_type=
        'similarity', k=2)
    assert len(results) == 2
    top = results[0]
    assert top.page_content == 'Test Document'
    assert top.metadata == {'key': 'value'}
| null |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.