method_name
stringlengths 1
78
| method_body
stringlengths 3
9.66k
| full_code
stringlengths 31
10.7k
| docstring
stringlengths 4
4.74k
⌀ |
|---|---|---|---|
is_lc_serializable
|
return False
|
@classmethod
def is_lc_serializable(cls) ->bool:
return False
| null |
_chat
|
if self.hunyuan_secret_key is None:
raise ValueError('Hunyuan secret key is not set.')
parameters = {**self._default_params, **kwargs}
headers = parameters.pop('headers', {})
timestamp = parameters.pop('timestamp', int(time.time()))
expired = parameters.pop('expired', timestamp + 24 * 60 * 60)
payload = {'timestamp': timestamp, 'expired': expired, 'messages': [
_convert_message_to_dict(m) for m in messages], **parameters}
if self.streaming:
payload['stream'] = 1
url = self.hunyuan_api_base + DEFAULT_PATH
res = requests.post(url=url, timeout=self.request_timeout, headers={
'Content-Type': 'application/json', 'Authorization': _signature(
secret_key=self.hunyuan_secret_key, url=url, payload=payload), **
headers}, json=payload, stream=self.streaming)
return res
|
def _chat(self, messages: List[BaseMessage], **kwargs: Any
) ->requests.Response:
if self.hunyuan_secret_key is None:
raise ValueError('Hunyuan secret key is not set.')
parameters = {**self._default_params, **kwargs}
headers = parameters.pop('headers', {})
timestamp = parameters.pop('timestamp', int(time.time()))
expired = parameters.pop('expired', timestamp + 24 * 60 * 60)
payload = {'timestamp': timestamp, 'expired': expired, 'messages': [
_convert_message_to_dict(m) for m in messages], **parameters}
if self.streaming:
payload['stream'] = 1
url = self.hunyuan_api_base + DEFAULT_PATH
res = requests.post(url=url, timeout=self.request_timeout, headers={
'Content-Type': 'application/json', 'Authorization': _signature(
secret_key=self.hunyuan_secret_key, url=url, payload=payload), **
headers}, json=payload, stream=self.streaming)
return res
| null |
_run
|
mailbox = self.account.mailbox()
message = mailbox.new_message()
message.body = body
message.subject = subject
message.to.add(to)
if cc is not None:
message.cc.add(cc)
if bcc is not None:
message.bcc.add(bcc)
message.send()
output = 'Message sent: ' + str(message)
return output
|
def _run(self, body: str, to: List[str], subject: str, cc: Optional[List[
str]]=None, bcc: Optional[List[str]]=None, run_manager: Optional[
CallbackManagerForToolRun]=None) ->str:
mailbox = self.account.mailbox()
message = mailbox.new_message()
message.body = body
message.subject = subject
message.to.add(to)
if cc is not None:
message.cc.add(cc)
if bcc is not None:
message.bcc.add(bcc)
message.send()
output = 'Message sent: ' + str(message)
return output
| null |
get_aggregate_feedback
|
"""Return quantiles for the feedback scores.
This method calculates and prints the quantiles for the feedback scores
across all feedback keys.
Returns:
A DataFrame containing the quantiles for each feedback key.
"""
df = self.to_dataframe()
to_drop = [col for col in df.columns if col.startswith('inputs.') or col.
startswith('outputs.') or col.startswith('reference')]
return df.describe(include='all').drop(to_drop, axis=1)
|
def get_aggregate_feedback(self) ->pd.DataFrame:
"""Return quantiles for the feedback scores.
This method calculates and prints the quantiles for the feedback scores
across all feedback keys.
Returns:
A DataFrame containing the quantiles for each feedback key.
"""
df = self.to_dataframe()
to_drop = [col for col in df.columns if col.startswith('inputs.') or
col.startswith('outputs.') or col.startswith('reference')]
return df.describe(include='all').drop(to_drop, axis=1)
|
Return quantiles for the feedback scores.
This method calculates and prints the quantiles for the feedback scores
across all feedback keys.
Returns:
A DataFrame containing the quantiles for each feedback key.
|
_get_anthropic_client
|
try:
import anthropic
except ImportError:
raise ImportError(
'Could not import anthropic python package. This is needed in order to accurately tokenize the text for anthropic models. Please install it with `pip install anthropic`.'
)
return anthropic.Anthropic()
|
def _get_anthropic_client() ->Any:
try:
import anthropic
except ImportError:
raise ImportError(
'Could not import anthropic python package. This is needed in order to accurately tokenize the text for anthropic models. Please install it with `pip install anthropic`.'
)
return anthropic.Anthropic()
| null |
embeddings
|
return self.embedding_function if isinstance(self.embedding_function,
Embeddings) else None
|
@property
def embeddings(self) ->Optional[Embeddings]:
return self.embedding_function if isinstance(self.embedding_function,
Embeddings) else None
| null |
on_chain_error
|
"""Run when chain errors.
Args:
error (Exception or KeyboardInterrupt): The error.
"""
self.ended = True
return self.parent_run_manager.on_chain_error(error, **kwargs)
|
def on_chain_error(self, error: BaseException, **kwargs: Any) ->None:
"""Run when chain errors.
Args:
error (Exception or KeyboardInterrupt): The error.
"""
self.ended = True
return self.parent_run_manager.on_chain_error(error, **kwargs)
|
Run when chain errors.
Args:
error (Exception or KeyboardInterrupt): The error.
|
_import_bigquery
|
from langchain_community.vectorstores.bigquery_vector_search import BigQueryVectorSearch
return BigQueryVectorSearch
|
def _import_bigquery() ->Any:
from langchain_community.vectorstores.bigquery_vector_search import BigQueryVectorSearch
return BigQueryVectorSearch
| null |
_generate
|
should_stream = stream if stream is not None else self.streaming
if should_stream:
generation: Optional[GenerationChunk] = None
stream_iter = self._stream(prompts[0], stop=stop, run_manager=
run_manager, **kwargs)
for chunk in stream_iter:
if generation is None:
generation = chunk
else:
generation += chunk
assert generation is not None
return LLMResult(generations=[[generation]])
payload = self._build_payload(prompts)
response = self._client.chat(payload)
return self._create_llm_result(response)
|
def _generate(self, prompts: List[str], stop: Optional[List[str]]=None,
run_manager: Optional[CallbackManagerForLLMRun]=None, stream: Optional[
bool]=None, **kwargs: Any) ->LLMResult:
should_stream = stream if stream is not None else self.streaming
if should_stream:
generation: Optional[GenerationChunk] = None
stream_iter = self._stream(prompts[0], stop=stop, run_manager=
run_manager, **kwargs)
for chunk in stream_iter:
if generation is None:
generation = chunk
else:
generation += chunk
assert generation is not None
return LLMResult(generations=[[generation]])
payload = self._build_payload(prompts)
response = self._client.chat(payload)
return self._create_llm_result(response)
| null |
_import_golden_query
|
from langchain_community.utilities.golden_query import GoldenQueryAPIWrapper
return GoldenQueryAPIWrapper
|
def _import_golden_query() ->Any:
from langchain_community.utilities.golden_query import GoldenQueryAPIWrapper
return GoldenQueryAPIWrapper
| null |
test_configurable_fields_prefix_keys
|
fake_chat = FakeListChatModel(responses=['b']).configurable_fields(responses
=ConfigurableFieldMultiOption(id='responses', name='Chat Responses',
options={'hello': 'A good morning to you!', 'bye': 'See you later!',
'helpful': 'How can I help you?'}, default=['hello', 'bye']), sleep=
ConfigurableField(id='chat_sleep', is_shared=True))
fake_llm = FakeListLLM(responses=['a']).configurable_fields(responses=
ConfigurableField(id='responses', name='LLM Responses', description=
'A list of fake responses for this LLM')).configurable_alternatives(
ConfigurableField(id='llm', name='LLM'), chat=fake_chat |
StrOutputParser(), prefix_keys=True)
prompt = PromptTemplate.from_template('Hello, {name}!').configurable_fields(
template=ConfigurableFieldSingleOption(id='prompt_template', name=
'Prompt Template', description='The prompt template for this chain',
options={'hello': 'Hello, {name}!', 'good_morning':
'A very good morning to you, {name}!'}, default='hello'))
chain = prompt | fake_llm
assert chain.config_schema().schema() == {'title': 'RunnableSequenceConfig',
'type': 'object', 'properties': {'configurable': {'$ref':
'#/definitions/Configurable'}}, 'definitions': {'LLM': {'title': 'LLM',
'description': 'An enumeration.', 'enum': ['chat', 'default'], 'type':
'string'}, 'Chat_Responses': {'title': 'Chat Responses', 'description':
'An enumeration.', 'enum': ['hello', 'bye', 'helpful'], 'type':
'string'}, 'Prompt_Template': {'title': 'Prompt Template',
'description': 'An enumeration.', 'enum': ['hello', 'good_morning'],
'type': 'string'}, 'Configurable': {'title': 'Configurable', 'type':
'object', 'properties': {'prompt_template': {'title': 'Prompt Template',
'description': 'The prompt template for this chain', 'default': 'hello',
'allOf': [{'$ref': '#/definitions/Prompt_Template'}]}, 'llm': {'title':
'LLM', 'default': 'default', 'allOf': [{'$ref': '#/definitions/LLM'}]},
'chat_sleep': {'title': 'Chat Sleep', 'type': 'number'},
'llm==chat/responses': {'title': 'Chat Responses', 'default': ['hello',
'bye'], 'type': 'array', 'items': {'$ref':
'#/definitions/Chat_Responses'}}, 'llm==default/responses': {'title':
'LLM Responses', 'description': 'A list of fake responses for this LLM',
'default': ['a'], 'type': 'array', 'items': {'type': 'string'}}}}}}
|
def test_configurable_fields_prefix_keys() ->None:
fake_chat = FakeListChatModel(responses=['b']).configurable_fields(
responses=ConfigurableFieldMultiOption(id='responses', name=
'Chat Responses', options={'hello': 'A good morning to you!', 'bye':
'See you later!', 'helpful': 'How can I help you?'}, default=[
'hello', 'bye']), sleep=ConfigurableField(id='chat_sleep',
is_shared=True))
fake_llm = FakeListLLM(responses=['a']).configurable_fields(responses=
ConfigurableField(id='responses', name='LLM Responses', description
='A list of fake responses for this LLM')).configurable_alternatives(
ConfigurableField(id='llm', name='LLM'), chat=fake_chat |
StrOutputParser(), prefix_keys=True)
prompt = PromptTemplate.from_template('Hello, {name}!'
).configurable_fields(template=ConfigurableFieldSingleOption(id=
'prompt_template', name='Prompt Template', description=
'The prompt template for this chain', options={'hello':
'Hello, {name}!', 'good_morning':
'A very good morning to you, {name}!'}, default='hello'))
chain = prompt | fake_llm
assert chain.config_schema().schema() == {'title':
'RunnableSequenceConfig', 'type': 'object', 'properties': {
'configurable': {'$ref': '#/definitions/Configurable'}},
'definitions': {'LLM': {'title': 'LLM', 'description':
'An enumeration.', 'enum': ['chat', 'default'], 'type': 'string'},
'Chat_Responses': {'title': 'Chat Responses', 'description':
'An enumeration.', 'enum': ['hello', 'bye', 'helpful'], 'type':
'string'}, 'Prompt_Template': {'title': 'Prompt Template',
'description': 'An enumeration.', 'enum': ['hello', 'good_morning'],
'type': 'string'}, 'Configurable': {'title': 'Configurable', 'type':
'object', 'properties': {'prompt_template': {'title':
'Prompt Template', 'description':
'The prompt template for this chain', 'default': 'hello', 'allOf':
[{'$ref': '#/definitions/Prompt_Template'}]}, 'llm': {'title':
'LLM', 'default': 'default', 'allOf': [{'$ref': '#/definitions/LLM'
}]}, 'chat_sleep': {'title': 'Chat Sleep', 'type': 'number'},
'llm==chat/responses': {'title': 'Chat Responses', 'default': [
'hello', 'bye'], 'type': 'array', 'items': {'$ref':
'#/definitions/Chat_Responses'}}, 'llm==default/responses': {
'title': 'LLM Responses', 'description':
'A list of fake responses for this LLM', 'default': ['a'], 'type':
'array', 'items': {'type': 'string'}}}}}}
| null |
__init__
|
self._iterator = iter(iterable)
self._buffers: List[Deque[T]] = [deque() for _ in range(n)]
self._children = tuple(tee_peer(iterator=self._iterator, buffer=buffer,
peers=self._buffers, lock=lock if lock is not None else NoLock()) for
buffer in self._buffers)
|
def __init__(self, iterable: Iterator[T], n: int=2, *, lock: Optional[
ContextManager[Any]]=None):
self._iterator = iter(iterable)
self._buffers: List[Deque[T]] = [deque() for _ in range(n)]
self._children = tuple(tee_peer(iterator=self._iterator, buffer=buffer,
peers=self._buffers, lock=lock if lock is not None else NoLock()) for
buffer in self._buffers)
| null |
__init__
|
"""Instantiate AsyncFinalIteratorCallbackHandler.
Args:
answer_prefix_tokens: Token sequence that prefixes the answer.
Default is ["Final", "Answer", ":"]
strip_tokens: Ignore white spaces and new lines when comparing
answer_prefix_tokens to last tokens? (to determine if answer has been
reached)
stream_prefix: Should answer prefix itself also be streamed?
"""
super().__init__()
if answer_prefix_tokens is None:
self.answer_prefix_tokens = DEFAULT_ANSWER_PREFIX_TOKENS
else:
self.answer_prefix_tokens = answer_prefix_tokens
if strip_tokens:
self.answer_prefix_tokens_stripped = [token.strip() for token in self.
answer_prefix_tokens]
else:
self.answer_prefix_tokens_stripped = self.answer_prefix_tokens
self.last_tokens = [''] * len(self.answer_prefix_tokens)
self.last_tokens_stripped = [''] * len(self.answer_prefix_tokens)
self.strip_tokens = strip_tokens
self.stream_prefix = stream_prefix
self.answer_reached = False
|
def __init__(self, *, answer_prefix_tokens: Optional[List[str]]=None,
strip_tokens: bool=True, stream_prefix: bool=False) ->None:
"""Instantiate AsyncFinalIteratorCallbackHandler.
Args:
answer_prefix_tokens: Token sequence that prefixes the answer.
Default is ["Final", "Answer", ":"]
strip_tokens: Ignore white spaces and new lines when comparing
answer_prefix_tokens to last tokens? (to determine if answer has been
reached)
stream_prefix: Should answer prefix itself also be streamed?
"""
super().__init__()
if answer_prefix_tokens is None:
self.answer_prefix_tokens = DEFAULT_ANSWER_PREFIX_TOKENS
else:
self.answer_prefix_tokens = answer_prefix_tokens
if strip_tokens:
self.answer_prefix_tokens_stripped = [token.strip() for token in
self.answer_prefix_tokens]
else:
self.answer_prefix_tokens_stripped = self.answer_prefix_tokens
self.last_tokens = [''] * len(self.answer_prefix_tokens)
self.last_tokens_stripped = [''] * len(self.answer_prefix_tokens)
self.strip_tokens = strip_tokens
self.stream_prefix = stream_prefix
self.answer_reached = False
|
Instantiate AsyncFinalIteratorCallbackHandler.
Args:
answer_prefix_tokens: Token sequence that prefixes the answer.
Default is ["Final", "Answer", ":"]
strip_tokens: Ignore white spaces and new lines when comparing
answer_prefix_tokens to last tokens? (to determine if answer has been
reached)
stream_prefix: Should answer prefix itself also be streamed?
|
validate_environment
|
values['qianfan_ak'] = convert_to_secret_str(get_from_dict_or_env(values,
'qianfan_ak', 'QIANFAN_AK', default=''))
values['qianfan_sk'] = convert_to_secret_str(get_from_dict_or_env(values,
'qianfan_sk', 'QIANFAN_SK', default=''))
params = {**values.get('init_kwargs', {}), 'model': values['model']}
if values['qianfan_ak'].get_secret_value() != '':
params['ak'] = values['qianfan_ak'].get_secret_value()
if values['qianfan_sk'].get_secret_value() != '':
params['sk'] = values['qianfan_sk'].get_secret_value()
if values['endpoint'] is not None and values['endpoint'] != '':
params['endpoint'] = values['endpoint']
try:
import qianfan
values['client'] = qianfan.Completion(**params)
except ImportError:
raise ImportError(
'qianfan package not found, please install it with `pip install qianfan`'
)
return values
|
@root_validator()
def validate_environment(cls, values: Dict) ->Dict:
values['qianfan_ak'] = convert_to_secret_str(get_from_dict_or_env(
values, 'qianfan_ak', 'QIANFAN_AK', default=''))
values['qianfan_sk'] = convert_to_secret_str(get_from_dict_or_env(
values, 'qianfan_sk', 'QIANFAN_SK', default=''))
params = {**values.get('init_kwargs', {}), 'model': values['model']}
if values['qianfan_ak'].get_secret_value() != '':
params['ak'] = values['qianfan_ak'].get_secret_value()
if values['qianfan_sk'].get_secret_value() != '':
params['sk'] = values['qianfan_sk'].get_secret_value()
if values['endpoint'] is not None and values['endpoint'] != '':
params['endpoint'] = values['endpoint']
try:
import qianfan
values['client'] = qianfan.Completion(**params)
except ImportError:
raise ImportError(
'qianfan package not found, please install it with `pip install qianfan`'
)
return values
| null |
on_llm_start
|
"""Save the prompts in memory when an LLM starts."""
self.prompts.update({str(kwargs['parent_run_id'] or kwargs['run_id']): prompts}
)
|
def on_llm_start(self, serialized: Dict[str, Any], prompts: List[str], **
kwargs: Any) ->None:
"""Save the prompts in memory when an LLM starts."""
self.prompts.update({str(kwargs['parent_run_id'] or kwargs['run_id']):
prompts})
|
Save the prompts in memory when an LLM starts.
|
from_existing_index
|
connection_string = cls.get_connection_string(kwargs)
store = cls(connection_string=connection_string, collection_name=
collection_name, embedding_function=embedding, pre_delete_collection=
pre_delete_collection)
return store
|
@classmethod
def from_existing_index(cls: Type[PGEmbedding], embedding: Embeddings,
collection_name: str=_LANGCHAIN_DEFAULT_COLLECTION_NAME,
pre_delete_collection: bool=False, **kwargs: Any) ->PGEmbedding:
connection_string = cls.get_connection_string(kwargs)
store = cls(connection_string=connection_string, collection_name=
collection_name, embedding_function=embedding,
pre_delete_collection=pre_delete_collection)
return store
| null |
_format_request_payload
|
chat_messages = [LlamaContentFormatter._convert_message_to_dict(message) for
message in messages]
prompt = json.dumps({'input_data': {'input_string': chat_messages,
'parameters': model_kwargs}})
return self.format_request_payload(prompt=prompt, model_kwargs=model_kwargs)
|
def _format_request_payload(self, messages: List[BaseMessage], model_kwargs:
Dict) ->bytes:
chat_messages = [LlamaContentFormatter._convert_message_to_dict(message
) for message in messages]
prompt = json.dumps({'input_data': {'input_string': chat_messages,
'parameters': model_kwargs}})
return self.format_request_payload(prompt=prompt, model_kwargs=model_kwargs
)
| null |
_make_request_body_for_models
|
"""Make the request body for generate/retrieve models endpoint"""
_model_kwargs = self.model_kwargs or {}
_params = {**_model_kwargs, **kwargs}
filters = [DALMFilter(**f) for f in _params.get('filters', [])]
return dict(model_id=self.model_id, query=prompt, size=_params.get('size',
3), filters=filters, id=self.model_id)
|
def _make_request_body_for_models(self, prompt: str, **kwargs: Mapping[str,
Any]) ->Mapping[str, Any]:
"""Make the request body for generate/retrieve models endpoint"""
_model_kwargs = self.model_kwargs or {}
_params = {**_model_kwargs, **kwargs}
filters = [DALMFilter(**f) for f in _params.get('filters', [])]
return dict(model_id=self.model_id, query=prompt, size=_params.get(
'size', 3), filters=filters, id=self.model_id)
|
Make the request body for generate/retrieve models endpoint
|
_type
|
return 'structured_chat_with_retries'
|
@property
def _type(self) ->str:
return 'structured_chat_with_retries'
| null |
_fstring_Constant
|
assert isinstance(t.value, str)
value = t.value.replace('{', '{{').replace('}', '}}')
write(value)
|
def _fstring_Constant(self, t, write):
assert isinstance(t.value, str)
value = t.value.replace('{', '{{').replace('}', '}}')
write(value)
| null |
on_chain_error
|
"""Run when chain errors."""
self.step += 1
self.errors += 1
|
def on_chain_error(self, error: BaseException, **kwargs: Any) ->None:
"""Run when chain errors."""
self.step += 1
self.errors += 1
|
Run when chain errors.
|
search
|
"""Search for document.
If page exists, return the page summary, and a Document object.
If page does not exist, return similar entries.
"""
|
@abstractmethod
def search(self, search: str) ->Union[str, Document]:
"""Search for document.
If page exists, return the page summary, and a Document object.
If page does not exist, return similar entries.
"""
|
Search for document.
If page exists, return the page summary, and a Document object.
If page does not exist, return similar entries.
|
random_string
|
return str(uuid.uuid4())
|
def random_string() ->str:
return str(uuid.uuid4())
| null |
test_api_key_masked_when_passed_via_constructor
|
gpt_router = GPTRouter(gpt_router_api_base='https://example.com',
gpt_router_api_key='secret-api-key')
print(gpt_router.gpt_router_api_key, end='')
captured = capsys.readouterr()
assert captured.out == '**********'
|
def test_api_key_masked_when_passed_via_constructor(capsys: CaptureFixture
) ->None:
gpt_router = GPTRouter(gpt_router_api_base='https://example.com',
gpt_router_api_key='secret-api-key')
print(gpt_router.gpt_router_api_key, end='')
captured = capsys.readouterr()
assert captured.out == '**********'
| null |
scrape
|
"""Scrape data from webpage and return it in BeautifulSoup format."""
if parser is None:
parser = self.default_parser
return self._scrape(self.web_path, parser=parser, bs_kwargs=self.bs_kwargs)
|
def scrape(self, parser: Union[str, None]=None) ->Any:
"""Scrape data from webpage and return it in BeautifulSoup format."""
if parser is None:
parser = self.default_parser
return self._scrape(self.web_path, parser=parser, bs_kwargs=self.bs_kwargs)
|
Scrape data from webpage and return it in BeautifulSoup format.
|
_llm_type
|
"""Return type of llm."""
return 'fake'
|
@property
def _llm_type(self) ->str:
"""Return type of llm."""
return 'fake'
|
Return type of llm.
|
output_keys
|
return [self.output_key]
|
@property
def output_keys(self) ->List[str]:
return [self.output_key]
| null |
test_api_key_is_secret_string
|
llm = Nebula(nebula_api_key='secret-api-key')
assert isinstance(llm.nebula_api_key, SecretStr)
assert llm.nebula_api_key.get_secret_value() == 'secret-api-key'
|
def test_api_key_is_secret_string() ->None:
llm = Nebula(nebula_api_key='secret-api-key')
assert isinstance(llm.nebula_api_key, SecretStr)
assert llm.nebula_api_key.get_secret_value() == 'secret-api-key'
| null |
exists
|
return self.redis_client.exists(f'{self.full_key_prefix}:{key}') == 1
|
def exists(self, key: str) ->bool:
return self.redis_client.exists(f'{self.full_key_prefix}:{key}') == 1
| null |
_import_office365_messages_search
|
from langchain_community.tools.office365.messages_search import O365SearchEmails
return O365SearchEmails
|
def _import_office365_messages_search() ->Any:
from langchain_community.tools.office365.messages_search import O365SearchEmails
return O365SearchEmails
| null |
_dependable_tweepy_import
|
try:
import tweepy
except ImportError:
raise ImportError(
'tweepy package not found, please install it with `pip install tweepy`'
)
return tweepy
|
def _dependable_tweepy_import() ->tweepy:
try:
import tweepy
except ImportError:
raise ImportError(
'tweepy package not found, please install it with `pip install tweepy`'
)
return tweepy
| null |
tool_func
|
"""Return the arguments directly."""
return f'{arg1} {arg2} {arg3}'
|
@tool(args_schema=_MockSchema)
def tool_func(arg1: int, arg2: bool, arg3: Optional[dict]=None) ->str:
"""Return the arguments directly."""
return f'{arg1} {arg2} {arg3}'
|
Return the arguments directly.
|
lazy_parse
|
"""Lazily parse the blob."""
if not self.extract_images:
from pdfminer.high_level import extract_text
with blob.as_bytes_io() as pdf_file_obj:
if self.concatenate_pages:
text = extract_text(pdf_file_obj)
metadata = {'source': blob.source}
yield Document(page_content=text, metadata=metadata)
else:
from pdfminer.pdfpage import PDFPage
pages = PDFPage.get_pages(pdf_file_obj)
for i, _ in enumerate(pages):
text = extract_text(pdf_file_obj, page_numbers=[i])
metadata = {'source': blob.source, 'page': str(i)}
yield Document(page_content=text, metadata=metadata)
else:
import io
from pdfminer.converter import PDFPageAggregator, TextConverter
from pdfminer.layout import LAParams
from pdfminer.pdfinterp import PDFPageInterpreter, PDFResourceManager
from pdfminer.pdfpage import PDFPage
text_io = io.StringIO()
with blob.as_bytes_io() as pdf_file_obj:
pages = PDFPage.get_pages(pdf_file_obj)
rsrcmgr = PDFResourceManager()
device_for_text = TextConverter(rsrcmgr, text_io, laparams=LAParams())
device_for_image = PDFPageAggregator(rsrcmgr, laparams=LAParams())
interpreter_for_text = PDFPageInterpreter(rsrcmgr, device_for_text)
interpreter_for_image = PDFPageInterpreter(rsrcmgr, device_for_image)
for i, page in enumerate(pages):
interpreter_for_text.process_page(page)
interpreter_for_image.process_page(page)
content = text_io.getvalue() + self._extract_images_from_page(
device_for_image.get_result())
text_io.truncate(0)
text_io.seek(0)
metadata = {'source': blob.source, 'page': str(i)}
yield Document(page_content=content, metadata=metadata)
|
def lazy_parse(self, blob: Blob) ->Iterator[Document]:
"""Lazily parse the blob."""
if not self.extract_images:
from pdfminer.high_level import extract_text
with blob.as_bytes_io() as pdf_file_obj:
if self.concatenate_pages:
text = extract_text(pdf_file_obj)
metadata = {'source': blob.source}
yield Document(page_content=text, metadata=metadata)
else:
from pdfminer.pdfpage import PDFPage
pages = PDFPage.get_pages(pdf_file_obj)
for i, _ in enumerate(pages):
text = extract_text(pdf_file_obj, page_numbers=[i])
metadata = {'source': blob.source, 'page': str(i)}
yield Document(page_content=text, metadata=metadata)
else:
import io
from pdfminer.converter import PDFPageAggregator, TextConverter
from pdfminer.layout import LAParams
from pdfminer.pdfinterp import PDFPageInterpreter, PDFResourceManager
from pdfminer.pdfpage import PDFPage
text_io = io.StringIO()
with blob.as_bytes_io() as pdf_file_obj:
pages = PDFPage.get_pages(pdf_file_obj)
rsrcmgr = PDFResourceManager()
device_for_text = TextConverter(rsrcmgr, text_io, laparams=
LAParams())
device_for_image = PDFPageAggregator(rsrcmgr, laparams=LAParams())
interpreter_for_text = PDFPageInterpreter(rsrcmgr, device_for_text)
interpreter_for_image = PDFPageInterpreter(rsrcmgr,
device_for_image)
for i, page in enumerate(pages):
interpreter_for_text.process_page(page)
interpreter_for_image.process_page(page)
content = text_io.getvalue() + self._extract_images_from_page(
device_for_image.get_result())
text_io.truncate(0)
text_io.seek(0)
metadata = {'source': blob.source, 'page': str(i)}
yield Document(page_content=content, metadata=metadata)
|
Lazily parse the blob.
|
validate_environment
|
"""Validate that the python package exists in the environment."""
try:
import tokenizers
except ImportError:
raise ImportError(
'Could not import tokenizers python package. Please install it with `pip install tokenizers`.'
)
try:
from rwkv.model import RWKV as RWKVMODEL
from rwkv.utils import PIPELINE
values['tokenizer'] = tokenizers.Tokenizer.from_file(values['tokens_path'])
rwkv_keys = cls._rwkv_param_names()
model_kwargs = {k: v for k, v in values.items() if k in rwkv_keys}
model_kwargs['verbose'] = values['rwkv_verbose']
values['client'] = RWKVMODEL(values['model'], strategy=values[
'strategy'], **model_kwargs)
values['pipeline'] = PIPELINE(values['client'], values['tokens_path'])
except ImportError:
raise ImportError(
'Could not import rwkv python package. Please install it with `pip install rwkv`.'
)
return values
|
@root_validator()
def validate_environment(cls, values: Dict) ->Dict:
"""Validate that the python package exists in the environment."""
try:
import tokenizers
except ImportError:
raise ImportError(
'Could not import tokenizers python package. Please install it with `pip install tokenizers`.'
)
try:
from rwkv.model import RWKV as RWKVMODEL
from rwkv.utils import PIPELINE
values['tokenizer'] = tokenizers.Tokenizer.from_file(values[
'tokens_path'])
rwkv_keys = cls._rwkv_param_names()
model_kwargs = {k: v for k, v in values.items() if k in rwkv_keys}
model_kwargs['verbose'] = values['rwkv_verbose']
values['client'] = RWKVMODEL(values['model'], strategy=values[
'strategy'], **model_kwargs)
values['pipeline'] = PIPELINE(values['client'], values['tokens_path'])
except ImportError:
raise ImportError(
'Could not import rwkv python package. Please install it with `pip install rwkv`.'
)
return values
|
Validate that the python package exists in the environment.
|
_import_vertex
|
from langchain_community.llms.vertexai import VertexAI
return VertexAI
|
def _import_vertex() ->Any:
from langchain_community.llms.vertexai import VertexAI
return VertexAI
| null |
_on_llm_start
|
"""Persist an LLM run."""
if run.parent_run_id is None:
run.reference_example_id = self.example_id
self._submit(self._persist_run_single, _copy(run))
|
def _on_llm_start(self, run: Run) ->None:
"""Persist an LLM run."""
if run.parent_run_id is None:
run.reference_example_id = self.example_id
self._submit(self._persist_run_single, _copy(run))
|
Persist an LLM run.
|
similarity_search
|
"""Return documents most similar to the query
Args:
query: String to query the vectorstore with.
k: Number of documents to return.
Returns:
List of documents most similar to the query.
"""
embedding = self._embedding.embed_query(query)
docs = self._connection.search(embedding).limit(k).to_df()
return [Document(page_content=row[self._text_key], metadata=row[docs.
columns != self._text_key]) for _, row in docs.iterrows()]
|
def similarity_search(self, query: str, k: int=4, **kwargs: Any) ->List[
Document]:
"""Return documents most similar to the query
Args:
query: String to query the vectorstore with.
k: Number of documents to return.
Returns:
List of documents most similar to the query.
"""
embedding = self._embedding.embed_query(query)
docs = self._connection.search(embedding).limit(k).to_df()
return [Document(page_content=row[self._text_key], metadata=row[docs.
columns != self._text_key]) for _, row in docs.iterrows()]
|
Return documents most similar to the query
Args:
query: String to query the vectorstore with.
k: Number of documents to return.
Returns:
List of documents most similar to the query.
|
check_compatibility
|
"""Check if a vectorstore is compatible with the indexing API."""
methods = ['delete', 'add_documents']
for method in methods:
if not hasattr(vector_store, method):
return False
if getattr(vector_store, 'delete') == VectorStore.delete:
return False
return True
|
def check_compatibility(vector_store: VectorStore) ->bool:
"""Check if a vectorstore is compatible with the indexing API."""
methods = ['delete', 'add_documents']
for method in methods:
if not hasattr(vector_store, method):
return False
if getattr(vector_store, 'delete') == VectorStore.delete:
return False
return True
|
Check if a vectorstore is compatible with the indexing API.
|
test_python_text_splitter
|
splitter = PythonCodeTextSplitter(chunk_size=30, chunk_overlap=0)
splits = splitter.split_text(FAKE_PYTHON_TEXT)
split_0 = """class Foo:
def bar():"""
split_1 = 'def foo():'
split_2 = 'def testing_func():'
split_3 = 'def bar():'
expected_splits = [split_0, split_1, split_2, split_3]
assert splits == expected_splits
|
def test_python_text_splitter() ->None:
splitter = PythonCodeTextSplitter(chunk_size=30, chunk_overlap=0)
splits = splitter.split_text(FAKE_PYTHON_TEXT)
split_0 = """class Foo:
def bar():"""
split_1 = 'def foo():'
split_2 = 'def testing_func():'
split_3 = 'def bar():'
expected_splits = [split_0, split_1, split_2, split_3]
assert splits == expected_splits
| null |
_evaluate_string_pairs
|
"""Evaluate whether output A is preferred to output B.
Args:
prediction (str): The output string from the first model.
prediction_b (str): The output string from the second model.
input (str, optional): The input or task string.
callbacks (Callbacks, optional): The callbacks to use.
reference (str, optional): The reference string, if any.
**kwargs (Any): Additional keyword arguments.
Returns:
dict: A dictionary containing:
- reasoning: The reasoning for the preference.
- value: The preference value, which is either 'A', 'B', or None
for no preference.
- score: The preference score, which is 1 for 'A', 0 for 'B',
and 0.5 for None.
"""
input_ = self._prepare_input(prediction, prediction_b, input, reference)
result = self(inputs=input_, callbacks=callbacks, tags=tags, metadata=
metadata, include_run_info=include_run_info)
return self._prepare_output(result)
|
def _evaluate_string_pairs(self, *, prediction: str, prediction_b: str,
    input: Optional[str]=None, reference: Optional[str]=None, callbacks:
    Callbacks=None, tags: Optional[List[str]]=None, metadata: Optional[Dict
    [str, Any]]=None, include_run_info: bool=False, **kwargs: Any) ->dict:
    """Evaluate whether output A is preferred to output B.

    Args:
        prediction (str): The output string from the first model.
        prediction_b (str): The output string from the second model.
        input (str, optional): The input or task string.
        callbacks (Callbacks, optional): The callbacks to use.
        reference (str, optional): The reference string, if any.
        **kwargs (Any): Additional keyword arguments.

    Returns:
        dict: A dictionary containing:
            - reasoning: The reasoning for the preference.
            - value: The preference value ('A', 'B', or None for no
              preference).
            - score: 1 for 'A', 0 for 'B', 0.5 for None.
    """
    # Build the chain inputs, run the chain, then normalise its output.
    chain_inputs = self._prepare_input(prediction, prediction_b, input,
        reference)
    raw_result = self(inputs=chain_inputs, callbacks=callbacks, tags=tags,
        metadata=metadata, include_run_info=include_run_info)
    return self._prepare_output(raw_result)
|
Evaluate whether output A is preferred to output B.
Args:
prediction (str): The output string from the first model.
prediction_b (str): The output string from the second model.
input (str, optional): The input or task string.
callbacks (Callbacks, optional): The callbacks to use.
reference (str, optional): The reference string, if any.
**kwargs (Any): Additional keyword arguments.
Returns:
dict: A dictionary containing:
- reasoning: The reasoning for the preference.
- value: The preference value, which is either 'A', 'B', or None
for no preference.
- score: The preference score, which is 1 for 'A', 0 for 'B',
and 0.5 for None.
|
_keyword
|
if t.arg is None:
self.write('**')
else:
self.write(t.arg)
self.write('=')
self.dispatch(t.value)
|
def _keyword(self, t):
    """Emit a keyword-argument node: ``**value`` or ``name=value``."""
    if t.arg is not None:
        self.write(t.arg)
        self.write('=')
    else:
        # A ``None`` arg denotes a ``**kwargs`` expansion.
        self.write('**')
    self.dispatch(t.value)
| null |
loader
|
nonlocal file_contents
assert file_contents is None
file_contents = Path(file_path).read_text()
|
def loader(file_path: str) ->None:
    """Test double: record the file's text in the enclosing scope.

    Asserts it is invoked at most once (``file_contents`` must still be
    ``None``), then stores the file's contents for later inspection.
    """
    nonlocal file_contents
    assert file_contents is None
    file_contents = Path(file_path).read_text()
| null |
test_json_distance_evaluator_evaluate_strings_list_diff_length
|
prediction = '[{"a": 1, "b": 2}, {"a": 2, "b": 3}]'
reference = '[{"a": 1, "b": 2}]'
result = json_distance_evaluator._evaluate_strings(prediction=prediction,
reference=reference)
pytest.approx(len('{"a":2,"b":3}') / len(reference.replace(' ', '')),
result['score'])
|
@pytest.mark.requires('rapidfuzz')
def test_json_distance_evaluator_evaluate_strings_list_diff_length(
    json_distance_evaluator: JsonEditDistanceEvaluator) ->None:
    """Distance between lists of unequal length reflects the extra element."""
    pred = '[{"a": 1, "b": 2}, {"a": 2, "b": 3}]'
    ref = '[{"a": 1, "b": 2}]'
    outcome = json_distance_evaluator._evaluate_strings(prediction=pred,
        reference=ref)
    expected = len('{"a":2,"b":3}') / len(ref.replace(' ', ''))
    pytest.approx(expected, outcome['score'])
| null |
_call_after_llm_before_scoring
|
next_chain_inputs = event.inputs.copy()
value = next(iter(event.to_select_from.values()))
v = value[event.selected.index
] if event.selected else event.to_select_from.values()
next_chain_inputs.update({self.selected_based_on_input_key: str(event.
based_on), self.selected_input_key: v})
return next_chain_inputs, event
|
def _call_after_llm_before_scoring(self, llm_response: str, event:
    PickBestEvent) ->Tuple[Dict[str, Any], PickBestEvent]:
    # Prepare inputs for the next chain step once the LLM has responded:
    # expose what the selection was based on and the selected value itself.
    next_chain_inputs = event.inputs.copy()
    # Only the first candidate list is consulted. NOTE(review): when no
    # selection was made, `v` falls back to the dict *values view* rather
    # than a single candidate — confirm downstream handles both shapes.
    value = next(iter(event.to_select_from.values()))
    v = value[event.selected.index
        ] if event.selected else event.to_select_from.values()
    next_chain_inputs.update({self.selected_based_on_input_key: str(event.
        based_on), self.selected_input_key: v})
    return next_chain_inputs, event
| null |
get_connection_string
|
connection_string: str = get_from_dict_or_env(data=kwargs, key=
'connection_string', env_key='PG_CONNECTION_STRING')
if not connection_string:
raise ValueError(
'Postgres connection string is requiredEither pass it as a parameteror set the PG_CONNECTION_STRING environment variable.'
)
return connection_string
|
@classmethod
def get_connection_string(cls, kwargs: Dict[str, Any]) ->str:
    """Resolve the Postgres connection string.

    Looks up ``connection_string`` in ``kwargs`` first, then falls back
    to the ``PG_CONNECTION_STRING`` environment variable.

    Raises:
        ValueError: If neither source provides a non-empty value.
    """
    connection_string: str = get_from_dict_or_env(data=kwargs, key=
        'connection_string', env_key='PG_CONNECTION_STRING')
    if not connection_string:
        # Bug fix: the original message ran words together
        # ("requiredEither", "parameteror").
        raise ValueError(
            'Postgres connection string is required. Either pass it as a parameter or set the PG_CONNECTION_STRING environment variable.'
            )
    return connection_string
| null |
get_lc_namespace
|
"""Get the namespace of the langchain object."""
return ['langchain', 'schema', 'messages']
|
@classmethod
def get_lc_namespace(cls) ->List[str]:
    """Return the namespace identifying this object within langchain."""
    return 'langchain schema messages'.split()
|
Get the namespace of the langchain object.
|
test_tiledb_mmr_with_metadatas_and_list_filter
|
texts = ['foo', 'fou', 'foy', 'foo']
metadatas = [{'page': i} for i in range(len(texts))]
docsearch = TileDB.from_texts(texts=texts, metadatas=metadatas, embedding=
ConsistentFakeEmbeddings(), index_uri=f'{str(tmp_path)}/flat',
index_type='FLAT')
query_vec = ConsistentFakeEmbeddings().embed_query(text='foo')
output = docsearch.max_marginal_relevance_search_with_score_by_vector(query_vec
, k=3, lambda_mult=0.1, filter={'page': [0, 1, 2]})
assert len(output) == 3
assert output[0][0] == Document(page_content='foo', metadata={'page': 0})
assert output[0][1] == 0.0
assert output[1][0] != Document(page_content='foo', metadata={'page': 0})
assert output[2][0] != Document(page_content='foo', metadata={'page': 0})
docsearch = TileDB.from_texts(texts=texts, metadatas=metadatas, embedding=
ConsistentFakeEmbeddings(), index_uri=f'{str(tmp_path)}/ivf_flat',
index_type='IVF_FLAT')
query_vec = ConsistentFakeEmbeddings().embed_query(text='foo')
output = docsearch.max_marginal_relevance_search_with_score_by_vector(query_vec
, k=3, lambda_mult=0.1, filter={'page': [0, 1, 2]}, nprobe=docsearch.
vector_index.partitions)
assert len(output) == 3
assert output[0][0] == Document(page_content='foo', metadata={'page': 0})
assert output[0][1] == 0.0
assert output[1][0] != Document(page_content='foo', metadata={'page': 0})
assert output[2][0] != Document(page_content='foo', metadata={'page': 0})
|
@pytest.mark.requires('tiledb-vector-search')
def test_tiledb_mmr_with_metadatas_and_list_filter(tmp_path: Path) ->None:
    """MMR with a list-valued metadata filter works for FLAT and IVF_FLAT."""
    texts = ['foo', 'fou', 'foy', 'foo']
    metadatas = [{'page': i} for i in range(len(texts))]
    query_vec = ConsistentFakeEmbeddings().embed_query(text='foo')
    best = Document(page_content='foo', metadata={'page': 0})
    for index_type in ('FLAT', 'IVF_FLAT'):
        store = TileDB.from_texts(texts=texts, metadatas=metadatas,
            embedding=ConsistentFakeEmbeddings(), index_uri=
            f'{str(tmp_path)}/{index_type.lower()}', index_type=index_type)
        # IVF_FLAT additionally needs nprobe to cover every partition.
        extra = {} if index_type == 'FLAT' else {'nprobe': store.
            vector_index.partitions}
        output = store.max_marginal_relevance_search_with_score_by_vector(
            query_vec, k=3, lambda_mult=0.1, filter={'page': [0, 1, 2]},
            **extra)
        assert len(output) == 3
        assert output[0][0] == best
        assert output[0][1] == 0.0
        assert output[1][0] != best
        assert output[2][0] != best
| null |
test_bittensor_call
|
"""Test valid call to validator endpoint."""
llm = NIBittensorLLM(system_prompt='Your task is to answer user prompt.')
output = llm('Say foo:')
assert isinstance(output, str)
|
def test_bittensor_call() ->None:
    """Test valid call to validator endpoint."""
    model = NIBittensorLLM(system_prompt='Your task is to answer user prompt.')
    result = model('Say foo:')
    assert isinstance(result, str)
|
Test valid call to validator endpoint.
|
validate_environments
|
"""Validate Arcee environment variables."""
values['arcee_api_key'] = convert_to_secret_str(get_from_dict_or_env(values,
'arcee_api_key', 'ARCEE_API_KEY'))
values['arcee_api_url'] = get_from_dict_or_env(values, 'arcee_api_url',
'ARCEE_API_URL')
values['arcee_app_url'] = get_from_dict_or_env(values, 'arcee_app_url',
'ARCEE_APP_URL')
values['arcee_api_version'] = get_from_dict_or_env(values,
'arcee_api_version', 'ARCEE_API_VERSION')
if values.get('model_kwargs'):
kw = values['model_kwargs']
if kw.get('size') is not None:
if not kw.get('size') >= 0:
raise ValueError('`size` must be positive')
if kw.get('filters') is not None:
if not isinstance(kw.get('filters'), List):
raise ValueError('`filters` must be a list')
for f in kw.get('filters'):
DALMFilter(**f)
return values
|
@root_validator(pre=False)
def validate_environments(cls, values: Dict) ->Dict:
    """Validate Arcee environment variables."""
    # The API key is wrapped as a secret; the other settings are plain.
    values['arcee_api_key'] = convert_to_secret_str(get_from_dict_or_env(
        values, 'arcee_api_key', 'ARCEE_API_KEY'))
    for field, env_key in (('arcee_api_url', 'ARCEE_API_URL'), (
        'arcee_app_url', 'ARCEE_APP_URL'), ('arcee_api_version',
        'ARCEE_API_VERSION')):
        values[field] = get_from_dict_or_env(values, field, env_key)
    model_kwargs = values.get('model_kwargs')
    if model_kwargs:
        size = model_kwargs.get('size')
        if size is not None and not size >= 0:
            raise ValueError('`size` must be positive')
        filters = model_kwargs.get('filters')
        if filters is not None:
            if not isinstance(filters, List):
                raise ValueError('`filters` must be a list')
            # Each filter dict must parse as a DALMFilter.
            for item in filters:
                DALMFilter(**item)
    return values
|
Validate Arcee environment variables.
|
_get_embedding_collection_store
|
global _classes
if _classes is not None:
return _classes
from pgvector.sqlalchemy import Vector
class CollectionStore(BaseModel):
"""Collection store."""
__tablename__ = 'langchain_pg_collection'
name = sqlalchemy.Column(sqlalchemy.String)
cmetadata = sqlalchemy.Column(JSON)
embeddings = relationship('EmbeddingStore', back_populates='collection',
passive_deletes=True)
@classmethod
def get_by_name(cls, session: Session, name: str) ->Optional[
'CollectionStore']:
return session.query(cls).filter(cls.name == name).first()
@classmethod
def get_or_create(cls, session: Session, name: str, cmetadata: Optional
[dict]=None) ->Tuple['CollectionStore', bool]:
"""
Get or create a collection.
Returns [Collection, bool] where the bool is True if the collection was created.
"""
created = False
collection = cls.get_by_name(session, name)
if collection:
return collection, created
collection = cls(name=name, cmetadata=cmetadata)
session.add(collection)
session.commit()
created = True
return collection, created
class EmbeddingStore(BaseModel):
"""Embedding store."""
__tablename__ = 'langchain_pg_embedding'
collection_id = sqlalchemy.Column(UUID(as_uuid=True), sqlalchemy.
ForeignKey(f'{CollectionStore.__tablename__}.uuid', ondelete='CASCADE')
)
collection = relationship(CollectionStore, back_populates='embeddings')
embedding: Vector = sqlalchemy.Column(Vector(None))
document = sqlalchemy.Column(sqlalchemy.String, nullable=True)
cmetadata = sqlalchemy.Column(JSON, nullable=True)
custom_id = sqlalchemy.Column(sqlalchemy.String, nullable=True)
_classes = EmbeddingStore, CollectionStore
return _classes
|
def _get_embedding_collection_store() ->Any:
    """Build (once) and return the SQLAlchemy ORM classes used by PGVector.

    Returns the ``(EmbeddingStore, CollectionStore)`` pair, creating the
    mapped classes on first call and caching them in the module-level
    ``_classes`` so the tables are never redefined.
    """
    global _classes
    if _classes is not None:
        return _classes
    from pgvector.sqlalchemy import Vector
    class CollectionStore(BaseModel):
        """Collection store."""
        __tablename__ = 'langchain_pg_collection'
        name = sqlalchemy.Column(sqlalchemy.String)
        cmetadata = sqlalchemy.Column(JSON)
        # Deleting a collection cascades to its embeddings at the DB level.
        embeddings = relationship('EmbeddingStore', back_populates=
            'collection', passive_deletes=True)
        @classmethod
        def get_by_name(cls, session: Session, name: str) ->Optional[
            'CollectionStore']:
            return session.query(cls).filter(cls.name == name).first()
        @classmethod
        def get_or_create(cls, session: Session, name: str, cmetadata:
            Optional[dict]=None) ->Tuple['CollectionStore', bool]:
            """
            Get or create a collection.
            Returns [Collection, bool] where the bool is True if the collection was created.
            """
            created = False
            collection = cls.get_by_name(session, name)
            if collection:
                return collection, created
            collection = cls(name=name, cmetadata=cmetadata)
            session.add(collection)
            session.commit()
            created = True
            return collection, created
    class EmbeddingStore(BaseModel):
        """Embedding store."""
        __tablename__ = 'langchain_pg_embedding'
        collection_id = sqlalchemy.Column(UUID(as_uuid=True), sqlalchemy.
            ForeignKey(f'{CollectionStore.__tablename__}.uuid', ondelete=
            'CASCADE'))
        collection = relationship(CollectionStore, back_populates='embeddings')
        # Dimension is unspecified (Vector(None)) so any embedding size fits.
        embedding: Vector = sqlalchemy.Column(Vector(None))
        document = sqlalchemy.Column(sqlalchemy.String, nullable=True)
        cmetadata = sqlalchemy.Column(JSON, nullable=True)
        custom_id = sqlalchemy.Column(sqlalchemy.String, nullable=True)
    _classes = EmbeddingStore, CollectionStore
    return _classes
| null |
test_langchain_together_embedding_query
|
"""Test cohere embeddings."""
document = 'foo bar'
embedding = TogetherEmbeddings(model=
'togethercomputer/m2-bert-80M-8k-retrieval')
output = embedding.embed_query(document)
assert len(output) > 0
|
def test_langchain_together_embedding_query() ->None:
    """Test Together embeddings for a single query string."""
    document = 'foo bar'
    embedding = TogetherEmbeddings(model=
        'togethercomputer/m2-bert-80M-8k-retrieval')
    output = embedding.embed_query(document)
    assert len(output) > 0
|
Test Together embeddings.
|
_make_session
|
"""Create a context manager for the session, bind to _conn string."""
yield Session(self._bind)
|
@contextlib.contextmanager
def _make_session(self) ->Generator[Session, None, None]:
    """Create a context manager for the session, bind to _conn string.

    NOTE(review): the session is not closed or rolled back on exit;
    callers appear responsible for cleanup — confirm against call sites.
    """
    yield Session(self._bind)
|
Create a context manager for the session, bind to _conn string.
|
test_ai_message_first
|
with pytest.raises(ValueError) as info:
model.predict_messages([AIMessage(content='ai-msg-1')])
assert info.value.args[0
] == 'messages list must start with a SystemMessage or UserMessage'
|
def test_ai_message_first(model: Llama2Chat) ->None:
    """An AI message cannot open the conversation."""
    expected = 'messages list must start with a SystemMessage or UserMessage'
    with pytest.raises(ValueError) as info:
        model.predict_messages([AIMessage(content='ai-msg-1')])
    assert info.value.args[0] == expected
| null |
process_value
|
"""Convert a value to a string and add double quotes if it is a string.
It required for comparators involving strings.
Args:
value: The value to convert.
Returns:
The converted value as a string.
"""
if isinstance(value, str):
return f'"{value}"'
else:
return str(value)
|
def process_value(value: Union[int, float, str]) ->str:
    """Render *value* as a string, double-quoting it when it is text.

    String operands must be quoted so comparators that operate on strings
    receive a well-formed literal.

    Args:
        value: The value to convert.

    Returns:
        The converted value as a string.
    """
    return f'"{value}"' if isinstance(value, str) else str(value)
|
Convert a value to a string and add double quotes if it is a string.
It required for comparators involving strings.
Args:
value: The value to convert.
Returns:
The converted value as a string.
|
_on_chain_end
|
"""Process the Chain Run."""
self._process_end_trace(run)
|
def _on_chain_end(self, run: 'Run') ->None:
    """Process the Chain Run."""
    # Delegate to the shared end-of-trace handler used by all run types.
    self._process_end_trace(run)
|
Process the Chain Run.
|
_call
|
"""Call out to Amazon API Gateway model.
Args:
prompt: The prompt to pass into the model.
stop: Optional list of stop words to use when generating.
Returns:
The string generated by the model.
Example:
.. code-block:: python
response = se("Tell me a joke.")
"""
_model_kwargs = self.model_kwargs or {}
payload = self.content_handler.transform_input(prompt, _model_kwargs)
try:
response = requests.post(self.api_url, headers=self.headers, json=payload)
text = self.content_handler.transform_output(response)
except Exception as error:
raise ValueError(f'Error raised by the service: {error}')
if stop is not None:
text = enforce_stop_tokens(text, stop)
return text
|
def _call(self, prompt: str, stop: Optional[List[str]]=None, run_manager:
    Optional[CallbackManagerForLLMRun]=None, **kwargs: Any) ->str:
    """Call out to Amazon API Gateway model.

    Args:
        prompt: The prompt to pass into the model.
        stop: Optional list of stop words to use when generating.

    Returns:
        The string generated by the model.

    Raises:
        ValueError: If the request or response transformation fails.

    Example:
        .. code-block:: python

            response = se("Tell me a joke.")
    """
    _model_kwargs = self.model_kwargs or {}
    payload = self.content_handler.transform_input(prompt, _model_kwargs)
    try:
        response = requests.post(self.api_url, headers=self.headers, json=
            payload)
        text = self.content_handler.transform_output(response)
    except Exception as error:
        # Improvement: chain the original exception so the root cause is
        # preserved in the traceback.
        raise ValueError(f'Error raised by the service: {error}') from error
    if stop is not None:
        # Truncate the completion at the first stop sequence.
        text = enforce_stop_tokens(text, stop)
    return text
|
Call out to Amazon API Gateway model.
Args:
prompt: The prompt to pass into the model.
stop: Optional list of stop words to use when generating.
Returns:
The string generated by the model.
Example:
.. code-block:: python
response = se("Tell me a joke.")
|
requires_reference
|
"""
This evaluator requires a reference.
"""
return True
|
@property
def requires_reference(self) ->bool:
    """Whether this evaluator needs a reference string (it always does)."""
    return True
|
This evaluator requires a reference.
|
split_text
|
"""Split text into multiple components."""
|
@abstractmethod
def split_text(self, text: str) ->List[str]:
    """Split text into multiple components.

    Args:
        text: The input text to split.

    Returns:
        A list of text chunks.
    """
|
Split text into multiple components.
|
on_llm_start
|
if self.__has_valid_config is False:
return
try:
user_id = _get_user_id(metadata)
user_props = _get_user_props(metadata)
params = kwargs.get('invocation_params', {})
params.update(serialized.get('kwargs', {}))
name = params.get('model') or params.get('model_name') or params.get(
'model_id')
if not name and 'anthropic' in params.get('_type'):
name = 'claude-2'
extra = {param: params.get(param) for param in PARAMS_TO_CAPTURE if
params.get(param) is not None}
input = _parse_input(prompts)
self.__track_event('llm', 'start', user_id=user_id, run_id=str(run_id),
parent_run_id=str(parent_run_id) if parent_run_id else None, name=
name, input=input, tags=tags, extra=extra, metadata=metadata,
user_props=user_props, app_id=self.__app_id)
except Exception as e:
warnings.warn(f'[LLMonitor] An error occurred in on_llm_start: {e}')
|
def on_llm_start(self, serialized: Dict[str, Any], prompts: List[str], *,
    run_id: UUID, parent_run_id: Union[UUID, None]=None, tags: Union[List[
    str], None]=None, metadata: Union[Dict[str, Any], None]=None, **kwargs: Any
    ) ->None:
    """Track the start of an LLM run with LLMonitor.

    Best-effort: any failure is reported as a warning instead of
    propagating into the traced application.
    """
    if self.__has_valid_config is False:
        return
    try:
        user_id = _get_user_id(metadata)
        user_props = _get_user_props(metadata)
        params = kwargs.get('invocation_params', {})
        params.update(serialized.get('kwargs', {}))
        name = params.get('model') or params.get('model_name') or params.get(
            'model_id')
        # Bug fix: `_type` may be absent, and `'anthropic' in None` raised
        # a TypeError that silently dropped the event with a warning.
        if not name and 'anthropic' in (params.get('_type') or ''):
            name = 'claude-2'
        extra = {param: params.get(param) for param in PARAMS_TO_CAPTURE if
            params.get(param) is not None}
        input = _parse_input(prompts)
        self.__track_event('llm', 'start', user_id=user_id, run_id=str(
            run_id), parent_run_id=str(parent_run_id) if parent_run_id else
            None, name=name, input=input, tags=tags, extra=extra, metadata=
            metadata, user_props=user_props, app_id=self.__app_id)
    except Exception as e:
        warnings.warn(f'[LLMonitor] An error occurred in on_llm_start: {e}')
| null |
_identifying_params
|
"""Get the identifying parameters."""
return {**{'model_kwargs': self.model_kwargs}}
|
@property
def _identifying_params(self) ->Mapping[str, Any]:
    """Get the identifying parameters."""
    # Idiom fix: a single-entry dict needs no `{**{...}}` spread.
    return {'model_kwargs': self.model_kwargs}
|
Get the identifying parameters.
|
query
|
result = self.execute(query, retry=retry)
columns = result.keys()
d: Dict[str, list] = {}
for col_num in range(result.col_size()):
col_name = columns[col_num]
col_list = result.column_values(col_name)
d[col_name] = [x.cast() for x in col_list]
return d
|
def query(self, query: str, retry: int=0) ->Dict[str, Any]:
    """Run a query and return its result as a column-name -> values mapping.

    Args:
        query: The query string to execute.
        retry: Number of retries to attempt on failure.

    Returns:
        Mapping from column name to the list of that column's row values,
        each converted to a native Python object via ``.cast()``.
    """
    result = self.execute(query, retry=retry)
    columns = result.keys()
    d: Dict[str, list] = {}
    # Transpose the row-oriented result into per-column value lists.
    for col_num in range(result.col_size()):
        col_name = columns[col_num]
        col_list = result.column_values(col_name)
        d[col_name] = [x.cast() for x in col_list]
    return d
| null |
similarity_search_with_score
|
"""Run similarity search with Chroma with distance.
Args:
query (str): Query text to search for.
k (int): Number of results to return. Defaults to 4.
filter (Optional[Dict[str, str]]): Filter by metadata. Defaults to None.
Returns:
List[Tuple[Document, float]]: List of documents most similar to
the query text and cosine distance in float for each.
Lower score represents more similarity.
"""
if self._embedding_function is None:
results = self.__query_collection(query_texts=[query], n_results=k,
where=filter, where_document=where_document, **kwargs)
else:
query_embedding = self._embedding_function.embed_query(query)
results = self.__query_collection(query_embeddings=[query_embedding],
n_results=k, where=filter, where_document=where_document, **kwargs)
return _results_to_docs_and_scores(results)
|
def similarity_search_with_score(self, query: str, k: int=DEFAULT_K, filter:
    Optional[Dict[str, str]]=None, where_document: Optional[Dict[str, str]]
    =None, **kwargs: Any) ->List[Tuple[Document, float]]:
    """Run similarity search with Chroma, returning cosine distances.

    Args:
        query (str): Query text to search for.
        k (int): Number of results to return. Defaults to 4.
        filter (Optional[Dict[str, str]]): Filter by metadata. Defaults to None.

    Returns:
        List[Tuple[Document, float]]: Documents most similar to the query
        text together with their cosine distance (lower means more
        similar).
    """
    if self._embedding_function is not None:
        # Embed locally, then search by vector.
        embedded = self._embedding_function.embed_query(query)
        raw = self.__query_collection(query_embeddings=[embedded],
            n_results=k, where=filter, where_document=where_document, **kwargs)
    else:
        # No embedding function: let Chroma embed the raw text itself.
        raw = self.__query_collection(query_texts=[query], n_results=k,
            where=filter, where_document=where_document, **kwargs)
    return _results_to_docs_and_scores(raw)
|
Run similarity search with Chroma with distance.
Args:
query (str): Query text to search for.
k (int): Number of results to return. Defaults to 4.
filter (Optional[Dict[str, str]]): Filter by metadata. Defaults to None.
Returns:
List[Tuple[Document, float]]: List of documents most similar to
the query text and cosine distance in float for each.
Lower score represents more similarity.
|
InputType
|
for step in self.steps.values():
if step.InputType:
return step.InputType
return Any
|
@property
def InputType(self) ->Any:
    """The input type of the first step declaring one, else ``Any``."""
    return next((s.InputType for s in self.steps.values() if s.InputType),
        Any)
| null |
lazy_parse
|
"""Parses a blob lazily.
Args:
blobs: a Blob to parse
This is a long-running operation. A recommended way is to batch
documents together and use the `batch_parse()` method.
"""
yield from self.batch_parse([blob], gcs_output_path=self._gcs_output_path)
|
def lazy_parse(self, blob: Blob) ->Iterator[Document]:
    """Parses a blob lazily.

    Args:
        blob: a Blob to parse

    This is a long-running operation. A recommended way is to batch
    documents together and use the `batch_parse()` method.
    """
    yield from self.batch_parse([blob], gcs_output_path=self._gcs_output_path)
|
Parses a blob lazily.
Args:
blobs: a Blob to parse
This is a long-running operation. A recommended way is to batch
documents together and use the `batch_parse()` method.
|
foo
|
"""Docstring
Args:
bar: str
"""
assert callbacks is not None
return 'foo' + bar
|
def foo(bar: str, callbacks: Optional[CallbackManagerForToolRun]=None) ->str:
    """Docstring
    Args:
        bar: str
    """
    # NOTE(review): the enclosing test may assert on this docstring, so it is
    # left untouched. The assert verifies the tool runner injected callbacks.
    assert callbacks is not None
    return 'foo' + bar
|
Docstring
Args:
bar: str
|
lazy_load
|
"""Lazy load documents"""
if self.web_path and self._is_s3_url(self.web_path):
blob = Blob(path=self.web_path)
else:
blob = Blob.from_path(self.file_path)
if AmazonTextractPDFLoader._get_number_of_pages(blob) > 1:
raise ValueError(
f'the file {blob.path} is a multi-page document, but not stored on S3. Textract requires multi-page documents to be on S3.'
)
yield from self.parser.parse_folder(blob)
|
def lazy_load(self) ->Iterator[Document]:
    """Lazily load the document(s), deferring the Textract call.

    Yields:
        Parsed ``Document`` objects from the underlying parser.

    Raises:
        ValueError: If a local (non-S3) file has more than one page;
            Textract only accepts multi-page documents from S3.
    """
    if self.web_path and self._is_s3_url(self.web_path):
        # S3 objects can be handed to Textract directly, multi-page or not.
        blob = Blob(path=self.web_path)
    else:
        blob = Blob.from_path(self.file_path)
        # Bug fix: the page-count guard must apply only to local files; the
        # original code also raised for multi-page documents already stored
        # on S3, contradicting its own error message.
        if AmazonTextractPDFLoader._get_number_of_pages(blob) > 1:
            raise ValueError(
                f'the file {blob.path} is a multi-page document, but not stored on S3. Textract requires multi-page documents to be on S3.'
                )
    yield from self.parser.parse_folder(blob)
|
Lazy load documents
|
test_compatible_vectorstore_documentation
|
"""Test which vectorstores are compatible with the indexing API.
This serves as a reminder to update the documentation in [1]
that specifies which vectorstores are compatible with the
indexing API.
Ideally if a developer adds a new vectorstore or modifies
an existing one in such a way that affects its compatibility
with the Indexing API, he/she will see this failed test
case and 1) update docs in [1] and 2) update the `documented`
dict in this test case.
[1] langchain/docs/docs_skeleton/docs/modules/data_connection/indexing.ipynb
"""
def check_compatibility(vector_store: VectorStore) ->bool:
"""Check if a vectorstore is compatible with the indexing API."""
methods = ['delete', 'add_documents']
for method in methods:
if not hasattr(vector_store, method):
return False
if getattr(vector_store, 'delete') == VectorStore.delete:
return False
return True
compatible = set()
for class_name in langchain_community.vectorstores.__all__:
cls = getattr(langchain_community.vectorstores, class_name)
if issubclass(cls, VectorStore):
is_compatible = check_compatibility(cls)
if is_compatible:
compatible.add(class_name)
documented = {'AnalyticDB', 'AstraDB', 'AzureCosmosDBVectorSearch', 'AwaDB',
'Bagel', 'Cassandra', 'Chroma', 'DashVector', 'DatabricksVectorSearch',
'DeepLake', 'Dingo', 'ElasticVectorSearch', 'ElasticsearchStore',
'FAISS', 'MomentoVectorIndex', 'MyScale', 'PGVector', 'Pinecone',
'Qdrant', 'Redis', 'ScaNN', 'SemaDB', 'SupabaseVectorStore',
'SurrealDBStore', 'TileDB', 'TimescaleVector', 'Vald', 'Vearch',
'VespaStore', 'Weaviate', 'ZepVectorStore'}
assert compatible == documented
|
def test_compatible_vectorstore_documentation() ->None:
    """Test which vectorstores are compatible with the indexing API.
    This serves as a reminder to update the documentation in [1]
    that specifies which vectorstores are compatible with the
    indexing API.
    Ideally if a developer adds a new vectorstore or modifies
    an existing one in such a way that affects its compatibility
    with the Indexing API, he/she will see this failed test
    case and 1) update docs in [1] and 2) update the `documented`
    dict in this test case.
    [1] langchain/docs/docs_skeleton/docs/modules/data_connection/indexing.ipynb
    """
    def check_compatibility(vector_store: VectorStore) ->bool:
        """Check if a vectorstore is compatible with the indexing API."""
        methods = ['delete', 'add_documents']
        for method in methods:
            if not hasattr(vector_store, method):
                return False
        # Inheriting the abstract base `delete` means deletion is
        # unimplemented, so the store cannot support indexing.
        if getattr(vector_store, 'delete') == VectorStore.delete:
            return False
        return True
    compatible = set()
    # Scan every exported vectorstore class and record the compatible ones.
    for class_name in langchain_community.vectorstores.__all__:
        cls = getattr(langchain_community.vectorstores, class_name)
        if issubclass(cls, VectorStore):
            is_compatible = check_compatibility(cls)
            if is_compatible:
                compatible.add(class_name)
    # Keep this literal in sync with the documentation notebook above.
    documented = {'AnalyticDB', 'AstraDB', 'AzureCosmosDBVectorSearch',
        'AwaDB', 'Bagel', 'Cassandra', 'Chroma', 'DashVector',
        'DatabricksVectorSearch', 'DeepLake', 'Dingo',
        'ElasticVectorSearch', 'ElasticsearchStore', 'FAISS',
        'MomentoVectorIndex', 'MyScale', 'PGVector', 'Pinecone', 'Qdrant',
        'Redis', 'ScaNN', 'SemaDB', 'SupabaseVectorStore', 'SurrealDBStore',
        'TileDB', 'TimescaleVector', 'Vald', 'Vearch', 'VespaStore',
        'Weaviate', 'ZepVectorStore'}
    assert compatible == documented
|
Test which vectorstores are compatible with the indexing API.
This serves as a reminder to update the documentation in [1]
that specifies which vectorstores are compatible with the
indexing API.
Ideally if a developer adds a new vectorstore or modifies
an existing one in such a way that affects its compatibility
with the Indexing API, he/she will see this failed test
case and 1) update docs in [1] and 2) update the `documented`
dict in this test case.
[1] langchain/docs/docs_skeleton/docs/modules/data_connection/indexing.ipynb
|
buffer
|
return self.chat_memory.messages
|
@property
def buffer(self) ->List[BaseMessage]:
    """All messages currently stored in the chat history."""
    return self.chat_memory.messages
| null |
api_client
|
return RedditSearchAPIWrapper()
|
@pytest.fixture
def api_client() ->RedditSearchAPIWrapper:
    """Fixture returning a fresh ``RedditSearchAPIWrapper``."""
    return RedditSearchAPIWrapper()
| null |
test_init_from_client
|
import chromadb
client = chromadb.Client(chromadb.config.Settings())
Chroma(client=client)
|
def test_init_from_client() ->None:
    """Chroma can be constructed from an externally created client."""
    import chromadb
    settings = chromadb.config.Settings()
    Chroma(client=chromadb.Client(settings))
| null |
load_chain
|
"""Unified method for loading a chain from LangChainHub or local fs."""
if (hub_result := try_load_from_hub(path, _load_chain_from_file, 'chains',
{'json', 'yaml'}, **kwargs)):
return hub_result
else:
return _load_chain_from_file(path, **kwargs)
|
def load_chain(path: Union[str, Path], **kwargs: Any) ->Chain:
    """Unified method for loading a chain from LangChainHub or local fs."""
    hub_result = try_load_from_hub(path, _load_chain_from_file, 'chains',
        {'json', 'yaml'}, **kwargs)
    if hub_result:
        return hub_result
    # Not a hub path (or hub lookup yielded nothing): load from local fs.
    return _load_chain_from_file(path, **kwargs)
|
Unified method for loading a chain from LangChainHub or local fs.
|
_import_ainetwork_transfer
|
from langchain_community.tools.ainetwork.transfer import AINTransfer
return AINTransfer
|
def _import_ainetwork_transfer() ->Any:
    """Lazily import ``AINTransfer`` so the dependency is only required on use."""
    from langchain_community.tools.ainetwork.transfer import AINTransfer
    return AINTransfer
| null |
generate
|
"""Generate LLM result from inputs."""
prompts, stop = self.prep_prompts(input_list, run_manager=run_manager)
callbacks = run_manager.get_child() if run_manager else None
if isinstance(self.llm, BaseLanguageModel):
return self.llm.generate_prompt(prompts, stop, callbacks=callbacks, **
self.llm_kwargs)
else:
results = self.llm.bind(stop=stop, **self.llm_kwargs).batch(cast(List,
prompts), {'callbacks': callbacks})
generations: List[List[Generation]] = []
for res in results:
if isinstance(res, BaseMessage):
generations.append([ChatGeneration(message=res)])
else:
generations.append([Generation(text=res)])
return LLMResult(generations=generations)
|
def generate(self, input_list: List[Dict[str, Any]], run_manager: Optional[
    CallbackManagerForChainRun]=None) ->LLMResult:
    """Generate LLM result from inputs."""
    prompts, stop = self.prep_prompts(input_list, run_manager=run_manager)
    callbacks = run_manager.get_child() if run_manager else None
    if isinstance(self.llm, BaseLanguageModel):
        # Classic language models accept the prompts directly.
        return self.llm.generate_prompt(prompts, stop, callbacks=callbacks,
            **self.llm_kwargs)
    # Runnable interface: batch the prompts, then normalise each output
    # into an LLMResult so callers see a uniform return type.
    bound = self.llm.bind(stop=stop, **self.llm_kwargs)
    results = bound.batch(cast(List, prompts), {'callbacks': callbacks})
    generations: List[List[Generation]] = []
    for item in results:
        if isinstance(item, BaseMessage):
            generations.append([ChatGeneration(message=item)])
        else:
            generations.append([Generation(text=item)])
    return LLMResult(generations=generations)
|
Generate LLM result from inputs.
|
test_structured_tool_from_function_docstring
|
"""Test that structured tools can be created from functions."""
def foo(bar: int, baz: str) ->str:
"""Docstring
Args:
bar: int
baz: str
"""
raise NotImplementedError()
structured_tool = StructuredTool.from_function(foo)
assert structured_tool.name == 'foo'
assert structured_tool.args == {'bar': {'title': 'Bar', 'type': 'integer'},
'baz': {'title': 'Baz', 'type': 'string'}}
assert structured_tool.args_schema.schema() == {'properties': {'bar': {
'title': 'Bar', 'type': 'integer'}, 'baz': {'title': 'Baz', 'type':
'string'}}, 'title': 'fooSchemaSchema', 'type': 'object', 'required': [
'bar', 'baz']}
prefix = 'foo(bar: int, baz: str) -> str - '
assert foo.__doc__ is not None
assert structured_tool.description == prefix + foo.__doc__.strip()
|
def test_structured_tool_from_function_docstring() ->None:
    """Test that structured tools can be created from functions."""

    def foo(bar: int, baz: str) ->str:
        """Docstring
        Args:
            bar: int
            baz: str
        """
        raise NotImplementedError()
    tool = StructuredTool.from_function(foo)
    # The inferred args schema is asserted both directly and via .schema().
    expected_args = {'bar': {'title': 'Bar', 'type': 'integer'}, 'baz': {
        'title': 'Baz', 'type': 'string'}}
    assert tool.name == 'foo'
    assert tool.args == expected_args
    assert tool.args_schema.schema() == {'properties': expected_args,
        'title': 'fooSchemaSchema', 'type': 'object', 'required': ['bar',
        'baz']}
    assert foo.__doc__ is not None
    # Description is the signature prefix followed by the stripped docstring.
    assert tool.description == 'foo(bar: int, baz: str) -> str - ' + foo.__doc__.strip(
        )
|
Test that structured tools can be created from functions.
|
__init__
|
"""Initialize Trello loader.
Args:
client: Trello API client.
board_name: The name of the Trello board.
include_card_name: Whether to include the name of the card in the document.
include_comments: Whether to include the comments on the card in the
document.
include_checklist: Whether to include the checklist on the card in the
document.
card_filter: Filter on card status. Valid values are "closed", "open",
"all".
extra_metadata: List of additional metadata fields to include as document
metadata.Valid values are "due_date", "labels", "list", "closed".
"""
self.client = client
self.board_name = board_name
self.include_card_name = include_card_name
self.include_comments = include_comments
self.include_checklist = include_checklist
self.extra_metadata = extra_metadata
self.card_filter = card_filter
|
def __init__(self, client: TrelloClient, board_name: str, *,
    include_card_name: bool=True, include_comments: bool=True,
    include_checklist: bool=True, card_filter: Literal['closed', 'open',
    'all']='all', extra_metadata: Tuple[str, ...]=('due_date', 'labels',
    'list', 'closed')):
    """Initialize Trello loader.

    Args:
        client: Trello API client.
        board_name: Name of the Trello board to load cards from.
        include_card_name: Include the card's name in the document.
        include_comments: Include the card's comments in the document.
        include_checklist: Include the card's checklist in the document.
        card_filter: Card status filter; one of "closed", "open" or "all".
        extra_metadata: Additional fields copied into document metadata.
            Valid values are "due_date", "labels", "list" and "closed".
    """
    self.client = client
    self.board_name = board_name
    self.card_filter = card_filter
    self.extra_metadata = extra_metadata
    self.include_card_name = include_card_name
    self.include_comments = include_comments
    self.include_checklist = include_checklist
|
Initialize Trello loader.
Args:
client: Trello API client.
board_name: The name of the Trello board.
include_card_name: Whether to include the name of the card in the document.
include_comments: Whether to include the comments on the card in the
document.
include_checklist: Whether to include the checklist on the card in the
document.
card_filter: Filter on card status. Valid values are "closed", "open",
"all".
extra_metadata: List of additional metadata fields to include as document
metadata.Valid values are "due_date", "labels", "list", "closed".
|
_pushText
|
field = {'textfield': {'text': {'body': text, 'format': 0}},
'processing_options': {'ml_text': self._config['enable_ml']}}
return self._pushField(id, field)
|
def _pushText(self, id: str, text: str) ->str:
    """Push a plain-text field (format 0) for the record with the given id."""
    text_payload = {'body': text, 'format': 0}
    # ml_text processing follows the instance's configuration flag.
    field = {'textfield': {'text': text_payload}, 'processing_options': {
        'ml_text': self._config['enable_ml']}}
    return self._pushField(id, field)
| null |
_return
|
if run_manager:
run_manager.on_agent_finish(output, color='green', verbose=self.verbose)
final_output = output.return_values
if self.return_intermediate_steps:
final_output['intermediate_steps'] = intermediate_steps
return final_output
|
def _return(self, output: AgentFinish, intermediate_steps: list,
    run_manager: Optional[CallbackManagerForChainRun]=None) ->Dict[str, Any]:
    """Assemble the chain's final output dict from an ``AgentFinish``.

    Optionally notifies the run manager and attaches intermediate steps
    when ``self.return_intermediate_steps`` is set.
    """
    if run_manager:
        run_manager.on_agent_finish(output, color='green', verbose=self.
            verbose)
    result = output.return_values
    if self.return_intermediate_steps:
        result['intermediate_steps'] = intermediate_steps
    return result
| null |
_chain_type
|
return 'api_chain'
|
@property
def _chain_type(self) ->str:
    """Return the chain type identifier used for serialization/loading."""
    return 'api_chain'
| null |
test_incremental_delete
|
"""Test indexing with incremental deletion strategy."""
loader = ToyLoader(documents=[Document(page_content=
'This is a test document.', metadata={'source': '1'}), Document(
page_content='This is another document.', metadata={'source': '2'})])
with patch.object(record_manager, 'get_time', return_value=datetime(2021, 1,
2).timestamp()):
assert index(loader, record_manager, vector_store, cleanup=
'incremental', source_id_key='source') == {'num_added': 2,
'num_deleted': 0, 'num_skipped': 0, 'num_updated': 0}
doc_texts = set(vector_store.store.get(uid).page_content for uid in
vector_store.store)
assert doc_texts == {'This is another document.', 'This is a test document.'}
with patch.object(record_manager, 'get_time', return_value=datetime(2021, 1,
2).timestamp()):
assert index(loader, record_manager, vector_store, cleanup=
'incremental', source_id_key='source') == {'num_added': 0,
'num_deleted': 0, 'num_skipped': 2, 'num_updated': 0}
loader = ToyLoader(documents=[Document(page_content='mutated document 1',
metadata={'source': '1'}), Document(page_content='mutated document 2',
metadata={'source': '1'}), Document(page_content=
'This is another document.', metadata={'source': '2'})])
with patch.object(record_manager, 'get_time', return_value=datetime(2021, 1,
3).timestamp()):
assert index(loader, record_manager, vector_store, cleanup=
'incremental', source_id_key='source') == {'num_added': 2,
'num_deleted': 1, 'num_skipped': 1, 'num_updated': 0}
doc_texts = set(vector_store.store.get(uid).page_content for uid in
vector_store.store)
assert doc_texts == {'mutated document 1', 'mutated document 2',
'This is another document.'}
|
def test_incremental_delete(record_manager: SQLRecordManager, vector_store:
    InMemoryVectorStore) ->None:
    """Test indexing with incremental deletion strategy."""
    # Round 1: two fresh documents from distinct sources -> both added.
    loader = ToyLoader(documents=[Document(page_content=
        'This is a test document.', metadata={'source': '1'}), Document(
        page_content='This is another document.', metadata={'source': '2'})])
    with patch.object(record_manager, 'get_time', return_value=datetime(
        2021, 1, 2).timestamp()):
        assert index(loader, record_manager, vector_store, cleanup=
            'incremental', source_id_key='source') == {'num_added': 2,
            'num_deleted': 0, 'num_skipped': 0, 'num_updated': 0}
    doc_texts = set(vector_store.store.get(uid).page_content for uid in
        vector_store.store)
    assert doc_texts == {'This is another document.',
        'This is a test document.'}
    # Round 2: same docs at the same mocked time -> everything is skipped.
    with patch.object(record_manager, 'get_time', return_value=datetime(
        2021, 1, 2).timestamp()):
        assert index(loader, record_manager, vector_store, cleanup=
            'incremental', source_id_key='source') == {'num_added': 0,
            'num_deleted': 0, 'num_skipped': 2, 'num_updated': 0}
    # Round 3: source '1' now yields two mutated docs; source '2' unchanged.
    loader = ToyLoader(documents=[Document(page_content=
        'mutated document 1', metadata={'source': '1'}), Document(
        page_content='mutated document 2', metadata={'source': '1'}),
        Document(page_content='This is another document.', metadata={
        'source': '2'})])
    # Later timestamp: mutated docs added, stale doc for source '1' deleted,
    # unchanged source '2' doc skipped.
    with patch.object(record_manager, 'get_time', return_value=datetime(
        2021, 1, 3).timestamp()):
        assert index(loader, record_manager, vector_store, cleanup=
            'incremental', source_id_key='source') == {'num_added': 2,
            'num_deleted': 1, 'num_skipped': 1, 'num_updated': 0}
    doc_texts = set(vector_store.store.get(uid).page_content for uid in
        vector_store.store)
    assert doc_texts == {'mutated document 1', 'mutated document 2',
        'This is another document.'}
|
Test indexing with incremental deletion strategy.
|
test_visit_operation
|
op = Operation(operator=Operator.AND, arguments=[Comparison(comparator=
Comparator.EQ, attribute='foo', value='hello'), Comparison(comparator=
Comparator.GTE, attribute='bar', value={'type': 'date', 'date':
'2023-09-13'}), Comparison(comparator=Comparator.LTE, attribute='abc',
value=1.4)])
expected = {'operands': [{'operator': 'Equal', 'path': ['foo'], 'valueText':
'hello'}, {'operator': 'GreaterThanEqual', 'path': ['bar'], 'valueDate':
'2023-09-13T00:00:00Z'}, {'operator': 'LessThanEqual', 'path': ['abc'],
'valueNumber': 1.4}], 'operator': 'And'}
actual = DEFAULT_TRANSLATOR.visit_operation(op)
assert expected == actual
|
def test_visit_operation() ->None:
    """An AND over three comparisons translates to the expected filter dict."""
    comparisons = [Comparison(comparator=Comparator.EQ, attribute='foo',
        value='hello'), Comparison(comparator=Comparator.GTE, attribute=
        'bar', value={'type': 'date', 'date': '2023-09-13'}), Comparison(
        comparator=Comparator.LTE, attribute='abc', value=1.4)]
    op = Operation(operator=Operator.AND, arguments=comparisons)
    expected = {'operands': [{'operator': 'Equal', 'path': ['foo'],
        'valueText': 'hello'}, {'operator': 'GreaterThanEqual', 'path': [
        'bar'], 'valueDate': '2023-09-13T00:00:00Z'}, {'operator':
        'LessThanEqual', 'path': ['abc'], 'valueNumber': 1.4}], 'operator':
        'And'}
    assert DEFAULT_TRANSLATOR.visit_operation(op) == expected
| null |
invoke
|
config = ensure_config(config)
return self.get_relevant_documents(input, callbacks=config.get('callbacks'),
tags=config.get('tags'), metadata=config.get('metadata'), run_name=
config.get('run_name'))
|
def invoke(self, input: str, config: Optional[RunnableConfig]=None) ->List[
    Document]:
    """Invoke the retriever on a query string, forwarding run config fields."""
    conf = ensure_config(config)
    return self.get_relevant_documents(
        input,
        callbacks=conf.get('callbacks'),
        tags=conf.get('tags'),
        metadata=conf.get('metadata'),
        run_name=conf.get('run_name'),
    )
| null |
_create_retry_decorator
|
from grpc import RpcError
min_seconds = 1
max_seconds = 60
return retry(reraise=True, stop=stop_after_attempt(llm.max_retries), wait=
wait_exponential(multiplier=1, min=min_seconds, max=max_seconds), retry
=retry_if_exception_type(RpcError), before_sleep=before_sleep_log(
logger, logging.WARNING))
|
def _create_retry_decorator(llm: ChatYandexGPT) ->Callable[[Any], Any]:
    """Build a tenacity retry decorator for gRPC errors.

    Retries up to ``llm.max_retries`` times with exponential backoff
    (1s..60s), re-raising the last error and logging before each sleep.
    """
    from grpc import RpcError
    return retry(reraise=True, stop=stop_after_attempt(llm.max_retries),
        wait=wait_exponential(multiplier=1, min=1, max=60), retry=
        retry_if_exception_type(RpcError), before_sleep=before_sleep_log(
        logger, logging.WARNING))
| null |
__init__
|
super().__init__(pydantic_object=LineList)
|
def __init__(self) ->None:
    """Initialize the parser with the ``LineList`` pydantic schema."""
    super().__init__(pydantic_object=LineList)
| null |
test_gpt_router_streaming_callback
|
"""Test that streaming correctly invokes on_llm_new_token callback."""
callback_handler = FakeCallbackHandler()
callback_manager = CallbackManager([callback_handler])
anthropic_claude = GPTRouterModel(name='claude-instant-1.2', provider_name=
'anthropic')
chat = GPTRouter(models_priority_list=[anthropic_claude], streaming=True,
callback_manager=callback_manager, verbose=True)
message = HumanMessage(content='Write me a 5 line poem.')
chat([message])
assert callback_handler.llm_streams > 1
|
def test_gpt_router_streaming_callback() ->None:
    """Test that streaming correctly invokes on_llm_new_token callback."""
    handler = FakeCallbackHandler()
    manager = CallbackManager([handler])
    claude = GPTRouterModel(name='claude-instant-1.2', provider_name=
        'anthropic')
    chat = GPTRouter(models_priority_list=[claude], streaming=True,
        callback_manager=manager, verbose=True)
    chat([HumanMessage(content='Write me a 5 line poem.')])
    # A streamed multi-line completion must produce more than one token event.
    assert handler.llm_streams > 1
|
Test that streaming correctly invokes on_llm_new_token callback.
|
parse
|
try:
parsed = json.loads(text, strict=False)
except json.JSONDecodeError:
preprocessed_text = preprocess_json_input(text)
try:
parsed = json.loads(preprocessed_text, strict=False)
except Exception:
return AutoGPTAction(name='ERROR', args={'error':
f'Could not parse invalid json: {text}'})
try:
return AutoGPTAction(name=parsed['command']['name'], args=parsed[
'command']['args'])
except (KeyError, TypeError):
return AutoGPTAction(name='ERROR', args={'error':
f'Incomplete command args: {parsed}'})
|
def parse(self, text: str) ->AutoGPTAction:
    """Parse raw LLM text into an ``AutoGPTAction``.

    Falls back to an ERROR action when the text is not valid JSON or the
    parsed payload lacks a well-formed "command" entry.
    """
    try:
        parsed = json.loads(text, strict=False)
    except json.JSONDecodeError:
        # Retry once after cleaning up common JSON formatting mistakes.
        try:
            parsed = json.loads(preprocess_json_input(text), strict=False)
        except Exception:
            return AutoGPTAction(name='ERROR', args={'error':
                f'Could not parse invalid json: {text}'})
    try:
        command = parsed['command']
        return AutoGPTAction(name=command['name'], args=command['args'])
    except (KeyError, TypeError):
        # Missing keys or a non-dict payload both mean an incomplete command.
        return AutoGPTAction(name='ERROR', args={'error':
            f'Incomplete command args: {parsed}'})
| null |
test_indexing_with_no_docs
|
"""Check edge case when loader returns no new docs."""
loader = ToyLoader(documents=[])
assert index(loader, record_manager, vector_store, cleanup='full') == {
'num_added': 0, 'num_deleted': 0, 'num_skipped': 0, 'num_updated': 0}
|
def test_indexing_with_no_docs(record_manager: SQLRecordManager,
    vector_store: VectorStore) ->None:
    """Check edge case when loader returns no new docs."""
    empty_loader = ToyLoader(documents=[])
    stats = index(empty_loader, record_manager, vector_store, cleanup='full')
    assert stats == {'num_added': 0, 'num_deleted': 0, 'num_skipped': 0,
        'num_updated': 0}
|
Check edge case when loader returns no new docs.
|
_llm_type
|
return 'wasm-chat'
|
@property
def _llm_type(self) ->str:
    """Return the identifier string for this LLM type."""
    return 'wasm-chat'
| null |
__init__
|
try:
import kuzu
except ImportError:
raise ImportError(
'Could not import Kùzu python package.Please install Kùzu with `pip install kuzu`.'
)
self.db = db
self.conn = kuzu.Connection(self.db)
self.database = database
self.refresh_schema()
|
def __init__(self, db: Any, database: str='kuzu') ->None:
    """Create a Kùzu graph wrapper around an existing database handle.

    Args:
        db: A ``kuzu.Database`` instance to open a connection against.
        database: Logical database name. Defaults to ``"kuzu"``.

    Raises:
        ImportError: If the ``kuzu`` python package is not installed.
    """
    try:
        import kuzu
    except ImportError:
        # Fixed: original message lacked a space after the period
        # ("package.Please"), producing a garbled user-facing error.
        raise ImportError(
            'Could not import Kùzu python package. Please install Kùzu with `pip install kuzu`.'
            )
    self.db = db
    self.conn = kuzu.Connection(self.db)
    self.database = database
    self.refresh_schema()
| null |
__init__
|
"""Initialize the UpstashRedisStore with HTTP API.
Must provide either an Upstash Redis client or a url.
Args:
client: An Upstash Redis instance
url: UPSTASH_REDIS_REST_URL
token: UPSTASH_REDIS_REST_TOKEN
ttl: time to expire keys in seconds if provided,
if None keys will never expire
namespace: if provided, all keys will be prefixed with this namespace
"""
try:
from upstash_redis import Redis
except ImportError as e:
raise ImportError(
'UpstashRedisStore requires the upstash_redis library to be installed. pip install upstash_redis'
) from e
if client and url:
raise ValueError(
'Either an Upstash Redis client or a url must be provided, not both.')
if client:
if not isinstance(client, Redis):
raise TypeError(
f'Expected Upstash Redis client, got {type(client).__name__}.')
_client = client
else:
if not url or not token:
raise ValueError(
'Either an Upstash Redis client or url and token must be provided.'
)
_client = Redis(url=url, token=token)
self.client = _client
if not isinstance(ttl, int) and ttl is not None:
raise TypeError(f'Expected int or None, got {type(ttl)} instead.')
self.ttl = ttl
self.namespace = namespace
|
def __init__(self, *, client: Any=None, url: Optional[str]=None, token:
    Optional[str]=None, ttl: Optional[int]=None, namespace: Optional[str]=None
    ) ->None:
    """Initialize the UpstashRedisStore with HTTP API.
    Must provide either an Upstash Redis client or a url.
    Args:
        client: An Upstash Redis instance
        url: UPSTASH_REDIS_REST_URL
        token: UPSTASH_REDIS_REST_TOKEN
        ttl: time to expire keys in seconds if provided,
            if None keys will never expire
        namespace: if provided, all keys will be prefixed with this namespace
    """
    try:
        from upstash_redis import Redis
    except ImportError as e:
        raise ImportError(
            'UpstashRedisStore requires the upstash_redis library to be installed. pip install upstash_redis'
            ) from e
    # Reject ambiguous configuration: at most one of client / url may be set.
    if client and url:
        raise ValueError(
            'Either an Upstash Redis client or a url must be provided, not both.'
            )
    if client:
        if not isinstance(client, Redis):
            raise TypeError(
                f'Expected Upstash Redis client, got {type(client).__name__}.')
        _client = client
    else:
        # Without a ready-made client, both url and token are required.
        if not url or not token:
            raise ValueError(
                'Either an Upstash Redis client or url and token must be provided.'
                )
        _client = Redis(url=url, token=token)
    self.client = _client
    # NOTE(review): bool is a subclass of int, so ttl=True would pass this
    # check — presumably acceptable; confirm if stricter validation is wanted.
    if not isinstance(ttl, int) and ttl is not None:
        raise TypeError(f'Expected int or None, got {type(ttl)} instead.')
    self.ttl = ttl
    self.namespace = namespace
|
Initialize the UpstashRedisStore with HTTP API.
Must provide either an Upstash Redis client or a url.
Args:
client: An Upstash Redis instance
url: UPSTASH_REDIS_REST_URL
token: UPSTASH_REDIS_REST_TOKEN
ttl: time to expire keys in seconds if provided,
if None keys will never expire
namespace: if provided, all keys will be prefixed with this namespace
|
authorization
|
if self.beam_client_id:
credential_str = self.beam_client_id + ':' + self.beam_client_secret
else:
credential_str = self.beam_client_secret
return base64.b64encode(credential_str.encode()).decode()
|
@property
def authorization(self) ->str:
    """Base64-encoded credential string for Beam API authorization.

    Uses "client_id:client_secret" when a client id is set, otherwise
    just the client secret.
    """
    if self.beam_client_id:
        creds = f'{self.beam_client_id}:{self.beam_client_secret}'
    else:
        creds = self.beam_client_secret
    return base64.b64encode(creds.encode()).decode()
| null |
test_all_imports
|
assert set(__all__) == set(EXPECTED_ALL)
|
def test_all_imports() ->None:
    """Ensure the module's ``__all__`` matches the expected public API list."""
    assert set(__all__) == set(EXPECTED_ALL)
| null |
_similarity_search
|
"""
Perform a similarity search.
Args:
query_emb: Query represented as an embedding
Returns:
A list of documents most similar to the query
"""
docs = self._search(query_emb=query_emb, top_k=self.top_k)
results = [self._docarray_to_langchain_doc(doc) for doc in docs]
return results
|
def _similarity_search(self, query_emb: np.ndarray) ->List[Document]:
    """
    Perform a similarity search.
    Args:
        query_emb: Query represented as an embedding
    Returns:
        A list of documents most similar to the query
    """
    hits = self._search(query_emb=query_emb, top_k=self.top_k)
    return [self._docarray_to_langchain_doc(hit) for hit in hits]
|
Perform a similarity search.
Args:
query_emb: Query represented as an embedding
Returns:
A list of documents most similar to the query
|
get_entity_knowledge
|
"""Get information about an entity."""
import networkx as nx
if not self._graph.has_node(entity):
return []
results = []
for src, sink in nx.dfs_edges(self._graph, entity, depth_limit=depth):
relation = self._graph[src][sink]['relation']
results.append(f'{src} {relation} {sink}')
return results
|
def get_entity_knowledge(self, entity: str, depth: int=1) ->List[str]:
    """Get information about an entity.

    Walks DFS edges from ``entity`` up to ``depth`` and renders each edge
    as a "source relation sink" string. Unknown entities yield [].
    """
    import networkx as nx
    if not self._graph.has_node(entity):
        return []
    return [f"{src} {self._graph[src][sink]['relation']} {sink}" for src,
        sink in nx.dfs_edges(self._graph, entity, depth_limit=depth)]
|
Get information about an entity.
|
test_load_no_result
|
docs = retriever.get_relevant_documents(
'NORESULTCALL_NORESULTCALL_NORESULTCALL_NORESULTCALL_NORESULTCALL_NORESULTCALL'
)
assert not docs
|
def test_load_no_result(retriever: WikipediaRetriever) ->None:
    """A query with no Wikipedia match must return an empty document list."""
    docs = retriever.get_relevant_documents(
        'NORESULTCALL_NORESULTCALL_NORESULTCALL_NORESULTCALL_NORESULTCALL_NORESULTCALL'
        )
    assert not docs
| null |
__getattr__
|
if name == 'AnalyticDB':
return _import_analyticdb()
elif name == 'AlibabaCloudOpenSearch':
return _import_alibaba_cloud_open_search()
elif name == 'AlibabaCloudOpenSearchSettings':
return _import_alibaba_cloud_open_search_settings()
elif name == 'AzureCosmosDBVectorSearch':
return _import_azure_cosmos_db()
elif name == 'ElasticKnnSearch':
return _import_elastic_knn_search()
elif name == 'ElasticVectorSearch':
return _import_elastic_vector_search()
elif name == 'Annoy':
return _import_annoy()
elif name == 'AtlasDB':
return _import_atlas()
elif name == 'AwaDB':
return _import_awadb()
elif name == 'AzureSearch':
return _import_azuresearch()
elif name == 'Bagel':
return _import_bageldb()
elif name == 'BigQueryVectorSearch':
return _import_bigquery()
elif name == 'BESVectorStore':
return _import_baiducloud_vector_search()
elif name == 'Cassandra':
return _import_cassandra()
elif name == 'AstraDB':
return _import_astradb()
elif name == 'Chroma':
return _import_chroma()
elif name == 'Clarifai':
return _import_clarifai()
elif name == 'ClickhouseSettings':
return _import_clickhouse_settings()
elif name == 'Clickhouse':
return _import_clickhouse()
elif name == 'DashVector':
return _import_dashvector()
elif name == 'DatabricksVectorSearch':
return _import_databricks_vector_search()
elif name == 'DeepLake':
return _import_deeplake()
elif name == 'Dingo':
return _import_dingo()
elif name == 'DocArrayInMemorySearch':
return _import_docarray_inmemory()
elif name == 'DocArrayHnswSearch':
return _import_docarray_hnsw()
elif name == 'ElasticsearchStore':
return _import_elasticsearch()
elif name == 'Epsilla':
return _import_epsilla()
elif name == 'FAISS':
return _import_faiss()
elif name == 'Hologres':
return _import_hologres()
elif name == 'LanceDB':
return _import_lancedb()
elif name == 'LLMRails':
return _import_llm_rails()
elif name == 'Marqo':
return _import_marqo()
elif name == 'MatchingEngine':
return _import_matching_engine()
elif name == 'Meilisearch':
return _import_meilisearch()
elif name == 'Milvus':
return _import_milvus()
elif name == 'MomentoVectorIndex':
return _import_momento_vector_index()
elif name == 'MongoDBAtlasVectorSearch':
return _import_mongodb_atlas()
elif name == 'MyScaleSettings':
return _import_myscale_settings()
elif name == 'MyScale':
return _import_myscale()
elif name == 'Neo4jVector':
return _import_neo4j_vector()
elif name == 'OpenSearchVectorSearch':
return _import_opensearch_vector_search()
elif name == 'PGEmbedding':
return _import_pgembedding()
elif name == 'PGVector':
return _import_pgvector()
elif name == 'Pinecone':
return _import_pinecone()
elif name == 'Qdrant':
return _import_qdrant()
elif name == 'Redis':
return _import_redis()
elif name == 'Rockset':
return _import_rocksetdb()
elif name == 'ScaNN':
return _import_scann()
elif name == 'SemaDB':
return _import_semadb()
elif name == 'SingleStoreDB':
return _import_singlestoredb()
elif name == 'SKLearnVectorStore':
return _import_sklearn()
elif name == 'SQLiteVSS':
return _import_sqlitevss()
elif name == 'StarRocks':
return _import_starrocks()
elif name == 'SupabaseVectorStore':
return _import_supabase()
elif name == 'SurrealDBStore':
return _import_surrealdb()
elif name == 'Tair':
return _import_tair()
elif name == 'TencentVectorDB':
return _import_tencentvectordb()
elif name == 'TileDB':
return _import_tiledb()
elif name == 'Tigris':
return _import_tigris()
elif name == 'TimescaleVector':
return _import_timescalevector()
elif name == 'Typesense':
return _import_typesense()
elif name == 'USearch':
return _import_usearch()
elif name == 'Vald':
return _import_vald()
elif name == 'Vearch':
return _import_vearch()
elif name == 'Vectara':
return _import_vectara()
elif name == 'Weaviate':
return _import_weaviate()
elif name == 'Yellowbrick':
return _import_yellowbrick()
elif name == 'ZepVectorStore':
return _import_zep()
elif name == 'Zilliz':
return _import_zilliz()
elif name == 'VespaStore':
return _import_vespa()
else:
raise AttributeError(f'Could not find: {name}')
|
def __getattr__(name: str) ->Any:
    """Lazily resolve a vector store export by name (PEP 562 module hook).

    Args:
        name: Attribute name requested on the module.

    Returns:
        Whatever the matching ``_import_*`` helper returns (the class).

    Raises:
        AttributeError: If ``name`` is not a known export.
    """
    # Dispatch table replaces a ~60-branch elif chain with an O(1) lookup.
    # It is built inside the function so the importer names are only
    # resolved at call time, independent of definition order in the module.
    importers = {
        'AnalyticDB': _import_analyticdb,
        'AlibabaCloudOpenSearch': _import_alibaba_cloud_open_search,
        'AlibabaCloudOpenSearchSettings':
            _import_alibaba_cloud_open_search_settings,
        'AzureCosmosDBVectorSearch': _import_azure_cosmos_db,
        'ElasticKnnSearch': _import_elastic_knn_search,
        'ElasticVectorSearch': _import_elastic_vector_search,
        'Annoy': _import_annoy,
        'AtlasDB': _import_atlas,
        'AwaDB': _import_awadb,
        'AzureSearch': _import_azuresearch,
        'Bagel': _import_bageldb,
        'BigQueryVectorSearch': _import_bigquery,
        'BESVectorStore': _import_baiducloud_vector_search,
        'Cassandra': _import_cassandra,
        'AstraDB': _import_astradb,
        'Chroma': _import_chroma,
        'Clarifai': _import_clarifai,
        'ClickhouseSettings': _import_clickhouse_settings,
        'Clickhouse': _import_clickhouse,
        'DashVector': _import_dashvector,
        'DatabricksVectorSearch': _import_databricks_vector_search,
        'DeepLake': _import_deeplake,
        'Dingo': _import_dingo,
        'DocArrayInMemorySearch': _import_docarray_inmemory,
        'DocArrayHnswSearch': _import_docarray_hnsw,
        'ElasticsearchStore': _import_elasticsearch,
        'Epsilla': _import_epsilla,
        'FAISS': _import_faiss,
        'Hologres': _import_hologres,
        'LanceDB': _import_lancedb,
        'LLMRails': _import_llm_rails,
        'Marqo': _import_marqo,
        'MatchingEngine': _import_matching_engine,
        'Meilisearch': _import_meilisearch,
        'Milvus': _import_milvus,
        'MomentoVectorIndex': _import_momento_vector_index,
        'MongoDBAtlasVectorSearch': _import_mongodb_atlas,
        'MyScaleSettings': _import_myscale_settings,
        'MyScale': _import_myscale,
        'Neo4jVector': _import_neo4j_vector,
        'OpenSearchVectorSearch': _import_opensearch_vector_search,
        'PGEmbedding': _import_pgembedding,
        'PGVector': _import_pgvector,
        'Pinecone': _import_pinecone,
        'Qdrant': _import_qdrant,
        'Redis': _import_redis,
        'Rockset': _import_rocksetdb,
        'ScaNN': _import_scann,
        'SemaDB': _import_semadb,
        'SingleStoreDB': _import_singlestoredb,
        'SKLearnVectorStore': _import_sklearn,
        'SQLiteVSS': _import_sqlitevss,
        'StarRocks': _import_starrocks,
        'SupabaseVectorStore': _import_supabase,
        'SurrealDBStore': _import_surrealdb,
        'Tair': _import_tair,
        'TencentVectorDB': _import_tencentvectordb,
        'TileDB': _import_tiledb,
        'Tigris': _import_tigris,
        'TimescaleVector': _import_timescalevector,
        'Typesense': _import_typesense,
        'USearch': _import_usearch,
        'Vald': _import_vald,
        'Vearch': _import_vearch,
        'Vectara': _import_vectara,
        'Weaviate': _import_weaviate,
        'Yellowbrick': _import_yellowbrick,
        'ZepVectorStore': _import_zep,
        'Zilliz': _import_zilliz,
        'VespaStore': _import_vespa,
    }
    importer = importers.get(name)
    if importer is None:
        # Same error type and message as the original elif chain.
        raise AttributeError(f'Could not find: {name}')
    return importer()
| null |
similarity_search_with_score
|
"""
Run similarity search with Deep Lake with distance returned.
Examples:
>>> data = vector_store.similarity_search_with_score(
... query=<your_query>,
... embedding=<your_embedding_function>
... k=<number_of_items_to_return>,
... exec_option=<preferred_exec_option>,
... )
Args:
query (str): Query text to search for.
k (int): Number of results to return. Defaults to 4.
**kwargs: Additional keyword arguments. Some of these arguments are:
distance_metric: `L2` for Euclidean, `L1` for Nuclear, `max` L-infinity
distance, `cos` for cosine similarity, 'dot' for dot product.
Defaults to `L2`.
filter (Optional[Dict[str, str]]): Filter by metadata. Defaults to None.
embedding_function (Callable): Embedding function to use. Defaults
to None.
exec_option (str): DeepLakeVectorStore supports 3 ways to perform
searching. It could be either "python", "compute_engine" or
"tensor_db". Defaults to "python".
- "python" - Pure-python implementation running on the client.
Can be used for data stored anywhere. WARNING: using this
option with big datasets is discouraged due to potential
memory issues.
- "compute_engine" - Performant C++ implementation of the Deep
Lake Compute Engine. Runs on the client and can be used for
any data stored in or connected to Deep Lake. It cannot be used
with in-memory or local datasets.
- "tensor_db" - Performant, fully-hosted Managed Tensor Database.
Responsible for storage and query execution. Only available for
data stored in the Deep Lake Managed Database. To store datasets
in this database, specify `runtime = {"db_engine": True}`
during dataset creation.
deep_memory (bool): Whether to use the Deep Memory model for improving
search results. Defaults to False if deep_memory is not specified
in the Vector Store initialization. If True, the distance metric
is set to "deepmemory_distance", which represents the metric with
which the model was trained. The search is performed using the Deep
Memory model. If False, the distance metric is set to "COS" or
whatever distance metric user specifies.
Returns:
List[Tuple[Document, float]]: List of documents most similar to the query
text with distance in float."""
return self._search(query=query, k=k, return_score=True, **kwargs)
|
def similarity_search_with_score(self, query: str, k: int=4, **kwargs: Any
    ) ->List[Tuple[Document, float]]:
    """
    Run similarity search with Deep Lake with distance returned.
    Examples:
        >>> data = vector_store.similarity_search_with_score(
        ...     query=<your_query>,
        ...     embedding=<your_embedding_function>,
        ...     k=<number_of_items_to_return>,
        ...     exec_option=<preferred_exec_option>,
        ... )
    Args:
        query (str): Query text to search for.
        k (int): Number of results to return. Defaults to 4.
        **kwargs: Additional keyword arguments. Some of these arguments are:
            distance_metric: `L2` for Euclidean, `L1` for Nuclear, `max` L-infinity
                distance, `cos` for cosine similarity, 'dot' for dot product.
                Defaults to `L2`.
            filter (Optional[Dict[str, str]]): Filter by metadata. Defaults to None.
            embedding_function (Callable): Embedding function to use. Defaults
                to None.
            exec_option (str): DeepLakeVectorStore supports 3 ways to perform
                searching. It could be either "python", "compute_engine" or
                "tensor_db". Defaults to "python".
                - "python" - Pure-python implementation running on the client.
                    Can be used for data stored anywhere. WARNING: using this
                    option with big datasets is discouraged due to potential
                    memory issues.
                - "compute_engine" - Performant C++ implementation of the Deep
                    Lake Compute Engine. Runs on the client and can be used for
                    any data stored in or connected to Deep Lake. It cannot be used
                    with in-memory or local datasets.
                - "tensor_db" - Performant, fully-hosted Managed Tensor Database.
                    Responsible for storage and query execution. Only available for
                    data stored in the Deep Lake Managed Database. To store datasets
                    in this database, specify `runtime = {"db_engine": True}`
                    during dataset creation.
            deep_memory (bool): Whether to use the Deep Memory model for improving
                search results. Defaults to False if deep_memory is not specified
                in the Vector Store initialization. If True, the distance metric
                is set to "deepmemory_distance", which represents the metric with
                which the model was trained. The search is performed using the Deep
                Memory model. If False, the distance metric is set to "COS" or
                whatever distance metric user specifies.
    Returns:
        List[Tuple[Document, float]]: List of documents most similar to the query
            text with distance in float.
    """
    # Delegate to the shared search helper, forcing scores to be returned
    # alongside the matched documents.
    scored_results = self._search(query=query, k=k, return_score=True, **kwargs)
    return scored_results
|
Run similarity search with Deep Lake with distance returned.
Examples:
    >>> data = vector_store.similarity_search_with_score(
    ...     query=<your_query>,
    ...     embedding=<your_embedding_function>,
    ...     k=<number_of_items_to_return>,
    ...     exec_option=<preferred_exec_option>,
    ... )
Args:
query (str): Query text to search for.
k (int): Number of results to return. Defaults to 4.
**kwargs: Additional keyword arguments. Some of these arguments are:
distance_metric: `L2` for Euclidean, `L1` for Nuclear, `max` L-infinity
distance, `cos` for cosine similarity, 'dot' for dot product.
Defaults to `L2`.
filter (Optional[Dict[str, str]]): Filter by metadata. Defaults to None.
embedding_function (Callable): Embedding function to use. Defaults
to None.
exec_option (str): DeepLakeVectorStore supports 3 ways to perform
searching. It could be either "python", "compute_engine" or
"tensor_db". Defaults to "python".
- "python" - Pure-python implementation running on the client.
Can be used for data stored anywhere. WARNING: using this
option with big datasets is discouraged due to potential
memory issues.
- "compute_engine" - Performant C++ implementation of the Deep
Lake Compute Engine. Runs on the client and can be used for
any data stored in or connected to Deep Lake. It cannot be used
with in-memory or local datasets.
- "tensor_db" - Performant, fully-hosted Managed Tensor Database.
Responsible for storage and query execution. Only available for
data stored in the Deep Lake Managed Database. To store datasets
in this database, specify `runtime = {"db_engine": True}`
during dataset creation.
deep_memory (bool): Whether to use the Deep Memory model for improving
search results. Defaults to False if deep_memory is not specified
in the Vector Store initialization. If True, the distance metric
is set to "deepmemory_distance", which represents the metric with
which the model was trained. The search is performed using the Deep
Memory model. If False, the distance metric is set to "COS" or
whatever distance metric user specifies.
Returns:
List[Tuple[Document, float]]: List of documents most similar to the query
text with distance in float.
|
_run
|
body = {'this': tool_input}
response = requests.post(self.url, data=body)
return response.text
|
def _run(self, tool_input: str, run_manager: Optional[
    CallbackManagerForToolRun]=None) ->str:
    """POST the tool input to the configured URL and return the raw response text."""
    # The remote endpoint receives the input under the key 'this'.
    payload = {'this': tool_input}
    # NOTE(review): no timeout is passed to requests.post, so a stalled
    # endpoint blocks indefinitely — consider adding timeout=...; confirm
    # with callers before changing behavior.
    response = requests.post(self.url, data=payload)
    return response.text
| null |
test_action_w_namespace_no_emb
|
str1 = 'test1'
str2 = 'test2'
str3 = 'test3'
expected = [{'test_namespace': str1}, {'test_namespace': str2}, {
'test_namespace': str3}]
assert base.embed([{'test_namespace': str1}, {'test_namespace': str2}, {
'test_namespace': str3}], MockEncoder()) == expected
|
@pytest.mark.requires('vowpal_wabbit_next')
def test_action_w_namespace_no_emb() ->None:
    """Plain string values under a namespace pass through embed() unchanged."""
    values = ['test1', 'test2', 'test3']
    actions = [{'test_namespace': value} for value in values]
    expected = [{'test_namespace': value} for value in values]
    assert base.embed(actions, MockEncoder()) == expected
| null |
_root_to_dict
|
"""Converts xml tree to python dictionary."""
result: Dict[str, List[Any]] = {root.tag: []}
for child in root:
if len(child) == 0:
result[root.tag].append({child.tag: child.text})
else:
result[root.tag].append(self._root_to_dict(child))
return result
|
def _root_to_dict(self, root: ET.Element) ->Dict[str, List[Any]]:
"""Converts xml tree to python dictionary."""
result: Dict[str, List[Any]] = {root.tag: []}
for child in root:
if len(child) == 0:
result[root.tag].append({child.tag: child.text})
else:
result[root.tag].append(self._root_to_dict(child))
return result
|
Converts xml tree to python dictionary.
|
test_faiss_local_save_load
|
"""Test end to end serialization."""
texts = ['foo', 'bar', 'baz']
docsearch = FAISS.from_texts(texts, FakeEmbeddings())
temp_timestamp = datetime.datetime.utcnow().strftime('%Y%m%d-%H%M%S')
with tempfile.TemporaryDirectory(suffix='_' + temp_timestamp + '/'
) as temp_folder:
docsearch.save_local(temp_folder)
new_docsearch = FAISS.load_local(temp_folder, FakeEmbeddings())
assert new_docsearch.index is not None
|
@pytest.mark.requires('faiss')
def test_faiss_local_save_load() ->None:
    """Test end to end serialization.

    Builds an index from three texts, saves it to a uniquely-suffixed
    temporary folder, reloads it, and verifies the loaded store has a
    usable index.
    """
    texts = ['foo', 'bar', 'baz']
    docsearch = FAISS.from_texts(texts, FakeEmbeddings())
    # datetime.utcnow() is deprecated since Python 3.12; the timezone-aware
    # equivalent produces the identical '%Y%m%d-%H%M%S' string.
    temp_timestamp = datetime.datetime.now(datetime.timezone.utc).strftime(
        '%Y%m%d-%H%M%S')
    with tempfile.TemporaryDirectory(suffix='_' + temp_timestamp + '/'
        ) as temp_folder:
        docsearch.save_local(temp_folder)
        new_docsearch = FAISS.load_local(temp_folder, FakeEmbeddings())
        assert new_docsearch.index is not None
|
Test end to end serialization.
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.