method_name
stringlengths 1
78
| method_body
stringlengths 3
9.66k
| full_code
stringlengths 31
10.7k
| docstring
stringlengths 4
4.74k
⌀ |
|---|---|---|---|
__str__
|
return f'ContextSet({_print_keys(list(self.keys.keys()))})'
|
def __str__(self) ->str:
return f'ContextSet({_print_keys(list(self.keys.keys()))})'
| null |
test_python_ast_repl_one_line_return
|
arr = np.array([1, 2, 3, 4, 5])
tool = PythonAstREPLTool(locals={'arr': arr})
program = '`(arr**2).sum() # Returns sum of squares`'
assert tool.run(program) == 55
|
@pytest.mark.skipif(sys.version_info < (3, 9), reason=
'Requires python version >= 3.9 to run.')
def test_python_ast_repl_one_line_return() ->None:
arr = np.array([1, 2, 3, 4, 5])
tool = PythonAstREPLTool(locals={'arr': arr})
program = '`(arr**2).sum() # Returns sum of squares`'
assert tool.run(program) == 55
| null |
test_get_validated_relative_path_for_symlink_inside_root
|
"""Test that symlink pointing inside the root directory is allowed."""
with TemporaryDirectory() as temp_dir:
root = Path(temp_dir)
user_path = 'symlink_inside_root'
target_path = 'data/sub/file.txt'
symlink_path = root / user_path
target_path_ = root / target_path
symlink_path.symlink_to(target_path_)
expected = target_path_.resolve()
result = get_validated_relative_path(root, user_path)
assert result == expected
symlink_path.unlink()
|
def test_get_validated_relative_path_for_symlink_inside_root() ->None:
"""Test that symlink pointing inside the root directory is allowed."""
with TemporaryDirectory() as temp_dir:
root = Path(temp_dir)
user_path = 'symlink_inside_root'
target_path = 'data/sub/file.txt'
symlink_path = root / user_path
target_path_ = root / target_path
symlink_path.symlink_to(target_path_)
expected = target_path_.resolve()
result = get_validated_relative_path(root, user_path)
assert result == expected
symlink_path.unlink()
|
Test that symlink pointing inside the root directory is allowed.
|
__init__
|
"""
:param lakefs_access_key: [required] lakeFS server's access key
:param lakefs_secret_key: [required] lakeFS server's secret key
:param lakefs_endpoint: [required] lakeFS server's endpoint address,
ex: https://example.my-lakefs.com
:param repo: [optional, default = ''] target repository
:param ref: [optional, default = 'main'] target ref (branch name,
tag, or commit ID)
:param path: [optional, default = ''] target path
"""
self.__lakefs_client = LakeFSClient(lakefs_access_key, lakefs_secret_key,
lakefs_endpoint)
self.repo = '' if repo is None or repo == '' else str(repo)
self.ref = 'main' if ref is None or ref == '' else str(ref)
self.path = '' if path is None else str(path)
|
def __init__(self, lakefs_access_key: str, lakefs_secret_key: str,
lakefs_endpoint: str, repo: Optional[str]=None, ref: Optional[str]=
'main', path: Optional[str]=''):
"""
:param lakefs_access_key: [required] lakeFS server's access key
:param lakefs_secret_key: [required] lakeFS server's secret key
:param lakefs_endpoint: [required] lakeFS server's endpoint address,
ex: https://example.my-lakefs.com
:param repo: [optional, default = ''] target repository
:param ref: [optional, default = 'main'] target ref (branch name,
tag, or commit ID)
:param path: [optional, default = ''] target path
"""
self.__lakefs_client = LakeFSClient(lakefs_access_key,
lakefs_secret_key, lakefs_endpoint)
self.repo = '' if repo is None or repo == '' else str(repo)
self.ref = 'main' if ref is None or ref == '' else str(ref)
self.path = '' if path is None else str(path)
|
:param lakefs_access_key: [required] lakeFS server's access key
:param lakefs_secret_key: [required] lakeFS server's secret key
:param lakefs_endpoint: [required] lakeFS server's endpoint address,
ex: https://example.my-lakefs.com
:param repo: [optional, default = ''] target repository
:param ref: [optional, default = 'main'] target ref (branch name,
tag, or commit ID)
:param path: [optional, default = ''] target path
|
test_api_key_masked_when_passed_via_constructor
|
llm = ChatJavelinAIGateway(gateway_uri='<javelin-ai-gateway-uri>', route=
'<javelin-ai-gateway-chat-route>', javelin_api_key='secret-api-key',
params={'temperature': 0.1})
assert str(llm.javelin_api_key) == '**********'
assert 'secret-api-key' not in repr(llm.javelin_api_key)
assert 'secret-api-key' not in repr(llm)
|
@pytest.mark.requires('javelin_sdk')
def test_api_key_masked_when_passed_via_constructor() ->None:
llm = ChatJavelinAIGateway(gateway_uri='<javelin-ai-gateway-uri>',
route='<javelin-ai-gateway-chat-route>', javelin_api_key=
'secret-api-key', params={'temperature': 0.1})
assert str(llm.javelin_api_key) == '**********'
assert 'secret-api-key' not in repr(llm.javelin_api_key)
assert 'secret-api-key' not in repr(llm)
| null |
empty_proposals
|
return []
|
def empty_proposals(x):
return []
| null |
test_visit_structured_query
|
query = 'What is the capital of France?'
structured_query = StructuredQuery(query=query, filter=None)
expected: Tuple[str, Dict] = (query, {})
actual = DEFAULT_TRANSLATOR.visit_structured_query(structured_query)
assert expected == actual
comp = Comparison(comparator=Comparator.LT, attribute='foo', value=['1', '2'])
structured_query = StructuredQuery(query=query, filter=comp)
expected = query, {'tql':
"SELECT * WHERE (metadata['foo'] < 1 or metadata['foo'] < 2)"}
actual = DEFAULT_TRANSLATOR.visit_structured_query(structured_query)
assert expected == actual
op = Operation(operator=Operator.AND, arguments=[Comparison(comparator=
Comparator.LT, attribute='foo', value=2), Comparison(comparator=
Comparator.EQ, attribute='bar', value='baz'), Comparison(comparator=
Comparator.LT, attribute='abc', value=['1', '2'])])
structured_query = StructuredQuery(query=query, filter=op)
expected = query, {'tql':
"SELECT * WHERE (metadata['foo'] < 2 and metadata['bar'] == 'baz' and (metadata['abc'] < 1 or metadata['abc'] < 2))"
}
actual = DEFAULT_TRANSLATOR.visit_structured_query(structured_query)
assert expected == actual
|
def test_visit_structured_query() ->None:
query = 'What is the capital of France?'
structured_query = StructuredQuery(query=query, filter=None)
expected: Tuple[str, Dict] = (query, {})
actual = DEFAULT_TRANSLATOR.visit_structured_query(structured_query)
assert expected == actual
comp = Comparison(comparator=Comparator.LT, attribute='foo', value=['1',
'2'])
structured_query = StructuredQuery(query=query, filter=comp)
expected = query, {'tql':
"SELECT * WHERE (metadata['foo'] < 1 or metadata['foo'] < 2)"}
actual = DEFAULT_TRANSLATOR.visit_structured_query(structured_query)
assert expected == actual
op = Operation(operator=Operator.AND, arguments=[Comparison(comparator=
Comparator.LT, attribute='foo', value=2), Comparison(comparator=
Comparator.EQ, attribute='bar', value='baz'), Comparison(comparator
=Comparator.LT, attribute='abc', value=['1', '2'])])
structured_query = StructuredQuery(query=query, filter=op)
expected = query, {'tql':
"SELECT * WHERE (metadata['foo'] < 2 and metadata['bar'] == 'baz' and (metadata['abc'] < 1 or metadata['abc'] < 2))"
}
actual = DEFAULT_TRANSLATOR.visit_structured_query(structured_query)
assert expected == actual
| null |
_import_starrocks
|
from langchain_community.vectorstores.starrocks import StarRocks
return StarRocks
|
def _import_starrocks() ->Any:
from langchain_community.vectorstores.starrocks import StarRocks
return StarRocks
| null |
_import_edenai
|
from langchain_community.llms.edenai import EdenAI
return EdenAI
|
def _import_edenai() ->Any:
from langchain_community.llms.edenai import EdenAI
return EdenAI
| null |
_import_semadb
|
from langchain_community.vectorstores.semadb import SemaDB
return SemaDB
|
def _import_semadb() ->Any:
from langchain_community.vectorstores.semadb import SemaDB
return SemaDB
| null |
test_fastembed_embedding_documents
|
"""Test fastembed embeddings for documents."""
documents = ['foo bar', 'bar foo']
embedding = FastEmbedEmbeddings(model_name=model_name, max_length=
max_length, doc_embed_type=doc_embed_type, threads=threads)
output = embedding.embed_documents(documents)
assert len(output) == 2
assert len(output[0]) == 384
|
@pytest.mark.parametrize('model_name', [
'sentence-transformers/all-MiniLM-L6-v2', 'BAAI/bge-small-en-v1.5'])
@pytest.mark.parametrize('max_length', [50, 512])
@pytest.mark.parametrize('doc_embed_type', ['default', 'passage'])
@pytest.mark.parametrize('threads', [0, 10])
def test_fastembed_embedding_documents(model_name: str, max_length: int,
doc_embed_type: str, threads: int) ->None:
"""Test fastembed embeddings for documents."""
documents = ['foo bar', 'bar foo']
embedding = FastEmbedEmbeddings(model_name=model_name, max_length=
max_length, doc_embed_type=doc_embed_type, threads=threads)
output = embedding.embed_documents(documents)
assert len(output) == 2
assert len(output[0]) == 384
|
Test fastembed embeddings for documents.
|
invoke
|
if return_exceptions:
try:
return runnable.invoke(input, config, **kwargs)
except Exception as e:
return e
else:
return runnable.invoke(input, config, **kwargs)
|
def invoke(runnable: Runnable, input: Input, config: RunnableConfig) ->Union[
Output, Exception]:
if return_exceptions:
try:
return runnable.invoke(input, config, **kwargs)
except Exception as e:
return e
else:
return runnable.invoke(input, config, **kwargs)
| null |
get_tools
|
"""Get the tools for all the API operations."""
return list(self.nla_tools)
|
def get_tools(self) ->List[BaseTool]:
"""Get the tools for all the API operations."""
return list(self.nla_tools)
|
Get the tools for all the API operations.
|
_on_tool_start
|
crumbs = self.get_breadcrumbs(run)
self.function_callback(
f"{get_colored_text('[tool/start]', color='green')} " + get_bolded_text
(f"""[{crumbs}] Entering Tool run with input:
""") +
f'"{run.inputs[\'input\'].strip()}"')
|
def _on_tool_start(self, run: Run) ->None:
crumbs = self.get_breadcrumbs(run)
self.function_callback(
f"{get_colored_text('[tool/start]', color='green')} " +
get_bolded_text(f"""[{crumbs}] Entering Tool run with input:
""") +
f'"{run.inputs[\'input\'].strip()}"')
| null |
on_llm_error
|
self._require_current_thought().on_llm_error(error, **kwargs)
self._prune_old_thought_containers()
|
def on_llm_error(self, error: BaseException, **kwargs: Any) ->None:
self._require_current_thought().on_llm_error(error, **kwargs)
self._prune_old_thought_containers()
| null |
_check_in_allowed_domain
|
"""Check if a URL is in the allowed domains.
Args:
url (str): The input URL.
limit_to_domains (Sequence[str]): The allowed domains.
Returns:
bool: True if the URL is in the allowed domains, False otherwise.
"""
scheme, domain = _extract_scheme_and_domain(url)
for allowed_domain in limit_to_domains:
allowed_scheme, allowed_domain = _extract_scheme_and_domain(allowed_domain)
if scheme == allowed_scheme and domain == allowed_domain:
return True
return False
|
def _check_in_allowed_domain(url: str, limit_to_domains: Sequence[str]) ->bool:
"""Check if a URL is in the allowed domains.
Args:
url (str): The input URL.
limit_to_domains (Sequence[str]): The allowed domains.
Returns:
bool: True if the URL is in the allowed domains, False otherwise.
"""
scheme, domain = _extract_scheme_and_domain(url)
for allowed_domain in limit_to_domains:
allowed_scheme, allowed_domain = _extract_scheme_and_domain(
allowed_domain)
if scheme == allowed_scheme and domain == allowed_domain:
return True
return False
|
Check if a URL is in the allowed domains.
Args:
url (str): The input URL.
limit_to_domains (Sequence[str]): The allowed domains.
Returns:
bool: True if the URL is in the allowed domains, False otherwise.
|
validate_chains
|
"""Validate that return messages is not True."""
if values.get('return_messages', False):
raise ValueError(
'return_messages must be False for ConversationStringBufferMemory')
return values
|
@root_validator()
def validate_chains(cls, values: Dict) ->Dict:
"""Validate that return messages is not True."""
if values.get('return_messages', False):
raise ValueError(
'return_messages must be False for ConversationStringBufferMemory')
return values
|
Validate that return messages is not True.
|
search
|
"""Search for a document.
Args:
search: search string
Returns:
Document if found, else error message.
"""
r = self._lookup_fn(search)
if isinstance(r, str):
return Document(page_content=r, metadata={'source': search})
elif isinstance(r, Document):
return r
raise ValueError(f'Unexpected type of document {type(r)}')
|
def search(self, search: str) ->Document:
"""Search for a document.
Args:
search: search string
Returns:
Document if found, else error message.
"""
r = self._lookup_fn(search)
if isinstance(r, str):
return Document(page_content=r, metadata={'source': search})
elif isinstance(r, Document):
return r
raise ValueError(f'Unexpected type of document {type(r)}')
|
Search for a document.
Args:
search: search string
Returns:
Document if found, else error message.
|
repo_lookup
|
return repo_dict[owner_repo_commit]
|
def repo_lookup(owner_repo_commit: str, **kwargs: Any) ->ChatPromptTemplate:
return repo_dict[owner_repo_commit]
| null |
create_index
|
version_num = client.info()['version']['number'][0]
version_num = int(version_num)
if version_num >= 8:
client.indices.create(index=index_name, mappings=mapping)
else:
client.indices.create(index=index_name, body={'mappings': mapping})
|
def create_index(self, client: Any, index_name: str, mapping: Dict) ->None:
version_num = client.info()['version']['number'][0]
version_num = int(version_num)
if version_num >= 8:
client.indices.create(index=index_name, mappings=mapping)
else:
client.indices.create(index=index_name, body={'mappings': mapping})
| null |
image_summarize
|
"""
Make image summary
:param img_base64: Base64 encoded string for image
:param prompt: Text prompt for summarizatiomn
:return: Image summarization prompt
"""
chat = ChatOpenAI(model='gpt-4-vision-preview', max_tokens=1024)
msg = chat.invoke([HumanMessage(content=[{'type': 'text', 'text': prompt},
{'type': 'image_url', 'image_url': {'url':
f'data:image/jpeg;base64,{img_base64}'}}])])
return msg.content
|
def image_summarize(img_base64, prompt):
"""
Make image summary
:param img_base64: Base64 encoded string for image
:param prompt: Text prompt for summarizatiomn
:return: Image summarization prompt
"""
chat = ChatOpenAI(model='gpt-4-vision-preview', max_tokens=1024)
msg = chat.invoke([HumanMessage(content=[{'type': 'text', 'text':
prompt}, {'type': 'image_url', 'image_url': {'url':
f'data:image/jpeg;base64,{img_base64}'}}])])
return msg.content
|
Make image summary
:param img_base64: Base64 encoded string for image
:param prompt: Text prompt for summarizatiomn
:return: Image summarization prompt
|
test_saving_loading_endpoint_llm
|
"""Test saving/loading an HuggingFaceHub LLM."""
llm = HuggingFaceEndpoint(endpoint_url='', task='text-generation',
model_kwargs={'max_new_tokens': 10})
llm.save(file_path=tmp_path / 'hf.yaml')
loaded_llm = load_llm(tmp_path / 'hf.yaml')
assert_llm_equality(llm, loaded_llm)
|
def test_saving_loading_endpoint_llm(tmp_path: Path) ->None:
"""Test saving/loading an HuggingFaceHub LLM."""
llm = HuggingFaceEndpoint(endpoint_url='', task='text-generation',
model_kwargs={'max_new_tokens': 10})
llm.save(file_path=tmp_path / 'hf.yaml')
loaded_llm = load_llm(tmp_path / 'hf.yaml')
assert_llm_equality(llm, loaded_llm)
|
Test saving/loading an HuggingFaceHub LLM.
|
lc_attributes
|
return {'openai_api_type': self.openai_api_type, 'openai_api_version': self
.openai_api_version}
|
@property
def lc_attributes(self) ->Dict[str, Any]:
return {'openai_api_type': self.openai_api_type, 'openai_api_version':
self.openai_api_version}
| null |
__init__
|
"""Initialize callback handler."""
self.file = cast(TextIO, open(filename, mode, encoding='utf-8'))
self.color = color
|
def __init__(self, filename: str, mode: str='a', color: Optional[str]=None
) ->None:
"""Initialize callback handler."""
self.file = cast(TextIO, open(filename, mode, encoding='utf-8'))
self.color = color
|
Initialize callback handler.
|
embed
|
"""call the embedding of model
Args:
model (str): to embedding model
texts (List[str]): List of sentences to embed.
Returns:
List[List[float]]: List of vectors for each sentence
"""
perm_texts, unpermute_func = self._permute(texts)
perm_texts_batched = self._batch(perm_texts)
map_args = self._sync_request_embed, [model] * len(perm_texts_batched
), perm_texts_batched
if len(perm_texts_batched) == 1:
embeddings_batch_perm = list(map(*map_args))
else:
with ThreadPoolExecutor(32) as p:
embeddings_batch_perm = list(p.map(*map_args))
embeddings_perm = self._unbatch(embeddings_batch_perm)
embeddings = unpermute_func(embeddings_perm)
return embeddings
|
def embed(self, model: str, texts: List[str]) ->List[List[float]]:
"""call the embedding of model
Args:
model (str): to embedding model
texts (List[str]): List of sentences to embed.
Returns:
List[List[float]]: List of vectors for each sentence
"""
perm_texts, unpermute_func = self._permute(texts)
perm_texts_batched = self._batch(perm_texts)
map_args = self._sync_request_embed, [model] * len(perm_texts_batched
), perm_texts_batched
if len(perm_texts_batched) == 1:
embeddings_batch_perm = list(map(*map_args))
else:
with ThreadPoolExecutor(32) as p:
embeddings_batch_perm = list(p.map(*map_args))
embeddings_perm = self._unbatch(embeddings_batch_perm)
embeddings = unpermute_func(embeddings_perm)
return embeddings
|
call the embedding of model
Args:
model (str): to embedding model
texts (List[str]): List of sentences to embed.
Returns:
List[List[float]]: List of vectors for each sentence
|
requires_input
|
"""Return whether the chain requires an input.
Returns:
bool: True if the chain requires an input, False otherwise.
"""
return True
|
@property
def requires_input(self) ->bool:
"""Return whether the chain requires an input.
Returns:
bool: True if the chain requires an input, False otherwise.
"""
return True
|
Return whether the chain requires an input.
Returns:
bool: True if the chain requires an input, False otherwise.
|
make_final_outputs
|
prepared_outputs = AddableDict(self.agent_executor.prep_outputs(self.inputs,
outputs, return_only_outputs=True))
if self.include_run_info:
prepared_outputs[RUN_KEY] = RunInfo(run_id=run_manager.run_id)
return prepared_outputs
|
def make_final_outputs(self, outputs: Dict[str, Any], run_manager: Union[
CallbackManagerForChainRun, AsyncCallbackManagerForChainRun]
) ->AddableDict:
prepared_outputs = AddableDict(self.agent_executor.prep_outputs(self.
inputs, outputs, return_only_outputs=True))
if self.include_run_info:
prepared_outputs[RUN_KEY] = RunInfo(run_id=run_manager.run_id)
return prepared_outputs
| null |
_get_chat_history
|
buffer = ''
for dialogue_turn in chat_history:
if isinstance(dialogue_turn, BaseMessage):
role_prefix = _ROLE_MAP.get(dialogue_turn.type,
f'{dialogue_turn.type}: ')
buffer += f'\n{role_prefix}{dialogue_turn.content}'
elif isinstance(dialogue_turn, tuple):
human = 'Human: ' + dialogue_turn[0]
ai = 'Assistant: ' + dialogue_turn[1]
buffer += '\n' + '\n'.join([human, ai])
else:
raise ValueError(
f'Unsupported chat history format: {type(dialogue_turn)}. Full chat history: {chat_history} '
)
return buffer
|
def _get_chat_history(chat_history: List[CHAT_TURN_TYPE]) ->str:
buffer = ''
for dialogue_turn in chat_history:
if isinstance(dialogue_turn, BaseMessage):
role_prefix = _ROLE_MAP.get(dialogue_turn.type,
f'{dialogue_turn.type}: ')
buffer += f'\n{role_prefix}{dialogue_turn.content}'
elif isinstance(dialogue_turn, tuple):
human = 'Human: ' + dialogue_turn[0]
ai = 'Assistant: ' + dialogue_turn[1]
buffer += '\n' + '\n'.join([human, ai])
else:
raise ValueError(
f'Unsupported chat history format: {type(dialogue_turn)}. Full chat history: {chat_history} '
)
return buffer
| null |
_import_deepsparse
|
from langchain_community.llms.deepsparse import DeepSparse
return DeepSparse
|
def _import_deepsparse() ->Any:
from langchain_community.llms.deepsparse import DeepSparse
return DeepSparse
| null |
create_new_keyword_index
|
"""
This method constructs a Cypher query and executes it
to create a new full text index in Neo4j.
"""
node_props = text_node_properties or [self.text_node_property]
fts_index_query = (
f"CREATE FULLTEXT INDEX {self.keyword_index_name} FOR (n:`{self.node_label}`) ON EACH [{', '.join([('n.`' + el + '`') for el in node_props])}]"
)
self.query(fts_index_query)
|
def create_new_keyword_index(self, text_node_properties: List[str]=[]) ->None:
"""
This method constructs a Cypher query and executes it
to create a new full text index in Neo4j.
"""
node_props = text_node_properties or [self.text_node_property]
fts_index_query = (
f"CREATE FULLTEXT INDEX {self.keyword_index_name} FOR (n:`{self.node_label}`) ON EACH [{', '.join([('n.`' + el + '`') for el in node_props])}]"
)
self.query(fts_index_query)
|
This method constructs a Cypher query and executes it
to create a new full text index in Neo4j.
|
test_tracer_llm_run
|
"""Test tracer on an LLM run."""
uuid = uuid4()
compare_run = Run(id=uuid, parent_run_id=None, start_time=datetime.now(
timezone.utc), end_time=datetime.now(timezone.utc), events=[{'name':
'start', 'time': datetime.now(timezone.utc)}, {'name': 'end', 'time':
datetime.now(timezone.utc)}], extra={}, execution_order=1,
child_execution_order=1, serialized=SERIALIZED, inputs={'prompts': []},
outputs=LLMResult(generations=[[]]), error=None, run_type='llm',
trace_id=uuid, dotted_order=f'20230101T000000000000Z{uuid}')
tracer = FakeTracer()
tracer.on_llm_start(serialized=SERIALIZED, prompts=[], run_id=uuid)
tracer.on_llm_end(response=LLMResult(generations=[[]]), run_id=uuid)
assert tracer.runs == [compare_run]
|
@freeze_time('2023-01-01')
def test_tracer_llm_run() ->None:
"""Test tracer on an LLM run."""
uuid = uuid4()
compare_run = Run(id=uuid, parent_run_id=None, start_time=datetime.now(
timezone.utc), end_time=datetime.now(timezone.utc), events=[{'name':
'start', 'time': datetime.now(timezone.utc)}, {'name': 'end',
'time': datetime.now(timezone.utc)}], extra={}, execution_order=1,
child_execution_order=1, serialized=SERIALIZED, inputs={'prompts':
[]}, outputs=LLMResult(generations=[[]]), error=None, run_type=
'llm', trace_id=uuid, dotted_order=f'20230101T000000000000Z{uuid}')
tracer = FakeTracer()
tracer.on_llm_start(serialized=SERIALIZED, prompts=[], run_id=uuid)
tracer.on_llm_end(response=LLMResult(generations=[[]]), run_id=uuid)
assert tracer.runs == [compare_run]
|
Test tracer on an LLM run.
|
_type
|
"""Return the type of the output parser.
Returns:
str: The type of the output parser.
"""
return 'pairwise_string_result'
|
@property
def _type(self) ->str:
"""Return the type of the output parser.
Returns:
str: The type of the output parser.
"""
return 'pairwise_string_result'
|
Return the type of the output parser.
Returns:
str: The type of the output parser.
|
_yellowbrick_vector_from_texts
|
return Yellowbrick.from_texts(fake_texts, FakeEmbeddings(), metadatas,
YELLOWBRICK_URL, YELLOWBRICK_TABLE)
|
def _yellowbrick_vector_from_texts(metadatas: Optional[List[dict]]=None,
drop: bool=True) ->Yellowbrick:
return Yellowbrick.from_texts(fake_texts, FakeEmbeddings(), metadatas,
YELLOWBRICK_URL, YELLOWBRICK_TABLE)
| null |
_get_gcs_client
|
"""Lazily creates a GCS client.
Returns:
A configured GCS client.
"""
from google.cloud import storage
return storage.Client(credentials=credentials, project=project_id,
client_info=get_client_info(module='vertex-ai-matching-engine'))
|
@classmethod
def _get_gcs_client(cls, credentials: 'Credentials', project_id: str
) ->'storage.Client':
"""Lazily creates a GCS client.
Returns:
A configured GCS client.
"""
from google.cloud import storage
return storage.Client(credentials=credentials, project=project_id,
client_info=get_client_info(module='vertex-ai-matching-engine'))
|
Lazily creates a GCS client.
Returns:
A configured GCS client.
|
load
|
"""Load webpages as Documents."""
soup = self.scrape()
text = soup.select_one("main[class='skin-handler']").text
metadata = {'source': self.web_path}
return [Document(page_content=text, metadata=metadata)]
|
def load(self) ->List[Document]:
"""Load webpages as Documents."""
soup = self.scrape()
text = soup.select_one("main[class='skin-handler']").text
metadata = {'source': self.web_path}
return [Document(page_content=text, metadata=metadata)]
|
Load webpages as Documents.
|
_get_response
|
if run.status == 'completed':
import openai
messages = self.client.beta.threads.messages.list(run.thread_id, order=
'asc')
new_messages = [msg for msg in messages if msg.run_id == run.id]
if not self.as_agent:
return new_messages
answer: Any = [msg_content for msg in new_messages for msg_content in
msg.content]
if all(isinstance(content, openai.types.beta.threads.MessageContentText
) for content in answer):
answer = '\n'.join(content.text.value for content in answer)
return OpenAIAssistantFinish(return_values={'output': answer,
'thread_id': run.thread_id, 'run_id': run.id}, log='', run_id=run.
id, thread_id=run.thread_id)
elif run.status == 'requires_action':
if not self.as_agent:
return run.required_action.submit_tool_outputs.tool_calls
actions = []
for tool_call in run.required_action.submit_tool_outputs.tool_calls:
function = tool_call.function
try:
args = json.loads(function.arguments, strict=False)
except JSONDecodeError as e:
raise ValueError(
f'Received invalid JSON function arguments: {function.arguments} for function {function.name}'
) from e
if len(args) == 1 and '__arg1' in args:
args = args['__arg1']
actions.append(OpenAIAssistantAction(tool=function.name, tool_input
=args, tool_call_id=tool_call.id, log='', run_id=run.id,
thread_id=run.thread_id))
return actions
else:
run_info = json.dumps(run.dict(), indent=2)
raise ValueError(
f'Unexpected run status: {run.status}. Full run info:\n\n{run_info})')
|
def _get_response(self, run: Any) ->Any:
if run.status == 'completed':
import openai
messages = self.client.beta.threads.messages.list(run.thread_id,
order='asc')
new_messages = [msg for msg in messages if msg.run_id == run.id]
if not self.as_agent:
return new_messages
answer: Any = [msg_content for msg in new_messages for msg_content in
msg.content]
if all(isinstance(content, openai.types.beta.threads.
MessageContentText) for content in answer):
answer = '\n'.join(content.text.value for content in answer)
return OpenAIAssistantFinish(return_values={'output': answer,
'thread_id': run.thread_id, 'run_id': run.id}, log='', run_id=
run.id, thread_id=run.thread_id)
elif run.status == 'requires_action':
if not self.as_agent:
return run.required_action.submit_tool_outputs.tool_calls
actions = []
for tool_call in run.required_action.submit_tool_outputs.tool_calls:
function = tool_call.function
try:
args = json.loads(function.arguments, strict=False)
except JSONDecodeError as e:
raise ValueError(
f'Received invalid JSON function arguments: {function.arguments} for function {function.name}'
) from e
if len(args) == 1 and '__arg1' in args:
args = args['__arg1']
actions.append(OpenAIAssistantAction(tool=function.name,
tool_input=args, tool_call_id=tool_call.id, log='', run_id=
run.id, thread_id=run.thread_id))
return actions
else:
run_info = json.dumps(run.dict(), indent=2)
raise ValueError(
f'Unexpected run status: {run.status}. Full run info:\n\n{run_info})'
)
| null |
visit_structured_query
|
if structured_query.filter is None:
kwargs = {}
else:
kwargs = {'postgrest_filter': structured_query.filter.accept(self)}
return structured_query.query, kwargs
|
def visit_structured_query(self, structured_query: StructuredQuery) ->Tuple[
str, Dict[str, str]]:
if structured_query.filter is None:
kwargs = {}
else:
kwargs = {'postgrest_filter': structured_query.filter.accept(self)}
return structured_query.query, kwargs
| null |
test_news_call
|
search = DataForSeoAPIWrapper(params={'se_type': 'news'},
json_result_fields=['title', 'snippet'])
output = search.results('iphone')
assert any('Apple' in d['title'] or 'Apple' in d['snippet'] for d in output)
|
def test_news_call() ->None:
search = DataForSeoAPIWrapper(params={'se_type': 'news'},
json_result_fields=['title', 'snippet'])
output = search.results('iphone')
assert any('Apple' in d['title'] or 'Apple' in d['snippet'] for d in output
)
| null |
on_chain_end
|
"""On chain end, do nothing."""
|
def on_chain_end(self, outputs: Dict[str, Any], **kwargs: Any) ->None:
"""On chain end, do nothing."""
|
On chain end, do nothing.
|
__init__
|
self.dimension = dimension
|
def __init__(self, dimension: int) ->None:
self.dimension = dimension
| null |
_get_dg
|
if index is not None:
assert 0 <= index < len(self._child_records), f'Bad index: {index}'
return self._child_records[index].dg
return self._container
|
def _get_dg(self, index: Optional[int]) ->DeltaGenerator:
if index is not None:
assert 0 <= index < len(self._child_records), f'Bad index: {index}'
return self._child_records[index].dg
return self._container
| null |
_run
|
"""Get the names of the tables."""
return ', '.join(self.powerbi.get_table_names())
|
def _run(self, tool_input: Optional[str]=None, run_manager: Optional[
CallbackManagerForToolRun]=None) ->str:
"""Get the names of the tables."""
return ', '.join(self.powerbi.get_table_names())
|
Get the names of the tables.
|
is_valid
|
return True
|
def is_valid(self) ->bool:
return True
| null |
embed_documents
|
"""Embed a list of documents using NLP Cloud.
Args:
texts: The list of texts to embed.
Returns:
List of embeddings, one for each text.
"""
return self.client.embeddings(texts)['embeddings']
|
def embed_documents(self, texts: List[str]) ->List[List[float]]:
"""Embed a list of documents using NLP Cloud.
Args:
texts: The list of texts to embed.
Returns:
List of embeddings, one for each text.
"""
return self.client.embeddings(texts)['embeddings']
|
Embed a list of documents using NLP Cloud.
Args:
texts: The list of texts to embed.
Returns:
List of embeddings, one for each text.
|
test_initialization_without_arcgis
|
with patch.dict('sys.modules', {'arcgis': None}):
with pytest.raises(ImportError, match=
'arcgis is required to use the ArcGIS Loader'):
ArcGISLoader(layer=mock_feature_layer, gis=mock_gis)
|
def test_initialization_without_arcgis(mock_feature_layer, mock_gis):
with patch.dict('sys.modules', {'arcgis': None}):
with pytest.raises(ImportError, match=
'arcgis is required to use the ArcGIS Loader'):
ArcGISLoader(layer=mock_feature_layer, gis=mock_gis)
| null |
get_full_module_name
|
"""Get full module name using inspect"""
module = importlib.import_module(module_path)
class_ = getattr(module, class_name)
return inspect.getmodule(class_).__name__
|
def get_full_module_name(module_path, class_name):
"""Get full module name using inspect"""
module = importlib.import_module(module_path)
class_ = getattr(module, class_name)
return inspect.getmodule(class_).__name__
|
Get full module name using inspect
|
from_texts
|
"""Create a Chroma vectorstore from a raw documents.
If a persist_directory is specified, the collection will be persisted there.
Otherwise, the data will be ephemeral in-memory.
Args:
texts (List[str]): List of texts to add to the collection.
collection_name (str): Name of the collection to create.
persist_directory (Optional[str]): Directory to persist the collection.
embedding (Optional[Embeddings]): Embedding function. Defaults to None.
metadatas (Optional[List[dict]]): List of metadatas. Defaults to None.
ids (Optional[List[str]]): List of document IDs. Defaults to None.
client_settings (Optional[chromadb.config.Settings]): Chroma client settings
collection_metadata (Optional[Dict]): Collection configurations.
Defaults to None.
Returns:
Chroma: Chroma vectorstore.
"""
chroma_collection = cls(collection_name=collection_name, embedding_function
=embedding, persist_directory=persist_directory, client_settings=
client_settings, client=client, collection_metadata=collection_metadata,
**kwargs)
if ids is None:
ids = [str(uuid.uuid1()) for _ in texts]
if hasattr(chroma_collection._client, 'max_batch_size'):
from chromadb.utils.batch_utils import create_batches
for batch in create_batches(api=chroma_collection._client, ids=ids,
metadatas=metadatas, documents=texts):
chroma_collection.add_texts(texts=batch[3] if batch[3] else [],
metadatas=batch[2] if batch[2] else None, ids=batch[0])
else:
chroma_collection.add_texts(texts=texts, metadatas=metadatas, ids=ids)
return chroma_collection
|
@classmethod
def from_texts(cls: Type[Chroma], texts: List[str], embedding: Optional[
    Embeddings]=None, metadatas: Optional[List[dict]]=None, ids: Optional[
    List[str]]=None, collection_name: str=
    _LANGCHAIN_DEFAULT_COLLECTION_NAME, persist_directory: Optional[str]=
    None, client_settings: Optional[chromadb.config.Settings]=None, client:
    Optional[chromadb.Client]=None, collection_metadata: Optional[Dict]=
    None, **kwargs: Any) ->Chroma:
    """Create a Chroma vectorstore from a raw documents.
    If a persist_directory is specified, the collection will be persisted there.
    Otherwise, the data will be ephemeral in-memory.
    Args:
        texts (List[str]): List of texts to add to the collection.
        collection_name (str): Name of the collection to create.
        persist_directory (Optional[str]): Directory to persist the collection.
        embedding (Optional[Embeddings]): Embedding function. Defaults to None.
        metadatas (Optional[List[dict]]): List of metadatas. Defaults to None.
        ids (Optional[List[str]]): List of document IDs. Defaults to None.
        client_settings (Optional[chromadb.config.Settings]): Chroma client settings
        collection_metadata (Optional[Dict]): Collection configurations.
            Defaults to None.
    Returns:
        Chroma: Chroma vectorstore.
    """
    chroma_collection = cls(collection_name=collection_name,
        embedding_function=embedding, persist_directory=persist_directory,
        client_settings=client_settings, client=client, collection_metadata
        =collection_metadata, **kwargs)
    if ids is None:
        # No ids supplied: generate one uuid1 per text.
        ids = [str(uuid.uuid1()) for _ in texts]
    if hasattr(chroma_collection._client, 'max_batch_size'):
        # Newer chromadb clients cap request sizes; split the payload into
        # client-approved batches instead of one big add_texts call.
        from chromadb.utils.batch_utils import create_batches
        for batch in create_batches(api=chroma_collection._client, ids=ids,
            metadatas=metadatas, documents=texts):
            # Positional indexing assumes batch tuples are ordered
            # (ids, embeddings, metadatas, documents) — matches the kwargs
            # passed above; TODO confirm against chromadb's create_batches.
            chroma_collection.add_texts(texts=batch[3] if batch[3] else [],
                metadatas=batch[2] if batch[2] else None, ids=batch[0])
    else:
        chroma_collection.add_texts(texts=texts, metadatas=metadatas, ids=ids)
    return chroma_collection
|
Create a Chroma vectorstore from a raw documents.
If a persist_directory is specified, the collection will be persisted there.
Otherwise, the data will be ephemeral in-memory.
Args:
texts (List[str]): List of texts to add to the collection.
collection_name (str): Name of the collection to create.
persist_directory (Optional[str]): Directory to persist the collection.
embedding (Optional[Embeddings]): Embedding function. Defaults to None.
metadatas (Optional[List[dict]]): List of metadatas. Defaults to None.
ids (Optional[List[str]]): List of document IDs. Defaults to None.
client_settings (Optional[chromadb.config.Settings]): Chroma client settings
collection_metadata (Optional[Dict]): Collection configurations.
Defaults to None.
Returns:
Chroma: Chroma vectorstore.
|
_invocation_params
|
"""Get the parameters used to invoke the model."""
params: Dict[str, Any] = {'model': self.model_name, **self._default_params,
'logit_bias': None}
if not is_openai_v1():
params.update({'api_key': self.openai_api_key, 'api_base': self.
openai_api_base})
return params
|
@property
def _invocation_params(self) ->Dict[str, Any]:
    """Get the parameters used to invoke the model."""
    # Start from the model name, then layer the default params on top
    # (defaults win on key collisions, as in the original merge order),
    # and finally force logit_bias off.
    params: Dict[str, Any] = {'model': self.model_name}
    params.update(self._default_params)
    params['logit_bias'] = None
    if not is_openai_v1():
        # Pre-v1 openai clients take credentials per call.
        params['api_key'] = self.openai_api_key
        params['api_base'] = self.openai_api_base
    return params
|
Get the parameters used to invoke the model.
|
convert_to_base64
|
"""
Convert PIL images to Base64 encoded strings
:param pil_image: PIL image
:return: Re-sized Base64 string
"""
buffered = BytesIO()
pil_image.save(buffered, format='JPEG')
img_str = base64.b64encode(buffered.getvalue()).decode('utf-8')
img_str = resize_base64_image(img_str, size=(960, 540))
return img_str
|
def convert_to_base64(pil_image):
    """
    Convert a PIL image to a resized Base64-encoded JPEG string.
    :param pil_image: PIL image
    :return: Re-sized Base64 string
    """
    jpeg_buffer = BytesIO()
    pil_image.save(jpeg_buffer, format='JPEG')
    encoded = base64.b64encode(jpeg_buffer.getvalue()).decode('utf-8')
    # Downscale to a bounded resolution before returning.
    return resize_base64_image(encoded, size=(960, 540))
|
Convert PIL images to Base64 encoded strings
:param pil_image: PIL image
:return: Re-sized Base64 string
|
test_bulk_args
|
"""Test to make sure the user-agent is set correctly."""
texts = ['foo', 'bob', 'baz']
ElasticsearchStore.from_texts(texts, FakeEmbeddings(), es_connection=
es_client, index_name=index_name, bulk_kwargs={'chunk_size': 1})
assert len(es_client.transport.requests) == 5
|
def test_bulk_args(self, es_client: Any, index_name: str) ->None:
    """Test to make sure the user-agent is set correctly."""
    sample_texts = ['foo', 'bob', 'baz']
    ElasticsearchStore.from_texts(sample_texts, FakeEmbeddings(),
        es_connection=es_client, index_name=index_name, bulk_kwargs={
        'chunk_size': 1})
    # chunk_size=1 splits the bulk insert, so the recorded request count
    # covers index setup plus one request per text.
    assert len(es_client.transport.requests) == 5
|
Test to make sure the user-agent is set correctly.
|
test_everything_embedded
|
llm, PROMPT = setup()
feature_embedder = pick_best_chain.PickBestFeatureEmbedder(auto_embed=False,
model=MockEncoder())
chain = pick_best_chain.PickBest.from_llm(llm=llm, prompt=PROMPT,
feature_embedder=feature_embedder, auto_embed=False)
str1 = '0'
str2 = '1'
str3 = '2'
encoded_str1 = rl_chain.stringify_embedding(list(encoded_keyword + str1))
encoded_str2 = rl_chain.stringify_embedding(list(encoded_keyword + str2))
encoded_str3 = rl_chain.stringify_embedding(list(encoded_keyword + str3))
ctx_str_1 = 'context1'
encoded_ctx_str_1 = rl_chain.stringify_embedding(list(encoded_keyword +
ctx_str_1))
expected = f"""shared |User {ctx_str_1 + ' ' + encoded_ctx_str_1}
|action {str1 + ' ' + encoded_str1}
|action {str2 + ' ' + encoded_str2}
|action {str3 + ' ' + encoded_str3} """
actions = [str1, str2, str3]
response = chain.run(User=rl_chain.EmbedAndKeep(rl_chain.BasedOn(ctx_str_1)
), action=rl_chain.EmbedAndKeep(rl_chain.ToSelectFrom(actions)))
selection_metadata = response['selection_metadata']
vw_str = feature_embedder.format(selection_metadata)
assert vw_str == expected
|
@pytest.mark.requires('vowpal_wabbit_next', 'sentence_transformers')
def test_everything_embedded() ->None:
    """With EmbedAndKeep on both context and actions, the VW example string
    must contain each raw value followed by its stringified embedding."""
    llm, PROMPT = setup()
    feature_embedder = pick_best_chain.PickBestFeatureEmbedder(auto_embed=
        False, model=MockEncoder())
    chain = pick_best_chain.PickBest.from_llm(llm=llm, prompt=PROMPT,
        feature_embedder=feature_embedder, auto_embed=False)
    str1 = '0'
    str2 = '1'
    str3 = '2'
    # MockEncoder-based embeddings, stringified the same way the embedder does.
    encoded_str1 = rl_chain.stringify_embedding(list(encoded_keyword + str1))
    encoded_str2 = rl_chain.stringify_embedding(list(encoded_keyword + str2))
    encoded_str3 = rl_chain.stringify_embedding(list(encoded_keyword + str3))
    ctx_str_1 = 'context1'
    encoded_ctx_str_1 = rl_chain.stringify_embedding(list(encoded_keyword +
        ctx_str_1))
    # Expected VW text format: shared context line, then one line per action.
    expected = f"""shared |User {ctx_str_1 + ' ' + encoded_ctx_str_1}
|action {str1 + ' ' + encoded_str1}
|action {str2 + ' ' + encoded_str2}
|action {str3 + ' ' + encoded_str3} """
    actions = [str1, str2, str3]
    response = chain.run(User=rl_chain.EmbedAndKeep(rl_chain.BasedOn(
        ctx_str_1)), action=rl_chain.EmbedAndKeep(rl_chain.ToSelectFrom(
        actions)))
    selection_metadata = response['selection_metadata']
    vw_str = feature_embedder.format(selection_metadata)
    assert vw_str == expected
| null |
delete
|
return 'delete_response'
|
@staticmethod
def delete(url: str, **kwargs: Any) ->str:
return 'delete_response'
| null |
_is_url
|
try:
result = urllib.parse.urlparse(s)
return all([result.scheme, result.netloc])
except Exception as e:
logger.debug(f'Unable to parse URL: {e}')
return False
|
def _is_url(s: str) ->bool:
try:
result = urllib.parse.urlparse(s)
return all([result.scheme, result.netloc])
except Exception as e:
logger.debug(f'Unable to parse URL: {e}')
return False
| null |
test_search_anomalous
|
"""
Test detection of anomalousness
"""
result = self.vectorstore.is_anomalous(query='dogs can jump high')
assert result is False
|
def test_search_anomalous(self) ->None:
"""
Test detection of anomalousness
"""
result = self.vectorstore.is_anomalous(query='dogs can jump high')
assert result is False
|
Test detection of anomalousness
|
_convert_delta_to_message_chunk
|
role = _dict.get('role')
content = _dict.get('content') or ''
if _dict.get('function_call'):
additional_kwargs = {'function_call': dict(_dict['function_call'])}
else:
additional_kwargs = {}
if role == 'user' or default_class == HumanMessageChunk:
return HumanMessageChunk(content=content)
elif role == 'assistant' or default_class == AIMessageChunk:
return AIMessageChunk(content=content, additional_kwargs=additional_kwargs)
elif role == 'system' or default_class == SystemMessageChunk:
return SystemMessageChunk(content=content)
elif role == 'function' or default_class == FunctionMessageChunk:
return FunctionMessageChunk(content=content, name=_dict['name'])
elif role or default_class == ChatMessageChunk:
return ChatMessageChunk(content=content, role=role)
else:
return default_class(content=content)
|
def _convert_delta_to_message_chunk(_dict: Mapping[str, Any], default_class:
    Type[BaseMessageChunk]) ->BaseMessageChunk:
    """Map one streamed delta dict onto the matching message-chunk class.

    ``default_class`` decides the chunk type when the delta carries no
    ``role`` key (later chunks of a stream typically omit it).
    """
    role = _dict.get('role')
    content = _dict.get('content') or ''
    if _dict.get('function_call'):
        additional_kwargs = {'function_call': dict(_dict['function_call'])}
    else:
        additional_kwargs = {}
    if role == 'user' or default_class == HumanMessageChunk:
        return HumanMessageChunk(content=content)
    elif role == 'assistant' or default_class == AIMessageChunk:
        # Only the assistant branch carries the function_call payload through.
        return AIMessageChunk(content=content, additional_kwargs=
            additional_kwargs)
    elif role == 'system' or default_class == SystemMessageChunk:
        return SystemMessageChunk(content=content)
    elif role == 'function' or default_class == FunctionMessageChunk:
        # NOTE(review): assumes the delta always has a 'name' key here —
        # KeyError otherwise; confirm the upstream stream guarantees it.
        return FunctionMessageChunk(content=content, name=_dict['name'])
    elif role or default_class == ChatMessageChunk:
        # NOTE(review): role can be None when only default_class matched —
        # verify ChatMessageChunk accepts role=None.
        return ChatMessageChunk(content=content, role=role)
    else:
        return default_class(content=content)
| null |
_query
|
embeddings = []
for txt in _chunk(texts, 20):
try:
resp = self.client.query_route(self.route, query_body={'input': txt})
resp_dict = resp.dict()
embeddings_chunk = resp_dict.get('llm_response', {}).get('data', [])
for item in embeddings_chunk:
if 'embedding' in item:
embeddings.append(item['embedding'])
except ValueError as e:
print('Failed to query route: ' + str(e))
return embeddings
|
def _query(self, texts: List[str]) ->List[List[float]]:
    """Embed *texts* through the configured route, in batches.

    Best-effort: a chunk that fails with ValueError is reported and
    skipped, so the returned list may be shorter than *texts*.
    """
    embeddings = []
    # _chunk presumably yields batches of up to 20 texts — confirm helper.
    for txt in _chunk(texts, 20):
        try:
            resp = self.client.query_route(self.route, query_body={'input':
                txt})
            resp_dict = resp.dict()
            # Response shape: {'llm_response': {'data': [{'embedding': ...}]}}
            embeddings_chunk = resp_dict.get('llm_response', {}).get('data', []
                )
            for item in embeddings_chunk:
                if 'embedding' in item:
                    embeddings.append(item['embedding'])
        except ValueError as e:
            # NOTE(review): failure is only printed, not logged/raised —
            # confirm callers tolerate silently missing embeddings.
            print('Failed to query route: ' + str(e))
    return embeddings
| null |
import_infino
|
"""Import the infino client."""
try:
from infinopy import InfinoClient
except ImportError:
raise ImportError(
'To use the Infino callbacks manager you need to have the `infinopy` python package installed.Please install it with `pip install infinopy`'
)
return InfinoClient()
|
def import_infino() ->Any:
    """Import the infino client.

    Returns:
        A fresh ``InfinoClient`` instance.

    Raises:
        ImportError: If the ``infinopy`` package is not installed.
    """
    try:
        from infinopy import InfinoClient
    except ImportError:
        # Fix: the original message ran two sentences together
        # ("installed.Please"); keep the user-facing text readable.
        raise ImportError(
            'To use the Infino callbacks manager you need to have the `infinopy` python package installed. Please install it with `pip install infinopy`'
            )
    return InfinoClient()
|
Import the infino client.
|
_embed_documents
|
if isinstance(self.embedding_function, Embeddings):
return self.embedding_function.embed_documents(texts)
else:
return [self.embedding_function(text) for text in texts]
|
def _embed_documents(self, texts: List[str]) ->List[List[float]]:
    """Embed *texts*, dispatching on the kind of embedding function held."""
    fn = self.embedding_function
    if isinstance(fn, Embeddings):
        # Embeddings objects handle the whole batch in one call.
        return fn.embed_documents(texts)
    # Otherwise assume a plain callable that embeds one text at a time.
    embedded = []
    for text in texts:
        embedded.append(fn(text))
    return embedded
| null |
visit_comparison
|
comparator = self._format_func(comparison.comparator)
values = comparison.value
if isinstance(values, list):
tql = []
for value in values:
comparison.value = value
tql.append(self.visit_comparison(comparison))
return '(' + ' or '.join(tql) + ')'
if not can_cast_to_float(comparison.value):
values = f"'{values}'"
return f"metadata['{comparison.attribute}'] {comparator} {values}"
|
def visit_comparison(self, comparison: Comparison) ->str:
    """Render a Comparison as a TQL filter expression.

    List values expand into an OR of per-value comparisons; non-numeric
    scalar values are quoted.
    """
    comparator = self._format_func(comparison.comparator)
    values = comparison.value
    if isinstance(values, list):
        tql = []
        for value in values:
            # Reuse the scalar path by temporarily swapping the value in.
            comparison.value = value
            tql.append(self.visit_comparison(comparison))
        # Fix: restore the caller's value. The original left the last list
        # element behind, permanently mutating the input Comparison.
        comparison.value = values
        return '(' + ' or '.join(tql) + ')'
    if not can_cast_to_float(comparison.value):
        values = f"'{values}'"
    return f"metadata['{comparison.attribute}'] {comparator} {values}"
| null |
_import_zapier
|
from langchain_community.utilities.zapier import ZapierNLAWrapper
return ZapierNLAWrapper
|
def _import_zapier() ->Any:
    """Lazily import and return the ZapierNLAWrapper class."""
    from langchain_community.utilities.zapier import ZapierNLAWrapper
    return ZapierNLAWrapper
| null |
test_sitemap
|
"""Test sitemap loader."""
loader = SitemapLoader('https://api.python.langchain.com/sitemap.xml')
documents = loader.load()
assert len(documents) > 1
assert 'LangChain Python API' in documents[0].page_content
|
def test_sitemap() ->None:
    """Test sitemap loader."""
    docs = SitemapLoader('https://api.python.langchain.com/sitemap.xml').load()
    # The sitemap should yield several pages, with API docs text up front.
    assert len(docs) > 1
    assert 'LangChain Python API' in docs[0].page_content
|
Test sitemap loader.
|
test_md_header_text_splitter_3
|
"""Test markdown splitter by header: Case 3."""
markdown_document = """# Foo
## Bar
Hi this is Jim
Hi this is Joe
### Boo
Hi this is Lance
#### Bim
Hi this is John
## Baz
Hi this is Molly"""
headers_to_split_on = [('#', 'Header 1'), ('##', 'Header 2'), ('###',
'Header 3'), ('####', 'Header 4')]
markdown_splitter = MarkdownHeaderTextSplitter(headers_to_split_on=
headers_to_split_on)
output = markdown_splitter.split_text(markdown_document)
expected_output = [Document(page_content=
"""Hi this is Jim
Hi this is Joe""", metadata={'Header 1': 'Foo',
'Header 2': 'Bar'}), Document(page_content='Hi this is Lance', metadata
={'Header 1': 'Foo', 'Header 2': 'Bar', 'Header 3': 'Boo'}), Document(
page_content='Hi this is John', metadata={'Header 1': 'Foo', 'Header 2':
'Bar', 'Header 3': 'Boo', 'Header 4': 'Bim'}), Document(page_content=
'Hi this is Molly', metadata={'Header 1': 'Foo', 'Header 2': 'Baz'})]
assert output == expected_output
|
def test_md_header_text_splitter_3() ->None:
    """Test markdown splitter by header: Case 3."""
    markdown_document = """# Foo
    ## Bar
Hi this is Jim
Hi this is Joe
    ### Boo
Hi this is Lance
    #### Bim
Hi this is John
    ## Baz
Hi this is Molly"""
    # Four header levels tracked; each split inherits all ancestor headers.
    headers_to_split_on = [('#', 'Header 1'), ('##', 'Header 2'), ('###',
        'Header 3'), ('####', 'Header 4')]
    markdown_splitter = MarkdownHeaderTextSplitter(headers_to_split_on=
        headers_to_split_on)
    output = markdown_splitter.split_text(markdown_document)
    # Opening '## Baz' drops back to level 2, so 'Header 3'/'Header 4'
    # metadata is cleared for the last chunk.
    expected_output = [Document(page_content=
        'Hi this is Jim \nHi this is Joe', metadata={'Header 1': 'Foo',
        'Header 2': 'Bar'}), Document(page_content='Hi this is Lance',
        metadata={'Header 1': 'Foo', 'Header 2': 'Bar', 'Header 3': 'Boo'}),
        Document(page_content='Hi this is John', metadata={'Header 1':
        'Foo', 'Header 2': 'Bar', 'Header 3': 'Boo', 'Header 4': 'Bim'}),
        Document(page_content='Hi this is Molly', metadata={'Header 1':
        'Foo', 'Header 2': 'Baz'})]
    assert output == expected_output
|
Test markdown splitter by header: Case 3.
|
_import_baidu_qianfan_endpoint
|
from langchain_community.llms.baidu_qianfan_endpoint import QianfanLLMEndpoint
return QianfanLLMEndpoint
|
def _import_baidu_qianfan_endpoint() ->Any:
    """Lazily import and return the QianfanLLMEndpoint class."""
    from langchain_community.llms.baidu_qianfan_endpoint import QianfanLLMEndpoint
    return QianfanLLMEndpoint
| null |
_create_action_url
|
"""Create a url for an action."""
return self.zapier_nla_api_base + f'exposed/{action_id}/execute/'
|
def _create_action_url(self, action_id: str) ->str:
"""Create a url for an action."""
return self.zapier_nla_api_base + f'exposed/{action_id}/execute/'
|
Create a url for an action.
|
_paragraphs
|
if self.document is None:
raise ValueError('Cannot get paragraphs without a document')
return self.document.page_content.split('\n\n')
|
@property
def _paragraphs(self) ->List[str]:
if self.document is None:
raise ValueError('Cannot get paragraphs without a document')
return self.document.page_content.split('\n\n')
| null |
clear
|
"""Remove the thought from the screen. A cleared thought can't be reused."""
self._container.clear()
|
def clear(self) ->None:
    """Remove the thought from the screen. A cleared thought can't be reused."""
    container = self._container
    container.clear()
|
Remove the thought from the screen. A cleared thought can't be reused.
|
is_codey_model
|
"""Returns True if the model name is a Codey model."""
return 'code' in model_name
|
def is_codey_model(model_name: str) ->bool:
    """Returns True if the model name is a Codey model."""
    # Codey model identifiers all contain the substring 'code'.
    return model_name.find('code') != -1
|
Returns True if the model name is a Codey model.
|
mock_create_run
|
projects.append(kwargs.get('project_name'))
return unittest.mock.MagicMock()
|
def mock_create_run(**kwargs: Any) ->Any:
    """Test double for run creation: records the project name, returns a mock."""
    # `projects` is a list captured from the enclosing test scope.
    projects.append(kwargs.get('project_name'))
    return unittest.mock.MagicMock()
| null |
request_url
|
"""Get the request url."""
if self.group_id:
return (
f'{BASE_URL}/groups/{self.group_id}/datasets/{self.dataset_id}/executeQueries'
)
return f'{BASE_URL}/datasets/{self.dataset_id}/executeQueries'
|
@property
def request_url(self) ->str:
    """Get the request url."""
    if self.group_id:
        # Group-scoped datasets live under the /groups/<id> prefix.
        prefix = f'{BASE_URL}/groups/{self.group_id}'
    else:
        prefix = BASE_URL
    return f'{prefix}/datasets/{self.dataset_id}/executeQueries'
|
Get the request url.
|
assert_query
|
assert query_body == {'knn': {'filter': [], 'field':
'vector_query_field.predicted_value', 'k': 1, 'num_candidates': 50,
'query_vector_builder': {'text_embedding': {'model_id':
'sentence-transformers__all-minilm-l6-v2', 'model_text': 'foo'}}}}
return query_body
|
def assert_query(query_body: dict, query: str) ->dict:
    """Check that *query_body* is the expected text-embedding KNN request."""
    expected = {'knn': {'filter': [], 'field':
        'vector_query_field.predicted_value', 'k': 1, 'num_candidates': 50,
        'query_vector_builder': {'text_embedding': {'model_id':
        'sentence-transformers__all-minilm-l6-v2', 'model_text': 'foo'}}}}
    assert query_body == expected
    return query_body
| null |
_completion_with_retry
|
resp = self.client.call(**_kwargs)
return check_response(resp)
|
@retry_decorator
def _completion_with_retry(**_kwargs: Any) ->Any:
    """Single client call; ``retry_decorator`` re-invokes it on failure.

    Closure: ``self`` is captured from the enclosing method scope.
    """
    resp = self.client.call(**_kwargs)
    return check_response(resp)
| null |
update
|
"""Update cache based on prompt and llm_string."""
doc_id = self._make_id(prompt, llm_string)
llm_string_hash = _hash(llm_string)
embedding_vector = self._get_embedding(text=prompt)
body = _dumps_generations(return_val)
self.collection.upsert({'_id': doc_id, 'body_blob': body, 'llm_string_hash':
llm_string_hash, '$vector': embedding_vector})
|
def update(self, prompt: str, llm_string: str, return_val: RETURN_VAL_TYPE
    ) ->None:
    """Update cache based on prompt and llm_string."""
    # Id derived from (prompt, llm_string) — presumably stable so a repeat
    # of the same pair upserts over the previous entry; confirm _make_id.
    doc_id = self._make_id(prompt, llm_string)
    llm_string_hash = _hash(llm_string)
    # The vector is computed from the prompt only; the llm_string is kept
    # alongside as a hash for exact-match filtering.
    embedding_vector = self._get_embedding(text=prompt)
    body = _dumps_generations(return_val)
    self.collection.upsert({'_id': doc_id, 'body_blob': body,
        'llm_string_hash': llm_string_hash, '$vector': embedding_vector})
|
Update cache based on prompt and llm_string.
|
_batch
|
results_map: Dict[int, Output] = {}
def pending(iterable: List[U]) ->List[U]:
return [item for idx, item in enumerate(iterable) if idx not in results_map
]
try:
for attempt in self._sync_retrying():
with attempt:
result = super().batch(pending(inputs), self._patch_config_list
(pending(config), pending(run_manager), attempt.retry_state
), return_exceptions=True, **kwargs)
first_exception = None
for i, r in enumerate(result):
if isinstance(r, Exception):
if not first_exception:
first_exception = r
continue
results_map[i] = r
if first_exception:
raise first_exception
if (attempt.retry_state.outcome and not attempt.retry_state.outcome
.failed):
attempt.retry_state.set_result(result)
except RetryError as e:
try:
result
except UnboundLocalError:
result = cast(List[Output], [e] * len(inputs))
outputs: List[Union[Output, Exception]] = []
for idx, _ in enumerate(inputs):
if idx in results_map:
outputs.append(results_map[idx])
else:
outputs.append(result.pop(0))
return outputs
|
def _batch(self, inputs: List[Input], run_manager: List[
    'CallbackManagerForChainRun'], config: List[RunnableConfig], **kwargs: Any
    ) ->List[Union[Output, Exception]]:
    """Batch with retries, re-running only inputs that have not succeeded.

    Returns one output per input; if retries are exhausted, failed
    positions hold the exception from the final attempt.
    """
    results_map: Dict[int, Output] = {}
    # Items whose index is not yet in results_map still need (re)processing.
    def pending(iterable: List[U]) ->List[U]:
        return [item for idx, item in enumerate(iterable) if idx not in
            results_map]
    try:
        for attempt in self._sync_retrying():
            with attempt:
                result = super().batch(pending(inputs), self.
                    _patch_config_list(pending(config), pending(run_manager
                    ), attempt.retry_state), return_exceptions=True, **kwargs)
                # Record successes; re-raise the first failure so the retry
                # loop schedules another attempt for the remainder.
                # NOTE(review): `i` indexes the *pending* subset, which
                # shrinks between attempts — verify alignment with the
                # original input order.
                first_exception = None
                for i, r in enumerate(result):
                    if isinstance(r, Exception):
                        if not first_exception:
                            first_exception = r
                        continue
                    results_map[i] = r
                if first_exception:
                    raise first_exception
                if (attempt.retry_state.outcome and not attempt.retry_state.
                    outcome.failed):
                    attempt.retry_state.set_result(result)
    except RetryError as e:
        try:
            result
        except UnboundLocalError:
            # No attempt ever produced a result list: report the RetryError
            # in every position.
            result = cast(List[Output], [e] * len(inputs))
    # Merge cached successes with the final attempt's leftovers, keeping
    # the original input order.
    outputs: List[Union[Output, Exception]] = []
    for idx, _ in enumerate(inputs):
        if idx in results_map:
            outputs.append(results_map[idx])
        else:
            outputs.append(result.pop(0))
    return outputs
| null |
similarity_search
|
"""Run similarity search with AtlasDB
Args:
query (str): Query text to search for.
k (int): Number of results to return. Defaults to 4.
Returns:
List[Document]: List of documents most similar to the query text.
"""
if self._embedding_function is None:
raise NotImplementedError(
'AtlasDB requires an embedding_function for text similarity search!')
_embedding = self._embedding_function.embed_documents([query])[0]
embedding = np.array(_embedding).reshape(1, -1)
with self.project.wait_for_project_lock():
neighbors, _ = self.project.projections[0].vector_search(queries=
embedding, k=k)
data = self.project.get_data(ids=neighbors[0])
docs = [Document(page_content=data[i]['text'], metadata=data[i]) for i,
neighbor in enumerate(neighbors)]
return docs
|
def similarity_search(self, query: str, k: int=4, **kwargs: Any) ->List[
    Document]:
    """Run similarity search with AtlasDB
    Args:
        query (str): Query text to search for.
        k (int): Number of results to return. Defaults to 4.
    Returns:
        List[Document]: List of documents most similar to the query text.
    """
    if self._embedding_function is None:
        raise NotImplementedError(
            'AtlasDB requires an embedding_function for text similarity search!'
            )
    # Embed the single query and shape it as a (1, dim) batch for Atlas.
    _embedding = self._embedding_function.embed_documents([query])[0]
    embedding = np.array(_embedding).reshape(1, -1)
    with self.project.wait_for_project_lock():
        neighbors, _ = self.project.projections[0].vector_search(queries=
            embedding, k=k)
        # neighbors has one row per query; fetch the records for our
        # single query's k hits.
        data = self.project.get_data(ids=neighbors[0])
    # Fix: build one Document per returned record. The original enumerated
    # `neighbors` (length 1 — one entry per query) instead of the k hits,
    # so at most a single document was ever returned.
    docs = [Document(page_content=record['text'], metadata=record) for
        record in data]
    return docs
|
Run similarity search with AtlasDB
Args:
query (str): Query text to search for.
k (int): Number of results to return. Defaults to 4.
Returns:
List[Document]: List of documents most similar to the query text.
|
lc_secrets
|
return {'anthropic_api_key': 'ANTHROPIC_API_KEY'}
|
@property
def lc_secrets(self) ->Dict[str, str]:
    """Map constructor argument name -> env var that holds its secret."""
    secret_map = {'anthropic_api_key': 'ANTHROPIC_API_KEY'}
    return secret_map
| null |
load_local
|
"""Load the local specified table.
Args:
table_name: Table name
kwargs: Any possible extend parameters in the future.
Returns:
Success or failure of loading the local specified table
"""
if self.awadb_client is None:
raise ValueError('AwaDB client is None!!!')
return self.awadb_client.Load(table_name)
|
def load_local(self, table_name: str, **kwargs: Any) ->bool:
    """Load the local specified table.
    Args:
        table_name: Table name
        kwargs: Any possible extend parameters in the future.
    Returns:
        Success or failure of loading the local specified table
    """
    client = self.awadb_client
    if client is None:
        raise ValueError('AwaDB client is None!!!')
    return client.Load(table_name)
|
Load the local specified table.
Args:
table_name: Table name
kwargs: Any possible extend parameters in the future.
Returns:
Success or failure of loading the local specified table
|
load
|
"""Eagerly load the chat sessions into memory."""
return list(self.lazy_load())
|
def load(self) ->List[ChatSession]:
    """Eagerly load the chat sessions into memory."""
    # Drain the lazy iterator into a concrete list.
    return [*self.lazy_load()]
|
Eagerly load the chat sessions into memory.
|
_patch_config
|
attempt = retry_state.attempt_number
tag = 'retry:attempt:{}'.format(attempt) if attempt > 1 else None
return patch_config(config, callbacks=run_manager.get_child(tag))
|
def _patch_config(self, config: RunnableConfig, run_manager: 'T',
    retry_state: RetryCallState) ->RunnableConfig:
    """Tag the child callback run with the retry attempt (attempts > 1 only)."""
    attempt = retry_state.attempt_number
    if attempt > 1:
        tag = f'retry:attempt:{attempt}'
    else:
        tag = None
    return patch_config(config, callbacks=run_manager.get_child(tag))
| null |
add_documents
|
"""Add documents to vectorstore."""
return self.vectorstore.add_documents(documents, **kwargs)
|
def add_documents(self, documents: List[Document], **kwargs: Any) ->List[str]:
    """Add documents to the wrapped vectorstore, returning their ids."""
    store = self.vectorstore
    return store.add_documents(documents, **kwargs)
|
Add documents to vectorstore.
|
_index_schema
|
"""Return the index schema as a dictionary.
Return None if no schema found.
"""
if self._is_direct_access_index():
schema_json = self._direct_access_index_spec.get('schema_json')
if schema_json is not None:
return json.loads(schema_json)
return None
|
def _index_schema(self) ->Optional[dict]:
"""Return the index schema as a dictionary.
Return None if no schema found.
"""
if self._is_direct_access_index():
schema_json = self._direct_access_index_spec.get('schema_json')
if schema_json is not None:
return json.loads(schema_json)
return None
|
Return the index schema as a dictionary.
Return None if no schema found.
|
create_inverse_metadata
|
"""Create metadata from fields.
Args:
fields: The fields of the document. The fields must be a dict.
Returns:
metadata: The metadata of the document. The metadata must be a dict.
"""
metadata: Dict[str, Any] = {}
for key, value in fields.items():
if key == 'id' or key == 'document' or key == 'embedding':
continue
metadata[self.config.inverse_field_name_mapping[key]] = value
return metadata
|
def create_inverse_metadata(self, fields: Dict[str, Any]) ->Dict[str, Any]:
    """Create metadata from fields.
    Args:
        fields: The fields of the document. The fields must be a dict.
    Returns:
        metadata: The metadata of the document. The metadata must be a dict.
    """
    # Reserved document fields never become metadata.
    reserved = ('id', 'document', 'embedding')
    mapping = self.config.inverse_field_name_mapping
    return {mapping[key]: value for key, value in fields.items() if key
        not in reserved}
|
Create metadata from fields.
Args:
fields: The fields of the document. The fields must be a dict.
Returns:
metadata: The metadata of the document. The metadata must be a dict.
|
_import_mlflow_ai_gateway
|
from langchain_community.llms.mlflow_ai_gateway import MlflowAIGateway
return MlflowAIGateway
|
def _import_mlflow_ai_gateway() ->Any:
    """Lazily import and return the MlflowAIGateway class."""
    from langchain_community.llms.mlflow_ai_gateway import MlflowAIGateway
    return MlflowAIGateway
| null |
vcr_cassette_dir
|
return os.path.join(os.path.dirname(request.module.__file__), 'cassettes',
os.path.basename(request.module.__file__).replace('.py', ''))
|
@pytest.fixture(scope='module')
def vcr_cassette_dir(request: pytest.FixtureRequest) ->str:
    """Return '<test module dir>/cassettes/<test module name>' for VCR tapes."""
    return os.path.join(os.path.dirname(request.module.__file__),
        'cassettes', os.path.basename(request.module.__file__).replace(
        '.py', ''))
| null |
invoke
|
config = ensure_config(config)
return self.generate_prompt([self._convert_input(input)], stop=stop,
callbacks=config.get('callbacks'), tags=config.get('tags'), metadata=
config.get('metadata'), run_name=config.get('run_name'), **kwargs
).generations[0][0].text
|
def invoke(self, input: LanguageModelInput, config: Optional[RunnableConfig
    ]=None, *, stop: Optional[List[str]]=None, **kwargs: Any) ->str:
    """Generate a completion for *input*, returning the first generation's text."""
    config = ensure_config(config)
    # Single-prompt call to generate_prompt, forwarding the callback/tag/
    # metadata context from the runnable config; unwrap to plain text.
    return self.generate_prompt([self._convert_input(input)], stop=stop,
        callbacks=config.get('callbacks'), tags=config.get('tags'),
        metadata=config.get('metadata'), run_name=config.get('run_name'),
        **kwargs).generations[0][0].text
| null |
get_openapi_chain
|
"""Create a chain for querying an API from a OpenAPI spec.
Args:
spec: OpenAPISpec or url/file/text string corresponding to one.
llm: language model, should be an OpenAI function-calling model, e.g.
`ChatOpenAI(model="gpt-3.5-turbo-0613")`.
prompt: Main prompt template to use.
request_chain: Chain for taking the functions output and executing the request.
"""
if isinstance(spec, str):
for conversion in (OpenAPISpec.from_url, OpenAPISpec.from_file,
OpenAPISpec.from_text):
try:
spec = conversion(spec)
break
except ImportError as e:
raise e
except Exception:
pass
if isinstance(spec, str):
raise ValueError(f'Unable to parse spec from source {spec}')
openai_fns, call_api_fn = openapi_spec_to_openai_fn(spec)
llm = llm or ChatOpenAI(model='gpt-3.5-turbo-0613')
prompt = prompt or ChatPromptTemplate.from_template(
"""Use the provided API's to respond to this user query:
{query}""")
llm_chain = LLMChain(llm=llm, prompt=prompt, llm_kwargs={'functions':
openai_fns}, output_parser=JsonOutputFunctionsParser(args_only=False),
output_key='function', verbose=verbose, **llm_chain_kwargs or {})
request_chain = request_chain or SimpleRequestChain(request_method=lambda
name, args: call_api_fn(name, args, headers=headers, params=params),
verbose=verbose)
return SequentialChain(chains=[llm_chain, request_chain], input_variables=
llm_chain.input_keys, output_variables=['response'], verbose=verbose,
**kwargs)
|
def get_openapi_chain(spec: Union[OpenAPISpec, str], llm: Optional[
    BaseLanguageModel]=None, prompt: Optional[BasePromptTemplate]=None,
    request_chain: Optional[Chain]=None, llm_chain_kwargs: Optional[Dict]=
    None, verbose: bool=False, headers: Optional[Dict]=None, params:
    Optional[Dict]=None, **kwargs: Any) ->SequentialChain:
    """Create a chain for querying an API from a OpenAPI spec.
    Args:
        spec: OpenAPISpec or url/file/text string corresponding to one.
        llm: language model, should be an OpenAI function-calling model, e.g.
            `ChatOpenAI(model="gpt-3.5-turbo-0613")`.
        prompt: Main prompt template to use.
        request_chain: Chain for taking the functions output and executing the request.
    """
    if isinstance(spec, str):
        # Try to parse the string as a url, then a file path, then raw text.
        for conversion in (OpenAPISpec.from_url, OpenAPISpec.from_file,
            OpenAPISpec.from_text):
            try:
                spec = conversion(spec)
                break
            except ImportError as e:
                # A missing optional dependency is fatal, not a parse failure.
                raise e
            except Exception:
                pass
        if isinstance(spec, str):
            raise ValueError(f'Unable to parse spec from source {spec}')
    openai_fns, call_api_fn = openapi_spec_to_openai_fn(spec)
    llm = llm or ChatOpenAI(model='gpt-3.5-turbo-0613')
    prompt = prompt or ChatPromptTemplate.from_template(
        """Use the provided API's to respond to this user query:
{query}""")
    # LLM picks an API function; its JSON args feed the request chain.
    llm_chain = LLMChain(llm=llm, prompt=prompt, llm_kwargs={'functions':
        openai_fns}, output_parser=JsonOutputFunctionsParser(args_only=
        False), output_key='function', verbose=verbose, **llm_chain_kwargs or
        {})
    request_chain = request_chain or SimpleRequestChain(request_method=lambda
        name, args: call_api_fn(name, args, headers=headers, params=params),
        verbose=verbose)
    return SequentialChain(chains=[llm_chain, request_chain],
        input_variables=llm_chain.input_keys, output_variables=['response'],
        verbose=verbose, **kwargs)
|
Create a chain for querying an API from a OpenAPI spec.
Args:
spec: OpenAPISpec or url/file/text string corresponding to one.
llm: language model, should be an OpenAI function-calling model, e.g.
`ChatOpenAI(model="gpt-3.5-turbo-0613")`.
prompt: Main prompt template to use.
request_chain: Chain for taking the functions output and executing the request.
|
get_sync
|
"""Get the equivalent sync RunManager.
Returns:
CallbackManagerForLLMRun: The sync RunManager.
"""
return CallbackManagerForLLMRun(run_id=self.run_id, handlers=self.handlers,
inheritable_handlers=self.inheritable_handlers, parent_run_id=self.
parent_run_id, tags=self.tags, inheritable_tags=self.inheritable_tags,
metadata=self.metadata, inheritable_metadata=self.inheritable_metadata)
|
def get_sync(self) ->CallbackManagerForLLMRun:
    """Get the equivalent sync RunManager.

    Returns:
        CallbackManagerForLLMRun: A sync manager sharing this manager's
        run id, handlers, tags and metadata (own and inheritable).
    """
    shared_state = dict(run_id=self.run_id, handlers=self.handlers,
        inheritable_handlers=self.inheritable_handlers, parent_run_id=
        self.parent_run_id, tags=self.tags, inheritable_tags=self.
        inheritable_tags, metadata=self.metadata, inheritable_metadata=
        self.inheritable_metadata)
    return CallbackManagerForLLMRun(**shared_state)
|
Get the equivalent sync RunManager.
Returns:
CallbackManagerForLLMRun: The sync RunManager.
|
_messages_search_result_to_doc
|
return [Document(page_content=r.message.pop('content'), metadata={'score':
r.dist, **r.message}) for r in results if r.message]
|
def _messages_search_result_to_doc(self, results: List[MemorySearchResult]
    ) ->List[Document]:
    """Convert memory search results into ``Document`` objects.

    Results with an empty/None message are skipped. The message's
    'content' key is popped out to become the page content; the remaining
    message fields plus the distance score become the metadata.
    """
    docs: List[Document] = []
    for result in results:
        if not result.message:
            continue
        content = result.message.pop('content')
        docs.append(Document(page_content=content, metadata={'score':
            result.dist, **result.message}))
    return docs
| null |
max_marginal_relevance_search
|
"""Return docs selected using the maximal marginal relevance.
Maximal marginal relevance optimizes for similarity to query AND diversity
among selected documents.
Args:
query: Text to look up documents similar to.
k: Number of Documents to return. Defaults to 4.
fetch_k: Number of Documents to fetch to pass to MMR algorithm.
lambda_mult: Number between 0 and 1 that determines the degree
of diversity among the results with 0 corresponding
to maximum diversity and 1 to minimum diversity.
Defaults to 0.5.
Returns:
List of Documents selected by maximal marginal relevance.
"""
if self._embedding is not None:
embedding = self._embedding.embed_query(query)
else:
raise ValueError(
'max_marginal_relevance_search requires a suitable Embeddings object')
return self.max_marginal_relevance_search_by_vector(embedding, k=k, fetch_k
=fetch_k, lambda_mult=lambda_mult, **kwargs)
|
def max_marginal_relevance_search(self, query: str, k: int=4, fetch_k: int=
    20, lambda_mult: float=0.5, **kwargs: Any) ->List[Document]:
    """Return docs selected using the maximal marginal relevance.

    Maximal marginal relevance optimizes for similarity to query AND
    diversity among selected documents.

    Args:
        query: Text to look up documents similar to.
        k: Number of Documents to return. Defaults to 4.
        fetch_k: Number of Documents to fetch to pass to MMR algorithm.
        lambda_mult: Number between 0 and 1 that determines the degree
            of diversity among the results with 0 corresponding
            to maximum diversity and 1 to minimum diversity.
            Defaults to 0.5.

    Returns:
        List of Documents selected by maximal marginal relevance.
    """
    # MMR needs the raw query vector, so an embedding function is mandatory.
    if self._embedding is None:
        raise ValueError(
            'max_marginal_relevance_search requires a suitable Embeddings object'
            )
    query_vector = self._embedding.embed_query(query)
    return self.max_marginal_relevance_search_by_vector(query_vector, k=k,
        fetch_k=fetch_k, lambda_mult=lambda_mult, **kwargs)
|
Return docs selected using the maximal marginal relevance.
Maximal marginal relevance optimizes for similarity to query AND diversity
among selected documents.
Args:
query: Text to look up documents similar to.
k: Number of Documents to return. Defaults to 4.
fetch_k: Number of Documents to fetch to pass to MMR algorithm.
lambda_mult: Number between 0 and 1 that determines the degree
of diversity among the results with 0 corresponding
to maximum diversity and 1 to minimum diversity.
Defaults to 0.5.
Returns:
List of Documents selected by maximal marginal relevance.
|
test_ngram_overlap_score
|
"""Tests that ngram_overlap_score returns correct values."""
selector.threshold = 1.0 + 1e-09
none = ngram_overlap_score(['Spot can run.'], ['My dog barks.'])
some = ngram_overlap_score(['Spot can run.'], ['See Spot run.'])
complete = ngram_overlap_score(['Spot can run.'], ['Spot can run.'])
check = [abs(none - 0.0) < 1e-09, 0.0 < some < 1.0, abs(complete - 1.0) < 1e-09
]
assert check == [True, True, True]
|
def test_ngram_overlap_score(selector: NGramOverlapExampleSelector) ->None:
    """Tests that ngram_overlap_score returns correct values."""
    selector.threshold = 1.0 + 1e-09
    no_overlap = ngram_overlap_score(['Spot can run.'], ['My dog barks.'])
    partial_overlap = ngram_overlap_score(['Spot can run.'], ['See Spot run.'])
    full_overlap = ngram_overlap_score(['Spot can run.'], ['Spot can run.'])
    # Disjoint sentences score ~0, identical sentences score ~1, and a
    # partial match falls strictly between the two.
    assert abs(no_overlap - 0.0) < 1e-09
    assert 0.0 < partial_overlap < 1.0
    assert abs(full_overlap - 1.0) < 1e-09
|
Tests that ngram_overlap_score returns correct values.
|
_llm_type
|
"""Return type of llm."""
return 'openai'
|
@property
def _llm_type(self) ->str:
    """Identifier of the underlying LLM provider."""
    return 'openai'
|
Return type of llm.
|
return_values
|
"""Return values of the agent."""
return []
|
@property
def return_values(self) ->List[str]:
    """Names of the values returned by the agent; empty for this agent."""
    return []
|
Return values of the agent.
|
similarity_search
|
embedding = self.embedding_function.embed_query(text=query)
return self.similarity_search_by_vector(embedding=embedding, k=k, filter=filter
)
|
def similarity_search(self, query: str, k: int=4, filter: Optional[dict]=
    None, **kwargs: Any) ->List[Document]:
    """Return up to ``k`` documents most similar to ``query``.

    The query text is embedded with the store's embedding function and
    the search is delegated to ``similarity_search_by_vector``.
    Extra keyword arguments are accepted but not forwarded.
    """
    query_vector = self.embedding_function.embed_query(text=query)
    return self.similarity_search_by_vector(embedding=query_vector, k=k,
        filter=filter)
| null |
test_similarity_search_approx_with_hybrid_search_rrf
|
"""Test end to end construction and rrf hybrid search with metadata."""
from functools import partial
from typing import Optional
rrf_test_cases: List[Optional[Union[dict, bool]]] = [True, False, {
'rank_constant': 1, 'window_size': 5}]
for rrf_test_case in rrf_test_cases:
texts = ['foo', 'bar', 'baz']
docsearch = ElasticsearchStore.from_texts(texts, FakeEmbeddings(), **
elasticsearch_connection, index_name=index_name, strategy=
ElasticsearchStore.ApproxRetrievalStrategy(hybrid=True, rrf=
rrf_test_case))
def assert_query(query_body: dict, query: str, rrf: Optional[Union[dict,
bool]]=True) ->dict:
cmp_query_body = {'knn': {'field': 'vector', 'filter': [], 'k': 3,
'num_candidates': 50, 'query_vector': [1.0, 1.0, 1.0, 1.0, 1.0,
1.0, 1.0, 1.0, 1.0, 0.0]}, 'query': {'bool': {'filter': [],
'must': [{'match': {'text': {'query': 'foo'}}}]}}}
if isinstance(rrf, dict):
cmp_query_body['rank'] = {'rrf': rrf}
elif isinstance(rrf, bool) and rrf is True:
cmp_query_body['rank'] = {'rrf': {}}
assert query_body == cmp_query_body
return query_body
output = docsearch.similarity_search('foo', k=3, custom_query=partial(
assert_query, rrf=rrf_test_case))
es_output = es_client.search(index=index_name, query={'bool': {'filter': [],
'must': [{'match': {'text': {'query': 'foo'}}}]}}, knn={'field':
'vector', 'filter': [], 'k': 3, 'num_candidates': 50, 'query_vector': [
1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0]}, size=3, rank={'rrf':
{'rank_constant': 1, 'window_size': 5}})
assert [o.page_content for o in output] == [e['_source']['text'] for e in
es_output['hits']['hits']]
docsearch = ElasticsearchStore.from_texts(texts, FakeEmbeddings(), **
elasticsearch_connection, index_name=index_name, strategy=
ElasticsearchStore.ApproxRetrievalStrategy(hybrid=True))
output = docsearch.similarity_search('foo', k=3, fetch_k=50, custom_query=
assert_query)
|
def test_similarity_search_approx_with_hybrid_search_rrf(self, es_client:
    Any, elasticsearch_connection: dict, index_name: str) ->None:
    """Test end to end construction and rrf hybrid search with metadata."""
    from functools import partial
    from typing import Optional
    # rrf may be a bool toggle or an explicit config dict; exercise all three.
    rrf_test_cases: List[Optional[Union[dict, bool]]] = [True, False, {
        'rank_constant': 1, 'window_size': 5}]
    for rrf_test_case in rrf_test_cases:
        texts = ['foo', 'bar', 'baz']
        docsearch = ElasticsearchStore.from_texts(texts, FakeEmbeddings(),
            **elasticsearch_connection, index_name=index_name, strategy=
            ElasticsearchStore.ApproxRetrievalStrategy(hybrid=True, rrf=
            rrf_test_case))
        def assert_query(query_body: dict, query: str, rrf: Optional[Union[
            dict, bool]]=True) ->dict:
            # custom_query hook: checks the knn + bool/match parts of the
            # generated body, and that a 'rank' section appears only when
            # rrf is a dict or literally True.
            cmp_query_body = {'knn': {'field': 'vector', 'filter': [], 'k':
                3, 'num_candidates': 50, 'query_vector': [1.0, 1.0, 1.0,
                1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0]}, 'query': {'bool': {
                'filter': [], 'must': [{'match': {'text': {'query': 'foo'}}}]}}
                }
            if isinstance(rrf, dict):
                cmp_query_body['rank'] = {'rrf': rrf}
            elif isinstance(rrf, bool) and rrf is True:
                cmp_query_body['rank'] = {'rrf': {}}
            assert query_body == cmp_query_body
            return query_body
        output = docsearch.similarity_search('foo', k=3, custom_query=
            partial(assert_query, rrf=rrf_test_case))
    # Cross-check the store's results against a raw Elasticsearch search
    # using the same knn/match/rank parameters (uses the last index state).
    es_output = es_client.search(index=index_name, query={'bool': {'filter':
        [], 'must': [{'match': {'text': {'query': 'foo'}}}]}}, knn={'field':
        'vector', 'filter': [], 'k': 3, 'num_candidates': 50,
        'query_vector': [1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0]},
        size=3, rank={'rrf': {'rank_constant': 1, 'window_size': 5}})
    assert [o.page_content for o in output] == [e['_source']['text'] for e in
        es_output['hits']['hits']]
    # hybrid=True without an explicit rrf argument: fetch_k is accepted and
    # the default body shape (assert_query with rrf=True) is produced.
    docsearch = ElasticsearchStore.from_texts(texts, FakeEmbeddings(), **
        elasticsearch_connection, index_name=index_name, strategy=
        ElasticsearchStore.ApproxRetrievalStrategy(hybrid=True))
    output = docsearch.similarity_search('foo', k=3, fetch_k=50,
        custom_query=assert_query)
|
Test end to end construction and rrf hybrid search with metadata.
|
__repr__
|
from pprint import pformat
return f'RunLogPatch({pformat(self.ops)[1:-1]})'
|
def __repr__(self) ->str:
    """Debug representation listing this patch's JSON-patch operations."""
    from pprint import pformat
    # pformat renders the ops list as '[...]'; strip the surrounding
    # brackets so the output reads RunLogPatch(op, op, ...).
    inner = pformat(self.ops)[1:-1]
    return f'RunLogPatch({inner})'
| null |
test_html_code_splitter
|
splitter = RecursiveCharacterTextSplitter.from_language(Language.HTML,
chunk_size=60, chunk_overlap=0)
code = """
<h1>Sample Document</h1>
<h2>Section</h2>
<p id="1234">Reference content.</p>
<h2>Lists</h2>
<ul>
<li>Item 1</li>
<li>Item 2</li>
<li>Item 3</li>
</ul>
<h3>A block</h3>
<div class="amazing">
<p>Some text</p>
<p>Some more text</p>
</div>
"""
chunks = splitter.split_text(code)
assert chunks == ["""<h1>Sample Document</h1>
<h2>Section</h2>""",
'<p id="1234">Reference content.</p>',
"""<h2>Lists</h2>
<ul>""",
"""<li>Item 1</li>
<li>Item 2</li>""",
'<li>Item 3</li>\n </ul>', '<h3>A block</h3>',
'<div class="amazing">', '<p>Some text</p>',
"""<p>Some more text</p>
</div>"""]
|
def test_html_code_splitter() ->None:
    """HTML is split on tag boundaries (headings, lists, divs) into
    chunks of at most 60 characters with no overlap."""
    splitter = RecursiveCharacterTextSplitter.from_language(Language.HTML,
        chunk_size=60, chunk_overlap=0)
    code = """
<h1>Sample Document</h1>
    <h2>Section</h2>
        <p id="1234">Reference content.</p>
    <h2>Lists</h2>
        <ul>
            <li>Item 1</li>
            <li>Item 2</li>
            <li>Item 3</li>
        </ul>
        <h3>A block</h3>
            <div class="amazing">
                <p>Some text</p>
                <p>Some more text</p>
            </div>
    """
    chunks = splitter.split_text(code)
    # Whitespace inside each expected chunk mirrors the source indentation.
    assert chunks == ["""<h1>Sample Document</h1>
    <h2>Section</h2>""",
        '<p id="1234">Reference content.</p>',
        """<h2>Lists</h2>
        <ul>""",
        """<li>Item 1</li>
            <li>Item 2</li>""",
        '<li>Item 3</li>\n        </ul>', '<h3>A block</h3>',
        '<div class="amazing">', '<p>Some text</p>',
        """<p>Some more text</p>
            </div>"""]
| null |
messages
|
"""Retrieve the current list of messages"""
return self._messages
|
@property
def messages(self) ->List[BaseMessage]:
    """The current list of stored messages."""
    return self._messages
|
Retrieve the current list of messages
|
complete
|
"""Finish the thought."""
if final_label is None and self._state == LLMThoughtState.RUNNING_TOOL:
assert self._last_tool is not None, '_last_tool should never be null when _state == RUNNING_TOOL'
final_label = self._labeler.get_tool_label(self._last_tool, is_complete
=True)
self._state = LLMThoughtState.COMPLETE
if self._collapse_on_complete:
self._container.update(new_label=final_label, new_expanded=False)
else:
self._container.update(new_label=final_label)
|
def complete(self, final_label: Optional[str]=None) ->None:
    """Finish the thought and refresh its container label.

    If no label is supplied while a tool is still running, a
    completed-tool label is derived from the last tool invocation.
    """
    if final_label is None and self._state == LLMThoughtState.RUNNING_TOOL:
        assert self._last_tool is not None, '_last_tool should never be null when _state == RUNNING_TOOL'
        final_label = self._labeler.get_tool_label(self._last_tool,
            is_complete=True)
    self._state = LLMThoughtState.COMPLETE
    update_kwargs: dict = {'new_label': final_label}
    if self._collapse_on_complete:
        # Collapse the expander once the thought is done.
        update_kwargs['new_expanded'] = False
    self._container.update(**update_kwargs)
|
Finish the thought.
|
create_list_elements
|
skeleton = _input['skeleton']
numbered_list = parse_numbered_list(skeleton)
for el in numbered_list:
el['skeleton'] = skeleton
el['question'] = _input['question']
return numbered_list
|
def create_list_elements(_input):
    """Parse the numbered-list skeleton into per-item dicts.

    Each parsed element is annotated with the full skeleton text and the
    original question so downstream steps retain full context.
    """
    skeleton_text = _input['skeleton']
    elements = parse_numbered_list(skeleton_text)
    for element in elements:
        element['skeleton'] = skeleton_text
        element['question'] = _input['question']
    return elements
| null |
delete_collection
|
"""
Completely delete the collection from the database (as opposed
to 'clear()', which empties it only).
Stored data is lost and unrecoverable, resources are freed.
Use with caution.
"""
self._drop_collection()
return None
|
def delete_collection(self) ->None:
    """Irreversibly drop the whole collection from the database.

    Unlike ``clear()``, which only empties the collection, this also
    frees its resources. Stored data is lost and unrecoverable; use
    with caution.
    """
    self._drop_collection()
|
Completely delete the collection from the database (as opposed
to 'clear()', which empties it only).
Stored data is lost and unrecoverable, resources are freed.
Use with caution.
|
assert_query
|
assert query_body == {'query': {'script_score': {'query': {'match_all': {}},
'script': {'source':
"""
double value = dotProduct(params.query_vector, 'vector');
return sigmoid(1, Math.E, -value);
"""
, 'params': {'query_vector': [1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0,
1.0, 0.0]}}}}}
return query_body
|
def assert_query(query_body: dict, query: str) ->dict:
    # custom_query hook: verify the store built a script_score query that
    # rescores a match_all with a sigmoid of the dot product against the
    # fake query embedding, then pass the body through unchanged.
    assert query_body == {'query': {'script_score': {'query': {'match_all':
        {}}, 'script': {'source':
        """
        double value = dotProduct(params.query_vector, 'vector');
        return sigmoid(1, Math.E, -value);
        """
        , 'params': {'query_vector': [1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0,
        1.0, 1.0, 0.0]}}}}}
    return query_body
| null |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.