method_name
stringlengths 1
78
| method_body
stringlengths 3
9.66k
| full_code
stringlengths 31
10.7k
| docstring
stringlengths 4
4.74k
⌀ |
|---|---|---|---|
on_feedback
|
self.num += score or 0
self.i += 1
if self.step > 0 and self.i % self.step == 0:
self.history.append({'step': self.i, 'score': self.score})
|
def on_feedback(self, score: float) ->None:
self.num += score or 0
self.i += 1
if self.step > 0 and self.i % self.step == 0:
self.history.append({'step': self.i, 'score': self.score})
| null |
messages_2
|
return [SystemMessage(content='You are a test user.'), HumanMessage(content
='Hello, I not a test user.')]
|
@pytest.fixture
def messages_2() ->list:
return [SystemMessage(content='You are a test user.'), HumanMessage(
content='Hello, I not a test user.')]
| null |
test_chat_openai_streaming
|
"""Test that streaming correctly invokes on_llm_new_token callback."""
callback_handler = FakeCallbackHandler()
callback_manager = CallbackManager([callback_handler])
chat = _get_llm(max_tokens=10, streaming=True, temperature=0,
callback_manager=callback_manager, verbose=True)
message = HumanMessage(content='Hello')
response = chat([message])
assert callback_handler.llm_streams > 0
assert isinstance(response, BaseMessage)
|
@pytest.mark.scheduled
def test_chat_openai_streaming() ->None:
"""Test that streaming correctly invokes on_llm_new_token callback."""
callback_handler = FakeCallbackHandler()
callback_manager = CallbackManager([callback_handler])
chat = _get_llm(max_tokens=10, streaming=True, temperature=0,
callback_manager=callback_manager, verbose=True)
message = HumanMessage(content='Hello')
response = chat([message])
assert callback_handler.llm_streams > 0
assert isinstance(response, BaseMessage)
|
Test that streaming correctly invokes on_llm_new_token callback.
|
_get_verbosity
|
from langchain_core.globals import get_verbose
return get_verbose()
|
def _get_verbosity() ->bool:
from langchain_core.globals import get_verbose
return get_verbose()
| null |
drop_tables
|
with Session(self._bind) as session, session.begin():
Base.metadata.drop_all(session.get_bind())
|
def drop_tables(self) ->None:
with Session(self._bind) as session, session.begin():
Base.metadata.drop_all(session.get_bind())
| null |
test_load_arxiv_from_universal_entry_with_params
|
params = {'top_k_results': 1, 'load_max_docs': 10,
'load_all_available_meta': True}
arxiv_tool = _load_arxiv_from_universal_entry(**params)
assert isinstance(arxiv_tool, ArxivQueryRun)
wp = arxiv_tool.api_wrapper
assert wp.top_k_results == 1, 'failed to assert top_k_results'
assert wp.load_max_docs == 10, 'failed to assert load_max_docs'
assert wp.load_all_available_meta is True, 'failed to assert load_all_available_meta'
|
def test_load_arxiv_from_universal_entry_with_params() ->None:
params = {'top_k_results': 1, 'load_max_docs': 10,
'load_all_available_meta': True}
arxiv_tool = _load_arxiv_from_universal_entry(**params)
assert isinstance(arxiv_tool, ArxivQueryRun)
wp = arxiv_tool.api_wrapper
assert wp.top_k_results == 1, 'failed to assert top_k_results'
assert wp.load_max_docs == 10, 'failed to assert load_max_docs'
assert wp.load_all_available_meta is True, 'failed to assert load_all_available_meta'
| null |
_index_mapping
|
"""
Executes when the index is created.
Args:
dims_length: Numeric length of the embedding vectors,
or None if not using vector-based query.
index_params: The extra pamameters for creating index.
Returns:
Dict: The Elasticsearch settings and mappings for the strategy.
"""
if 'linear' == self.index_type:
return {self.vector_query_field: {'type': 'bpack_vector', 'dims':
dims_length, 'build_index': self.index_params.get('build_index',
False)}}
elif 'hnsw' == self.index_type:
return {self.vector_query_field: {'type': 'bpack_vector', 'dims':
dims_length, 'index_type': 'hnsw', 'space_type': self.space_type,
'parameters': {'ef_construction': self.index_params.get(
'hnsw_ef_construction', 200), 'm': self.index_params.get('hnsw_m', 4)}}
}
else:
return {self.vector_query_field: {'type': 'bpack_vector', 'model_id':
self.index_params.get('model_id', '')}}
|
def _index_mapping(self, dims_length: Union[int, None]) ->Dict:
"""
Executes when the index is created.
Args:
dims_length: Numeric length of the embedding vectors,
or None if not using vector-based query.
index_params: The extra pamameters for creating index.
Returns:
Dict: The Elasticsearch settings and mappings for the strategy.
"""
if 'linear' == self.index_type:
return {self.vector_query_field: {'type': 'bpack_vector', 'dims':
dims_length, 'build_index': self.index_params.get('build_index',
False)}}
elif 'hnsw' == self.index_type:
return {self.vector_query_field: {'type': 'bpack_vector', 'dims':
dims_length, 'index_type': 'hnsw', 'space_type': self.
space_type, 'parameters': {'ef_construction': self.index_params
.get('hnsw_ef_construction', 200), 'm': self.index_params.get(
'hnsw_m', 4)}}}
else:
return {self.vector_query_field: {'type': 'bpack_vector',
'model_id': self.index_params.get('model_id', '')}}
|
Executes when the index is created.
Args:
dims_length: Numeric length of the embedding vectors,
or None if not using vector-based query.
index_params: The extra pamameters for creating index.
Returns:
Dict: The Elasticsearch settings and mappings for the strategy.
|
_import_openai_chat
|
from langchain_community.llms.openai import OpenAIChat
return OpenAIChat
|
def _import_openai_chat() ->Any:
from langchain_community.llms.openai import OpenAIChat
return OpenAIChat
| null |
test__convert_dict_to_message_human
|
message_dict = {'role': 'user', 'content': 'foo'}
result = _convert_dict_to_message(message_dict)
expected_output = HumanMessage(content='foo')
assert result == expected_output
|
def test__convert_dict_to_message_human() ->None:
message_dict = {'role': 'user', 'content': 'foo'}
result = _convert_dict_to_message(message_dict)
expected_output = HumanMessage(content='foo')
assert result == expected_output
| null |
always_verbose
|
"""Whether to call verbose callbacks even if verbose is False."""
return self.always_verbose_
|
@property
def always_verbose(self) ->bool:
"""Whether to call verbose callbacks even if verbose is False."""
return self.always_verbose_
|
Whether to call verbose callbacks even if verbose is False.
|
_run
|
"""Use the tool."""
query_params = {'file_url': query, 'attributes_as_list': False}
return self._call_eden_ai(query_params)
|
def _run(self, query: str, run_manager: Optional[CallbackManagerForToolRun]
=None) ->str:
"""Use the tool."""
query_params = {'file_url': query, 'attributes_as_list': False}
return self._call_eden_ai(query_params)
|
Use the tool.
|
test_huggingface_instructor_embedding_normalize
|
"""Test huggingface embeddings."""
query = 'foo bar'
model_name = 'hkunlp/instructor-base'
encode_kwargs = {'normalize_embeddings': True}
embedding = HuggingFaceInstructEmbeddings(model_name=model_name,
encode_kwargs=encode_kwargs)
output = embedding.embed_query(query)
assert len(output) == 768
eps = 1e-05
norm = sum([(o ** 2) for o in output])
assert abs(1 - norm) <= eps
|
def test_huggingface_instructor_embedding_normalize() ->None:
"""Test huggingface embeddings."""
query = 'foo bar'
model_name = 'hkunlp/instructor-base'
encode_kwargs = {'normalize_embeddings': True}
embedding = HuggingFaceInstructEmbeddings(model_name=model_name,
encode_kwargs=encode_kwargs)
output = embedding.embed_query(query)
assert len(output) == 768
eps = 1e-05
norm = sum([(o ** 2) for o in output])
assert abs(1 - norm) <= eps
|
Test huggingface embeddings.
|
fix_table_name
|
"""Add single quotes around table names that contain spaces."""
if ' ' in table and not table.startswith("'") and not table.endswith("'"):
return f"'{table}'"
return table
|
def fix_table_name(table: str) ->str:
"""Add single quotes around table names that contain spaces."""
if ' ' in table and not table.startswith("'") and not table.endswith("'"):
return f"'{table}'"
return table
|
Add single quotes around table names that contain spaces.
|
normalize
|
"""Normalize vector."""
return [float(v / np.linalg.norm(vector)) for v in vector]
|
def normalize(self, vector: List[float]) ->List[float]:
"""Normalize vector."""
return [float(v / np.linalg.norm(vector)) for v in vector]
|
Normalize vector.
|
add_to_dict
|
nonlocal total_tokens
tokens = get_tokens(value)
if total_tokens + tokens <= max_tokens:
data_dict[key] = value
total_tokens += tokens
|
def add_to_dict(data_dict: Dict[str, Any], key: str, value: str) ->None:
nonlocal total_tokens
tokens = get_tokens(value)
if total_tokens + tokens <= max_tokens:
data_dict[key] = value
total_tokens += tokens
| null |
_llm_type
|
"""Return type of chat model."""
return 'javelin-ai-gateway-chat'
|
@property
def _llm_type(self) ->str:
"""Return type of chat model."""
return 'javelin-ai-gateway-chat'
|
Return type of chat model.
|
test_openai_invoke
|
"""Test streaming tokens from OpenAI."""
llm = OpenAI(max_tokens=10)
result = llm.invoke("I'm Pickle Rick", config=dict(tags=['foo']))
assert isinstance(result, str)
|
@pytest.mark.scheduled
def test_openai_invoke() ->None:
"""Test streaming tokens from OpenAI."""
llm = OpenAI(max_tokens=10)
result = llm.invoke("I'm Pickle Rick", config=dict(tags=['foo']))
assert isinstance(result, str)
|
Test streaming tokens from OpenAI.
|
critique_prompt_inputs
|
return {'question': self.question, **{f'idea_{i + 1}': idea for i, idea in
enumerate(self.ideas)}}
|
def critique_prompt_inputs(self) ->Dict[str, Any]:
return {'question': self.question, **{f'idea_{i + 1}': idea for i, idea in
enumerate(self.ideas)}}
| null |
load
|
"""Load documents."""
dataset_items = self.apify_client.dataset(self.dataset_id).list_items(clean
=True).items
return list(map(self.dataset_mapping_function, dataset_items))
|
def load(self) ->List[Document]:
"""Load documents."""
dataset_items = self.apify_client.dataset(self.dataset_id).list_items(clean
=True).items
return list(map(self.dataset_mapping_function, dataset_items))
|
Load documents.
|
test_load_llmchain_with_non_serializable_arg
|
llm = OpenAI(model='davinci', temperature=0.5, openai_api_key='hello',
http_client=NotSerializable)
prompt = PromptTemplate.from_template('hello {name}!')
chain = LLMChain(llm=llm, prompt=prompt)
chain_obj = dumpd(chain)
with pytest.raises(NotImplementedError):
load(chain_obj, secrets_map={'OPENAI_API_KEY': 'hello'})
|
@pytest.mark.requires('openai')
def test_load_llmchain_with_non_serializable_arg() ->None:
llm = OpenAI(model='davinci', temperature=0.5, openai_api_key='hello',
http_client=NotSerializable)
prompt = PromptTemplate.from_template('hello {name}!')
chain = LLMChain(llm=llm, prompt=prompt)
chain_obj = dumpd(chain)
with pytest.raises(NotImplementedError):
load(chain_obj, secrets_map={'OPENAI_API_KEY': 'hello'})
| null |
test_sitemap_block_num_to_small
|
"""Test sitemap loader."""
with pytest.raises(ValueError, match='Sitemap blocknum can not be lower then 0'
):
SitemapLoader('https://api.python.langchain.com/sitemap.xml', blocksize
=1000000, blocknum=-1)
|
def test_sitemap_block_num_to_small() ->None:
"""Test sitemap loader."""
with pytest.raises(ValueError, match=
'Sitemap blocknum can not be lower then 0'):
SitemapLoader('https://api.python.langchain.com/sitemap.xml',
blocksize=1000000, blocknum=-1)
|
Test sitemap loader.
|
results
|
"""Run query through Metaphor Search and return metadata.
Args:
query: The query to search for.
num_results: The number of results to return.
include_domains: A list of domains to include in the search. Only one of include_domains and exclude_domains should be defined.
exclude_domains: A list of domains to exclude from the search. Only one of include_domains and exclude_domains should be defined.
start_crawl_date: If specified, only pages we crawled after start_crawl_date will be returned.
end_crawl_date: If specified, only pages we crawled before end_crawl_date will be returned.
start_published_date: If specified, only pages published after start_published_date will be returned.
end_published_date: If specified, only pages published before end_published_date will be returned.
use_autoprompt: If true, we turn your query into a more Metaphor-friendly query. Adds latency.
Returns:
A list of dictionaries with the following keys:
title - The title of the page
url - The url
author - Author of the content, if applicable. Otherwise, None.
published_date - Estimated date published
in YYYY-MM-DD format. Otherwise, None.
"""
raw_search_results = self._metaphor_search_results(query, num_results=
num_results, include_domains=include_domains, exclude_domains=
exclude_domains, start_crawl_date=start_crawl_date, end_crawl_date=
end_crawl_date, start_published_date=start_published_date,
end_published_date=end_published_date, use_autoprompt=use_autoprompt)
return self._clean_results(raw_search_results)
|
def results(self, query: str, num_results: int, include_domains: Optional[
List[str]]=None, exclude_domains: Optional[List[str]]=None,
start_crawl_date: Optional[str]=None, end_crawl_date: Optional[str]=
None, start_published_date: Optional[str]=None, end_published_date:
Optional[str]=None, use_autoprompt: Optional[bool]=None) ->List[Dict]:
"""Run query through Metaphor Search and return metadata.
Args:
query: The query to search for.
num_results: The number of results to return.
include_domains: A list of domains to include in the search. Only one of include_domains and exclude_domains should be defined.
exclude_domains: A list of domains to exclude from the search. Only one of include_domains and exclude_domains should be defined.
start_crawl_date: If specified, only pages we crawled after start_crawl_date will be returned.
end_crawl_date: If specified, only pages we crawled before end_crawl_date will be returned.
start_published_date: If specified, only pages published after start_published_date will be returned.
end_published_date: If specified, only pages published before end_published_date will be returned.
use_autoprompt: If true, we turn your query into a more Metaphor-friendly query. Adds latency.
Returns:
A list of dictionaries with the following keys:
title - The title of the page
url - The url
author - Author of the content, if applicable. Otherwise, None.
published_date - Estimated date published
in YYYY-MM-DD format. Otherwise, None.
"""
raw_search_results = self._metaphor_search_results(query, num_results=
num_results, include_domains=include_domains, exclude_domains=
exclude_domains, start_crawl_date=start_crawl_date, end_crawl_date=
end_crawl_date, start_published_date=start_published_date,
end_published_date=end_published_date, use_autoprompt=use_autoprompt)
return self._clean_results(raw_search_results)
|
Run query through Metaphor Search and return metadata.
Args:
query: The query to search for.
num_results: The number of results to return.
include_domains: A list of domains to include in the search. Only one of include_domains and exclude_domains should be defined.
exclude_domains: A list of domains to exclude from the search. Only one of include_domains and exclude_domains should be defined.
start_crawl_date: If specified, only pages we crawled after start_crawl_date will be returned.
end_crawl_date: If specified, only pages we crawled before end_crawl_date will be returned.
start_published_date: If specified, only pages published after start_published_date will be returned.
end_published_date: If specified, only pages published before end_published_date will be returned.
use_autoprompt: If true, we turn your query into a more Metaphor-friendly query. Adds latency.
Returns:
A list of dictionaries with the following keys:
title - The title of the page
url - The url
author - Author of the content, if applicable. Otherwise, None.
published_date - Estimated date published
in YYYY-MM-DD format. Otherwise, None.
|
test_ai_endpoints_batch
|
"""Test batch tokens."""
llm = ChatNVIDIA(model='llama2_13b', max_tokens=60)
result = llm.batch(["I'm Pickle Rick", "I'm not Pickle Rick"])
for token in result:
assert isinstance(token.content, str)
|
def test_ai_endpoints_batch() ->None:
"""Test batch tokens."""
llm = ChatNVIDIA(model='llama2_13b', max_tokens=60)
result = llm.batch(["I'm Pickle Rick", "I'm not Pickle Rick"])
for token in result:
assert isinstance(token.content, str)
|
Test batch tokens.
|
clear
|
"""Clear session memory from Upstash Redis"""
self.redis_client.delete(self.key)
|
def clear(self) ->None:
"""Clear session memory from Upstash Redis"""
self.redis_client.delete(self.key)
|
Clear session memory from Upstash Redis
|
mset
|
"""Set the values for the given keys.
Args:
key_value_pairs: A sequence of key-value pairs.
Returns:
None
"""
for key, value in key_value_pairs:
full_path = self._get_full_path(key)
full_path.parent.mkdir(parents=True, exist_ok=True)
full_path.write_bytes(value)
|
def mset(self, key_value_pairs: Sequence[Tuple[str, bytes]]) ->None:
"""Set the values for the given keys.
Args:
key_value_pairs: A sequence of key-value pairs.
Returns:
None
"""
for key, value in key_value_pairs:
full_path = self._get_full_path(key)
full_path.parent.mkdir(parents=True, exist_ok=True)
full_path.write_bytes(value)
|
Set the values for the given keys.
Args:
key_value_pairs: A sequence of key-value pairs.
Returns:
None
|
batch
|
from langchain_core.callbacks.manager import CallbackManager
if return_exceptions:
raise NotImplementedError()
if not inputs:
return []
configs = get_config_list(config, len(inputs))
callback_managers = [CallbackManager.configure(inheritable_callbacks=config
.get('callbacks'), local_callbacks=None, verbose=False,
inheritable_tags=config.get('tags'), local_tags=None,
inheritable_metadata=config.get('metadata'), local_metadata=None) for
config in configs]
run_managers = [cm.on_chain_start(dumpd(self), input if isinstance(input,
dict) else {'input': input}, name=config.get('run_name')) for cm, input,
config in zip(callback_managers, inputs, configs)]
first_error = None
for runnable in self.runnables:
try:
outputs = runnable.batch(inputs, [patch_config(config, callbacks=rm
.get_child()) for rm, config in zip(run_managers, configs)],
return_exceptions=return_exceptions, **kwargs)
except self.exceptions_to_handle as e:
if first_error is None:
first_error = e
except BaseException as e:
for rm in run_managers:
rm.on_chain_error(e)
raise e
else:
for rm, output in zip(run_managers, outputs):
rm.on_chain_end(output)
return outputs
if first_error is None:
raise ValueError('No error stored at end of fallbacks.')
for rm in run_managers:
rm.on_chain_error(first_error)
raise first_error
|
def batch(self, inputs: List[Input], config: Optional[Union[RunnableConfig,
List[RunnableConfig]]]=None, *, return_exceptions: bool=False, **kwargs:
Optional[Any]) ->List[Output]:
from langchain_core.callbacks.manager import CallbackManager
if return_exceptions:
raise NotImplementedError()
if not inputs:
return []
configs = get_config_list(config, len(inputs))
callback_managers = [CallbackManager.configure(inheritable_callbacks=
config.get('callbacks'), local_callbacks=None, verbose=False,
inheritable_tags=config.get('tags'), local_tags=None,
inheritable_metadata=config.get('metadata'), local_metadata=None) for
config in configs]
run_managers = [cm.on_chain_start(dumpd(self), input if isinstance(
input, dict) else {'input': input}, name=config.get('run_name')) for
cm, input, config in zip(callback_managers, inputs, configs)]
first_error = None
for runnable in self.runnables:
try:
outputs = runnable.batch(inputs, [patch_config(config,
callbacks=rm.get_child()) for rm, config in zip(
run_managers, configs)], return_exceptions=
return_exceptions, **kwargs)
except self.exceptions_to_handle as e:
if first_error is None:
first_error = e
except BaseException as e:
for rm in run_managers:
rm.on_chain_error(e)
raise e
else:
for rm, output in zip(run_managers, outputs):
rm.on_chain_end(output)
return outputs
if first_error is None:
raise ValueError('No error stored at end of fallbacks.')
for rm in run_managers:
rm.on_chain_error(first_error)
raise first_error
| null |
drop_index
|
"""
Drop a Redis search index.
Args:
index_name (str): Name of the index to drop.
delete_documents (bool): Whether to drop the associated documents.
Returns:
bool: Whether or not the drop was successful.
"""
redis_url = get_from_dict_or_env(kwargs, 'redis_url', 'REDIS_URL')
try:
import redis
except ImportError:
raise ValueError(
'Could not import redis python package. Please install it with `pip install redis`.'
)
try:
if 'redis_url' in kwargs:
kwargs.pop('redis_url')
client = get_client(redis_url=redis_url, **kwargs)
except ValueError as e:
raise ValueError(f'Your redis connected error: {e}')
try:
client.ft(index_name).dropindex(delete_documents)
logger.info('Drop index')
return True
except:
return False
|
@staticmethod
def drop_index(index_name: str, delete_documents: bool, **kwargs: Any) ->bool:
"""
Drop a Redis search index.
Args:
index_name (str): Name of the index to drop.
delete_documents (bool): Whether to drop the associated documents.
Returns:
bool: Whether or not the drop was successful.
"""
redis_url = get_from_dict_or_env(kwargs, 'redis_url', 'REDIS_URL')
try:
import redis
except ImportError:
raise ValueError(
'Could not import redis python package. Please install it with `pip install redis`.'
)
try:
if 'redis_url' in kwargs:
kwargs.pop('redis_url')
client = get_client(redis_url=redis_url, **kwargs)
except ValueError as e:
raise ValueError(f'Your redis connected error: {e}')
try:
client.ft(index_name).dropindex(delete_documents)
logger.info('Drop index')
return True
except:
return False
|
Drop a Redis search index.
Args:
index_name (str): Name of the index to drop.
delete_documents (bool): Whether to drop the associated documents.
Returns:
bool: Whether or not the drop was successful.
|
_load_credentials
|
"""Load credentials."""
try:
from google.auth import default
from google.auth.transport.requests import Request
from google.oauth2 import service_account
from google.oauth2.credentials import Credentials
from google_auth_oauthlib.flow import InstalledAppFlow
except ImportError:
raise ImportError(
'You must run `pip install --upgrade google-api-python-client google-auth-httplib2 google-auth-oauthlib` to use the Google Drive loader.'
)
creds = None
if self.service_account_key.exists():
return service_account.Credentials.from_service_account_file(str(self.
service_account_key), scopes=SCOPES)
if self.token_path.exists():
creds = Credentials.from_authorized_user_file(str(self.token_path), SCOPES)
if not creds or not creds.valid:
if creds and creds.expired and creds.refresh_token:
creds.refresh(Request())
elif 'GOOGLE_APPLICATION_CREDENTIALS' not in os.environ:
creds, project = default()
creds = creds.with_scopes(SCOPES)
if creds:
return creds
else:
flow = InstalledAppFlow.from_client_secrets_file(str(self.
credentials_path), SCOPES)
creds = flow.run_local_server(port=0)
with open(self.token_path, 'w') as token:
token.write(creds.to_json())
return creds
|
def _load_credentials(self) ->Any:
"""Load credentials."""
try:
from google.auth import default
from google.auth.transport.requests import Request
from google.oauth2 import service_account
from google.oauth2.credentials import Credentials
from google_auth_oauthlib.flow import InstalledAppFlow
except ImportError:
raise ImportError(
'You must run `pip install --upgrade google-api-python-client google-auth-httplib2 google-auth-oauthlib` to use the Google Drive loader.'
)
creds = None
if self.service_account_key.exists():
return service_account.Credentials.from_service_account_file(str(
self.service_account_key), scopes=SCOPES)
if self.token_path.exists():
creds = Credentials.from_authorized_user_file(str(self.token_path),
SCOPES)
if not creds or not creds.valid:
if creds and creds.expired and creds.refresh_token:
creds.refresh(Request())
elif 'GOOGLE_APPLICATION_CREDENTIALS' not in os.environ:
creds, project = default()
creds = creds.with_scopes(SCOPES)
if creds:
return creds
else:
flow = InstalledAppFlow.from_client_secrets_file(str(self.
credentials_path), SCOPES)
creds = flow.run_local_server(port=0)
with open(self.token_path, 'w') as token:
token.write(creds.to_json())
return creds
|
Load credentials.
|
is_lc_serializable
|
return False
|
@classmethod
def is_lc_serializable(cls) ->bool:
return False
| null |
top
|
"""Get the top of the stack without popping it."""
return self.stack[-1] if len(self.stack) > 0 else None
|
def top(self) ->Optional[Thought]:
"""Get the top of the stack without popping it."""
return self.stack[-1] if len(self.stack) > 0 else None
|
Get the top of the stack without popping it.
|
test_search
|
with mock.patch('nuclia.sdk.search.NucliaSearch.find', new_callable=FakeFind):
ndb = NucliaDB(knowledge_box='YOUR_KB_ID', local=False, api_key=
'YOUR_API_KEY')
results = ndb.similarity_search('Who was inspired by Ada Lovelace?')
assert len(results) == 1
assert results[0].page_content == 'This is a test'
assert results[0].metadata['extra']['some'] == 'metadata'
assert results[0].metadata['value']['body'] == 'This is a test'
|
def test_search() ->None:
with mock.patch('nuclia.sdk.search.NucliaSearch.find', new_callable=
FakeFind):
ndb = NucliaDB(knowledge_box='YOUR_KB_ID', local=False, api_key=
'YOUR_API_KEY')
results = ndb.similarity_search('Who was inspired by Ada Lovelace?')
assert len(results) == 1
assert results[0].page_content == 'This is a test'
assert results[0].metadata['extra']['some'] == 'metadata'
assert results[0].metadata['value']['body'] == 'This is a test'
| null |
get_input_schema
|
"""Get a pydantic model that can be used to validate input to the runnable.
Runnables that leverage the configurable_fields and configurable_alternatives
methods will have a dynamic input schema that depends on which
configuration the runnable is invoked with.
This method allows to get an input schema for a specific configuration.
Args:
config: A config to use when generating the schema.
Returns:
A pydantic model that can be used to validate input.
"""
root_type = self.InputType
if inspect.isclass(root_type) and issubclass(root_type, BaseModel):
return root_type
return create_model(self.get_name('Input'), __root__=(root_type, None),
__config__=_SchemaConfig)
|
def get_input_schema(self, config: Optional[RunnableConfig]=None) ->Type[
BaseModel]:
"""Get a pydantic model that can be used to validate input to the runnable.
Runnables that leverage the configurable_fields and configurable_alternatives
methods will have a dynamic input schema that depends on which
configuration the runnable is invoked with.
This method allows to get an input schema for a specific configuration.
Args:
config: A config to use when generating the schema.
Returns:
A pydantic model that can be used to validate input.
"""
root_type = self.InputType
if inspect.isclass(root_type) and issubclass(root_type, BaseModel):
return root_type
return create_model(self.get_name('Input'), __root__=(root_type, None),
__config__=_SchemaConfig)
|
Get a pydantic model that can be used to validate input to the runnable.
Runnables that leverage the configurable_fields and configurable_alternatives
methods will have a dynamic input schema that depends on which
configuration the runnable is invoked with.
This method allows to get an input schema for a specific configuration.
Args:
config: A config to use when generating the schema.
Returns:
A pydantic model that can be used to validate input.
|
parse_result
|
result = super().parse_result(result)
return getattr(result, self.attr_name)
|
def parse_result(self, result: List[Generation], *, partial: bool=False) ->Any:
result = super().parse_result(result)
return getattr(result, self.attr_name)
| null |
test_analyticdb_with_filter_no_match
|
"""Test end to end construction and search."""
texts = ['foo', 'bar', 'baz']
metadatas = [{'page': str(i)} for i in range(len(texts))]
docsearch = AnalyticDB.from_texts(texts=texts, collection_name=
'test_collection_filter', embedding=FakeEmbeddingsWithAdaDimension(),
metadatas=metadatas, connection_string=CONNECTION_STRING,
pre_delete_collection=True)
output = docsearch.similarity_search_with_score('foo', k=1, filter={'page':
'5'})
assert output == []
|
def test_analyticdb_with_filter_no_match() ->None:
"""Test end to end construction and search."""
texts = ['foo', 'bar', 'baz']
metadatas = [{'page': str(i)} for i in range(len(texts))]
docsearch = AnalyticDB.from_texts(texts=texts, collection_name=
'test_collection_filter', embedding=FakeEmbeddingsWithAdaDimension(
), metadatas=metadatas, connection_string=CONNECTION_STRING,
pre_delete_collection=True)
output = docsearch.similarity_search_with_score('foo', k=1, filter={
'page': '5'})
assert output == []
|
Test end to end construction and search.
|
on_chain_error
|
"""Run when chain errors.
Args:
error (Exception or KeyboardInterrupt): The error.
"""
handle_event(self.handlers, 'on_chain_error', 'ignore_chain', error, run_id
=self.run_id, parent_run_id=self.parent_run_id, tags=self.tags, **kwargs)
|
def on_chain_error(self, error: BaseException, **kwargs: Any) ->None:
"""Run when chain errors.
Args:
error (Exception or KeyboardInterrupt): The error.
"""
handle_event(self.handlers, 'on_chain_error', 'ignore_chain', error,
run_id=self.run_id, parent_run_id=self.parent_run_id, tags=self.
tags, **kwargs)
|
Run when chain errors.
Args:
error (Exception or KeyboardInterrupt): The error.
|
test_geometry_not_returned
|
loader = ArcGISLoader(layer=mock_feature_layer, gis=mock_gis,
return_geometry=False)
documents = list(loader.lazy_load())
assert 'geometry' not in documents[0].metadata
|
def test_geometry_not_returned(arcgis_mocks, mock_feature_layer, mock_gis):
loader = ArcGISLoader(layer=mock_feature_layer, gis=mock_gis,
return_geometry=False)
documents = list(loader.lazy_load())
assert 'geometry' not in documents[0].metadata
| null |
add_example
|
"""Add new example to list."""
self.examples.append(example)
|
def add_example(self, example: Dict[str, str]) ->None:
"""Add new example to list."""
self.examples.append(example)
|
Add new example to list.
|
test_valid_action_and_answer_raises_exception
|
"""Test handling when both an action and answer are found."""
llm_output = """Thought: I need to search for NBA
Action: Search
Action Input: NBA
Observation: founded in 1994
Thought: I can now answer the question
Final Answer: 1994"""
with pytest.raises(OutputParserException):
get_action_and_input(llm_output)
|
def test_valid_action_and_answer_raises_exception() ->None:
"""Test handling when both an action and answer are found."""
llm_output = """Thought: I need to search for NBA
Action: Search
Action Input: NBA
Observation: founded in 1994
Thought: I can now answer the question
Final Answer: 1994"""
with pytest.raises(OutputParserException):
get_action_and_input(llm_output)
|
Test handling when both an action and answer are found.
|
__len__
|
return len(self._children)
|
def __len__(self) ->int:
return len(self._children)
| null |
parse_result
|
res = super().parse_result(result, partial=partial)
if partial and res is None:
return None
return res.get(self.key_name) if partial else res[self.key_name]
|
def parse_result(self, result: List[Generation], *, partial: bool=False) ->Any:
res = super().parse_result(result, partial=partial)
if partial and res is None:
return None
return res.get(self.key_name) if partial else res[self.key_name]
| null |
_format_func
|
self._validate_func(func)
return f'${func.value}'
|
def _format_func(self, func: Union[Operator, Comparator]) ->str:
self._validate_func(func)
return f'${func.value}'
| null |
_import_playwright_NavigateBackTool
|
from langchain_community.tools.playwright import NavigateBackTool
return NavigateBackTool
|
def _import_playwright_NavigateBackTool() ->Any:
from langchain_community.tools.playwright import NavigateBackTool
return NavigateBackTool
| null |
_async_retrying
|
return AsyncRetrying(**self._kwargs_retrying, **kwargs)
|
def _async_retrying(self, **kwargs: Any) ->AsyncRetrying:
    """Build an ``AsyncRetrying`` controller from the stored retry kwargs.

    Extra ``kwargs`` are forwarded alongside ``self._kwargs_retrying``;
    supplying the same key in both places raises ``TypeError`` (duplicate
    keyword argument), which is the intended guard against conflicts.
    """
    return AsyncRetrying(**self._kwargs_retrying, **kwargs)
| null |
update_token_usage
|
"""Update token usage."""
_keys_to_use = keys.intersection(response['usage'])
for _key in _keys_to_use:
if _key not in token_usage:
token_usage[_key] = response['usage'][_key]
else:
token_usage[_key] += response['usage'][_key]
|
def update_token_usage(keys: Set[str], response: Dict[str, Any],
    token_usage: Dict[str, Any]) ->None:
    """Accumulate the usage counters from *response* into *token_usage*.

    Only counters named in *keys* and present in ``response['usage']`` are
    considered; existing totals are incremented, new ones are initialized.
    """
    usage = response['usage']
    for name in keys.intersection(usage):
        if name in token_usage:
            token_usage[name] += usage[name]
        else:
            token_usage[name] = usage[name]
|
Update token usage.
|
retrieve
|
results = vector_search.similarity_search(query, k=4, pre_filter={
'doc_level': {'$eq': 'child'}}, post_filter_pipeline=[{'$project': {
'embedding': 0}}, {'$lookup': {'from': COLLECTION_NAME, 'localField':
PARENT_DOC_ID_KEY, 'foreignField': PARENT_DOC_ID_KEY, 'as':
'parent_context', 'pipeline': [{'$match': {'doc_level': 'parent'}}, {
'$limit': 1}, {'$project': {'embedding': 0}}]}}])
parent_docs = []
parent_doc_ids = set()
for result in results:
res = result.metadata['parent_context'][0]
text = res.pop('text')
res.pop('_id')
parent_doc = Document(page_content=text, metadata=res)
if parent_doc.metadata[PARENT_DOC_ID_KEY] not in parent_doc_ids:
parent_doc_ids.add(parent_doc.metadata[PARENT_DOC_ID_KEY])
parent_docs.append(parent_doc)
return parent_docs
|
def retrieve(query: str):
    """Fetch parent documents whose child-level chunks match *query*.

    Similarity search runs over child chunks only (``doc_level == 'child'``);
    a ``$lookup`` stage then joins each hit to its single parent record, and
    duplicate parents are collapsed before returning.
    """
    results = vector_search.similarity_search(query, k=4, pre_filter={
        'doc_level': {'$eq': 'child'}}, post_filter_pipeline=[{'$project':
        {'embedding': 0}}, {'$lookup': {'from': COLLECTION_NAME,
        'localField': PARENT_DOC_ID_KEY, 'foreignField': PARENT_DOC_ID_KEY,
        'as': 'parent_context', 'pipeline': [{'$match': {'doc_level':
        'parent'}}, {'$limit': 1}, {'$project': {'embedding': 0}}]}}])
    parent_docs = []
    parent_doc_ids = set()
    for result in results:
        # The $limit stage above guarantees exactly one joined parent per hit.
        res = result.metadata['parent_context'][0]
        text = res.pop('text')
        res.pop('_id')
        parent_doc = Document(page_content=text, metadata=res)
        # Deduplicate: several child chunks may share the same parent.
        if parent_doc.metadata[PARENT_DOC_ID_KEY] not in parent_doc_ids:
            parent_doc_ids.add(parent_doc.metadata[PARENT_DOC_ID_KEY])
            parent_docs.append(parent_doc)
    return parent_docs
| null |
_create_retry_decorator
|
min_seconds = 4
max_seconds = 10
return retry(reraise=True, stop=stop_after_attempt(embeddings.max_retries),
wait=wait_exponential(multiplier=1, min=min_seconds, max=max_seconds),
before_sleep=before_sleep_log(logger, logging.WARNING))
|
def _create_retry_decorator(embeddings: VoyageEmbeddings) ->Callable[[Any], Any
    ]:
    """Return a tenacity ``retry`` decorator tuned for Voyage API calls.

    Retries up to ``embeddings.max_retries`` attempts with exponential
    backoff clamped to the 4-10 second window, logging a warning before
    each sleep and re-raising the last error when attempts are exhausted.
    """
    min_seconds = 4
    max_seconds = 10
    return retry(reraise=True, stop=stop_after_attempt(embeddings.
        max_retries), wait=wait_exponential(multiplier=1, min=min_seconds,
        max=max_seconds), before_sleep=before_sleep_log(logger, logging.
        WARNING))
| null |
load
|
"""Load the specified URLs using Playwright and create Document instances.
Returns:
List[Document]: A list of Document instances with loaded content.
"""
from playwright.sync_api import sync_playwright
docs: List[Document] = list()
with sync_playwright() as p:
browser = p.chromium.launch(headless=self.headless)
for url in self.urls:
try:
page = browser.new_page()
response = page.goto(url)
if response is None:
raise ValueError(f'page.goto() returned None for url {url}')
text = self.evaluator.evaluate(page, browser, response)
metadata = {'source': url}
docs.append(Document(page_content=text, metadata=metadata))
except Exception as e:
if self.continue_on_failure:
logger.error(
f'Error fetching or processing {url}, exception: {e}')
else:
raise e
browser.close()
return docs
|
def load(self) ->List[Document]:
    """Load the specified URLs using Playwright and create Document instances.

    Returns:
        List[Document]: A list of Document instances with loaded content.

    Raises:
        ValueError: If navigation yields no response and
            ``continue_on_failure`` is disabled.
    """
    from playwright.sync_api import sync_playwright
    docs: List[Document] = list()
    with sync_playwright() as p:
        browser = p.chromium.launch(headless=self.headless)
        for url in self.urls:
            try:
                # A fresh page per URL isolates state between navigations.
                page = browser.new_page()
                response = page.goto(url)
                if response is None:
                    raise ValueError(f'page.goto() returned None for url {url}'
                        )
                text = self.evaluator.evaluate(page, browser, response)
                metadata = {'source': url}
                docs.append(Document(page_content=text, metadata=metadata))
            except Exception as e:
                # Best-effort mode logs and moves on; strict mode re-raises.
                if self.continue_on_failure:
                    logger.error(
                        f'Error fetching or processing {url}, exception: {e}')
                else:
                    raise e
        browser.close()
    return docs
|
Load the specified URLs using Playwright and create Document instances.
Returns:
List[Document]: A list of Document instances with loaded content.
|
test_verbose_is_settable_directly
|
import langchain
from langchain.chains.base import _get_verbosity
previous_value = langchain.verbose
previous_fn_reading = _get_verbosity()
assert previous_value == previous_fn_reading
langchain.verbose = not previous_value
new_value = langchain.verbose
new_fn_reading = _get_verbosity()
try:
assert new_value != previous_value
assert new_value == new_fn_reading
assert new_value == get_verbose()
finally:
set_verbose(previous_value)
|
def test_verbose_is_settable_directly() ->None:
    """Setting ``langchain.verbose`` directly must be seen by all readers."""
    import langchain
    from langchain.chains.base import _get_verbosity
    # Baseline: the module attribute and the helper agree.
    previous_value = langchain.verbose
    previous_fn_reading = _get_verbosity()
    assert previous_value == previous_fn_reading
    # Flip via the module attribute, then confirm every access path agrees.
    langchain.verbose = not previous_value
    new_value = langchain.verbose
    new_fn_reading = _get_verbosity()
    try:
        assert new_value != previous_value
        assert new_value == new_fn_reading
        assert new_value == get_verbose()
    finally:
        # Restore global state so other tests are unaffected.
        set_verbose(previous_value)
| null |
test_cloudflare_workers_ai_embedding_documents
|
"""Test Cloudflare Workers AI embeddings."""
documents = ['foo bar', 'foo bar', 'foo bar']
responses.add(responses.POST,
'https://api.cloudflare.com/client/v4/accounts/123/ai/run/@cf/baai/bge-base-en-v1.5'
, json={'result': {'shape': [3, 768], 'data': [[0.0] * 768, [0.0] * 768,
[0.0] * 768]}, 'success': 'true', 'errors': [], 'messages': []})
embeddings = CloudflareWorkersAIEmbeddings(account_id='123', api_token='abc')
output = embeddings.embed_documents(documents)
assert len(output) == 3
assert len(output[0]) == 768
|
@responses.activate
def test_cloudflare_workers_ai_embedding_documents() ->None:
    """Test Cloudflare Workers AI embeddings."""
    documents = ['foo bar', 'foo bar', 'foo bar']
    # Stub the Workers AI endpoint with a canned 3x768 embedding response.
    responses.add(responses.POST,
        'https://api.cloudflare.com/client/v4/accounts/123/ai/run/@cf/baai/bge-base-en-v1.5'
        , json={'result': {'shape': [3, 768], 'data': [[0.0] * 768, [0.0] *
        768, [0.0] * 768]}, 'success': 'true', 'errors': [], 'messages': []})
    embeddings = CloudflareWorkersAIEmbeddings(account_id='123', api_token=
        'abc')
    output = embeddings.embed_documents(documents)
    # One vector per document, each of the model's 768 dimensions.
    assert len(output) == 3
    assert len(output[0]) == 768
|
Test Cloudflare Workers AI embeddings.
|
validate_environment_override
|
"""Validate that api key and python package exists in environment."""
values['openai_api_key'] = get_from_dict_or_env(values, 'everlyai_api_key',
'EVERLYAI_API_KEY')
values['openai_api_base'] = DEFAULT_API_BASE
try:
import openai
except ImportError as e:
raise ValueError(
'Could not import openai python package. Please install it with `pip install openai`.'
) from e
try:
values['client'] = openai.ChatCompletion
except AttributeError as exc:
raise ValueError(
'`openai` has no `ChatCompletion` attribute, this is likely due to an old version of the openai package. Try upgrading it with `pip install --upgrade openai`.'
) from exc
if 'model_name' not in values.keys():
values['model_name'] = DEFAULT_MODEL
model_name = values['model_name']
available_models = cls.get_available_models()
if model_name not in available_models:
raise ValueError(
f'Model name {model_name} not found in available models: {available_models}.'
)
values['available_models'] = available_models
return values
|
@root_validator(pre=True)
def validate_environment_override(cls, values: dict) ->dict:
    """Validate that api key and python package exists in environment.

    Maps the EverlyAI credentials onto the OpenAI-compatible fields,
    verifies the ``openai`` client is importable, applies the default
    model, and rejects model names the service does not offer.
    """
    # EverlyAI is OpenAI-API-compatible: reuse the openai client fields.
    values['openai_api_key'] = get_from_dict_or_env(values,
        'everlyai_api_key', 'EVERLYAI_API_KEY')
    values['openai_api_base'] = DEFAULT_API_BASE
    try:
        import openai
    except ImportError as e:
        raise ValueError(
            'Could not import openai python package. Please install it with `pip install openai`.'
            ) from e
    try:
        values['client'] = openai.ChatCompletion
    except AttributeError as exc:
        raise ValueError(
            '`openai` has no `ChatCompletion` attribute, this is likely due to an old version of the openai package. Try upgrading it with `pip install --upgrade openai`.'
            ) from exc
    if 'model_name' not in values.keys():
        values['model_name'] = DEFAULT_MODEL
    model_name = values['model_name']
    # Fail fast on unknown models rather than at request time.
    available_models = cls.get_available_models()
    if model_name not in available_models:
        raise ValueError(
            f'Model name {model_name} not found in available models: {available_models}.'
            )
    values['available_models'] = available_models
    return values
|
Validate that api key and python package exists in environment.
|
test_all_imports
|
assert set(__all__) == set(EXPECTED_ALL)
|
def test_all_imports() ->None:
    """The public ``__all__`` export list must match the expected set exactly."""
    assert set(EXPECTED_ALL) == set(__all__)
| null |
test_openai_incorrect_field
|
with pytest.warns(match='not default parameter'):
llm = OpenAI(foo='bar')
assert llm.model_kwargs == {'foo': 'bar'}
|
@pytest.mark.requires('openai')
def test_openai_incorrect_field() ->None:
    """Unknown constructor fields should warn and land in ``model_kwargs``."""
    with pytest.warns(match='not default parameter'):
        llm = OpenAI(foo='bar')
    assert llm.model_kwargs == {'foo': 'bar'}
| null |
embed_documents
|
"""Embed a list of documents using the Llama model.
Args:
texts: The list of texts to embed.
Returns:
List of embeddings, one for each text.
"""
embeddings = [self.client.embed(text) for text in texts]
return [list(map(float, e)) for e in embeddings]
|
def embed_documents(self, texts: List[str]) ->List[List[float]]:
    """Embed a list of documents using the Llama model.

    Args:
        texts: The list of texts to embed.

    Returns:
        List of embeddings (one list of floats per input text).
    """
    return [[float(value) for value in self.client.embed(text)] for text in
        texts]
|
Embed a list of documents using the Llama model.
Args:
texts: The list of texts to embed.
Returns:
List of embeddings, one for each text.
|
_import_searchapi
|
from langchain_community.utilities.searchapi import SearchApiAPIWrapper
return SearchApiAPIWrapper
|
def _import_searchapi() ->Any:
    """Lazy import hook returning the ``SearchApiAPIWrapper`` class."""
    from langchain_community.utilities import searchapi
    return searchapi.SearchApiAPIWrapper
| null |
validate_environment
|
"""Validate that python package exists in environment."""
try:
import boto3
except ImportError:
raise ImportError(
'boto3 is not installed. Please install it with `pip install boto3`')
values['lambda_client'] = boto3.client('lambda')
values['function_name'] = values['function_name']
return values
|
@root_validator()
def validate_environment(cls, values: Dict) ->Dict:
    """Validate that python package exists in environment."""
    try:
        import boto3
    except ImportError:
        raise ImportError(
            'boto3 is not installed. Please install it with `pip install boto3`'
            )
    # Client is built from the ambient AWS credential chain (env/profile).
    values['lambda_client'] = boto3.client('lambda')
    # NOTE(review): this self-assignment looks like a no-op, but it raises
    # KeyError when 'function_name' was not supplied — presumably an
    # intentional presence check; confirm before removing.
    values['function_name'] = values['function_name']
    return values
|
Validate that python package exists in environment.
|
_call
|
"""Call the evaluation chain."""
evaluate_strings_inputs = self._prepare_input(inputs)
_run_manager = run_manager or CallbackManagerForChainRun.get_noop_manager()
callbacks = _run_manager.get_child()
chain_output = self.string_evaluator.evaluate_strings(**
evaluate_strings_inputs, callbacks=callbacks, include_run_info=True)
return self._prepare_output(chain_output)
|
def _call(self, inputs: Dict[str, str], run_manager: Optional[
    CallbackManagerForChainRun]=None, **kwargs: Any) ->Dict[str, Any]:
    """Call the evaluation chain.

    Translates the chain inputs into ``evaluate_strings`` keyword
    arguments, runs the wrapped string evaluator with child callbacks,
    and normalizes its result into the chain's output format.
    """
    evaluate_strings_inputs = self._prepare_input(inputs)
    # Fall back to a no-op manager so child callbacks are always available.
    _run_manager = run_manager or CallbackManagerForChainRun.get_noop_manager()
    callbacks = _run_manager.get_child()
    chain_output = self.string_evaluator.evaluate_strings(**
        evaluate_strings_inputs, callbacks=callbacks, include_run_info=True)
    return self._prepare_output(chain_output)
|
Call the evaluation chain.
|
_import_minimax
|
from langchain_community.llms.minimax import Minimax
return Minimax
|
def _import_minimax() ->Any:
    """Lazy import hook returning the ``Minimax`` LLM class."""
    from langchain_community.llms import minimax
    return minimax.Minimax
| null |
set_api_url
|
if 'api_url' not in values:
host = values['host']
group_id = values['group_id']
api_url = f'{host}/v1/text/chatcompletion?GroupId={group_id}'
values['api_url'] = api_url
return values
|
@root_validator(pre=True, allow_reuse=True)
def set_api_url(cls, values: Dict[str, Any]) ->Dict[str, Any]:
    """Derive ``api_url`` from ``host`` and ``group_id`` when not given."""
    if 'api_url' in values:
        return values
    endpoint = f"{values['host']}/v1/text/chatcompletion?GroupId={values['group_id']}"
    values['api_url'] = endpoint
    return values
| null |
_import_azureml_endpoint
|
from langchain_community.llms.azureml_endpoint import AzureMLOnlineEndpoint
return AzureMLOnlineEndpoint
|
def _import_azureml_endpoint() ->Any:
    """Lazy import hook returning the ``AzureMLOnlineEndpoint`` class."""
    from langchain_community.llms import azureml_endpoint
    return azureml_endpoint.AzureMLOnlineEndpoint
| null |
validate_environment
|
"""Validate that api key and python package exists in environment."""
huggingfacehub_api_token = get_from_dict_or_env(values,
'huggingfacehub_api_token', 'HUGGINGFACEHUB_API_TOKEN')
try:
from huggingface_hub.hf_api import HfApi
try:
HfApi(endpoint='https://huggingface.co', token=huggingfacehub_api_token
).whoami()
except Exception as e:
raise ValueError(
'Could not authenticate with huggingface_hub. Please check your API token.'
) from e
except ImportError:
raise ImportError(
'Could not import huggingface_hub python package. Please install it with `pip install huggingface_hub`.'
)
values['huggingfacehub_api_token'] = huggingfacehub_api_token
return values
|
@root_validator()
def validate_environment(cls, values: Dict) ->Dict:
    """Validate that api key and python package exists in environment.

    Resolves the Hugging Face Hub token from the values dict or the
    ``HUGGINGFACEHUB_API_TOKEN`` env var, then performs a live ``whoami``
    call to confirm the token actually authenticates.
    """
    huggingfacehub_api_token = get_from_dict_or_env(values,
        'huggingfacehub_api_token', 'HUGGINGFACEHUB_API_TOKEN')
    try:
        from huggingface_hub.hf_api import HfApi
        try:
            # Round-trip to the Hub to verify the token works, not just
            # that it is present.
            HfApi(endpoint='https://huggingface.co', token=
                huggingfacehub_api_token).whoami()
        except Exception as e:
            raise ValueError(
                'Could not authenticate with huggingface_hub. Please check your API token.'
                ) from e
    except ImportError:
        raise ImportError(
            'Could not import huggingface_hub python package. Please install it with `pip install huggingface_hub`.'
            )
    values['huggingfacehub_api_token'] = huggingfacehub_api_token
    return values
|
Validate that api key and python package exists in environment.
|
split_text
|
def _encode(_text: str) ->List[int]:
return self._tokenizer.encode(_text, allowed_special=self.
_allowed_special, disallowed_special=self._disallowed_special)
tokenizer = Tokenizer(chunk_overlap=self._chunk_overlap, tokens_per_chunk=
self._chunk_size, decode=self._tokenizer.decode, encode=_encode)
return split_text_on_tokens(text=text, tokenizer=tokenizer)
|
def split_text(self, text: str) ->List[str]:
    """Split *text* into chunks of at most ``self._chunk_size`` tokens.

    Builds a ``Tokenizer`` adapter around the configured tokenizer
    (honoring the allowed/disallowed special-token settings) and
    delegates the actual windowed splitting to ``split_text_on_tokens``.
    """

    def _encode(_text: str) ->List[int]:
        # Encode with this splitter's special-token policy applied.
        return self._tokenizer.encode(_text, allowed_special=self.
            _allowed_special, disallowed_special=self._disallowed_special)
    tokenizer = Tokenizer(chunk_overlap=self._chunk_overlap,
        tokens_per_chunk=self._chunk_size, decode=self._tokenizer.decode,
        encode=_encode)
    return split_text_on_tokens(text=text, tokenizer=tokenizer)
| null |
_stream
|
raise NotImplementedError
|
def _stream(self, messages: List[BaseMessage], stop: Optional[List[str]]=
    None, run_manager: Optional[CallbackManagerForLLMRun]=None, **kwargs: Any
    ) ->Iterator[ChatGenerationChunk]:
    """Streaming is not supported by this chat model; always raises."""
    raise NotImplementedError
| null |
test_simple_question
|
"""Test simple question that should not need python."""
question = (
"Please write a bash script that prints 'Hello World' to the console.")
prompt = _PROMPT_TEMPLATE.format(question=question)
queries = {prompt: """```bash
expr 1 + 1
```"""}
fake_llm = FakeLLM(queries=queries)
fake_llm_bash_chain = LLMBashChain.from_llm(fake_llm, input_key='q',
output_key='a')
output = fake_llm_bash_chain.run(question)
assert output == '2\n'
|
@pytest.mark.skipif(sys.platform.startswith('win'), reason=
    'Test not supported on Windows')
def test_simple_question() ->None:
    """Test simple question that should not need python."""
    # NOTE(review): the prompt asks for a Hello-World script, but the canned
    # LLM reply is `expr 1 + 1`; the test only checks the bash chain runs
    # whatever script the fake LLM returns — confirm the mismatch is
    # deliberate.
    question = (
        "Please write a bash script that prints 'Hello World' to the console.")
    prompt = _PROMPT_TEMPLATE.format(question=question)
    queries = {prompt: '```bash\nexpr 1 + 1\n```'}
    fake_llm = FakeLLM(queries=queries)
    fake_llm_bash_chain = LLMBashChain.from_llm(fake_llm, input_key='q',
        output_key='a')
    output = fake_llm_bash_chain.run(question)
    assert output == '2\n'
|
Test simple question that should not need python.
|
test_loadnotewithnometadata_documentreturnedwithsourceonly
|
documents = EverNoteLoader(self.example_notebook_path(
'sample_notebook_missingmetadata.enex'), False).load()
note = documents[0]
assert note.page_content == 'I only have content, no metadata'
assert len(note.metadata) == 1
assert 'source' in note.metadata
assert 'sample_notebook_missingmetadata.enex' in note.metadata['source']
|
def test_loadnotewithnometadata_documentreturnedwithsourceonly(self) ->None:
    """A note without metadata should yield a doc with only a 'source' key."""
    documents = EverNoteLoader(self.example_notebook_path(
        'sample_notebook_missingmetadata.enex'), False).load()
    note = documents[0]
    assert note.page_content == 'I only have content, no metadata'
    # The loader must still record provenance even when the note has no
    # metadata of its own.
    assert len(note.metadata) == 1
    assert 'source' in note.metadata
    assert 'sample_notebook_missingmetadata.enex' in note.metadata['source']
| null |
memory_variables
|
"""Return baz variable."""
return ['baz']
|
@property
def memory_variables(self) ->List[str]:
    """Return baz variable."""
    return ['baz']
|
Return baz variable.
|
list_keys
|
"""List records in the SQLite database based on the provided date range."""
with self._make_session() as session:
query = session.query(UpsertionRecord).filter(UpsertionRecord.namespace ==
self.namespace)
if after:
query = query.filter(UpsertionRecord.updated_at > after)
if before:
query = query.filter(UpsertionRecord.updated_at < before)
if group_ids:
query = query.filter(UpsertionRecord.group_id.in_(group_ids))
if limit:
query = query.limit(limit)
records = query.all()
return [r.key for r in records]
|
def list_keys(self, *, before: Optional[float]=None, after: Optional[float]
    =None, group_ids: Optional[Sequence[str]]=None, limit: Optional[int]=None
    ) ->List[str]:
    """List records in the SQLite database based on the provided date range.

    Args:
        before: Only include records updated strictly before this timestamp.
        after: Only include records updated strictly after this timestamp.
        group_ids: Restrict results to these group ids.
        limit: Maximum number of keys to return.

    Returns:
        The keys of the matching records within this store's namespace.
    """
    with self._make_session() as session:
        # Always scope queries to this store's namespace.
        query = session.query(UpsertionRecord).filter(UpsertionRecord.
            namespace == self.namespace)
        if after:
            query = query.filter(UpsertionRecord.updated_at > after)
        if before:
            query = query.filter(UpsertionRecord.updated_at < before)
        if group_ids:
            query = query.filter(UpsertionRecord.group_id.in_(group_ids))
        if limit:
            query = query.limit(limit)
        records = query.all()
        return [r.key for r in records]
|
List records in the SQLite database based on the provided date range.
|
_import_file_management_WriteFileTool
|
from langchain_community.tools.file_management import WriteFileTool
return WriteFileTool
|
def _import_file_management_WriteFileTool() ->Any:
    """Lazy import hook returning the ``WriteFileTool`` class."""
    from langchain_community.tools import file_management
    return file_management.WriteFileTool
| null |
parse
|
return self.guard.parse_folder(text, *self.args, llm_api=self.api, **self.
kwargs)
|
def parse(self, text: str) ->Dict:
    """Validate *text* through the guard's ``parse_folder``.

    ``self.api`` is forwarded as the LLM callable, and the stored
    args/kwargs are passed along unchanged.
    """
    return self.guard.parse_folder(text, *self.args, llm_api=self.api, **
        self.kwargs)
| null |
remove_handler
|
"""Remove a handler from the callback manager."""
self.handlers.remove(handler)
self.inheritable_handlers.remove(handler)
|
def remove_handler(self, handler: BaseCallbackHandler) ->None:
    """Remove a handler from the callback manager."""
    # NOTE(review): list.remove raises ValueError when the handler is
    # missing from either list — callers must only remove handlers that
    # were previously added to both.
    self.handlers.remove(handler)
    self.inheritable_handlers.remove(handler)
|
Remove a handler from the callback manager.
|
_get_google_serper
|
return GoogleSerperRun(api_wrapper=GoogleSerperAPIWrapper(**kwargs))
|
def _get_google_serper(**kwargs: Any) ->BaseTool:
    """Build a Google Serper search tool from API-wrapper kwargs."""
    wrapper = GoogleSerperAPIWrapper(**kwargs)
    return GoogleSerperRun(api_wrapper=wrapper)
| null |
similarity_search_with_score
|
"""Return docs most similar to query."""
embedding = self._embedding.embed_query(query)
documents = self.similarity_search_with_score_by_vector(embedding=embedding,
k=k)
return documents
|
def similarity_search_with_score(self, query: str, k: int=4, **kwargs: Any
) ->List[Tuple[Document, float]]:
"""Return docs most similar to query."""
embedding = self._embedding.embed_query(query)
documents = self.similarity_search_with_score_by_vector(embedding=
embedding, k=k)
return documents
|
Return docs most similar to query.
|
_generate
|
"""Run the LLM on the given prompt and input."""
def _inner_generate(prompt: str) ->List[Generation]:
return [Generation(text=self._call(prompt=prompt, stop=stop,
run_manager=run_manager, **kwargs))]
if len(prompts) <= 1:
generations = list(map(_inner_generate, prompts))
else:
with ThreadPoolExecutor(min(8, len(prompts))) as p:
generations = list(p.map(_inner_generate, prompts))
return LLMResult(generations=generations)
|
def _generate(self, prompts: List[str], stop: Optional[List[str]]=None,
    run_manager: Optional[CallbackManagerForLLMRun]=None, **kwargs: Any
    ) ->LLMResult:
    """Run the LLM on the given prompt and input.

    A single prompt is handled inline; multiple prompts are fanned out
    across a thread pool (at most 8 workers) while preserving the input
    order in the returned generations.
    """

    def _inner_generate(prompt: str) ->List[Generation]:
        return [Generation(text=self._call(prompt=prompt, stop=stop,
            run_manager=run_manager, **kwargs))]
    if len(prompts) <= 1:
        generations = list(map(_inner_generate, prompts))
    else:
        # Executor.map keeps result order aligned with `prompts`.
        with ThreadPoolExecutor(min(8, len(prompts))) as p:
            generations = list(p.map(_inner_generate, prompts))
    return LLMResult(generations=generations)
|
Run the LLM on the given prompt and input.
|
test_mosaicml_endpoint_change
|
"""Test valid call to MosaicML."""
new_url = (
'https://models.hosted-on.mosaicml.hosting/mpt-30b-instruct/v1/predict')
llm = MosaicML(endpoint_url=new_url)
assert llm.endpoint_url == new_url
output = llm('Say foo:')
assert isinstance(output, str)
|
def test_mosaicml_endpoint_change() ->None:
    """Test valid call to MosaicML."""
    # Point the client at a non-default hosted model endpoint and verify
    # both the stored URL and a live completion round-trip.
    new_url = (
        'https://models.hosted-on.mosaicml.hosting/mpt-30b-instruct/v1/predict'
        )
    llm = MosaicML(endpoint_url=new_url)
    assert llm.endpoint_url == new_url
    output = llm('Say foo:')
    assert isinstance(output, str)
|
Test valid call to MosaicML.
|
_get_encoding_model
|
tiktoken_ = _import_tiktoken()
if self.tiktoken_model_name is not None:
model = self.tiktoken_model_name
else:
model = self.model_name
if model == 'gpt-3.5-turbo':
model = 'gpt-3.5-turbo-0301'
elif model == 'gpt-4':
model = 'gpt-4-0314'
try:
encoding = tiktoken_.encoding_for_model(model)
except KeyError:
logger.warning('Warning: model not found. Using cl100k_base encoding.')
model = 'cl100k_base'
encoding = tiktoken_.get_encoding(model)
return model, encoding
|
def _get_encoding_model(self) ->Tuple[str, tiktoken.Encoding]:
    """Resolve the tiktoken encoding to use for token counting.

    Prefers ``tiktoken_model_name`` when set, pins the moving
    ``gpt-3.5-turbo``/``gpt-4`` aliases to dated snapshots, and falls back
    to the ``cl100k_base`` encoding for unknown model names.

    Returns:
        A ``(model_name, encoding)`` pair.
    """
    tiktoken_ = _import_tiktoken()
    if self.tiktoken_model_name is not None:
        model = self.tiktoken_model_name
    else:
        model = self.model_name
        # Pin rolling aliases so token counts stay stable across releases.
        if model == 'gpt-3.5-turbo':
            model = 'gpt-3.5-turbo-0301'
        elif model == 'gpt-4':
            model = 'gpt-4-0314'
    try:
        encoding = tiktoken_.encoding_for_model(model)
    except KeyError:
        logger.warning('Warning: model not found. Using cl100k_base encoding.')
        model = 'cl100k_base'
        encoding = tiktoken_.get_encoding(model)
    return model, encoding
| null |
__init__
|
"""Initialize with file path."""
try:
import unstructured
from unstructured.__version__ import __version__ as __unstructured_version__
self.__version = __unstructured_version__
except ImportError:
raise ImportError(
'unstructured package not found, please install it with `pip install unstructured`'
)
self._validate_mode(mode)
self.mode = mode
headers = unstructured_kwargs.pop('headers', {})
if len(headers.keys()) != 0:
warn_about_headers = False
if self.__is_non_html_available():
warn_about_headers = not self.__is_headers_available_for_non_html()
else:
warn_about_headers = not self.__is_headers_available_for_html()
if warn_about_headers:
logger.warning(
'You are using an old version of unstructured. The headers parameter is ignored'
)
self.urls = urls
self.continue_on_failure = continue_on_failure
self.headers = headers
self.unstructured_kwargs = unstructured_kwargs
self.show_progress_bar = show_progress_bar
|
def __init__(self, urls: List[str], continue_on_failure: bool=True, mode:
    str='single', show_progress_bar: bool=False, **unstructured_kwargs: Any):
    """Initialize with file path.

    Args:
        urls: URLs to load.
        continue_on_failure: Whether to log and skip failed URLs instead of
            raising.
        mode: Partitioning mode, validated by ``_validate_mode``.
        show_progress_bar: Whether to display a progress bar while loading.
        **unstructured_kwargs: Extra options forwarded to ``unstructured``;
            a ``headers`` entry is extracted and handled separately.

    Raises:
        ImportError: If the ``unstructured`` package is not installed.
    """
    try:
        import unstructured
        from unstructured.__version__ import __version__ as __unstructured_version__
        self.__version = __unstructured_version__
    except ImportError:
        raise ImportError(
            'unstructured package not found, please install it with `pip install unstructured`'
            )
    self._validate_mode(mode)
    self.mode = mode
    # Headers are pulled out of the kwargs because older unstructured
    # versions silently ignore them; warn in that case.
    headers = unstructured_kwargs.pop('headers', {})
    if len(headers.keys()) != 0:
        warn_about_headers = False
        if self.__is_non_html_available():
            warn_about_headers = not self.__is_headers_available_for_non_html()
        else:
            warn_about_headers = not self.__is_headers_available_for_html()
        if warn_about_headers:
            logger.warning(
                'You are using an old version of unstructured. The headers parameter is ignored'
                )
    self.urls = urls
    self.continue_on_failure = continue_on_failure
    self.headers = headers
    self.unstructured_kwargs = unstructured_kwargs
    self.show_progress_bar = show_progress_bar
|
Initialize with file path.
|
_convert_agent_action_to_messages
|
"""Convert an agent action to a message.
This code is used to reconstruct the original AI message from the agent action.
Args:
agent_action: Agent action to convert.
Returns:
AIMessage that corresponds to the original tool invocation.
"""
if isinstance(agent_action, AgentActionMessageLog):
return list(agent_action.message_log) + [_create_function_message(
agent_action, observation)]
else:
return [AIMessage(content=agent_action.log)]
|
def _convert_agent_action_to_messages(agent_action: AgentAction,
    observation: str) ->List[BaseMessage]:
    """Convert an agent action to a message.

    This code is used to reconstruct the original AI message from the agent
    action.

    Args:
        agent_action: Agent action to convert.
        observation: Tool output appended as a function message when the
            action carries a message log.

    Returns:
        Messages that correspond to the original tool invocation.
    """
    if isinstance(agent_action, AgentActionMessageLog):
        # Replay the recorded messages plus the resulting function output.
        return list(agent_action.message_log) + [_create_function_message(
            agent_action, observation)]
    else:
        # No message log: fall back to the raw log text as an AI message.
        return [AIMessage(content=agent_action.log)]
|
Convert an agent action to a message.
This code is used to reconstruct the original AI message from the agent action.
Args:
agent_action: Agent action to convert.
Returns:
AIMessage that corresponds to the original tool invocation.
|
check_if_answer_reached
|
if self.strip_tokens:
return self.last_tokens_stripped == self.answer_prefix_tokens_stripped
else:
return self.last_tokens == self.answer_prefix_tokens
|
def check_if_answer_reached(self) ->bool:
    """Return True once the latest tokens match the answer-prefix tokens."""
    if self.strip_tokens:
        return self.last_tokens_stripped == self.answer_prefix_tokens_stripped
    return self.last_tokens == self.answer_prefix_tokens
| null |
mock_completion
|
return {'id': 'cmpl-3evkmQda5Hu7fcZavknQda3SQ', 'object': 'text_completion',
'created': 1689989000, 'model': 'gpt-3.5-turbo-instruct', 'choices': [{
'text': 'Bar Baz', 'index': 0, 'logprobs': None, 'finish_reason':
'length'}], 'usage': {'prompt_tokens': 1, 'completion_tokens': 2,
'total_tokens': 3}}
|
@pytest.fixture
def mock_completion() ->dict:
    """Canned OpenAI completion payload used to stub API responses."""
    return {'id': 'cmpl-3evkmQda5Hu7fcZavknQda3SQ', 'object':
        'text_completion', 'created': 1689989000, 'model':
        'gpt-3.5-turbo-instruct', 'choices': [{'text': 'Bar Baz', 'index':
        0, 'logprobs': None, 'finish_reason': 'length'}], 'usage': {
        'prompt_tokens': 1, 'completion_tokens': 2, 'total_tokens': 3}}
| null |
ignore_chat_model
|
"""Whether to ignore chat model callbacks."""
return False
|
@property
def ignore_chat_model(self) ->bool:
    """Whether to ignore chat model callbacks."""
    return False
|
Whether to ignore chat model callbacks.
|
lc_secrets
|
return {'zhipuai_api_key': 'ZHIPUAI_API_KEY'}
|
@property
def lc_secrets(self) ->Dict[str, str]:
    """Map secret constructor fields to the env vars that supply them."""
    return {'zhipuai_api_key': 'ZHIPUAI_API_KEY'}
| null |
embed_with_retry
|
"""Use tenacity to retry the completion call."""
retry_decorator = _create_retry_decorator()
@retry_decorator
def _embed_with_retry(*args: Any, **kwargs: Any) ->Any:
return embeddings.client.generate_embeddings(*args, **kwargs)
return _embed_with_retry(*args, **kwargs)
|
def embed_with_retry(embeddings: GooglePalmEmbeddings, *args: Any, **kwargs:
    Any) ->Any:
    """Use tenacity to retry the completion call.

    Wraps ``embeddings.client.generate_embeddings`` in the module's retry
    policy and invokes it with the given arguments.
    """
    retry_decorator = _create_retry_decorator()

    @retry_decorator
    def _embed_with_retry(*args: Any, **kwargs: Any) ->Any:
        return embeddings.client.generate_embeddings(*args, **kwargs)
    return _embed_with_retry(*args, **kwargs)
|
Use tenacity to retry the completion call.
|
loads
|
"""Revive a LangChain class from a JSON string.
Equivalent to `load(json.loads(text))`.
Args:
text: The string to load.
secrets_map: A map of secrets to load.
valid_namespaces: A list of additional namespaces (modules)
to allow to be deserialized.
Returns:
Revived LangChain objects.
"""
return json.loads(text, object_hook=Reviver(secrets_map, valid_namespaces))
|
def loads(text: str, *, secrets_map: Optional[Dict[str, str]]=None,
    valid_namespaces: Optional[List[str]]=None) ->Any:
    """Revive a LangChain class from a JSON string.

    Equivalent to ``load(json.loads(text))``.

    Args:
        text: The string to load.
        secrets_map: A map of secrets to load.
        valid_namespaces: A list of additional namespaces (modules)
            to allow to be deserialized.

    Returns:
        Revived LangChain objects.
    """
    reviver = Reviver(secrets_map, valid_namespaces)
    return json.loads(text, object_hook=reviver)
|
Revive a LangChain class from a JSON string.
Equivalent to `load(json.loads(text))`.
Args:
text: The string to load.
secrets_map: A map of secrets to load.
valid_namespaces: A list of additional namespaces (modules)
to allow to be deserialized.
Returns:
Revived LangChain objects.
|
delete_keys
|
"""Delete specified records from the database.
Args:
keys: A list of keys to delete.
"""
|
@abstractmethod
def delete_keys(self, keys: Sequence[str]) ->None:
    """Delete specified records from the database.

    Args:
        keys: A list of keys to delete.
    """
|
Delete specified records from the database.
Args:
keys: A list of keys to delete.
|
_parse_response
|
generations: List[List[Generation]] = []
for result in predictions.predictions:
generations.append([Generation(text=self._parse_prediction(prediction)) for
prediction in result])
return LLMResult(generations=generations)
|
def _parse_response(self, predictions: 'Prediction') ->LLMResult:
    """Convert a Vertex prediction batch into an ``LLMResult``.

    Each element of ``predictions.predictions`` yields one list of
    ``Generation`` objects, preserving the per-prompt grouping.
    """
    generations: List[List[Generation]] = []
    for result in predictions.predictions:
        generations.append([Generation(text=self._parse_prediction(
            prediction)) for prediction in result])
    return LLMResult(generations=generations)
| null |
put
|
return f'put {str(data)}'
|
@staticmethod
def put(url: str, data: Dict[str, Any], **kwargs: Any) ->str:
return f'put {str(data)}'
| null |
_import_office365_send_event
|
from langchain_community.tools.office365.send_event import O365SendEvent
return O365SendEvent
|
def _import_office365_send_event() ->Any:
    """Lazy import hook returning the ``O365SendEvent`` class."""
    from langchain_community.tools.office365 import send_event
    return send_event.O365SendEvent
| null |
_initialize_chunk_configuration
|
self.maximum_tokens_per_chunk = cast(int, self._model.max_seq_length)
if tokens_per_chunk is None:
self.tokens_per_chunk = self.maximum_tokens_per_chunk
else:
self.tokens_per_chunk = tokens_per_chunk
if self.tokens_per_chunk > self.maximum_tokens_per_chunk:
raise ValueError(
f"The token limit of the models '{self.model_name}' is: {self.maximum_tokens_per_chunk}. Argument tokens_per_chunk={self.tokens_per_chunk} > maximum token limit."
)
|
def _initialize_chunk_configuration(self, *, tokens_per_chunk: Optional[int]
) ->None:
self.maximum_tokens_per_chunk = cast(int, self._model.max_seq_length)
if tokens_per_chunk is None:
self.tokens_per_chunk = self.maximum_tokens_per_chunk
else:
self.tokens_per_chunk = tokens_per_chunk
if self.tokens_per_chunk > self.maximum_tokens_per_chunk:
raise ValueError(
f"The token limit of the models '{self.model_name}' is: {self.maximum_tokens_per_chunk}. Argument tokens_per_chunk={self.tokens_per_chunk} > maximum token limit."
)
| null |
exists
|
"""Check if the provided keys exist in the database.
Args:
keys: A list of keys to check.
Returns:
A list of boolean values indicating the existence of each key.
"""
|
@abstractmethod
def exists(self, keys: Sequence[str]) ->List[bool]:
    """Report, per key, whether a record is present in the database.

    Args:
        keys: Keys to look up.

    Returns:
        One boolean per input key, ``True`` when the record exists.
    """
|
Check if the provided keys exist in the database.
Args:
keys: A list of keys to check.
Returns:
A list of boolean values indicating the existence of each key.
|
test_from_texts
|
texts = ['Dogs are tough.', 'Cats have fluff.', 'What is a sandwich?',
'That fence is purple.']
vectorstore = MongoDBAtlasVectorSearch.from_texts(texts, embedding_openai,
collection=collection, index_name=INDEX_NAME)
sleep(1)
output = vectorstore.similarity_search('Sandwich', k=1)
assert output[0].page_content == 'What is a sandwich?'
|
def test_from_texts(self, embedding_openai: Embeddings, collection: Any
    ) ->None:
    """Round-trip: build the store from raw texts, then retrieve by similarity."""
    sample_texts = ['Dogs are tough.', 'Cats have fluff.',
        'What is a sandwich?', 'That fence is purple.']
    store = MongoDBAtlasVectorSearch.from_texts(sample_texts,
        embedding_openai, collection=collection, index_name=INDEX_NAME)
    sleep(1)  # give Atlas a moment to index the freshly inserted documents
    results = store.similarity_search('Sandwich', k=1)
    assert results[0].page_content == 'What is a sandwich?'
| null |
__init__
|
"""Initialize callback handler."""
super().__init__()
aim = import_aim()
self.repo = repo
self.experiment_name = experiment_name
self.system_tracking_interval = system_tracking_interval
self.log_system_params = log_system_params
self._run = aim.Run(repo=self.repo, experiment=self.experiment_name,
system_tracking_interval=self.system_tracking_interval,
log_system_params=self.log_system_params)
self._run_hash = self._run.hash
self.action_records: list = []
|
def __init__(self, repo: Optional[str]=None, experiment_name: Optional[str]
    =None, system_tracking_interval: Optional[int]=10, log_system_params:
    bool=True) ->None:
    """Initialize the Aim callback handler and start a tracking run.

    Args:
        repo: Aim repository path or URL; default repo when ``None``.
        experiment_name: Experiment to attach the run to.
        system_tracking_interval: Seconds between system-metric samples.
        log_system_params: Whether to record host/system parameters.
    """
    super().__init__()
    aim = import_aim()
    # Persist the configuration before creating the run itself.
    self.repo = repo
    self.experiment_name = experiment_name
    self.system_tracking_interval = system_tracking_interval
    self.log_system_params = log_system_params
    run = aim.Run(repo=self.repo, experiment=self.experiment_name,
        system_tracking_interval=self.system_tracking_interval,
        log_system_params=self.log_system_params)
    self._run = run
    self._run_hash = run.hash
    self.action_records: list = []
|
Initialize callback handler.
|
_identifying_params
|
"""Get the identifying parameters."""
return {**{'pipeline_key': self.pipeline_key}, **{'pipeline_kwargs': self.
pipeline_kwargs}}
|
@property
def _identifying_params(self) ->Mapping[str, Any]:
"""Get the identifying parameters."""
return {**{'pipeline_key': self.pipeline_key}, **{'pipeline_kwargs':
self.pipeline_kwargs}}
|
Get the identifying parameters.
|
test_json_distance_evaluator_evaluation_name
|
assert json_distance_evaluator.evaluation_name == 'json_edit_distance'
|
@pytest.mark.requires('rapidfuzz')
def test_json_distance_evaluator_evaluation_name(json_distance_evaluator:
    JsonEditDistanceEvaluator) ->None:
    """The evaluator must advertise itself under the expected name."""
    expected_name = 'json_edit_distance'
    assert json_distance_evaluator.evaluation_name == expected_name
| null |
_llm_type
|
"""Return type of chat model."""
return 'ollama-chat'
|
@property
def _llm_type(self) ->str:
"""Return type of chat model."""
return 'ollama-chat'
|
Return type of chat model.
|
__init__
|
"""Override init to support instantiation by position for backward compat."""
super().__init__(tool=tool, tool_input=tool_input, log=log, **kwargs)
|
def __init__(self, tool: str, tool_input: Union[str, dict], log: str, **
    kwargs: Any):
    """Override init to support instantiation by position for backward compat.

    Args:
        tool: Name of the tool to invoke.
        tool_input: Input passed to the tool (raw string or parsed dict).
        log: The raw LLM output that produced this action.
        **kwargs: Forwarded unchanged to the parent initializer.
    """
    super().__init__(tool=tool, tool_input=tool_input, log=log, **kwargs)
|
Override init to support instantiation by position for backward compat.
|
_run
|
try:
dir_path_ = self.get_relative_path(dir_path)
except FileValidationError:
return INVALID_PATH_TEMPLATE.format(arg_name='dir_path', value=dir_path)
matches = []
try:
for root, _, filenames in os.walk(dir_path_):
for filename in fnmatch.filter(filenames, pattern):
absolute_path = os.path.join(root, filename)
relative_path = os.path.relpath(absolute_path, dir_path_)
matches.append(relative_path)
if matches:
return '\n'.join(matches)
else:
return f'No files found for pattern {pattern} in directory {dir_path}'
except Exception as e:
return 'Error: ' + str(e)
|
def _run(self, pattern: str, dir_path: str='.', run_manager: Optional[
    CallbackManagerForToolRun]=None) ->str:
    """Recursively search *dir_path* for file names matching *pattern*.

    Returns matching paths (relative to the search root) one per line,
    a not-found message, or an error string; never raises to the caller.
    """
    try:
        resolved_dir = self.get_relative_path(dir_path)
    except FileValidationError:
        return INVALID_PATH_TEMPLATE.format(arg_name='dir_path', value=dir_path
            )
    try:
        # Walk the tree, keeping each match relative to the search root.
        found = [os.path.relpath(os.path.join(root, name), resolved_dir) for
            root, _, names in os.walk(resolved_dir) for name in fnmatch.
            filter(names, pattern)]
        if not found:
            return (
                f'No files found for pattern {pattern} in directory {dir_path}'
                )
        return '\n'.join(found)
    except Exception as e:
        return 'Error: ' + str(e)
| null |
_cosine_relevance_score_fn
|
"""Normalize the distance to a score on a scale [0, 1]."""
return (distance + 1.0) / 2.0
|
@staticmethod
def _cosine_relevance_score_fn(distance: float) ->float:
"""Normalize the distance to a score on a scale [0, 1]."""
return (distance + 1.0) / 2.0
|
Normalize the distance to a score on a scale [0, 1].
|
test_other
|
"""Non-exhaustive test for accessing other JIRA API methods"""
jira = JiraAPIWrapper()
issue_create_dict = """
{
"function":"issue_create",
"kwargs": {
"fields": {
"summary": "Test Summary",
"description": "Test Description",
"issuetype": {"name": "Bug"},
"project": {"key": "TP"}
}
}
}
"""
output = jira.run('other', issue_create_dict)
assert 'id' in output
assert 'key' in output
|
def test_other() ->None:
    """Non-exhaustive test for accessing other JIRA API methods"""
    wrapper = JiraAPIWrapper()
    payload = """
    {
        "function":"issue_create",
        "kwargs": {
            "fields": {
                "summary": "Test Summary",
                "description": "Test Description",
                "issuetype": {"name": "Bug"},
                "project": {"key": "TP"}
            }
        }
    }
    """
    result = wrapper.run('other', payload)
    assert 'id' in result
    assert 'key' in result
|
Non-exhaustive test for accessing other JIRA API methods
|
from_vectorstore
|
return cls(vectorstore._schema)
|
@classmethod
def from_vectorstore(cls, vectorstore: Redis) ->RedisTranslator:
    """Build a translator from an existing Redis vector store's schema."""
    schema = vectorstore._schema
    return cls(schema)
| null |
test_unstructured_excel_loader
|
"""Test unstructured loader."""
file_path = os.path.join(EXAMPLE_DIRECTORY, 'stanley-cups.xlsx')
loader = UnstructuredExcelLoader(str(file_path))
docs = loader.load()
assert len(docs) == 1
|
def test_unstructured_excel_loader() ->None:
    """Test unstructured loader.

    Loads the sample workbook and checks it yields a single document.
    """
    file_path = os.path.join(EXAMPLE_DIRECTORY, 'stanley-cups.xlsx')
    # os.path.join already returns a str, so the str() wrapper was redundant.
    loader = UnstructuredExcelLoader(file_path)
    docs = loader.load()
    assert len(docs) == 1
|
Test unstructured loader.
|
test_memory_with_message_store
|
"""Test the memory with a message store."""
message_history = RedisChatMessageHistory(url='redis://localhost:6379/0',
ttl=10, session_id='my-test-session')
memory = ConversationBufferMemory(memory_key='baz', chat_memory=
message_history, return_messages=True)
memory.chat_memory.add_ai_message('This is me, the AI')
memory.chat_memory.add_user_message('This is me, the human')
messages = memory.chat_memory.messages
messages_json = json.dumps([message_to_dict(msg) for msg in messages])
assert 'This is me, the AI' in messages_json
assert 'This is me, the human' in messages_json
memory.chat_memory.clear()
|
def test_memory_with_message_store() ->None:
    """Round-trip chat messages through a Redis-backed memory store."""
    history = RedisChatMessageHistory(url='redis://localhost:6379/0', ttl=
        10, session_id='my-test-session')
    memory = ConversationBufferMemory(memory_key='baz', chat_memory=history,
        return_messages=True)
    memory.chat_memory.add_ai_message('This is me, the AI')
    memory.chat_memory.add_user_message('This is me, the human')
    serialized = json.dumps([message_to_dict(m) for m in memory.
        chat_memory.messages])
    assert 'This is me, the AI' in serialized
    assert 'This is me, the human' in serialized
    memory.chat_memory.clear()
|
Test the memory with a message store.
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.