method_name
stringlengths 1
78
| method_body
stringlengths 3
9.66k
| full_code
stringlengths 31
10.7k
| docstring
stringlengths 4
4.74k
⌀ |
|---|---|---|---|
_chain_type
|
return 'vector_sql_database_chain'
|
@property
def _chain_type(self) ->str:
    """Identifier string naming this chain type."""
    chain_type_id = 'vector_sql_database_chain'
    return chain_type_id
| null |
_get_elements
|
from unstructured.partition.tsv import partition_tsv
return partition_tsv(filename=self.file_path, **self.unstructured_kwargs)
|
def _get_elements(self) ->List:
    """Partition the TSV file at ``self.file_path`` into unstructured elements."""
    from unstructured.partition.tsv import partition_tsv
    elements = partition_tsv(filename=self.file_path, **self.unstructured_kwargs)
    return elements
| null |
_Expr
|
self.fill()
self.dispatch(tree.value)
|
def _Expr(self, tree):
    """Emit an expression-statement node: start a fresh line, then dispatch its value."""
    self.fill()
    value = tree.value
    self.dispatch(value)
| null |
compress_documents
|
"""
Compress documents using Cohere's rerank API.
Args:
documents: A sequence of documents to compress.
query: The query to use for compressing the documents.
callbacks: Callbacks to run during the compression process.
Returns:
A sequence of compressed documents.
"""
if len(documents) == 0:
return []
doc_list = list(documents)
_docs = [d.page_content for d in doc_list]
results = self.client.rerank(model=self.model, query=query, documents=_docs,
top_n=self.top_n)
final_results = []
for r in results:
doc = doc_list[r.index]
doc.metadata['relevance_score'] = r.relevance_score
final_results.append(doc)
return final_results
|
def compress_documents(self, documents: Sequence[Document], query: str,
    callbacks: Optional[Callbacks]=None) ->Sequence[Document]:
    """
    Compress documents using Cohere's rerank API.
    Args:
        documents: A sequence of documents to compress.
        query: The query to use for compressing the documents.
        callbacks: Callbacks to run during the compression process.
    Returns:
        A sequence of compressed documents.
    """
    if not documents:
        return []
    docs = list(documents)
    contents = [doc.page_content for doc in docs]
    reranked = self.client.rerank(model=self.model, query=query,
        documents=contents, top_n=self.top_n)
    compressed = []
    for item in reranked:
        # map the rerank result back onto the original document and record
        # its score in metadata
        matched = docs[item.index]
        matched.metadata['relevance_score'] = item.relevance_score
        compressed.append(matched)
    return compressed
|
Compress documents using Cohere's rerank API.
Args:
documents: A sequence of documents to compress.
query: The query to use for compressing the documents.
callbacks: Callbacks to run during the compression process.
Returns:
A sequence of compressed documents.
|
invoke
|
config = ensure_config(config)
return self(input, callbacks=config.get('callbacks'), tags=config.get(
'tags'), metadata=config.get('metadata'), run_name=config.get(
'run_name'), **kwargs)
|
def invoke(self, input: Dict[str, Any], config: Optional[RunnableConfig]=
    None, **kwargs: Any) ->Dict[str, Any]:
    """Run the chain on ``input``, forwarding callbacks/tags/metadata/run_name from config."""
    conf = ensure_config(config)
    return self(input, callbacks=conf.get('callbacks'), tags=conf.get(
        'tags'), metadata=conf.get('metadata'), run_name=conf.get(
        'run_name'), **kwargs)
| null |
input_keys
|
return ['objective']
|
@property
def input_keys(self) ->List[str]:
    """Keys this chain expects in its input; only 'objective' is required."""
    keys = ['objective']
    return keys
| null |
create_spark_sql_agent
|
"""Construct a Spark SQL agent from an LLM and tools."""
from langchain.agents.agent import AgentExecutor
from langchain.agents.mrkl.base import ZeroShotAgent
from langchain.chains.llm import LLMChain
tools = toolkit.get_tools()
prefix = prefix.format(top_k=top_k)
prompt_params = {'format_instructions': format_instructions
} if format_instructions is not None else {}
prompt = ZeroShotAgent.create_prompt(tools, prefix=prefix, suffix=suffix,
input_variables=input_variables, **prompt_params)
llm_chain = LLMChain(llm=llm, prompt=prompt, callback_manager=
callback_manager, callbacks=callbacks)
tool_names = [tool.name for tool in tools]
agent = ZeroShotAgent(llm_chain=llm_chain, allowed_tools=tool_names, **kwargs)
return AgentExecutor.from_agent_and_tools(agent=agent, tools=tools,
callback_manager=callback_manager, callbacks=callbacks, verbose=verbose,
max_iterations=max_iterations, max_execution_time=max_execution_time,
early_stopping_method=early_stopping_method, **agent_executor_kwargs or {})
|
def create_spark_sql_agent(llm: BaseLanguageModel, toolkit: SparkSQLToolkit,
    callback_manager: Optional[BaseCallbackManager]=None, callbacks:
    Callbacks=None, prefix: str=SQL_PREFIX, suffix: str=SQL_SUFFIX,
    format_instructions: Optional[str]=None, input_variables: Optional[List
    [str]]=None, top_k: int=10, max_iterations: Optional[int]=15,
    max_execution_time: Optional[float]=None, early_stopping_method: str=
    'force', verbose: bool=False, agent_executor_kwargs: Optional[Dict[str,
    Any]]=None, **kwargs: Any) ->AgentExecutor:
    """Construct a Spark SQL agent from an LLM and tools."""
    from langchain.agents.agent import AgentExecutor
    from langchain.agents.mrkl.base import ZeroShotAgent
    from langchain.chains.llm import LLMChain
    tools = toolkit.get_tools()
    # fill the row-limit placeholder into the prompt prefix
    formatted_prefix = prefix.format(top_k=top_k)
    extra_prompt_kwargs = {}
    if format_instructions is not None:
        extra_prompt_kwargs['format_instructions'] = format_instructions
    prompt = ZeroShotAgent.create_prompt(tools, prefix=formatted_prefix,
        suffix=suffix, input_variables=input_variables, **extra_prompt_kwargs)
    llm_chain = LLMChain(llm=llm, prompt=prompt, callback_manager=
        callback_manager, callbacks=callbacks)
    agent = ZeroShotAgent(llm_chain=llm_chain, allowed_tools=[tool.name for
        tool in tools], **kwargs)
    return AgentExecutor.from_agent_and_tools(agent=agent, tools=tools,
        callback_manager=callback_manager, callbacks=callbacks, verbose=
        verbose, max_iterations=max_iterations, max_execution_time=
        max_execution_time, early_stopping_method=early_stopping_method,
        **agent_executor_kwargs or {})
|
Construct a Spark SQL agent from an LLM and tools.
|
_format_func
|
self._validate_func(func)
return f'${func.value}'
|
def _format_func(self, func: Union[Operator, Comparator]) ->str:
    """Validate ``func`` and render it as a '$'-prefixed operator string."""
    self._validate_func(func)
    return '$' + str(func.value)
| null |
test_astradb_vectorstore_custom_params
|
"""Custom batch size and concurrency params."""
emb = SomeEmbeddings(dimension=2)
v_store = AstraDB(embedding=emb, collection_name='lc_test_c', token=os.
environ['ASTRA_DB_APPLICATION_TOKEN'], api_endpoint=os.environ[
'ASTRA_DB_API_ENDPOINT'], namespace=os.environ.get('ASTRA_DB_KEYSPACE'),
batch_size=17, bulk_insert_batch_concurrency=13,
bulk_insert_overwrite_concurrency=7, bulk_delete_concurrency=19)
try:
N = 50
texts = [str(i + 1 / 7.0) for i in range(N)]
ids = [('doc_%i' % i) for i in range(N)]
v_store.add_texts(texts=texts, ids=ids)
v_store.add_texts(texts=texts, ids=ids, batch_size=19,
batch_concurrency=7, overwrite_concurrency=13)
_ = v_store.delete(ids[:N // 2])
_ = v_store.delete(ids[N // 2:], concurrency=23)
finally:
v_store.delete_collection()
|
def test_astradb_vectorstore_custom_params(self) ->None:
    """Custom batch size and concurrency params."""
    embeddings = SomeEmbeddings(dimension=2)
    store = AstraDB(embedding=embeddings, collection_name='lc_test_c',
        token=os.environ['ASTRA_DB_APPLICATION_TOKEN'], api_endpoint=os.
        environ['ASTRA_DB_API_ENDPOINT'], namespace=os.environ.get(
        'ASTRA_DB_KEYSPACE'), batch_size=17, bulk_insert_batch_concurrency
        =13, bulk_insert_overwrite_concurrency=7, bulk_delete_concurrency=19)
    try:
        num_docs = 50
        texts = [str(idx + 1 / 7.0) for idx in range(num_docs)]
        ids = ['doc_%i' % idx for idx in range(num_docs)]
        store.add_texts(texts=texts, ids=ids)
        # per-call overrides must take precedence over constructor values
        store.add_texts(texts=texts, ids=ids, batch_size=19,
            batch_concurrency=7, overwrite_concurrency=13)
        _ = store.delete(ids[:num_docs // 2])
        _ = store.delete(ids[num_docs // 2:], concurrency=23)
    finally:
        store.delete_collection()
|
Custom batch size and concurrency params.
|
dispatch
|
"""Dispatcher function, dispatching tree type T to method _T."""
if isinstance(tree, list):
for t in tree:
self.dispatch(t)
return
meth = getattr(self, '_' + tree.__class__.__name__)
meth(tree)
|
def dispatch(self, tree):
    """Dispatcher function, dispatching tree type T to method _T."""
    if isinstance(tree, list):
        # lists of nodes are dispatched element by element
        for node in tree:
            self.dispatch(node)
        return
    handler = getattr(self, '_' + type(tree).__name__)
    handler(tree)
|
Dispatcher function, dispatching tree type T to method _T.
|
dependable_tiledb_import
|
"""Import tiledb-vector-search if available, otherwise raise error."""
try:
import tiledb as tiledb
import tiledb.vector_search as tiledb_vs
except ImportError:
raise ValueError(
'Could not import tiledb-vector-search python package. Please install it with `conda install -c tiledb tiledb-vector-search` or `pip install tiledb-vector-search`'
)
return tiledb_vs, tiledb
|
def dependable_tiledb_import() ->Any:
    """Import tiledb-vector-search if available, otherwise raise error."""
    try:
        import tiledb as tiledb
        import tiledb.vector_search as tiledb_vs
    except ImportError:
        raise ValueError(
            'Could not import tiledb-vector-search python package. Please install it with `conda install -c tiledb tiledb-vector-search` or `pip install tiledb-vector-search`'
            )
    else:
        return tiledb_vs, tiledb
|
Import tiledb-vector-search if available, otherwise raise error.
|
test_parse_without_language
|
llm_output = """I can use the `foo` tool to achieve the goal.
Action:
```
{
"action": "foo",
"action_input": "bar"
}
```
"""
action, action_input = get_action_and_input(llm_output)
assert action == 'foo'
assert action_input == 'bar'
|
def test_parse_without_language() ->None:
    """Regression test: get_action_and_input must parse an action block whose
    fenced code section carries no language tag after the opening backticks."""
    llm_output = """I can use the `foo` tool to achieve the goal.
Action:
```
{
"action": "foo",
"action_input": "bar"
}
```
"""
    action, action_input = get_action_and_input(llm_output)
    assert action == 'foo'
    assert action_input == 'bar'
| null |
model
|
"""For backwards compatibility."""
return self.llm
|
@property
def model(self) ->BaseChatModel:
    """Alias for ``self.llm``; kept for backwards compatibility."""
    llm = self.llm
    return llm
|
For backwards compatibility.
|
build_extra
|
"""Build extra kwargs from additional params that were passed in."""
all_required_field_names = {field.alias for field in cls.__fields__.values()}
extra = values.get('model_kwargs', {})
for field_name in list(values):
if field_name not in all_required_field_names:
if field_name in extra:
raise ValueError(f'Found {field_name} supplied twice.')
logger.warning(
f"""WARNING! {field_name} is not default parameter.
{field_name} was transferred to model_kwargs.
Please confirm that {field_name} is what you intended."""
)
extra[field_name] = values.pop(field_name)
values['model_kwargs'] = extra
return values
|
@root_validator(pre=True)
def build_extra(cls, values: Dict[str, Any]) ->Dict[str, Any]:
    """Build extra kwargs from additional params that were passed in."""
    declared = {field.alias for field in cls.__fields__.values()}
    extra = values.get('model_kwargs', {})
    for name in list(values):
        if name in declared:
            continue
        if name in extra:
            raise ValueError(f'Found {name} supplied twice.')
        logger.warning(
            f"""WARNING! {name} is not default parameter.
{name} was transferred to model_kwargs.
Please confirm that {name} is what you intended."""
            )
        # unknown field: move it out of the top-level values into model_kwargs
        extra[name] = values.pop(name)
    values['model_kwargs'] = extra
    return values
|
Build extra kwargs from additional params that were passed in.
|
on_llm_end
|
self.saved_things['generation'] = args[0]
|
def on_llm_end(self, *args: Any, **kwargs: Any) ->Any:
    """Stash the LLM generation (first positional argument) for later inspection."""
    generation = args[0]
    self.saved_things['generation'] = generation
| null |
lookup
|
"""Look up based on prompt and llm_string."""
hit_with_id = self.lookup_with_id(prompt, llm_string)
if hit_with_id is not None:
return hit_with_id[1]
else:
return None
|
def lookup(self, prompt: str, llm_string: str) ->Optional[RETURN_VAL_TYPE]:
    """Look up based on prompt and llm_string."""
    hit = self.lookup_with_id(prompt, llm_string)
    # lookup_with_id returns (id, value) or None; expose only the value
    return None if hit is None else hit[1]
|
Look up based on prompt and llm_string.
|
__add
|
try:
from elasticsearch.helpers import BulkIndexError, bulk
except ImportError:
raise ImportError(
'Could not import elasticsearch python package. Please install it with `pip install elasticsearch`.'
)
bulk_kwargs = bulk_kwargs or {}
ids = ids or [str(uuid.uuid4()) for _ in texts]
requests = []
if create_index_if_not_exists:
if embeddings:
dims_length = len(embeddings[0])
else:
dims_length = None
self._create_index_if_not_exists(index_name=self.index_name,
dims_length=dims_length)
for i, text in enumerate(texts):
metadata = metadatas[i] if metadatas else {}
request = {'_op_type': 'index', '_index': self.index_name, self.
query_field: text, 'metadata': metadata, '_id': ids[i]}
if embeddings:
request[self.vector_query_field] = embeddings[i]
requests.append(request)
if len(requests) > 0:
try:
success, failed = bulk(self.client, requests, stats_only=True,
refresh=refresh_indices, **bulk_kwargs)
logger.debug(
f'Added {success} and failed to add {failed} texts to index')
logger.debug(f'added texts {ids} to index')
return ids
except BulkIndexError as e:
logger.error(f'Error adding texts: {e}')
firstError = e.errors[0].get('index', {}).get('error', {})
logger.error(f"First error reason: {firstError.get('reason')}")
raise e
else:
logger.debug('No texts to add to index')
return []
|
def __add(self, texts: Iterable[str], embeddings: Optional[List[List[float]
    ]], metadatas: Optional[List[Dict[Any, Any]]]=None, ids: Optional[List[
    str]]=None, refresh_indices: bool=True, create_index_if_not_exists:
    bool=True, bulk_kwargs: Optional[Dict]=None, **kwargs: Any) ->List[str]:
    """Bulk-index ``texts`` (and optional ``embeddings``) into Elasticsearch,
    returning the document ids used (generated UUIDs when not supplied)."""
    try:
        from elasticsearch.helpers import BulkIndexError, bulk
    except ImportError:
        raise ImportError(
            'Could not import elasticsearch python package. Please install it with `pip install elasticsearch`.'
            )
    bulk_kwargs = bulk_kwargs or {}
    ids = ids or [str(uuid.uuid4()) for _ in texts]
    if create_index_if_not_exists:
        # dims are only known when embeddings were computed up front
        dims_length = len(embeddings[0]) if embeddings else None
        self._create_index_if_not_exists(index_name=self.index_name,
            dims_length=dims_length)
    requests = []
    for position, text in enumerate(texts):
        action = {'_op_type': 'index', '_index': self.index_name, self.
            query_field: text, 'metadata': metadatas[position] if metadatas
             else {}, '_id': ids[position]}
        if embeddings:
            action[self.vector_query_field] = embeddings[position]
        requests.append(action)
    if not requests:
        logger.debug('No texts to add to index')
        return []
    try:
        success, failed = bulk(self.client, requests, stats_only=True,
            refresh=refresh_indices, **bulk_kwargs)
        logger.debug(
            f'Added {success} and failed to add {failed} texts to index')
        logger.debug(f'added texts {ids} to index')
        return ids
    except BulkIndexError as e:
        logger.error(f'Error adding texts: {e}')
        firstError = e.errors[0].get('index', {}).get('error', {})
        logger.error(f"First error reason: {firstError.get('reason')}")
        raise e
| null |
get_output_schema
|
map_input_schema = self.mapper.get_input_schema(config)
map_output_schema = self.mapper.get_output_schema(config)
if not map_input_schema.__custom_root_type__ and not map_output_schema.__custom_root_type__:
return create_model('RunnableAssignOutput', **{k: (v.type_, v.default) for
s in (map_input_schema, map_output_schema) for k, v in s.__fields__
.items()})
elif not map_output_schema.__custom_root_type__:
return map_output_schema
return super().get_output_schema(config)
|
def get_output_schema(self, config: Optional[RunnableConfig]=None) ->Type[
    BaseModel]:
    """Output schema: merged mapper input+output fields when both are plain
    models; mapper output schema when only it is plain; else the default."""
    in_schema = self.mapper.get_input_schema(config)
    out_schema = self.mapper.get_output_schema(config)
    if not in_schema.__custom_root_type__ and not out_schema.__custom_root_type__:
        merged_fields = {k: (v.type_, v.default) for schema in (in_schema,
            out_schema) for k, v in schema.__fields__.items()}
        return create_model('RunnableAssignOutput', **merged_fields)
    if not out_schema.__custom_root_type__:
        return out_schema
    return super().get_output_schema(config)
| null |
test_joplin_loader
|
loader = JoplinLoader()
docs = loader.load()
assert isinstance(docs, list)
assert isinstance(docs[0].page_content, str)
assert isinstance(docs[0].metadata['source'], str)
assert isinstance(docs[0].metadata['title'], str)
|
def test_joplin_loader() ->None:
    """Smoke test: JoplinLoader yields documents with string content and metadata."""
    docs = JoplinLoader().load()
    assert isinstance(docs, list)
    first = docs[0]
    assert isinstance(first.page_content, str)
    assert isinstance(first.metadata['source'], str)
    assert isinstance(first.metadata['title'], str)
| null |
_import_huggingface_pipeline
|
from langchain_community.llms.huggingface_pipeline import HuggingFacePipeline
return HuggingFacePipeline
|
def _import_huggingface_pipeline() ->Any:
    """Lazily import and return the HuggingFacePipeline LLM class."""
    from langchain_community.llms.huggingface_pipeline import HuggingFacePipeline
    return HuggingFacePipeline
| null |
__init__
|
"""Instantiate a chat message history cache that uses Momento as a backend.
Note: to instantiate the cache client passed to MomentoChatMessageHistory,
you must have a Momento account at https://gomomento.com/.
Args:
session_id (str): The session ID to use for this chat session.
cache_client (CacheClient): The Momento cache client.
cache_name (str): The name of the cache to use to store the messages.
key_prefix (str, optional): The prefix to apply to the cache key.
Defaults to "message_store:".
ttl (Optional[timedelta], optional): The TTL to use for the messages.
Defaults to None, ie the default TTL of the cache will be used.
ensure_cache_exists (bool, optional): Create the cache if it doesn't exist.
Defaults to True.
Raises:
ImportError: Momento python package is not installed.
TypeError: cache_client is not of type momento.CacheClientObject
"""
try:
from momento import CacheClient
from momento.requests import CollectionTtl
except ImportError:
raise ImportError(
'Could not import momento python package. Please install it with `pip install momento`.'
)
if not isinstance(cache_client, CacheClient):
raise TypeError('cache_client must be a momento.CacheClient object.')
if ensure_cache_exists:
_ensure_cache_exists(cache_client, cache_name)
self.key = key_prefix + session_id
self.cache_client = cache_client
self.cache_name = cache_name
if ttl is not None:
self.ttl = CollectionTtl.of(ttl)
else:
self.ttl = CollectionTtl.from_cache_ttl()
|
def __init__(self, session_id: str, cache_client: momento.CacheClient,
    cache_name: str, *, key_prefix: str='message_store:', ttl: Optional[
    timedelta]=None, ensure_cache_exists: bool=True):
    """Instantiate a chat message history cache that uses Momento as a backend.
    Note: to instantiate the cache client passed to MomentoChatMessageHistory,
    you must have a Momento account at https://gomomento.com/.
    Args:
        session_id (str): The session ID to use for this chat session.
        cache_client (CacheClient): The Momento cache client.
        cache_name (str): The name of the cache to use to store the messages.
        key_prefix (str, optional): The prefix to apply to the cache key.
            Defaults to "message_store:".
        ttl (Optional[timedelta], optional): The TTL to use for the messages.
            Defaults to None, ie the default TTL of the cache will be used.
        ensure_cache_exists (bool, optional): Create the cache if it doesn't
            exist. Defaults to True.
    Raises:
        ImportError: Momento python package is not installed.
        TypeError: cache_client is not of type momento.CacheClientObject
    """
    try:
        from momento import CacheClient
        from momento.requests import CollectionTtl
    except ImportError:
        raise ImportError(
            'Could not import momento python package. Please install it with `pip install momento`.'
            )
    if not isinstance(cache_client, CacheClient):
        raise TypeError('cache_client must be a momento.CacheClient object.')
    if ensure_cache_exists:
        _ensure_cache_exists(cache_client, cache_name)
    # messages for one session share a single cache key
    self.key = key_prefix + session_id
    self.cache_client = cache_client
    self.cache_name = cache_name
    self.ttl = CollectionTtl.of(ttl
        ) if ttl is not None else CollectionTtl.from_cache_ttl()
|
Instantiate a chat message history cache that uses Momento as a backend.
Note: to instantiate the cache client passed to MomentoChatMessageHistory,
you must have a Momento account at https://gomomento.com/.
Args:
session_id (str): The session ID to use for this chat session.
cache_client (CacheClient): The Momento cache client.
cache_name (str): The name of the cache to use to store the messages.
key_prefix (str, optional): The prefix to apply to the cache key.
Defaults to "message_store:".
ttl (Optional[timedelta], optional): The TTL to use for the messages.
Defaults to None, ie the default TTL of the cache will be used.
ensure_cache_exists (bool, optional): Create the cache if it doesn't exist.
Defaults to True.
Raises:
ImportError: Momento python package is not installed.
TypeError: cache_client is not of type momento.CacheClientObject
|
test_edenai_call
|
"""Test simple call to edenai's text moderation endpoint."""
text_moderation = EdenAiTextModerationTool(providers=['openai'], language='en')
output = text_moderation('i hate you')
assert text_moderation.name == 'edenai_explicit_content_detection_text'
assert text_moderation.feature == 'text'
assert text_moderation.subfeature == 'moderation'
assert isinstance(output, str)
|
def test_edenai_call() ->None:
    """Test simple call to edenai's text moderation endpoint."""
    tool = EdenAiTextModerationTool(providers=['openai'], language='en')
    result = tool('i hate you')
    assert tool.name == 'edenai_explicit_content_detection_text'
    assert tool.feature == 'text'
    assert tool.subfeature == 'moderation'
    assert isinstance(result, str)
|
Test simple call to edenai's text moderation endpoint.
|
anonymizer_mapping
|
"""Return the anonymizer mapping
This is just the reverse version of the deanonymizer mapping."""
return {key: {v: k for k, v in inner_dict.items()} for key, inner_dict in
self.deanonymizer_mapping.items()}
|
@property
def anonymizer_mapping(self) ->MappingDataType:
    """Return the anonymizer mapping
    This is just the reverse version of the deanonymizer mapping."""
    reversed_mapping = {}
    for entity_type, inner in self.deanonymizer_mapping.items():
        reversed_mapping[entity_type] = {original: token for token,
            original in inner.items()}
    return reversed_mapping
|
Return the anonymizer mapping
This is just the reverse version of the deanonymizer mapping.
|
test_react_chain_bad_action
|
"""Test react chain when bad action given."""
bad_action_name = 'BadAction'
responses = [f"""I'm turning evil
Action: {bad_action_name}[langchain]""",
"""Oh well
Action: Finish[curses foiled again]"""]
fake_llm = FakeListLLM(responses=responses)
react_chain = ReActChain(llm=fake_llm, docstore=FakeDocstore())
output = react_chain.run('when was langchain made')
assert output == 'curses foiled again'
|
def test_react_chain_bad_action() ->None:
    """Test react chain when bad action given."""
    # first response names a tool that does not exist; the chain must recover
    responses = ["I'm turning evil\nAction: BadAction[langchain]",
        'Oh well\nAction: Finish[curses foiled again]']
    react_chain = ReActChain(llm=FakeListLLM(responses=responses),
        docstore=FakeDocstore())
    assert react_chain.run('when was langchain made') == 'curses foiled again'
|
Test react chain when bad action given.
|
get_model_details
|
"""Get more meta-details about a model retrieved by a given name"""
if model is None:
model = self.model
model_key = self.client._get_invoke_url(model).split('/')[-1]
known_fns = self.client.available_functions
fn_spec = [f for f in known_fns if f.get('id') == model_key][0]
return fn_spec
|
def get_model_details(self, model: Optional[str]=None) ->dict:
    """Get more meta-details about a model retrieved by a given name"""
    model = model if model is not None else self.model
    # the model key is the trailing path segment of the invoke URL
    model_key = self.client._get_invoke_url(model).split('/')[-1]
    matching = [fn for fn in self.client.available_functions if fn.get(
        'id') == model_key]
    return matching[0]
|
Get more meta-details about a model retrieved by a given name
|
load
|
"""Load and return documents from the JSON file."""
docs: List[Document] = []
if self._json_lines:
with self.file_path.open(encoding='utf-8') as f:
for line in f:
line = line.strip()
if line:
self._parse(line, docs)
else:
self._parse(self.file_path.read_text(encoding='utf-8'), docs)
return docs
|
def load(self) ->List[Document]:
    """Load and return documents from the JSON file."""
    docs: List[Document] = []
    if self._json_lines:
        # JSON Lines: parse each non-blank line independently
        with self.file_path.open(encoding='utf-8') as handle:
            for raw_line in handle:
                stripped = raw_line.strip()
                if stripped:
                    self._parse(stripped, docs)
    else:
        self._parse(self.file_path.read_text(encoding='utf-8'), docs)
    return docs
|
Load and return documents from the JSON file.
|
_import_bing_search_tool_BingSearchRun
|
from langchain_community.tools.bing_search.tool import BingSearchRun
return BingSearchRun
|
def _import_bing_search_tool_BingSearchRun() ->Any:
    """Lazily import and return the BingSearchRun tool class."""
    from langchain_community.tools.bing_search.tool import BingSearchRun
    return BingSearchRun
| null |
config_specs
|
return super().config_specs + [ConfigurableFieldSpec(id=id_, annotation=
Callable[[], Any]) for id_ in self.ids]
|
@property
def config_specs(self) ->List[ConfigurableFieldSpec]:
    """Parent config specs plus one zero-arg-callable spec per stored id."""
    extra_specs = [ConfigurableFieldSpec(id=spec_id, annotation=Callable[[
        ], Any]) for spec_id in self.ids]
    return super().config_specs + extra_specs
| null |
inference_fn
|
"""Inference function for testing."""
return pipeline(prompt)[0]['generated_text']
|
def inference_fn(pipeline: Any, prompt: str, stop: Optional[List[str]]=None
    ) ->str:
    """Inference function for testing."""
    outputs = pipeline(prompt)
    return outputs[0]['generated_text']
|
Inference function for testing.
|
save
|
output = {'output': input_output.pop('output')}
memory.save_context(input_output, output)
return output['output']
|
def save(input_output):
    """Pop 'output' from input_output, persist the pair to memory, and return the output value."""
    result = input_output.pop('output')
    memory.save_context(input_output, {'output': result})
    return result
| null |
test_concurrent_language_loader_for_python
|
"""Test Python ConcurrentLoader with parser enabled."""
file_path = Path(__file__).parent.parent.parent / 'examples'
loader = ConcurrentLoader.from_filesystem(file_path, glob='hello_world.py',
parser=LanguageParser(parser_threshold=5))
docs = loader.load()
assert len(docs) == 2
|
def test_concurrent_language_loader_for_python() ->None:
    """Test Python ConcurrentLoader with parser enabled."""
    examples_dir = Path(__file__).parent.parent.parent / 'examples'
    loader = ConcurrentLoader.from_filesystem(examples_dir, glob=
        'hello_world.py', parser=LanguageParser(parser_threshold=5))
    # the parser splits the example file into two documents
    assert len(loader.load()) == 2
|
Test Python ConcurrentLoader with parser enabled.
|
_import_searchapi_tool_SearchAPIRun
|
from langchain_community.tools.searchapi.tool import SearchAPIRun
return SearchAPIRun
|
def _import_searchapi_tool_SearchAPIRun() ->Any:
    """Lazily import and return the SearchAPIRun tool class."""
    from langchain_community.tools.searchapi.tool import SearchAPIRun
    return SearchAPIRun
| null |
create_openai_functions_agent
|
"""Create an agent that uses OpenAI function calling.
Examples:
Creating an agent with no memory
.. code-block:: python
from langchain_community.chat_models import ChatOpenAI
from langchain.agents import AgentExecutor, create_openai_functions_agent
from langchain import hub
prompt = hub.pull("hwchase17/openai-functions-agent")
model = ChatOpenAI()
tools = ...
agent = create_openai_functions_agent(model, tools, prompt)
agent_executor = AgentExecutor(agent=agent, tools=tools)
agent_executor.invoke({"input": "hi"})
# Using with chat history
from langchain_core.messages import AIMessage, HumanMessage
agent_executor.invoke(
{
"input": "what's my name?",
"chat_history": [
HumanMessage(content="hi! my name is bob"),
AIMessage(content="Hello Bob! How can I assist you today?"),
],
}
)
Args:
llm: LLM to use as the agent. Should work with OpenAI function calling,
so either be an OpenAI model that supports that or a wrapper of
a different model that adds in equivalent support.
tools: Tools this agent has access to.
prompt: The prompt to use, must have an input key of `agent_scratchpad`.
Returns:
A runnable sequence representing an agent. It takes as input all the same input
variables as the prompt passed in does. It returns as output either an
AgentAction or AgentFinish.
"""
if 'agent_scratchpad' not in prompt.input_variables:
raise ValueError(
f"Prompt must have input variable `agent_scratchpad`, but wasn't found. Found {prompt.input_variables} instead."
)
llm_with_tools = llm.bind(functions=[format_tool_to_openai_function(t) for
t in tools])
agent = RunnablePassthrough.assign(agent_scratchpad=lambda x:
format_to_openai_function_messages(x['intermediate_steps'])
) | prompt | llm_with_tools | OpenAIFunctionsAgentOutputParser()
return agent
|
def create_openai_functions_agent(llm: BaseLanguageModel, tools: Sequence[
    BaseTool], prompt: ChatPromptTemplate) ->Runnable:
    """Create an agent that uses OpenAI function calling.
    Examples:
        Creating an agent with no memory
        .. code-block:: python
            from langchain_community.chat_models import ChatOpenAI
            from langchain.agents import AgentExecutor, create_openai_functions_agent
            from langchain import hub
            prompt = hub.pull("hwchase17/openai-functions-agent")
            model = ChatOpenAI()
            tools = ...
            agent = create_openai_functions_agent(model, tools, prompt)
            agent_executor = AgentExecutor(agent=agent, tools=tools)
            agent_executor.invoke({"input": "hi"})
            # Using with chat history
            from langchain_core.messages import AIMessage, HumanMessage
            agent_executor.invoke(
                {
                    "input": "what's my name?",
                    "chat_history": [
                        HumanMessage(content="hi! my name is bob"),
                        AIMessage(content="Hello Bob! How can I assist you today?"),
                    ],
                }
            )
    Args:
        llm: LLM to use as the agent. Should work with OpenAI function calling,
            so either be an OpenAI model that supports that or a wrapper of
            a different model that adds in equivalent support.
        tools: Tools this agent has access to.
        prompt: The prompt to use, must have an input key of `agent_scratchpad`.
    Returns:
        A runnable sequence representing an agent. It takes as input all the same input
        variables as the prompt passed in does. It returns as output either an
        AgentAction or AgentFinish.
    """
    if 'agent_scratchpad' not in prompt.input_variables:
        raise ValueError(
            f"Prompt must have input variable `agent_scratchpad`, but wasn't found. Found {prompt.input_variables} instead."
            )
    openai_functions = [format_tool_to_openai_function(tool) for tool in tools]
    llm_with_tools = llm.bind(functions=openai_functions)

    def _scratchpad(x):
        # render intermediate (action, observation) steps as OpenAI messages
        return format_to_openai_function_messages(x['intermediate_steps'])
    return RunnablePassthrough.assign(agent_scratchpad=_scratchpad
        ) | prompt | llm_with_tools | OpenAIFunctionsAgentOutputParser()
|
Create an agent that uses OpenAI function calling.
Examples:
Creating an agent with no memory
.. code-block:: python
from langchain_community.chat_models import ChatOpenAI
from langchain.agents import AgentExecutor, create_openai_functions_agent
from langchain import hub
prompt = hub.pull("hwchase17/openai-functions-agent")
model = ChatOpenAI()
tools = ...
agent = create_openai_functions_agent(model, tools, prompt)
agent_executor = AgentExecutor(agent=agent, tools=tools)
agent_executor.invoke({"input": "hi"})
# Using with chat history
from langchain_core.messages import AIMessage, HumanMessage
agent_executor.invoke(
{
"input": "what's my name?",
"chat_history": [
HumanMessage(content="hi! my name is bob"),
AIMessage(content="Hello Bob! How can I assist you today?"),
],
}
)
Args:
llm: LLM to use as the agent. Should work with OpenAI function calling,
so either be an OpenAI model that supports that or a wrapper of
a different model that adds in equivalent support.
tools: Tools this agent has access to.
prompt: The prompt to use, must have an input key of `agent_scratchpad`.
Returns:
A runnable sequence representing an agent. It takes as input all the same input
variables as the prompt passed in does. It returns as output either an
AgentAction or AgentFinish.
|
_astream
|
raise NotImplementedError()
|
def _astream(self, prompt: str, stop: Optional[List[str]]=None, run_manager:
    Optional[AsyncCallbackManagerForLLMRun]=None, **kwargs: Any
    ) ->AsyncIterator[GenerationChunk]:
    """Async streaming is not supported by this LLM."""
    raise NotImplementedError()
| null |
_load_documents_from_paths
|
"""Load documents from a list of Dropbox file paths."""
if not self.dropbox_file_paths:
raise ValueError('file_paths must be set')
return [doc for doc in (self._load_file_from_path(file_path) for file_path in
self.dropbox_file_paths) if doc is not None]
|
def _load_documents_from_paths(self) ->List[Document]:
    """Load documents from a list of Dropbox file paths.

    Paths that fail to load (loader returned ``None``) are silently dropped.
    """
    if not self.dropbox_file_paths:
        raise ValueError('file_paths must be set')
    loaded = (self._load_file_from_path(path) for path in self.
        dropbox_file_paths)
    return [document for document in loaded if document is not None]
|
Load documents from a list of Dropbox file paths.
|
validate_imports_and_browser_provided
|
"""Check that the arguments are valid."""
lazy_import_playwright_browsers()
if values.get('async_browser') is None and values.get('sync_browser') is None:
raise ValueError('Either async_browser or sync_browser must be specified.')
return values
|
@root_validator
def validate_imports_and_browser_provided(cls, values: dict) ->dict:
    """Check that the arguments are valid.

    Ensures playwright is importable and that at least one of the two
    browser handles (sync or async) was supplied.
    """
    lazy_import_playwright_browsers()
    has_async = values.get('async_browser') is not None
    has_sync = values.get('sync_browser') is not None
    if not (has_async or has_sync):
        raise ValueError(
            'Either async_browser or sync_browser must be specified.')
    return values
|
Check that the arguments are valid.
|
_import_bearly_tool
|
from langchain_community.tools.bearly.tool import BearlyInterpreterTool
return BearlyInterpreterTool
|
def _import_bearly_tool() ->Any:
    """Lazily import the Bearly interpreter tool to avoid a hard dependency."""
    from langchain_community.tools.bearly.tool import (
        BearlyInterpreterTool as tool_cls,
    )
    return tool_cls
| null |
_import_baseten
|
from langchain_community.llms.baseten import Baseten
return Baseten
|
def _import_baseten() ->Any:
    """Lazily import the Baseten LLM class to avoid a hard dependency."""
    from langchain_community.llms.baseten import Baseten as llm_cls
    return llm_cls
| null |
validate_params
|
"""Validate that custom searx params are merged with default ones."""
user_params = values['params']
default = _get_default_params()
values['params'] = {**default, **user_params}
engines = values.get('engines')
if engines:
values['params']['engines'] = ','.join(engines)
categories = values.get('categories')
if categories:
values['params']['categories'] = ','.join(categories)
searx_host = get_from_dict_or_env(values, 'searx_host', 'SEARX_HOST')
if not searx_host.startswith('http'):
print(
f'Warning: missing the url scheme on host ! assuming secure https://{searx_host} '
)
searx_host = 'https://' + searx_host
elif searx_host.startswith('http://'):
values['unsecure'] = True
cls.disable_ssl_warnings(True)
values['searx_host'] = searx_host
return values
|
@root_validator()
def validate_params(cls, values: Dict) ->Dict:
    """Validate that custom searx params are merged with default ones."""
    # User-supplied params override the library defaults key-by-key.
    user_params = values['params']
    default = _get_default_params()
    values['params'] = {**default, **user_params}
    engines = values.get('engines')
    if engines:
        # SearxNG expects comma-separated strings, not Python lists.
        values['params']['engines'] = ','.join(engines)
    categories = values.get('categories')
    if categories:
        values['params']['categories'] = ','.join(categories)
    # Host may come from the field or the SEARX_HOST environment variable.
    searx_host = get_from_dict_or_env(values, 'searx_host', 'SEARX_HOST')
    if not searx_host.startswith('http'):
        # No scheme given: assume https.
        print(
            f'Warning: missing the url scheme on host ! assuming secure https://{searx_host} '
            )
        searx_host = 'https://' + searx_host
    elif searx_host.startswith('http://'):
        # Explicit http:// marks the instance as unsecured; silence SSL warnings.
        values['unsecure'] = True
        cls.disable_ssl_warnings(True)
    values['searx_host'] = searx_host
    return values
|
Validate that custom searx params are merged with default ones.
|
test_embedding_documents_1
|
documents = ['foo bar']
embedding = ErnieEmbeddings()
output = embedding.embed_documents(documents)
assert len(output) == 1
assert len(output[0]) == 384
|
def test_embedding_documents_1() ->None:
    """Embed a single document and check count and dimensionality (384)."""
    embedder = ErnieEmbeddings()
    vectors = embedder.embed_documents(['foo bar'])
    assert len(vectors) == 1
    assert len(vectors[0]) == 384
| null |
test_nested_list_features_throws
|
with pytest.raises(ValueError):
base.embed({'test_namespace': [[1, 2], [3, 4]]}, MockEncoder())
|
@pytest.mark.requires('vowpal_wabbit_next')
def test_nested_list_features_throws() ->None:
    """Nested (2-D) feature lists are invalid input and must raise ValueError."""
    with pytest.raises(ValueError):
        base.embed({'test_namespace': [[1, 2], [3, 4]]}, MockEncoder())
| null |
InputType
|
return cast(Type[Input], self.custom_input_type
) if self.custom_input_type is not None else self.bound.InputType
|
@property
def InputType(self) ->Type[Input]:
    """Input type: the custom override when set, else the bound runnable's."""
    if self.custom_input_type is not None:
        return cast(Type[Input], self.custom_input_type)
    return self.bound.InputType
| null |
validate_environment
|
"""Validate that awadb library is installed."""
try:
from awadb import AwaEmbedding
except ImportError as exc:
raise ImportError(
'Could not import awadb library. Please install it with `pip install awadb`'
) from exc
values['client'] = AwaEmbedding()
return values
|
@root_validator()
def validate_environment(cls, values: Dict) ->Dict:
    """Validate that awadb library is installed."""
    try:
        from awadb import AwaEmbedding
    except ImportError as exc:
        raise ImportError(
            'Could not import awadb library. Please install it with `pip install awadb`'
            ) from exc
    # Cache the embedding client on the model so later calls can reuse it.
    values['client'] = AwaEmbedding()
    return values
|
Validate that awadb library is installed.
|
MilvusRetreiver
|
"""Deprecated MilvusRetreiver. Please use MilvusRetriever ('i' before 'e') instead.
Args:
*args:
**kwargs:
Returns:
MilvusRetriever
"""
warnings.warn(
"MilvusRetreiver will be deprecated in the future. Please use MilvusRetriever ('i' before 'e') instead."
, DeprecationWarning)
return MilvusRetriever(*args, **kwargs)
|
def MilvusRetreiver(*args: Any, **kwargs: Any) ->MilvusRetriever:
    """Deprecated MilvusRetreiver. Please use MilvusRetriever ('i' before 'e') instead.
    Args:
        *args:
        **kwargs:
    Returns:
        MilvusRetriever
    """
    # Emit the deprecation warning, then delegate to the correctly-spelled class.
    message = (
        "MilvusRetreiver will be deprecated in the future. Please use MilvusRetriever ('i' before 'e') instead."
        )
    warnings.warn(message, DeprecationWarning)
    return MilvusRetriever(*args, **kwargs)
|
Deprecated MilvusRetreiver. Please use MilvusRetriever ('i' before 'e') instead.
Args:
*args:
**kwargs:
Returns:
MilvusRetriever
|
test_chroma_with_persistence
|
"""Test end to end construction and search, with persistence."""
chroma_persist_dir = './tests/persist_dir'
collection_name = 'test_collection'
texts = ['foo', 'bar', 'baz']
docsearch = Chroma.from_texts(collection_name=collection_name, texts=texts,
embedding=FakeEmbeddings(), persist_directory=chroma_persist_dir)
output = docsearch.similarity_search('foo', k=1)
assert output == [Document(page_content='foo')]
docsearch.persist()
docsearch = Chroma(collection_name=collection_name, embedding_function=
FakeEmbeddings(), persist_directory=chroma_persist_dir)
output = docsearch.similarity_search('foo', k=1)
docsearch.delete_collection()
|
def test_chroma_with_persistence() ->None:
    """Test end to end construction and search, with persistence.

    Builds a persisted collection, searches it, reloads it from disk, and
    verifies the reloaded collection returns the same result.
    """
    chroma_persist_dir = './tests/persist_dir'
    collection_name = 'test_collection'
    texts = ['foo', 'bar', 'baz']
    docsearch = Chroma.from_texts(collection_name=collection_name, texts=
        texts, embedding=FakeEmbeddings(), persist_directory=chroma_persist_dir
        )
    output = docsearch.similarity_search('foo', k=1)
    assert output == [Document(page_content='foo')]
    docsearch.persist()
    # Reload the persisted collection from disk.
    docsearch = Chroma(collection_name=collection_name, embedding_function=
        FakeEmbeddings(), persist_directory=chroma_persist_dir)
    output = docsearch.similarity_search('foo', k=1)
    # Bug fix: the post-reload result was previously never asserted, so
    # persistence itself was not actually verified.
    assert output == [Document(page_content='foo')]
    docsearch.delete_collection()
|
Test end to end construction and search, with persistence.
|
create_prompt
|
"""Create a prompt for this class."""
|
@classmethod
@abstractmethod
def create_prompt(cls, tools: Sequence[BaseTool]) ->BasePromptTemplate:
    """Create a prompt for this class.

    Subclasses must build a prompt template that exposes the given tools.
    """
|
Create a prompt for this class.
|
__eq__
|
"""Check for LLMResult equality by ignoring any metadata related to runs."""
if not isinstance(other, LLMResult):
return NotImplemented
return self.generations == other.generations and self.llm_output == other.llm_output
|
def __eq__(self, other: object) ->bool:
    """Check for LLMResult equality by ignoring any metadata related to runs."""
    if not isinstance(other, LLMResult):
        return NotImplemented
    same_generations = self.generations == other.generations
    same_llm_output = self.llm_output == other.llm_output
    return same_generations and same_llm_output
|
Check for LLMResult equality by ignoring any metadata related to runs.
|
on_chain_error
|
"""Run when chain errors."""
|
def on_chain_error(self, error: BaseException, *, run_id: UUID,
    parent_run_id: Optional[UUID]=None, **kwargs: Any) ->Any:
    """Run when chain errors.

    No-op base implementation; subclasses override to handle the error.
    """
|
Run when chain errors.
|
query
|
"""Query FalkorDB database."""
try:
data = self._graph.query(query, params)
return data.result_set
except Exception as e:
raise ValueError(f'Generated Cypher Statement is not valid\n{e}')
|
def query(self, query: str, params: Optional[dict]=None) ->List[Dict[str, Any]
    ]:
    """Query FalkorDB database.

    Args:
        query: Cypher statement to execute.
        params: Optional query parameters (defaults to an empty dict).

    Returns:
        The query's result set as a list of rows.

    Raises:
        ValueError: if the statement fails to execute.
    """
    # Bug fix: the default was a shared mutable dict (``params: dict={}``);
    # use None-and-replace so calls never share state.
    if params is None:
        params = {}
    try:
        data = self._graph.query(query, params)
        return data.result_set
    except Exception as e:
        # Chain the original exception so the root cause stays visible.
        raise ValueError(f'Generated Cypher Statement is not valid\n{e}'
            ) from e
|
Query FalkorDB database.
|
authenticate
|
"""Authenticate using the Amadeus API"""
try:
from amadeus import Client
except ImportError as e:
raise ImportError(
'Cannot import amadeus. Please install the package with `pip install amadeus`.'
) from e
if 'AMADEUS_CLIENT_ID' in os.environ and 'AMADEUS_CLIENT_SECRET' in os.environ:
client_id = os.environ['AMADEUS_CLIENT_ID']
client_secret = os.environ['AMADEUS_CLIENT_SECRET']
else:
logger.error(
'Error: The AMADEUS_CLIENT_ID and AMADEUS_CLIENT_SECRET environmental variables have not been set. Visit the following link on how to acquire these authorization tokens: https://developers.amadeus.com/register'
)
return None
hostname = 'test'
if 'AMADEUS_HOSTNAME' in os.environ:
hostname = os.environ['AMADEUS_HOSTNAME']
client = Client(client_id=client_id, client_secret=client_secret, hostname=
hostname)
return client
|
def authenticate() ->Client:
    """Authenticate using the Amadeus API

    Reads AMADEUS_CLIENT_ID / AMADEUS_CLIENT_SECRET (and optionally
    AMADEUS_HOSTNAME, default 'test') from the environment.

    NOTE(review): despite the ``->Client`` annotation this returns None when
    credentials are missing — callers should check for None; consider
    ``Optional[Client]``.
    """
    try:
        from amadeus import Client
    except ImportError as e:
        raise ImportError(
            'Cannot import amadeus. Please install the package with `pip install amadeus`.'
            ) from e
    if ('AMADEUS_CLIENT_ID' in os.environ and 'AMADEUS_CLIENT_SECRET' in os
        .environ):
        client_id = os.environ['AMADEUS_CLIENT_ID']
        client_secret = os.environ['AMADEUS_CLIENT_SECRET']
    else:
        logger.error(
            'Error: The AMADEUS_CLIENT_ID and AMADEUS_CLIENT_SECRET environmental variables have not been set. Visit the following link on how to acquire these authorization tokens: https://developers.amadeus.com/register'
            )
        return None
    # 'test' targets Amadeus' sandbox environment unless overridden.
    hostname = 'test'
    if 'AMADEUS_HOSTNAME' in os.environ:
        hostname = os.environ['AMADEUS_HOSTNAME']
    client = Client(client_id=client_id, client_secret=client_secret,
        hostname=hostname)
    return client
|
Authenticate using the Amadeus API
|
_List
|
self.write('[')
interleave(lambda : self.write(', '), self.dispatch, t.elts)
self.write(']')
|
def _List(self, t):
    """Emit a list display: '[', comma-separated elements, ']'."""
    self.write('[')
    # interleave writes ', ' between (not after) the dispatched elements.
    interleave(lambda : self.write(', '), self.dispatch, t.elts)
    self.write(']')
| null |
test_format_headers_access_token
|
"""Test that the action headers is being created correctly."""
tool = ZapierNLARunAction(action_id='test', zapier_description='test',
params_schema={'test': 'test'}, api_wrapper=ZapierNLAWrapper(
zapier_nla_oauth_access_token='test'))
headers = tool.api_wrapper._format_headers()
assert headers['Content-Type'] == 'application/json'
assert headers['Accept'] == 'application/json'
assert headers['Authorization'] == 'Bearer test'
|
def test_format_headers_access_token() ->None:
    """Test that the action headers is being created correctly."""
    wrapper = ZapierNLAWrapper(zapier_nla_oauth_access_token='test')
    tool = ZapierNLARunAction(action_id='test', zapier_description='test',
        params_schema={'test': 'test'}, api_wrapper=wrapper)
    headers = tool.api_wrapper._format_headers()
    expected = {'Content-Type': 'application/json', 'Accept':
        'application/json', 'Authorization': 'Bearer test'}
    for key, value in expected.items():
        assert headers[key] == value
|
Test that the action headers is being created correctly.
|
_create_retry_decorator
|
from gpt_router import exceptions
errors = [exceptions.GPTRouterApiTimeoutError, exceptions.
GPTRouterInternalServerError, exceptions.GPTRouterNotAvailableError,
exceptions.GPTRouterTooManyRequestsError]
return create_base_retry_decorator(error_types=errors, max_retries=llm.
max_retries, run_manager=run_manager)
|
def _create_retry_decorator(llm: GPTRouter, run_manager: Optional[Union[
    AsyncCallbackManagerForLLMRun, CallbackManagerForLLMRun]]=None) ->Callable[
    [Any], Any]:
    """Build a retry decorator for transient GPTRouter API failures."""
    from gpt_router import exceptions
    # Retry only errors that are plausibly transient: timeouts, 5xx,
    # service unavailable, and rate limiting.
    errors = [exceptions.GPTRouterApiTimeoutError, exceptions.
        GPTRouterInternalServerError, exceptions.GPTRouterNotAvailableError,
        exceptions.GPTRouterTooManyRequestsError]
    return create_base_retry_decorator(error_types=errors, max_retries=llm.
        max_retries, run_manager=run_manager)
| null |
add_files
|
"""
Vectara provides a way to add documents directly via our API where
pre-processing and chunking occurs internally in an optimal way
This method provides a way to use that API in LangChain
Args:
files_list: Iterable of strings, each representing a local file path.
Files could be text, HTML, PDF, markdown, doc/docx, ppt/pptx, etc.
see API docs for full list
metadatas: Optional list of metadatas associated with each file
Returns:
List of ids associated with each of the files indexed
"""
doc_ids = []
for inx, file in enumerate(files_list):
if not os.path.exists(file):
logger.error(f'File {file} does not exist, skipping')
continue
md = metadatas[inx] if metadatas else {}
files: dict = {'file': (file, open(file, 'rb')), 'doc_metadata': json.
dumps(md)}
headers = self._get_post_headers()
headers.pop('Content-Type')
response = self._session.post(
f'https://api.vectara.io/upload?c={self._vectara_customer_id}&o={self._vectara_corpus_id}&d=True'
, files=files, verify=True, headers=headers, timeout=self.
vectara_api_timeout)
if response.status_code == 409:
doc_id = response.json()['document']['documentId']
logger.info(
f'File {file} already exists on Vectara (doc_id={doc_id}), skipping'
)
elif response.status_code == 200:
doc_id = response.json()['document']['documentId']
doc_ids.append(doc_id)
else:
logger.info(f'Error indexing file {file}: {response.json()}')
return doc_ids
|
def add_files(self, files_list: Iterable[str], metadatas: Optional[List[
    dict]]=None, **kwargs: Any) ->List[str]:
    """
    Vectara provides a way to add documents directly via our API where
    pre-processing and chunking occurs internally in an optimal way
    This method provides a way to use that API in LangChain
    Args:
        files_list: Iterable of strings, each representing a local file path.
            Files could be text, HTML, PDF, markdown, doc/docx, ppt/pptx, etc.
            see API docs for full list
        metadatas: Optional list of metadatas associated with each file
    Returns:
        List of ids associated with each of the files indexed
    """
    doc_ids = []
    for inx, file in enumerate(files_list):
        if not os.path.exists(file):
            logger.error(f'File {file} does not exist, skipping')
            continue
        md = metadatas[inx] if metadatas else {}
        headers = self._get_post_headers()
        # Let requests set the multipart boundary; a preset Content-Type
        # would break the upload. pop() with default so a missing key is fine.
        headers.pop('Content-Type', None)
        # Bug fix: use a context manager so the file handle is closed even
        # when the request raises (previously the handle leaked).
        with open(file, 'rb') as file_obj:
            files: dict = {'file': (file, file_obj), 'doc_metadata': json.
                dumps(md)}
            response = self._session.post(
                f'https://api.vectara.io/upload?c={self._vectara_customer_id}&o={self._vectara_corpus_id}&d=True'
                , files=files, verify=True, headers=headers, timeout=self.
                vectara_api_timeout)
        if response.status_code == 409:
            # Already indexed: report and skip, not an error.
            doc_id = response.json()['document']['documentId']
            logger.info(
                f'File {file} already exists on Vectara (doc_id={doc_id}), skipping'
                )
        elif response.status_code == 200:
            doc_id = response.json()['document']['documentId']
            doc_ids.append(doc_id)
        else:
            # Genuine failure: log at error level (was logger.info).
            logger.error(f'Error indexing file {file}: {response.json()}')
    return doc_ids
|
Vectara provides a way to add documents directly via our API where
pre-processing and chunking occurs internally in an optimal way
This method provides a way to use that API in LangChain
Args:
files_list: Iterable of strings, each representing a local file path.
Files could be text, HTML, PDF, markdown, doc/docx, ppt/pptx, etc.
see API docs for full list
metadatas: Optional list of metadatas associated with each file
Returns:
List of ids associated with each of the files indexed
|
format_prompt
|
"""Create Chat Messages."""
return StringPromptValue(text=self.format(**kwargs))
|
def format_prompt(self, **kwargs: Any) ->PromptValue:
    """Format the template and wrap the result as a string prompt value."""
    text = self.format(**kwargs)
    return StringPromptValue(text=text)
|
Create Chat Messages.
|
_sort_entities
|
sorted_nodes = self._networkx_wrapper.get_topological_sort()
self.causal_operations.entities.sort(key=lambda x: sorted_nodes.index(x.name))
|
def _sort_entities(self) ->None:
    """Reorder causal entities in place to match the graph's topological order."""
    order = self._networkx_wrapper.get_topological_sort()
    self.causal_operations.entities.sort(key=lambda ent: order.index(ent.name))
| null |
stream
|
yield from self.bound.stream(input, self._merge_configs(config), **{**self.
kwargs, **kwargs})
|
def stream(self, input: Input, config: Optional[RunnableConfig]=None, **
    kwargs: Optional[Any]) ->Iterator[Output]:
    """Stream chunks from the bound runnable with merged config and kwargs.

    Call-site kwargs override the bound kwargs on key collisions.
    """
    merged_kwargs = {**self.kwargs, **kwargs}
    merged_config = self._merge_configs(config)
    for chunk in self.bound.stream(input, merged_config, **merged_kwargs):
        yield chunk
| null |
test_agent_tool_return_direct
|
"""Test agent using tools that return directly."""
tool = 'Search'
responses = [f"""FooBarBaz
Action: {tool}
Action Input: misalignment""",
"""Oh well
Final Answer: curses foiled again"""]
fake_llm = FakeListLLM(responses=responses)
tools = [Tool(name='Search', func=lambda x: x, description=
'Useful for searching', return_direct=True)]
agent = initialize_agent(tools, fake_llm, agent=AgentType.
ZERO_SHOT_REACT_DESCRIPTION)
output = agent.run('when was langchain made')
assert output == 'misalignment'
|
def test_agent_tool_return_direct() ->None:
    """Test agent using tools that return directly."""
    tool = 'Search'
    # First scripted response triggers the tool; the second would normally
    # produce the final answer, but should never be reached.
    responses = [f'FooBarBaz\nAction: {tool}\nAction Input: misalignment',
        """Oh well
Final Answer: curses foiled again"""]
    fake_llm = FakeListLLM(responses=responses)
    tools = [Tool(name='Search', func=lambda x: x, description=
        'Useful for searching', return_direct=True)]
    agent = initialize_agent(tools, fake_llm, agent=AgentType.
        ZERO_SHOT_REACT_DESCRIPTION)
    output = agent.run('when was langchain made')
    # return_direct=True: the tool's echo is returned without a second LLM call.
    assert output == 'misalignment'
|
Test agent using tools that return directly.
|
_import_office365_messages_search
|
from langchain_community.tools.office365.messages_search import O365SearchEmails
return O365SearchEmails
|
def _import_office365_messages_search() ->Any:
    """Lazily import the Office365 email-search tool to avoid a hard dependency."""
    from langchain_community.tools.office365.messages_search import (
        O365SearchEmails as tool_cls,
    )
    return tool_cls
| null |
test_sync_async_equivalent
|
url = 'https://docs.python.org/3.9/'
loader = RecursiveUrlLoader(url, use_async=False, max_depth=2)
async_loader = RecursiveUrlLoader(url, use_async=False, max_depth=2)
docs = sorted(loader.load(), key=lambda d: d.metadata['source'])
async_docs = sorted(async_loader.load(), key=lambda d: d.metadata['source'])
assert docs == async_docs
|
def test_sync_async_equivalent() ->None:
    """Sync and async loaders over the same URL must yield identical docs."""
    url = 'https://docs.python.org/3.9/'
    loader = RecursiveUrlLoader(url, use_async=False, max_depth=2)
    # Bug fix: the async loader was constructed with use_async=False, so the
    # test compared the sync loader with itself and never exercised the
    # async code path.
    async_loader = RecursiveUrlLoader(url, use_async=True, max_depth=2)
    docs = sorted(loader.load(), key=lambda d: d.metadata['source'])
    async_docs = sorted(async_loader.load(), key=lambda d: d.metadata['source']
        )
    assert docs == async_docs
| null |
test_create_action_payload
|
"""Test that the action payload is being created correctly."""
tool = ZapierNLARunAction(action_id='test', zapier_description='test',
params_schema={'test': 'test'}, api_wrapper=ZapierNLAWrapper(
zapier_nla_api_key='test'))
payload = tool.api_wrapper._create_action_payload('some instructions')
assert payload['instructions'] == 'some instructions'
assert payload.get('preview_only') is None
|
def test_create_action_payload() ->None:
    """Test that the action payload is being created correctly."""
    wrapper = ZapierNLAWrapper(zapier_nla_api_key='test')
    tool = ZapierNLARunAction(action_id='test', zapier_description='test',
        params_schema={'test': 'test'}, api_wrapper=wrapper)
    payload = tool.api_wrapper._create_action_payload('some instructions')
    # Instructions are passed through; preview mode stays unset by default.
    assert payload['instructions'] == 'some instructions'
    assert payload.get('preview_only') is None
|
Test that the action payload is being created correctly.
|
_import_promptlayer
|
from langchain_community.llms.promptlayer_openai import PromptLayerOpenAI
return PromptLayerOpenAI
|
def _import_promptlayer() ->Any:
    """Lazily import the PromptLayer OpenAI LLM to avoid a hard dependency."""
    from langchain_community.llms.promptlayer_openai import (
        PromptLayerOpenAI as llm_cls,
    )
    return llm_cls
| null |
_call
|
"""Generate Cypher statement, use it to look up in db and answer question."""
_run_manager = run_manager or CallbackManagerForChainRun.get_noop_manager()
callbacks = _run_manager.get_child()
question = inputs[self.input_key]
intermediate_steps: List = []
generated_cypher = self.cypher_generation_chain.run({'question': question,
'schema': self.graph_schema}, callbacks=callbacks)
generated_cypher = extract_cypher(generated_cypher)
if self.cypher_query_corrector:
generated_cypher = self.cypher_query_corrector(generated_cypher)
_run_manager.on_text('Generated Cypher:', end='\n', verbose=self.verbose)
_run_manager.on_text(generated_cypher, color='green', end='\n', verbose=
self.verbose)
intermediate_steps.append({'query': generated_cypher})
if generated_cypher:
context = self.graph.query(generated_cypher)[:self.top_k]
else:
context = []
if self.return_direct:
final_result = context
else:
_run_manager.on_text('Full Context:', end='\n', verbose=self.verbose)
_run_manager.on_text(str(context), color='green', end='\n', verbose=
self.verbose)
intermediate_steps.append({'context': context})
result = self.qa_chain({'question': question, 'context': context},
callbacks=callbacks)
final_result = result[self.qa_chain.output_key]
chain_result: Dict[str, Any] = {self.output_key: final_result}
if self.return_intermediate_steps:
chain_result[INTERMEDIATE_STEPS_KEY] = intermediate_steps
return chain_result
|
def _call(self, inputs: Dict[str, Any], run_manager: Optional[
    CallbackManagerForChainRun]=None) ->Dict[str, Any]:
    """Generate Cypher statement, use it to look up in db and answer question."""
    _run_manager = run_manager or CallbackManagerForChainRun.get_noop_manager()
    callbacks = _run_manager.get_child()
    question = inputs[self.input_key]
    intermediate_steps: List = []
    # 1) Ask the LLM to translate the question into Cypher, given the schema.
    generated_cypher = self.cypher_generation_chain.run({'question':
        question, 'schema': self.graph_schema}, callbacks=callbacks)
    # Strip any surrounding prose/markdown from the LLM output.
    generated_cypher = extract_cypher(generated_cypher)
    if self.cypher_query_corrector:
        generated_cypher = self.cypher_query_corrector(generated_cypher)
    _run_manager.on_text('Generated Cypher:', end='\n', verbose=self.verbose)
    _run_manager.on_text(generated_cypher, color='green', end='\n', verbose
        =self.verbose)
    intermediate_steps.append({'query': generated_cypher})
    # 2) Run the Cypher against the graph; empty statement -> empty context.
    if generated_cypher:
        context = self.graph.query(generated_cypher)[:self.top_k]
    else:
        context = []
    # 3) Either return the raw rows, or ask the QA chain to phrase an answer.
    if self.return_direct:
        final_result = context
    else:
        _run_manager.on_text('Full Context:', end='\n', verbose=self.verbose)
        _run_manager.on_text(str(context), color='green', end='\n', verbose
            =self.verbose)
        intermediate_steps.append({'context': context})
        result = self.qa_chain({'question': question, 'context': context},
            callbacks=callbacks)
        final_result = result[self.qa_chain.output_key]
    chain_result: Dict[str, Any] = {self.output_key: final_result}
    if self.return_intermediate_steps:
        chain_result[INTERMEDIATE_STEPS_KEY] = intermediate_steps
    return chain_result
|
Generate Cypher statement, use it to look up in db and answer question.
|
test_opensearch_with_custom_field_name_appx_true
|
"""Test Approximate Search with custom field name appx true."""
text_input = ['add', 'test', 'text', 'method']
docsearch = OpenSearchVectorSearch.from_texts(text_input, FakeEmbeddings(),
opensearch_url=DEFAULT_OPENSEARCH_URL, is_appx_search=True)
output = docsearch.similarity_search('add', k=1)
assert output == [Document(page_content='add')]
|
def test_opensearch_with_custom_field_name_appx_true() ->None:
    """Test Approximate Search with custom field name appx true."""
    # NOTE(review): despite the test name, no custom field name is actually
    # passed here — confirm whether vector_field/text_field kwargs were
    # intended.
    text_input = ['add', 'test', 'text', 'method']
    docsearch = OpenSearchVectorSearch.from_texts(text_input,
        FakeEmbeddings(), opensearch_url=DEFAULT_OPENSEARCH_URL,
        is_appx_search=True)
    output = docsearch.similarity_search('add', k=1)
    assert output == [Document(page_content='add')]
|
Test Approximate Search with custom field name appx true.
|
foo
|
"""Docstring
Args:
bar: int
baz: str
"""
assert callbacks is not None
return str(bar) + baz
|
def foo(bar: int, baz: str, callbacks: Optional[CallbackManagerForToolRun]=None
    ) ->str:
    """Docstring
    Args:
        bar: int
        baz: str
    """
    # Callbacks must be injected by the tool machinery before this runs.
    assert callbacks is not None
    text = str(bar)
    return text + baz
|
Docstring
Args:
bar: int
baz: str
|
get_lc_namespace
|
"""Get the namespace of the langchain object."""
return ['langchain', 'llms', 'openai']
|
@classmethod
def get_lc_namespace(cls) ->List[str]:
    """Get the namespace of the langchain object."""
    namespace = ['langchain', 'llms', 'openai']
    return namespace
|
Get the namespace of the langchain object.
|
observation_prefix
|
"""Prefix to append the observation with."""
return 'Observation: '
|
@property
def observation_prefix(self) ->str:
    """Prefix to append the observation with."""
    prefix = 'Observation: '
    return prefix
|
Prefix to append the observation with.
|
_type
|
return 'vector_sql_retrieve_all_parser'
|
@property
def _type(self) ->str:
    """Identifier used when serializing this output parser."""
    parser_type = 'vector_sql_retrieve_all_parser'
    return parser_type
| null |
test_sequential_missing_inputs
|
"""Test error is raised when input variables are missing."""
chain_1 = FakeChain(input_variables=['foo'], output_variables=['bar'])
chain_2 = FakeChain(input_variables=['bar', 'test'], output_variables=['baz'])
with pytest.raises(ValueError):
SequentialChain(chains=[chain_1, chain_2], input_variables=['foo'])
|
def test_sequential_missing_inputs() ->None:
    """Test error is raised when input variables are missing."""
    first = FakeChain(input_variables=['foo'], output_variables=['bar'])
    second = FakeChain(input_variables=['bar', 'test'], output_variables=[
        'baz'])
    with pytest.raises(ValueError):
        # 'test' is required by the second chain but is never produced.
        SequentialChain(chains=[first, second], input_variables=['foo'])
|
Test error is raised when input variables are missing.
|
test_whatsapp_chat_loader
|
"""Test WhatsAppChatLoader."""
file_path = Path(__file__).parent.parent / 'examples' / 'whatsapp_chat.txt'
loader = WhatsAppChatLoader(str(file_path))
docs = loader.load()
assert len(docs) == 1
assert docs[0].metadata['source'] == str(file_path)
assert docs[0].page_content == """James on 05.05.23, 15:48:11: Hi here
User name on 11/8/21, 9:41:32 AM: Message 123
User 2 on 1/23/23, 3:19 AM: Bye!
User 1 on 1/23/23, 3:22_AM: And let me know if anything changes
~ User name 2 on 1/24/21, 12:41:03 PM: Of course!
~ User 2 on 2023/5/4, 16:13:23: See you!
User 1 on 7/19/22, 11:32 PM: Hello
User 2 on 7/20/22, 11:32 am: Goodbye
"""
|
def test_whatsapp_chat_loader() ->None:
    """Test WhatsAppChatLoader."""
    file_path = Path(__file__).parent.parent / 'examples' / 'whatsapp_chat.txt'
    loader = WhatsAppChatLoader(str(file_path))
    docs = loader.load()
    # The whole chat is returned as a single document tagged with its source.
    assert len(docs) == 1
    assert docs[0].metadata['source'] == str(file_path)
    # Expected content exercises several date/time formats the parser supports.
    assert docs[0].page_content == """James on 05.05.23, 15:48:11: Hi here
User name on 11/8/21, 9:41:32 AM: Message 123
User 2 on 1/23/23, 3:19 AM: Bye!
User 1 on 1/23/23, 3:22_AM: And let me know if anything changes
~ User name 2 on 1/24/21, 12:41:03 PM: Of course!
~ User 2 on 2023/5/4, 16:13:23: See you!
User 1 on 7/19/22, 11:32 PM: Hello
User 2 on 7/20/22, 11:32 am: Goodbye
"""
|
Test WhatsAppChatLoader.
|
body_params
|
if self.request_body is None:
return []
return [prop.name for prop in self.request_body.properties]
|
@property
def body_params(self) ->List[str]:
    """Names of the request-body properties, or [] when there is no body."""
    body = self.request_body
    if body is None:
        return []
    return [field.name for field in body.properties]
| null |
test_runnable_lambda_stream
|
"""Test that stream works for both normal functions & those returning Runnable."""
output: List[Any] = [chunk for chunk in RunnableLambda(range).stream(5)]
assert output == [range(5)]
llm_res = "i'm a textbot"
llm = FakeStreamingListLLM(responses=[llm_res], sleep=0.01)
output = list(RunnableLambda(lambda x: llm).stream(''))
assert output == list(llm_res)
|
def test_runnable_lambda_stream() ->None:
    """Test that stream works for both normal functions & those returning Runnable."""
    # A plain function yields its whole return value as one chunk.
    output: List[Any] = [chunk for chunk in RunnableLambda(range).stream(5)]
    assert output == [range(5)]
    llm_res = "i'm a textbot"
    llm = FakeStreamingListLLM(responses=[llm_res], sleep=0.01)
    # A lambda returning a Runnable streams that Runnable's chunks instead.
    output = list(RunnableLambda(lambda x: llm).stream(''))
    assert output == list(llm_res)
|
Test that stream works for both normal functions & those returning Runnable.
|
test_self_hosted_huggingface_pipeline_text_generation
|
"""Test valid call to self-hosted HuggingFace text generation model."""
gpu = get_remote_instance()
llm = SelfHostedHuggingFaceLLM(model_id='gpt2', task='text-generation',
model_kwargs={'n_positions': 1024}, hardware=gpu, model_reqs=model_reqs)
output = llm('Say foo:')
assert isinstance(output, str)
|
def test_self_hosted_huggingface_pipeline_text_generation() ->None:
    """Test valid call to self-hosted HuggingFace text generation model."""
    # Requires a remote GPU instance; this is an integration test.
    gpu = get_remote_instance()
    llm = SelfHostedHuggingFaceLLM(model_id='gpt2', task='text-generation',
        model_kwargs={'n_positions': 1024}, hardware=gpu, model_reqs=model_reqs
        )
    output = llm('Say foo:')
    # Only the output type is checked; generation content is nondeterministic.
    assert isinstance(output, str)
|
Test valid call to self-hosted HuggingFace text generation model.
|
_hash_string_to_uuid
|
"""Hash a string and returns the corresponding UUID."""
hash_value = hashlib.sha1(input_string.encode('utf-8')).hexdigest()
return uuid.uuid5(NAMESPACE_UUID, hash_value)
|
def _hash_string_to_uuid(input_string: str) ->uuid.UUID:
    """Hash a string and returns the corresponding UUID.

    SHA-1 is used only as a stable (non-cryptographic) digest, which is then
    mapped into the fixed namespace via uuid5.
    """
    digest = hashlib.sha1(input_string.encode('utf-8')).hexdigest()
    return uuid.uuid5(NAMESPACE_UUID, digest)
|
Hash a string and returns the corresponding UUID.
|
load
|
"""Load data into Document objects.
Attention:
This implementation starts an asyncio event loop which
will only work if running in a sync env. In an async env, it should
fail since there is already an event loop running.
This code should be updated to kick off the event loop from a separate
thread if running within an async context.
"""
return asyncio.run(self.aload())
|
def load(self) ->List[Document]:
    """Load data into Document objects.
    Attention:
        This implementation starts an asyncio event loop which
        will only work if running in a sync env. In an async env, it should
        fail since there is already an event loop running.
        This code should be updated to kick off the event loop from a separate
        thread if running within an async context.
    """
    coro = self.aload()
    return asyncio.run(coro)
|
Load data into Document objects.
Attention:
This implementation starts an asyncio event loop which
will only work if running in a sync env. In an async env, it should
fail since there is already an event loop running.
This code should be updated to kick off the event loop from a separate
thread if running within an async context.
|
__getattr__
|
"""Get attr name."""
if name == 'create_xorbits_agent':
HERE = Path(__file__).parents[3]
here = as_import_path(Path(__file__).parent, relative_to=HERE)
old_path = 'langchain.' + here + '.' + name
new_path = 'langchain_experimental.' + here + '.' + name
raise ImportError(
f"""This agent has been moved to langchain experiment. This agent relies on python REPL tool under the hood, so to use it safely please sandbox the python REPL. Read https://github.com/langchain-ai/langchain/blob/master/SECURITY.md and https://github.com/langchain-ai/langchain/discussions/11680To keep using this code as is, install langchain experimental and update your import statement from:
`{old_path}` to `{new_path}`."""
)
raise AttributeError(f'{name} does not exist')
|
def __getattr__(name: str) ->Any:
"""Get attr name."""
if name == 'create_xorbits_agent':
HERE = Path(__file__).parents[3]
here = as_import_path(Path(__file__).parent, relative_to=HERE)
old_path = 'langchain.' + here + '.' + name
new_path = 'langchain_experimental.' + here + '.' + name
raise ImportError(
f"""This agent has been moved to langchain experiment. This agent relies on python REPL tool under the hood, so to use it safely please sandbox the python REPL. Read https://github.com/langchain-ai/langchain/blob/master/SECURITY.md and https://github.com/langchain-ai/langchain/discussions/11680To keep using this code as is, install langchain experimental and update your import statement from:
`{old_path}` to `{new_path}`."""
)
raise AttributeError(f'{name} does not exist')
|
Get attr name.
|
load
|
try:
from git import Blob, Repo
except ImportError as ex:
raise ImportError(
'Could not import git python package. Please install it with `pip install GitPython`.'
) from ex
if not os.path.exists(self.repo_path) and self.clone_url is None:
raise ValueError(f'Path {self.repo_path} does not exist')
elif self.clone_url:
if os.path.isdir(os.path.join(self.repo_path, '.git')):
repo = Repo(self.repo_path)
if repo.remotes.origin.url != self.clone_url:
raise ValueError(
'A different repository is already cloned at this path.')
else:
repo = Repo.clone_from(self.clone_url, self.repo_path)
repo.git.checkout(self.branch)
else:
repo = Repo(self.repo_path)
repo.git.checkout(self.branch)
docs: List[Document] = []
for item in repo.tree().traverse():
if not isinstance(item, Blob):
continue
file_path = os.path.join(self.repo_path, item.path)
ignored_files = repo.ignored([file_path])
if len(ignored_files):
continue
if self.file_filter and not self.file_filter(file_path):
continue
rel_file_path = os.path.relpath(file_path, self.repo_path)
try:
with open(file_path, 'rb') as f:
content = f.read()
file_type = os.path.splitext(item.name)[1]
try:
text_content = content.decode('utf-8')
except UnicodeDecodeError:
continue
metadata = {'source': rel_file_path, 'file_path': rel_file_path,
'file_name': item.name, 'file_type': file_type}
doc = Document(page_content=text_content, metadata=metadata)
docs.append(doc)
except Exception as e:
print(f'Error reading file {file_path}: {e}')
return docs
|
def load(self) ->List[Document]:
    """Load every readable UTF-8 text file in the Git repo as a Document.

    Clones ``self.clone_url`` into ``self.repo_path`` when needed (reusing
    an existing clone only if its origin matches), checks out
    ``self.branch``, then walks the repo tree skipping non-blob entries,
    gitignored files, files rejected by ``self.file_filter``, and files
    that do not decode as UTF-8.

    Returns:
        A list of Documents with source path, file name and extension in
        the metadata.

    Raises:
        ImportError: If GitPython is not installed.
        ValueError: If ``repo_path`` does not exist and no ``clone_url``
            was given, or a different repository is already cloned there.
    """
    try:
        from git import Blob, Repo
    except ImportError as ex:
        raise ImportError(
            'Could not import git python package. Please install it with `pip install GitPython`.'
        ) from ex
    if not os.path.exists(self.repo_path) and self.clone_url is None:
        raise ValueError(f'Path {self.repo_path} does not exist')
    elif self.clone_url:
        if os.path.isdir(os.path.join(self.repo_path, '.git')):
            # Reuse an existing clone, but refuse to silently mix repos.
            repo = Repo(self.repo_path)
            if repo.remotes.origin.url != self.clone_url:
                raise ValueError(
                    'A different repository is already cloned at this path.')
        else:
            repo = Repo.clone_from(self.clone_url, self.repo_path)
        repo.git.checkout(self.branch)
    else:
        repo = Repo(self.repo_path)
        repo.git.checkout(self.branch)
    docs: List[Document] = []
    for item in repo.tree().traverse():
        if not isinstance(item, Blob):
            continue
        file_path = os.path.join(self.repo_path, item.path)
        # Respect .gitignore (truthiness instead of the old len() check).
        if repo.ignored([file_path]):
            continue
        if self.file_filter and not self.file_filter(file_path):
            continue
        rel_file_path = os.path.relpath(file_path, self.repo_path)
        try:
            with open(file_path, 'rb') as f:
                content = f.read()
            file_type = os.path.splitext(item.name)[1]
            try:
                text_content = content.decode('utf-8')
            except UnicodeDecodeError:
                # Binary / non-UTF-8 blob: skip it silently.
                continue
            metadata = {'source': rel_file_path, 'file_path':
                rel_file_path, 'file_name': item.name, 'file_type':
                file_type}
            docs.append(Document(page_content=text_content, metadata=metadata)
                )
        except Exception as e:
            # Best-effort: report and keep loading the remaining files.
            print(f'Error reading file {file_path}: {e}')
    return docs
| null |
__init__
|
try:
from cassio.vector import VectorTable
except (ImportError, ModuleNotFoundError):
raise ImportError(
'Could not import cassio python package. Please install it with `pip install cassio`.'
)
"""Create a vector table."""
self.embedding = embedding
self.session = session
self.keyspace = keyspace
self.table_name = table_name
self.ttl_seconds = ttl_seconds
self._embedding_dimension = None
self.table = VectorTable(session=session, keyspace=keyspace, table=
table_name, embedding_dimension=self._get_embedding_dimension(),
primary_key_type='TEXT')
|
def __init__(self, embedding: Embeddings, session: Session, keyspace: str,
    table_name: str, ttl_seconds: Optional[int]=None) ->None:
    """Create a vector table.

    Fix: this text previously sat *after* the import block as a stray,
    no-op string expression, so the method had no real docstring.

    Args:
        embedding: Embedding function used to vectorize stored texts.
        session: Cassandra driver session used to execute statements.
        keyspace: Keyspace in which the vector table lives.
        table_name: Name of the vector table.
        ttl_seconds: Optional time-to-live for inserted rows.

    Raises:
        ImportError: If the ``cassio`` package is not installed.
    """
    try:
        from cassio.vector import VectorTable
    except (ImportError, ModuleNotFoundError):
        raise ImportError(
            'Could not import cassio python package. Please install it with `pip install cassio`.'
        )
    self.embedding = embedding
    self.session = session
    self.keyspace = keyspace
    self.table_name = table_name
    self.ttl_seconds = ttl_seconds
    # Cached lazily by _get_embedding_dimension().
    self._embedding_dimension = None
    self.table = VectorTable(session=session, keyspace=keyspace, table=
        table_name, embedding_dimension=self._get_embedding_dimension(),
        primary_key_type='TEXT')
| null |
structured_tool
|
"""Return the arguments directly."""
return {'some_enum': some_enum, 'some_base_model': some_base_model}
|
@tool
def structured_tool(some_enum: SomeEnum, some_base_model: SomeBaseModel
    ) ->dict:
    """Echo both arguments back unchanged in a dict."""
    return dict(some_enum=some_enum, some_base_model=some_base_model)
|
Return the arguments directly.
|
_get_connection
|
try:
import singlestoredb as s2
except ImportError:
raise ImportError(
'Could not import singlestoredb python package. Please install it with `pip install singlestoredb`.'
)
return s2.connect(**self.connection_kwargs)
|
def _get_connection(self: SingleStoreDB) ->Any:
    """Open a fresh database connection from the stored connection kwargs.

    Raises:
        ImportError: If the ``singlestoredb`` package is not installed.
    """
    try:
        import singlestoredb as s2
    except ImportError:
        raise ImportError(
            'Could not import singlestoredb python package. Please install it with `pip install singlestoredb`.'
        )
    connection = s2.connect(**self.connection_kwargs)
    return connection
| null |
_call
|
"""
Wrapper around the bittensor top miner models. It's built by Neural Internet.
Call the Neural Internet's BTVEP Server and return the output.
Parameters (optional):
system_prompt(str): A system prompt defining how your model should respond.
top_responses(int): Total top miner responses to retrieve from Bittensor
protocol.
Return:
The generated response(s).
Example:
.. code-block:: python
from langchain_community.llms import NIBittensorLLM
llm = NIBittensorLLM(system_prompt="Act like you are programmer with 5+ years of experience.")
"""
context = ssl.create_default_context()
context.check_hostname = True
conn = http.client.HTTPSConnection('test.neuralinternet.ai', context=context)
if isinstance(self.top_responses, int):
top_n = min(100, self.top_responses)
else:
top_n = 0
default_prompt = (
'You are an assistant which is created by Neural Internet(NI) in decentralized network named as a Bittensor.'
)
if self.system_prompt is None:
system_prompt = (default_prompt +
' Your task is to provide accurate response based on user prompt')
else:
system_prompt = default_prompt + str(self.system_prompt)
conn.request('GET', '/admin/api-keys/')
api_key_response = conn.getresponse()
api_keys_data = api_key_response.read().decode('utf-8').replace('\n', ''
).replace('\t', '')
api_keys_json = json.loads(api_keys_data)
api_key = api_keys_json[0]['api_key']
headers = {'Content-Type': 'application/json', 'Authorization':
f'Bearer {api_key}', 'Endpoint-Version': '2023-05-19'}
conn.request('GET', '/top_miner_uids', headers=headers)
miner_response = conn.getresponse()
miner_data = miner_response.read().decode('utf-8').replace('\n', '').replace(
'\t', '')
uids = json.loads(miner_data)
if isinstance(uids, list) and uids and not top_n:
for uid in uids:
try:
payload = json.dumps({'uids': [uid], 'messages': [{'role':
'system', 'content': system_prompt}, {'role': 'user',
'content': prompt}]})
conn.request('POST', '/chat', payload, headers)
init_response = conn.getresponse()
init_data = init_response.read().decode('utf-8').replace('\n', ''
).replace('\t', '')
init_json = json.loads(init_data)
if 'choices' not in init_json:
continue
reply = init_json['choices'][0]['message']['content']
conn.close()
return reply
except Exception:
continue
try:
payload = json.dumps({'top_n': top_n, 'messages': [{'role': 'system',
'content': system_prompt}, {'role': 'user', 'content': prompt}]})
conn.request('POST', '/chat', payload, headers)
response = conn.getresponse()
utf_string = response.read().decode('utf-8').replace('\n', '').replace('\t'
, '')
if top_n:
conn.close()
return utf_string
json_resp = json.loads(utf_string)
reply = json_resp['choices'][0]['message']['content']
conn.close()
return reply
except Exception as e:
conn.request('GET', f'/error_msg?e={e}&p={prompt}', headers=headers)
return 'Sorry I am unable to provide response now, Please try again later.'
|
def _call(self, prompt: str, stop: Optional[List[str]]=None, run_manager:
    Optional[CallbackManagerForLLMRun]=None, **kwargs: Any) ->str:
    """
    Wrapper around the bittensor top miner models. It's built by Neural Internet.
    Call the Neural Internet's BTVEP Server and return the output.
    Parameters (optional):
        system_prompt(str): A system prompt defining how your model should respond.
        top_responses(int): Total top miner responses to retrieve from Bittensor
        protocol.
    Return:
        The generated response(s).
    Example:
        .. code-block:: python
            from langchain_community.llms import NIBittensorLLM
            llm = NIBittensorLLM(system_prompt="Act like you are programmer with 5+ years of experience.")
    """
    context = ssl.create_default_context()
    context.check_hostname = True
    conn = http.client.HTTPSConnection('test.neuralinternet.ai', context=
        context)
    # Cap the number of requested top-miner responses at 100; 0 means
    # "single best response" mode.
    if isinstance(self.top_responses, int):
        top_n = min(100, self.top_responses)
    else:
        top_n = 0
    default_prompt = (
        'You are an assistant which is created by Neural Internet(NI) in decentralized network named as a Bittensor.'
        )
    if self.system_prompt is None:
        system_prompt = (default_prompt +
            ' Your task is to provide accurate response based on user prompt')
    else:
        system_prompt = default_prompt + str(self.system_prompt)
    # Fetch an API key from the server, then the current top miner UIDs.
    conn.request('GET', '/admin/api-keys/')
    api_key_response = conn.getresponse()
    api_keys_data = api_key_response.read().decode('utf-8').replace('\n', ''
        ).replace('\t', '')
    api_keys_json = json.loads(api_keys_data)
    api_key = api_keys_json[0]['api_key']
    headers = {'Content-Type': 'application/json', 'Authorization':
        f'Bearer {api_key}', 'Endpoint-Version': '2023-05-19'}
    conn.request('GET', '/top_miner_uids', headers=headers)
    miner_response = conn.getresponse()
    miner_data = miner_response.read().decode('utf-8').replace('\n', ''
        ).replace('\t', '')
    uids = json.loads(miner_data)
    # Single-response mode: try each top miner in turn until one answers.
    if isinstance(uids, list) and uids and not top_n:
        for uid in uids:
            try:
                payload = json.dumps({'uids': [uid], 'messages': [{'role':
                    'system', 'content': system_prompt}, {'role': 'user',
                    'content': prompt}]})
                conn.request('POST', '/chat', payload, headers)
                init_response = conn.getresponse()
                init_data = init_response.read().decode('utf-8').replace('\n',
                    '').replace('\t', '')
                init_json = json.loads(init_data)
                if 'choices' not in init_json:
                    continue
                reply = init_json['choices'][0]['message']['content']
                conn.close()
                return reply
            except Exception:
                # Best-effort per miner; move on to the next UID.
                continue
    # Multi-response (top_n) mode, or fallback when no single miner replied.
    try:
        payload = json.dumps({'top_n': top_n, 'messages': [{'role':
            'system', 'content': system_prompt}, {'role': 'user', 'content':
            prompt}]})
        conn.request('POST', '/chat', payload, headers)
        response = conn.getresponse()
        utf_string = response.read().decode('utf-8').replace('\n', '').replace(
            '\t', '')
        if top_n:
            conn.close()
            return utf_string
        json_resp = json.loads(utf_string)
        reply = json_resp['choices'][0]['message']['content']
        conn.close()
        return reply
    except Exception as e:
        # Report the failure to the server. Both values must be
        # percent-encoded: a raw prompt or error text containing spaces,
        # '&' or '#' would otherwise corrupt the request line.
        from urllib.parse import quote
        conn.request('GET', f'/error_msg?e={quote(str(e))}&p={quote(prompt)}',
            headers=headers)
        # NOTE(review): the connection is left open on this path and on
        # early exceptions above — confirm whether an explicit close is
        # wanted.
        return (
            'Sorry I am unable to provide response now, Please try again later.'
            )
|
Wrapper around the bittensor top miner models. It's built by Neural Internet.
Call the Neural Internet's BTVEP Server and return the output.
Parameters (optional):
system_prompt(str): A system prompt defining how your model should respond.
top_responses(int): Total top miner responses to retrieve from Bittensor
protocol.
Return:
The generated response(s).
Example:
.. code-block:: python
from langchain_community.llms import NIBittensorLLM
llm = NIBittensorLLM(system_prompt="Act like you are programmer with 5+ years of experience.")
|
test_selector_threshold_zero
|
"""Tests NGramOverlapExampleSelector threshold set to 0.0."""
selector.threshold = 0.0
sentence = 'Spot can run.'
output = selector.select_examples({'input': sentence})
assert output == [EXAMPLES[2], EXAMPLES[0]]
|
def test_selector_threshold_zero(selector: NGramOverlapExampleSelector) ->None:
    """With threshold 0.0 the selector returns the overlap-ranked examples."""
    selector.threshold = 0.0
    picked = selector.select_examples({'input': 'Spot can run.'})
    assert picked == [EXAMPLES[2], EXAMPLES[0]]
|
Tests NGramOverlapExampleSelector threshold set to 0.0.
|
_import_huggingface_endpoint
|
from langchain_community.llms.huggingface_endpoint import HuggingFaceEndpoint
return HuggingFaceEndpoint
|
def _import_huggingface_endpoint() ->Any:
    """Lazily import and return the HuggingFaceEndpoint class."""
    from langchain_community.llms.huggingface_endpoint import (
        HuggingFaceEndpoint,
    )
    return HuggingFaceEndpoint
| null |
load
|
"""Load documents."""
return list(self.lazy_load())
|
def load(self) ->List[Document]:
    """Eagerly load all documents by exhausting the lazy iterator."""
    documents = list(self.lazy_load())
    return documents
|
Load documents.
|
similarity_search_by_vector
|
"""Return docs most similar to embedding vector.
Args:
embedding: Embedding to look up documents similar to.
k: Number of Documents to return. Defaults to 4.
Returns:
List of Documents most similar to the query vector.
"""
raise NotImplementedError
|
def similarity_search_by_vector(self, embedding: List[float], k: int=4, **
    kwargs: Any) ->List[Document]:
    """Return docs most similar to embedding vector.

    Args:
        embedding: Embedding to look up documents similar to.
        k: Number of Documents to return. Defaults to 4.

    Returns:
        List of Documents most similar to the query vector.

    Raises:
        NotImplementedError: Always; subclasses must override this method.
    """
    raise NotImplementedError()
|
Return docs most similar to embedding vector.
Args:
embedding: Embedding to look up documents similar to.
k: Number of Documents to return. Defaults to 4.
Returns:
List of Documents most similar to the query vector.
|
_convert_message_to_dict
|
message_dict: Dict[str, Any]
if isinstance(message, ChatMessage):
message_dict = {'role': message.role, 'content': message.content}
elif isinstance(message, HumanMessage):
message_dict = {'role': 'user', 'content': message.content}
elif isinstance(message, AIMessage):
message_dict = {'role': 'assistant', 'content': message.content}
if 'function_call' in message.additional_kwargs:
message_dict['function_call'] = message.additional_kwargs[
'function_call']
if message_dict['content'] == '':
message_dict['content'] = None
elif isinstance(message, SystemMessage):
message_dict = {'role': 'system', 'content': message.content}
elif isinstance(message, FunctionMessage):
message_dict = {'role': 'function', 'content': message.content, 'name':
message.name}
else:
raise TypeError(f'Got unknown type {message}')
if 'name' in message.additional_kwargs:
message_dict['name'] = message.additional_kwargs['name']
return message_dict
|
def _convert_message_to_dict(message: BaseMessage) ->dict:
    """Translate a LangChain chat message into the role/content dict format.

    Raises:
        TypeError: If the message type is not one of the known subclasses.
    """
    payload: Dict[str, Any]
    if isinstance(message, ChatMessage):
        payload = {'role': message.role, 'content': message.content}
    elif isinstance(message, HumanMessage):
        payload = {'role': 'user', 'content': message.content}
    elif isinstance(message, AIMessage):
        payload = {'role': 'assistant', 'content': message.content}
        if 'function_call' in message.additional_kwargs:
            payload['function_call'] = message.additional_kwargs[
                'function_call']
            # Function-call replies carry null content on the wire.
            if payload['content'] == '':
                payload['content'] = None
    elif isinstance(message, SystemMessage):
        payload = {'role': 'system', 'content': message.content}
    elif isinstance(message, FunctionMessage):
        payload = {'role': 'function', 'content': message.content, 'name':
            message.name}
    else:
        raise TypeError(f'Got unknown type {message}')
    # An explicit name (e.g. for named participants) applies to any role.
    if 'name' in message.additional_kwargs:
        payload['name'] = message.additional_kwargs['name']
    return payload
| null |
__init__
|
"""Initialize with a file path.
Args:
file_path: The path to the file to load.
"""
with open(file_path, 'rb') as f:
encoding, _ = tokenize.detect_encoding(f.readline)
super().__init__(file_path=file_path, encoding=encoding)
|
def __init__(self, file_path: str):
    """Initialize with a file path.

    The file's text encoding is auto-detected from its first line(s) via
    ``tokenize.detect_encoding`` before delegating to the parent loader.

    Args:
        file_path: The path to the file to load.
    """
    with open(file_path, 'rb') as source:
        detected_encoding, _ = tokenize.detect_encoding(source.readline)
    super().__init__(file_path=file_path, encoding=detected_encoding)
|
Initialize with a file path.
Args:
file_path: The path to the file to load.
|
get_format_instructions
|
return FORMAT_INSTRUCTIONS
|
def get_format_instructions(self) ->str:
    """Return the module-level format instructions verbatim."""
    instructions: str = FORMAT_INSTRUCTIONS
    return instructions
| null |
test_default_embeddings_mixed_w_explicit_user_embeddings
|
llm, PROMPT = setup()
feature_embedder = pick_best_chain.PickBestFeatureEmbedder(auto_embed=True,
model=MockEncoderReturnsList())
chain = pick_best_chain.PickBest.from_llm(llm=llm, prompt=PROMPT,
feature_embedder=feature_embedder, auto_embed=True)
str1 = '0'
str2 = '1'
encoded_str2 = rl_chain.stringify_embedding([1.0, 2.0])
ctx_str_1 = 'context1'
ctx_str_2 = 'context2'
encoded_ctx_str_1 = rl_chain.stringify_embedding([1.0, 2.0])
dot_prod = 'dotprod 0:5.0 1:5.0'
expected = f"""shared |User {encoded_ctx_str_1} |@ User={encoded_ctx_str_1} |User2 {ctx_str_2} |@ User2={ctx_str_2}
|action {str1} |# action={str1} |{dot_prod}
|action {encoded_str2} |# action={encoded_str2} |{dot_prod}"""
actions = [str1, rl_chain.Embed(str2)]
response = chain.run(User=rl_chain.BasedOn(rl_chain.Embed(ctx_str_1)),
User2=rl_chain.BasedOn(ctx_str_2), action=rl_chain.ToSelectFrom(actions))
selection_metadata = response['selection_metadata']
vw_str = feature_embedder.format(selection_metadata)
assert vw_str == expected
|
@pytest.mark.requires('vowpal_wabbit_next', 'sentence_transformers')
def test_default_embeddings_mixed_w_explicit_user_embeddings() ->None:
    # auto_embed is on, and some features are additionally wrapped in
    # Embed(): those are expected to appear in stringified-embedding form,
    # while plain features keep their raw value in the VW example string.
    llm, PROMPT = setup()
    feature_embedder = pick_best_chain.PickBestFeatureEmbedder(auto_embed=
        True, model=MockEncoderReturnsList())
    chain = pick_best_chain.PickBest.from_llm(llm=llm, prompt=PROMPT,
        feature_embedder=feature_embedder, auto_embed=True)
    str1 = '0'
    str2 = '1'
    # presumably MockEncoderReturnsList encodes everything to [1.0, 2.0],
    # so every embedded feature stringifies identically — TODO confirm.
    encoded_str2 = rl_chain.stringify_embedding([1.0, 2.0])
    ctx_str_1 = 'context1'
    ctx_str_2 = 'context2'
    encoded_ctx_str_1 = rl_chain.stringify_embedding([1.0, 2.0])
    dot_prod = 'dotprod 0:5.0 1:5.0'
    # Expected VW-format example: shared context line then one line per
    # candidate action, each carrying the dot-product feature.
    expected = f"""shared |User {encoded_ctx_str_1} |@ User={encoded_ctx_str_1} |User2 {ctx_str_2} |@ User2={ctx_str_2}
|action {str1} |# action={str1} |{dot_prod}
|action {encoded_str2} |# action={encoded_str2} |{dot_prod}"""
    actions = [str1, rl_chain.Embed(str2)]
    response = chain.run(User=rl_chain.BasedOn(rl_chain.Embed(ctx_str_1)),
        User2=rl_chain.BasedOn(ctx_str_2), action=rl_chain.ToSelectFrom(
        actions))
    selection_metadata = response['selection_metadata']
    vw_str = feature_embedder.format(selection_metadata)
    assert vw_str == expected
| null |
test_from_texts_not_supported
|
with pytest.raises(NotImplementedError) as ex:
DatabricksVectorSearch.from_texts(fake_texts, FakeEmbeddings())
assert '`from_texts` is not supported. Use `add_texts` to add to existing direct-access index.' in str(
ex.value)
|
@pytest.mark.requires('databricks', 'databricks.vector_search')
def test_from_texts_not_supported() ->None:
    """from_texts must be rejected for direct-access indexes."""
    with pytest.raises(NotImplementedError) as ex:
        DatabricksVectorSearch.from_texts(fake_texts, FakeEmbeddings())
    expected_msg = (
        '`from_texts` is not supported. Use `add_texts` to add to existing direct-access index.'
        )
    assert expected_msg in str(ex.value)
| null |
_create_index_by_id
|
"""Creates a MatchingEngineIndex object by id.
Args:
index_id: The created index id.
project_id: The project to retrieve index from.
region: Location to retrieve index from.
credentials: GCS credentials.
Returns:
A configured MatchingEngineIndex.
"""
from google.cloud import aiplatform
logger.debug(f'Creating matching engine index with id {index_id}.')
return aiplatform.MatchingEngineIndex(index_name=index_id, project=
project_id, location=region, credentials=credentials)
|
@classmethod
def _create_index_by_id(cls, index_id: str, project_id: str, region: str,
    credentials: 'Credentials') ->MatchingEngineIndex:
    """Build a MatchingEngineIndex handle for an existing index id.

    Args:
        index_id: The created index id.
        project_id: The project to retrieve index from.
        region: Location to retrieve index from.
        credentials: GCS credentials.

    Returns:
        A configured MatchingEngineIndex.
    """
    from google.cloud import aiplatform
    logger.debug(f'Creating matching engine index with id {index_id}.')
    index = aiplatform.MatchingEngineIndex(
        index_name=index_id,
        project=project_id,
        location=region,
        credentials=credentials,
    )
    return index
|
Creates a MatchingEngineIndex object by id.
Args:
index_id: The created index id.
project_id: The project to retrieve index from.
region: Location to retrieve index from.
credentials: GCS credentials.
Returns:
A configured MatchingEngineIndex.
|
_import_reddit_search_RedditSearchRun
|
from langchain_community.tools.reddit_search.tool import RedditSearchRun
return RedditSearchRun
|
def _import_reddit_search_RedditSearchRun() ->Any:
    """Lazily import and return the RedditSearchRun tool class."""
    from langchain_community.tools.reddit_search.tool import (
        RedditSearchRun,
    )
    return RedditSearchRun
| null |
similarity_search_with_score_by_vector
|
"""
Performs a search on the query string and returns results with scores.
Args:
embedding (List[float]): The embedding vector being searched.
k (int, optional): The number of results to return.
Default is 4.
param (dict): Specifies the search parameters for the index.
Default is None.
expr (str, optional): Filtering expression. Default is None.
timeout (int, optional): The waiting time before a timeout error.
Default is None.
kwargs: Keyword arguments for Collection.search().
Returns:
List[Tuple[Document, float]]: Resulting documents and scores.
"""
if self.col is None:
logger.debug('No existing collection to search.')
return []
output_fields = self.fields[:]
output_fields.remove(self._vector_field)
logger.debug(f'search_field:{self._vector_field}')
logger.debug(f'vectors:{[embedding]}')
logger.debug(f'output_fields:{output_fields}')
logger.debug(f'topk:{k}')
logger.debug(f'dsl:{expr}')
res = self.col.query(search_field=self._vector_field, vectors=[embedding],
output_fields=output_fields, topk=k, dsl=expr)
logger.debug(f'[similarity_search_with_score_by_vector] res:{res}')
score_col = self._text_field + '%scores'
ret = []
count = 0
for items in zip(*[res[0][field] for field in output_fields]):
meta = {field: value for field, value in zip(output_fields, items)}
doc = Document(page_content=meta.pop(self._text_field), metadata=meta)
logger.debug(
f'[similarity_search_with_score_by_vector] res[0][score_col]:{res[0][score_col]}'
)
score = res[0][score_col][count]
count += 1
ret.append((doc, score))
return ret
|
def similarity_search_with_score_by_vector(self, embedding: List[float], k:
    int=4, param: Optional[dict]=None, expr: Optional[str]=None, timeout:
    Optional[int]=None, **kwargs: Any) ->List[Tuple[Document, float]]:
    """
    Performs a search on the query string and returns results with scores.
    Args:
        embedding (List[float]): The embedding vector being searched.
        k (int, optional): The number of results to return.
            Default is 4.
        param (dict): Specifies the search parameters for the index.
            Default is None.
        expr (str, optional): Filtering expression. Default is None.
        timeout (int, optional): The waiting time before a timeout error.
            Default is None.
        kwargs: Keyword arguments for Collection.search().
    Returns:
        List[Tuple[Document, float]]: Resulting documents and scores.
    """
    if self.col is None:
        logger.debug('No existing collection to search.')
        return []
    # Return every stored field except the vector itself as metadata.
    output_fields = self.fields[:]
    output_fields.remove(self._vector_field)
    logger.debug(f'search_field:{self._vector_field}')
    logger.debug(f'vectors:{[embedding]}')
    logger.debug(f'output_fields:{output_fields}')
    logger.debug(f'topk:{k}')
    logger.debug(f'dsl:{expr}')
    # NOTE(review): `param`, `timeout` and **kwargs are accepted but never
    # forwarded to the underlying query — confirm whether that is intended.
    res = self.col.query(search_field=self._vector_field, vectors=[
        embedding], output_fields=output_fields, topk=k, dsl=expr)
    logger.debug(f'[similarity_search_with_score_by_vector] res:{res}')
    # Scores come back in a parallel column keyed off the text field name.
    score_col = self._text_field + '%scores'
    ret = []
    # enumerate() replaces the previous hand-maintained `count` counter.
    for idx, items in enumerate(zip(*[res[0][field] for field in
        output_fields])):
        meta = {field: value for field, value in zip(output_fields, items)}
        doc = Document(page_content=meta.pop(self._text_field), metadata=meta)
        logger.debug(
            f'[similarity_search_with_score_by_vector] res[0][score_col]:{res[0][score_col]}'
            )
        ret.append((doc, res[0][score_col][idx]))
    return ret
|
Performs a search on the query string and returns results with scores.
Args:
embedding (List[float]): The embedding vector being searched.
k (int, optional): The number of results to return.
Default is 4.
param (dict): Specifies the search parameters for the index.
Default is None.
expr (str, optional): Filtering expression. Default is None.
timeout (int, optional): The waiting time before a timeout error.
Default is None.
kwargs: Keyword arguments for Collection.search().
Returns:
List[Tuple[Document, float]]: Resulting documents and scores.
|
on_llm_new_token
|
self.on_llm_new_token_common()
|
def on_llm_new_token(self, *args: Any, **kwargs: Any) ->Any:
    """Forward any new-token callback to the shared common handler.

    All positional and keyword arguments are accepted but ignored; only
    the fact that a token arrived is recorded via
    ``on_llm_new_token_common``.
    """
    self.on_llm_new_token_common()
| null |
__init__
|
"""Construct the pipeline remotely using an auxiliary function.
The load function needs to be importable to be imported
and run on the server, i.e. in a module and not a REPL or closure.
Then, initialize the remote inference function.
"""
load_fn_kwargs = {'model_id': kwargs.get('model_id', DEFAULT_MODEL_ID),
'task': kwargs.get('task', DEFAULT_TASK), 'device': kwargs.get('device',
0), 'model_kwargs': kwargs.get('model_kwargs', None)}
super().__init__(load_fn_kwargs=load_fn_kwargs, **kwargs)
|
def __init__(self, **kwargs: Any):
    """Construct the pipeline remotely using an auxiliary function.

    The load function needs to be importable so it can be imported and run
    on the server — i.e. defined in a module, not a REPL or closure. Then
    the remote inference function is initialized by the parent class.
    """
    # Fill in any kwargs the caller did not supply with the defaults.
    defaults = {'model_id': DEFAULT_MODEL_ID, 'task': DEFAULT_TASK,
        'device': 0, 'model_kwargs': None}
    load_fn_kwargs = {key: kwargs.get(key, fallback) for key, fallback in
        defaults.items()}
    super().__init__(load_fn_kwargs=load_fn_kwargs, **kwargs)
|
Construct the pipeline remotely using an auxiliary function.
The load function needs to be importable to be imported
and run on the server, i.e. in a module and not a REPL or closure.
Then, initialize the remote inference function.
|
max_marginal_relevance_search
|
"""Return docs selected using the maximal marginal relevance.
Maximal marginal relevance optimizes for similarity to query AND diversity
among selected documents.
Args:
query: Text to look up documents similar to.
k: Number of Documents to return.
fetch_k: Number of Documents to fetch to pass to MMR algorithm.
lambda_mult: Number between 0 and 1 that determines the degree
of diversity among the results with 0 corresponding
to maximum diversity and 1 to minimum diversity.
Optional.
Returns:
List of Documents selected by maximal marginal relevance.
"""
embedding_vector = self.embedding.embed_query(query)
return self.max_marginal_relevance_search_by_vector(embedding_vector, k,
fetch_k, lambda_mult=lambda_mult, filter=filter)
|
def max_marginal_relevance_search(self, query: str, k: int=4, fetch_k: int=
    20, lambda_mult: float=0.5, filter: Optional[Dict[str, str]]=None, **
    kwargs: Any) ->List[Document]:
    """Return docs selected using the maximal marginal relevance.

    MMR optimizes for similarity to the query AND diversity among the
    selected documents.

    Args:
        query: Text to look up documents similar to.
        k: Number of Documents to return.
        fetch_k: Number of Documents to fetch to pass to MMR algorithm.
        lambda_mult: Number between 0 and 1 controlling diversity;
            0 is maximum diversity, 1 is minimum diversity. Optional.

    Returns:
        List of Documents selected by maximal marginal relevance.
    """
    # Embed the query once, then delegate to the vector-based variant.
    query_embedding = self.embedding.embed_query(query)
    return self.max_marginal_relevance_search_by_vector(query_embedding, k,
        fetch_k, lambda_mult=lambda_mult, filter=filter)
|
Return docs selected using the maximal marginal relevance.
Maximal marginal relevance optimizes for similarity to query AND diversity
among selected documents.
Args:
query: Text to look up documents similar to.
k: Number of Documents to return.
fetch_k: Number of Documents to fetch to pass to MMR algorithm.
lambda_mult: Number between 0 and 1 that determines the degree
of diversity among the results with 0 corresponding
to maximum diversity and 1 to minimum diversity.
Optional.
Returns:
List of Documents selected by maximal marginal relevance.
|
test_cypher_return_correct_schema
|
"""Test that chain returns direct results."""
url = os.environ.get('NEO4J_URI')
username = os.environ.get('NEO4J_USERNAME')
password = os.environ.get('NEO4J_PASSWORD')
assert url is not None
assert username is not None
assert password is not None
graph = Neo4jGraph(url=url, username=username, password=password)
graph.query('MATCH (n) DETACH DELETE n')
graph.query(
"""
CREATE (la:LabelA {property_a: 'a'})
CREATE (lb:LabelB)
CREATE (lc:LabelC)
MERGE (la)-[:REL_TYPE]-> (lb)
MERGE (la)-[:REL_TYPE {rel_prop: 'abc'}]-> (lc)
"""
)
graph.refresh_schema()
node_properties = graph.query(node_properties_query)
relationships_properties = graph.query(rel_properties_query)
relationships = graph.query(rel_query)
expected_node_properties = [{'output': {'properties': [{'property':
'property_a', 'type': 'STRING'}], 'labels': 'LabelA'}}]
expected_relationships_properties = [{'output': {'type': 'REL_TYPE',
'properties': [{'property': 'rel_prop', 'type': 'STRING'}]}}]
expected_relationships = [{'output': {'start': 'LabelA', 'type': 'REL_TYPE',
'end': 'LabelB'}}, {'output': {'start': 'LabelA', 'type': 'REL_TYPE',
'end': 'LabelC'}}]
assert node_properties == expected_node_properties
assert relationships_properties == expected_relationships_properties
assert sorted(relationships, key=lambda x: x['output']['end']
) == expected_relationships
|
def test_cypher_return_correct_schema() ->None:
    """Test that chain returns direct results."""
    # Integration test: requires a live Neo4j reachable via env vars.
    url = os.environ.get('NEO4J_URI')
    username = os.environ.get('NEO4J_USERNAME')
    password = os.environ.get('NEO4J_PASSWORD')
    assert url is not None
    assert username is not None
    assert password is not None
    graph = Neo4jGraph(url=url, username=username, password=password)
    # Wipe the database so the schema below is fully determined.
    graph.query('MATCH (n) DETACH DELETE n')
    graph.query(
        """
        CREATE (la:LabelA {property_a: 'a'})
        CREATE (lb:LabelB)
        CREATE (lc:LabelC)
        MERGE (la)-[:REL_TYPE]-> (lb)
        MERGE (la)-[:REL_TYPE {rel_prop: 'abc'}]-> (lc)
        """
    )
    graph.refresh_schema()
    node_properties = graph.query(node_properties_query)
    relationships_properties = graph.query(rel_properties_query)
    relationships = graph.query(rel_query)
    expected_node_properties = [{'output': {'properties': [{'property':
        'property_a', 'type': 'STRING'}], 'labels': 'LabelA'}}]
    expected_relationships_properties = [{'output': {'type': 'REL_TYPE',
        'properties': [{'property': 'rel_prop', 'type': 'STRING'}]}}]
    expected_relationships = [{'output': {'start': 'LabelA', 'type':
        'REL_TYPE', 'end': 'LabelB'}}, {'output': {'start': 'LabelA',
        'type': 'REL_TYPE', 'end': 'LabelC'}}]
    assert node_properties == expected_node_properties
    assert relationships_properties == expected_relationships_properties
    # Sort for a deterministic comparison; result order is not guaranteed.
    assert sorted(relationships, key=lambda x: x['output']['end']
        ) == expected_relationships
|
Test that chain returns direct results.
|
test_llamacpp_incorrect_field
|
with pytest.warns(match='not default parameter'):
llm = LlamaCpp(model_path=get_model(), n_gqa=None)
llm.model_kwargs == {'n_gqa': None}
|
def test_llamacpp_incorrect_field() ->None:
    """An unknown field should warn and be captured in model_kwargs."""
    with pytest.warns(match='not default parameter'):
        llm = LlamaCpp(model_path=get_model(), n_gqa=None)
    # Fix: the original line was a bare `==` comparison whose result was
    # discarded — the check never actually ran.
    assert llm.model_kwargs == {'n_gqa': None}
| null |
test_truncate_word
|
assert truncate_word('Hello World', length=5) == 'He...'
assert truncate_word('Hello World', length=0) == 'Hello World'
assert truncate_word('Hello World', length=-10) == 'Hello World'
assert truncate_word('Hello World', length=5, suffix='!!!') == 'He!!!'
assert truncate_word('Hello World', length=12, suffix='!!!') == 'Hello World'
|
def test_truncate_word() ->None:
    """truncate_word trims long strings with a suffix, leaves short ones."""
    cases = [
        ({'length': 5}, 'He...'),
        ({'length': 0}, 'Hello World'),
        ({'length': -10}, 'Hello World'),
        ({'length': 5, 'suffix': '!!!'}, 'He!!!'),
        ({'length': 12, 'suffix': '!!!'}, 'Hello World'),
    ]
    for kwargs, expected in cases:
        assert truncate_word('Hello World', **kwargs) == expected
| null |
_identifying_params
|
"""Get the identifying parameters."""
return {'model': self.model, 'model_config': self.model_config,
'generation_config': self.generation_config, 'streaming': self.streaming}
|
@property
def _identifying_params(self) ->Dict[str, Any]:
    """Get the identifying parameters."""
    params: Dict[str, Any] = {
        'model': self.model,
        'model_config': self.model_config,
        'generation_config': self.generation_config,
        'streaming': self.streaming,
    }
    return params
|
Get the identifying parameters.
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.