method_name
stringlengths 1
78
| method_body
stringlengths 3
9.66k
| full_code
stringlengths 31
10.7k
| docstring
stringlengths 4
4.74k
⌀ |
|---|---|---|---|
_run
|
"""Use the tool."""
return self.api_wrapper.run(query)
|
def _run(self, query: str, run_manager: Optional[CallbackManagerForToolRun]
=None) ->str:
"""Use the tool."""
return self.api_wrapper.run(query)
|
Use the tool.
|
_openapi_params_to_json_schema
|
properties = {}
required = []
for p in params:
if p.param_schema:
schema = spec.get_schema(p.param_schema)
else:
media_type_schema = list(p.content.values())[0].media_type_schema
schema = spec.get_schema(media_type_schema)
if p.description and not schema.description:
schema.description = p.description
properties[p.name] = json.loads(schema.json(exclude_none=True))
if p.required:
required.append(p.name)
return {'type': 'object', 'properties': properties, 'required': required}
|
def _openapi_params_to_json_schema(params: List[Parameter], spec: OpenAPISpec
) ->dict:
properties = {}
required = []
for p in params:
if p.param_schema:
schema = spec.get_schema(p.param_schema)
else:
media_type_schema = list(p.content.values())[0].media_type_schema
schema = spec.get_schema(media_type_schema)
if p.description and not schema.description:
schema.description = p.description
properties[p.name] = json.loads(schema.json(exclude_none=True))
if p.required:
required.append(p.name)
return {'type': 'object', 'properties': properties, 'required': required}
| null |
_generate_examples
|
"""This function returns the examples.
Args:
split (`string`):
Split to process
name (`string`):
Name of dataset, as defined in the BuilderConfig
"""
if name == 'v1':
yield 1, {'split': split, 'text': 'This is text in version 1', 'list':
['List item 1', 'List item 2', 'List item 3'], 'dict': [{
'dict_text': 'Object text 1', 'dict_int': '1'}, {'dict_text':
'Object text 2', 'dict_int': str(0)}]}
elif name == 'v2':
yield 2, {'split': split, 'text': 'This is text in version 2', 'list':
['Hello', 'Bonjour', 'Hola'], 'dict': [{'dict_text': 'Hello world!',
'dict_int': '2'}, {'dict_text': 'langchain is cool', 'dict_int':
str(123)}]}
|
def _generate_examples(self, split: str, name: str) ->Generator[Tuple[int,
object], Any, None]:
"""This function returns the examples.
Args:
split (`string`):
Split to process
name (`string`):
Name of dataset, as defined in the BuilderConfig
"""
if name == 'v1':
yield 1, {'split': split, 'text': 'This is text in version 1',
'list': ['List item 1', 'List item 2', 'List item 3'], 'dict':
[{'dict_text': 'Object text 1', 'dict_int': '1'}, {'dict_text':
'Object text 2', 'dict_int': str(0)}]}
elif name == 'v2':
yield 2, {'split': split, 'text': 'This is text in version 2',
'list': ['Hello', 'Bonjour', 'Hola'], 'dict': [{'dict_text':
'Hello world!', 'dict_int': '2'}, {'dict_text':
'langchain is cool', 'dict_int': str(123)}]}
|
This function returns the examples.
Args:
split (`string`):
Split to process
name (`string`):
Name of dataset, as defined in the BuilderConfig
|
load_memory_variables
|
"""Return key-value pairs given the text input to the chain."""
queries = inputs.get(self.queries_key)
now = inputs.get(self.now_key)
if queries is not None:
relevant_memories = [mem for query in queries for mem in self.
fetch_memories(query, now=now)]
return {self.relevant_memories_key: self.format_memories_detail(
relevant_memories), self.relevant_memories_simple_key: self.
format_memories_simple(relevant_memories)}
most_recent_memories_token = inputs.get(self.most_recent_memories_token_key)
if most_recent_memories_token is not None:
return {self.most_recent_memories_key: self._get_memories_until_limit(
most_recent_memories_token)}
return {}
|
def load_memory_variables(self, inputs: Dict[str, Any]) ->Dict[str, str]:
"""Return key-value pairs given the text input to the chain."""
queries = inputs.get(self.queries_key)
now = inputs.get(self.now_key)
if queries is not None:
relevant_memories = [mem for query in queries for mem in self.
fetch_memories(query, now=now)]
return {self.relevant_memories_key: self.format_memories_detail(
relevant_memories), self.relevant_memories_simple_key: self.
format_memories_simple(relevant_memories)}
most_recent_memories_token = inputs.get(self.most_recent_memories_token_key
)
if most_recent_memories_token is not None:
return {self.most_recent_memories_key: self.
_get_memories_until_limit(most_recent_memories_token)}
return {}
|
Return key-value pairs given the text input to the chain.
|
test_similarity_search_exact_search_distance_dot_product
|
"""Test end to end construction and search with metadata."""
texts = ['foo', 'bar', 'baz']
docsearch = ElasticsearchStore.from_texts(texts, FakeEmbeddings(), **
elasticsearch_connection, index_name=index_name, strategy=
ElasticsearchStore.ExactRetrievalStrategy(), distance_strategy=
'DOT_PRODUCT')
def assert_query(query_body: dict, query: str) ->dict:
assert query_body == {'query': {'script_score': {'query': {'match_all':
{}}, 'script': {'source':
"""
double value = dotProduct(params.query_vector, 'vector');
return sigmoid(1, Math.E, -value);
"""
, 'params': {'query_vector': [1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0,
1.0, 1.0, 0.0]}}}}}
return query_body
output = docsearch.similarity_search('foo', k=1, custom_query=assert_query)
assert output == [Document(page_content='foo')]
|
def test_similarity_search_exact_search_distance_dot_product(self,
elasticsearch_connection: dict, index_name: str) ->None:
"""Test end to end construction and search with metadata."""
texts = ['foo', 'bar', 'baz']
docsearch = ElasticsearchStore.from_texts(texts, FakeEmbeddings(), **
elasticsearch_connection, index_name=index_name, strategy=
ElasticsearchStore.ExactRetrievalStrategy(), distance_strategy=
'DOT_PRODUCT')
def assert_query(query_body: dict, query: str) ->dict:
assert query_body == {'query': {'script_score': {'query': {
'match_all': {}}, 'script': {'source':
"""
double value = dotProduct(params.query_vector, 'vector');
return sigmoid(1, Math.E, -value);
"""
, 'params': {'query_vector': [1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0,
1.0, 1.0, 0.0]}}}}}
return query_body
output = docsearch.similarity_search('foo', k=1, custom_query=assert_query)
assert output == [Document(page_content='foo')]
|
Test end to end construction and search with metadata.
|
_default_params
|
"""Get the default parameters for calling VolcEngineMaas API."""
normal_params = {'top_p': self.top_p, 'temperature': self.temperature}
return {**normal_params, **self.model_kwargs}
|
@property
def _default_params(self) ->Dict[str, Any]:
"""Get the default parameters for calling VolcEngineMaas API."""
normal_params = {'top_p': self.top_p, 'temperature': self.temperature}
return {**normal_params, **self.model_kwargs}
|
Get the default parameters for calling VolcEngineMaas API.
|
_convert_to_message
|
"""Instantiate a message from a variety of message formats.
The message format can be one of the following:
- BaseMessagePromptTemplate
- BaseMessage
- 2-tuple of (role string, template); e.g., ("human", "{user_input}")
- 2-tuple of (message class, template)
- string: shorthand for ("human", template); e.g., "{user_input}"
Args:
message: a representation of a message in one of the supported formats
Returns:
an instance of a message or a message template
"""
if isinstance(message, (BaseMessagePromptTemplate, BaseChatPromptTemplate)):
_message: Union[BaseMessage, BaseMessagePromptTemplate,
BaseChatPromptTemplate] = message
elif isinstance(message, BaseMessage):
_message = message
elif isinstance(message, str):
_message = _create_template_from_message_type('human', message)
elif isinstance(message, tuple):
if len(message) != 2:
raise ValueError(f'Expected 2-tuple of (role, template), got {message}'
)
message_type_str, template = message
if isinstance(message_type_str, str):
_message = _create_template_from_message_type(message_type_str,
template)
else:
_message = message_type_str(prompt=PromptTemplate.from_template(
template))
else:
raise NotImplementedError(f'Unsupported message type: {type(message)}')
return _message
|
def _convert_to_message(message: MessageLikeRepresentation) ->Union[
BaseMessage, BaseMessagePromptTemplate, BaseChatPromptTemplate]:
"""Instantiate a message from a variety of message formats.
The message format can be one of the following:
- BaseMessagePromptTemplate
- BaseMessage
- 2-tuple of (role string, template); e.g., ("human", "{user_input}")
- 2-tuple of (message class, template)
- string: shorthand for ("human", template); e.g., "{user_input}"
Args:
message: a representation of a message in one of the supported formats
Returns:
an instance of a message or a message template
"""
if isinstance(message, (BaseMessagePromptTemplate, BaseChatPromptTemplate)
):
_message: Union[BaseMessage, BaseMessagePromptTemplate,
BaseChatPromptTemplate] = message
elif isinstance(message, BaseMessage):
_message = message
elif isinstance(message, str):
_message = _create_template_from_message_type('human', message)
elif isinstance(message, tuple):
if len(message) != 2:
raise ValueError(
f'Expected 2-tuple of (role, template), got {message}')
message_type_str, template = message
if isinstance(message_type_str, str):
_message = _create_template_from_message_type(message_type_str,
template)
else:
_message = message_type_str(prompt=PromptTemplate.from_template
(template))
else:
raise NotImplementedError(f'Unsupported message type: {type(message)}')
return _message
|
Instantiate a message from a variety of message formats.
The message format can be one of the following:
- BaseMessagePromptTemplate
- BaseMessage
- 2-tuple of (role string, template); e.g., ("human", "{user_input}")
- 2-tuple of (message class, template)
- string: shorthand for ("human", template); e.g., "{user_input}"
Args:
message: a representation of a message in one of the supported formats
Returns:
an instance of a message or a message template
|
test_get_access_token
|
output = ClickupAPIWrapper.get_access_token('oauth_client_id',
'oauth_client_secret', 'code')
assert output is None
|
def test_get_access_token() ->None:
output = ClickupAPIWrapper.get_access_token('oauth_client_id',
'oauth_client_secret', 'code')
assert output is None
| null |
headers
|
"""Return the common headers."""
return {'content-type': 'application/json', 'X-RapidAPI-Key': self.api_key,
'X-RapidAPI-Host': SemaDB.HOST}
|
@property
def headers(self) ->dict:
"""Return the common headers."""
return {'content-type': 'application/json', 'X-RapidAPI-Key': self.
api_key, 'X-RapidAPI-Host': SemaDB.HOST}
|
Return the common headers.
|
_load_llm_requests_chain
|
if 'llm_chain' in config:
llm_chain_config = config.pop('llm_chain')
llm_chain = load_chain_from_config(llm_chain_config)
elif 'llm_chain_path' in config:
llm_chain = load_chain(config.pop('llm_chain_path'))
else:
raise ValueError('One of `llm_chain` or `llm_chain_path` must be present.')
if 'requests_wrapper' in kwargs:
requests_wrapper = kwargs.pop('requests_wrapper')
return LLMRequestsChain(llm_chain=llm_chain, requests_wrapper=
requests_wrapper, **config)
else:
return LLMRequestsChain(llm_chain=llm_chain, **config)
|
def _load_llm_requests_chain(config: dict, **kwargs: Any) ->LLMRequestsChain:
if 'llm_chain' in config:
llm_chain_config = config.pop('llm_chain')
llm_chain = load_chain_from_config(llm_chain_config)
elif 'llm_chain_path' in config:
llm_chain = load_chain(config.pop('llm_chain_path'))
else:
raise ValueError(
'One of `llm_chain` or `llm_chain_path` must be present.')
if 'requests_wrapper' in kwargs:
requests_wrapper = kwargs.pop('requests_wrapper')
return LLMRequestsChain(llm_chain=llm_chain, requests_wrapper=
requests_wrapper, **config)
else:
return LLMRequestsChain(llm_chain=llm_chain, **config)
| null |
_call
|
if self.streaming:
completion = ''
for chunk in self._stream(prompt, stop, run_manager, **kwargs):
completion += chunk.text
return completion
invocation_params = self._invocation_params(stop, **kwargs)
res = self.client.generate(prompt, **invocation_params)
for stop_seq in invocation_params['stop_sequences']:
if stop_seq in res.generated_text:
res.generated_text = res.generated_text[:res.generated_text.index(
stop_seq)]
return res.generated_text
|
def _call(self, prompt: str, stop: Optional[List[str]]=None, run_manager:
Optional[CallbackManagerForLLMRun]=None, **kwargs: Any) ->str:
if self.streaming:
completion = ''
for chunk in self._stream(prompt, stop, run_manager, **kwargs):
completion += chunk.text
return completion
invocation_params = self._invocation_params(stop, **kwargs)
res = self.client.generate(prompt, **invocation_params)
for stop_seq in invocation_params['stop_sequences']:
if stop_seq in res.generated_text:
res.generated_text = res.generated_text[:res.generated_text.
index(stop_seq)]
return res.generated_text
| null |
test_milvus_no_drop
|
"""Test end to end construction and MRR search."""
texts = ['foo', 'bar', 'baz']
metadatas = [{'page': i} for i in range(len(texts))]
docsearch = _milvus_from_texts(metadatas=metadatas)
del docsearch
docsearch = _milvus_from_texts(metadatas=metadatas, drop=False)
output = docsearch.similarity_search('foo', k=10)
assert len(output) == 6
|
def test_milvus_no_drop() ->None:
"""Test end to end construction and MRR search."""
texts = ['foo', 'bar', 'baz']
metadatas = [{'page': i} for i in range(len(texts))]
docsearch = _milvus_from_texts(metadatas=metadatas)
del docsearch
docsearch = _milvus_from_texts(metadatas=metadatas, drop=False)
output = docsearch.similarity_search('foo', k=10)
assert len(output) == 6
|
Test end to end construction and MRR search.
|
test_delete_not_supported_for_delta_sync_index
|
index = mock_index(index_details)
vectorsearch = default_databricks_vector_search(index)
with pytest.raises(ValueError) as ex:
vectorsearch.delete(['some id'])
assert '`delete` is only supported for direct-access index.' in str(ex.value)
|
@pytest.mark.requires('databricks', 'databricks.vector_search')
@pytest.mark.parametrize('index_details', [
DELTA_SYNC_INDEX_MANAGED_EMBEDDINGS,
DELTA_SYNC_INDEX_SELF_MANAGED_EMBEDDINGS])
def test_delete_not_supported_for_delta_sync_index(index_details: dict) ->None:
index = mock_index(index_details)
vectorsearch = default_databricks_vector_search(index)
with pytest.raises(ValueError) as ex:
vectorsearch.delete(['some id'])
assert '`delete` is only supported for direct-access index.' in str(ex.
value)
| null |
on_agent_finish
|
pass
|
def on_agent_finish(self, finish: AgentFinish, **kwargs: Any) ->None:
pass
| null |
__exit__
|
return False
|
def __exit__(self, exc_type: Any, exc_val: Any, exc_tb: Any) ->Literal[False]:
return False
| null |
get_tools
|
"""Get the tools in the toolkit."""
list_sql_database_tool = ListSQLDatabaseTool(db=self.db)
info_sql_database_tool_description = (
f'Input to this tool is a comma-separated list of tables, output is the schema and sample rows for those tables. Be sure that the tables actually exist by calling {list_sql_database_tool.name} first! Example Input: table1, table2, table3'
)
info_sql_database_tool = InfoSQLDatabaseTool(db=self.db, description=
info_sql_database_tool_description)
query_sql_database_tool_description = (
f"Input to this tool is a detailed and correct SQL query, output is a result from the database. If the query is not correct, an error message will be returned. If an error is returned, rewrite the query, check the query, and try again. If you encounter an issue with Unknown column 'xxxx' in 'field list', use {info_sql_database_tool.name} to query the correct table fields."
)
query_sql_database_tool = QuerySQLDataBaseTool(db=self.db, description=
query_sql_database_tool_description)
query_sql_checker_tool_description = (
f'Use this tool to double check if your query is correct before executing it. Always use this tool before executing a query with {query_sql_database_tool.name}!'
)
query_sql_checker_tool = QuerySQLCheckerTool(db=self.db, llm=self.llm,
description=query_sql_checker_tool_description)
return [query_sql_database_tool, info_sql_database_tool,
list_sql_database_tool, query_sql_checker_tool]
|
def get_tools(self) ->List[BaseTool]:
"""Get the tools in the toolkit."""
list_sql_database_tool = ListSQLDatabaseTool(db=self.db)
info_sql_database_tool_description = (
f'Input to this tool is a comma-separated list of tables, output is the schema and sample rows for those tables. Be sure that the tables actually exist by calling {list_sql_database_tool.name} first! Example Input: table1, table2, table3'
)
info_sql_database_tool = InfoSQLDatabaseTool(db=self.db, description=
info_sql_database_tool_description)
query_sql_database_tool_description = (
f"Input to this tool is a detailed and correct SQL query, output is a result from the database. If the query is not correct, an error message will be returned. If an error is returned, rewrite the query, check the query, and try again. If you encounter an issue with Unknown column 'xxxx' in 'field list', use {info_sql_database_tool.name} to query the correct table fields."
)
query_sql_database_tool = QuerySQLDataBaseTool(db=self.db, description=
query_sql_database_tool_description)
query_sql_checker_tool_description = (
f'Use this tool to double check if your query is correct before executing it. Always use this tool before executing a query with {query_sql_database_tool.name}!'
)
query_sql_checker_tool = QuerySQLCheckerTool(db=self.db, llm=self.llm,
description=query_sql_checker_tool_description)
return [query_sql_database_tool, info_sql_database_tool,
list_sql_database_tool, query_sql_checker_tool]
|
Get the tools in the toolkit.
|
_load_llm_chain
|
"""Load LLM chain from config dict."""
if 'llm' in config:
llm_config = config.pop('llm')
llm = load_llm_from_config(llm_config)
elif 'llm_path' in config:
llm = load_llm(config.pop('llm_path'))
else:
raise ValueError('One of `llm` or `llm_path` must be present.')
if 'prompt' in config:
prompt_config = config.pop('prompt')
prompt = load_prompt_from_config(prompt_config)
elif 'prompt_path' in config:
prompt = load_prompt(config.pop('prompt_path'))
else:
raise ValueError('One of `prompt` or `prompt_path` must be present.')
_load_output_parser(config)
return LLMChain(llm=llm, prompt=prompt, **config)
|
def _load_llm_chain(config: dict, **kwargs: Any) ->LLMChain:
"""Load LLM chain from config dict."""
if 'llm' in config:
llm_config = config.pop('llm')
llm = load_llm_from_config(llm_config)
elif 'llm_path' in config:
llm = load_llm(config.pop('llm_path'))
else:
raise ValueError('One of `llm` or `llm_path` must be present.')
if 'prompt' in config:
prompt_config = config.pop('prompt')
prompt = load_prompt_from_config(prompt_config)
elif 'prompt_path' in config:
prompt = load_prompt(config.pop('prompt_path'))
else:
raise ValueError('One of `prompt` or `prompt_path` must be present.')
_load_output_parser(config)
return LLMChain(llm=llm, prompt=prompt, **config)
|
Load LLM chain from config dict.
|
_type
|
return 'retry'
|
@property
def _type(self) ->str:
return 'retry'
| null |
max_marginal_relevance_search_by_vector
|
"""Return docs selected using the maximal marginal relevance.
Maximal marginal relevance optimizes for similarity to query AND diversity
among selected documents.
Args:
embedding: Embedding to look up documents similar to.
k: Number of Documents to return.
fetch_k: Number of Documents to fetch to pass to MMR algorithm.
lambda_mult: Number between 0 and 1 that determines the degree
of diversity among the results with 0 corresponding
to maximum diversity and 1 to minimum diversity.
Returns:
List of Documents selected by maximal marginal relevance.
"""
search_metadata = self._filter_to_metadata(filter)
prefetchHits = self.table.search(embedding_vector=embedding, top_k=fetch_k,
metric='cos', metric_threshold=None, metadata=search_metadata)
mmrChosenIndices = maximal_marginal_relevance(np.array(embedding, dtype=np.
float32), [pfHit['embedding_vector'] for pfHit in prefetchHits], k=k,
lambda_mult=lambda_mult)
mmrHits = [pfHit for pfIndex, pfHit in enumerate(prefetchHits) if pfIndex in
mmrChosenIndices]
return [Document(page_content=hit['document'], metadata=hit['metadata']) for
hit in mmrHits]
|
def max_marginal_relevance_search_by_vector(self, embedding: List[float], k:
int=4, fetch_k: int=20, lambda_mult: float=0.5, filter: Optional[Dict[
str, str]]=None, **kwargs: Any) ->List[Document]:
"""Return docs selected using the maximal marginal relevance.
Maximal marginal relevance optimizes for similarity to query AND diversity
among selected documents.
Args:
embedding: Embedding to look up documents similar to.
k: Number of Documents to return.
fetch_k: Number of Documents to fetch to pass to MMR algorithm.
lambda_mult: Number between 0 and 1 that determines the degree
of diversity among the results with 0 corresponding
to maximum diversity and 1 to minimum diversity.
Returns:
List of Documents selected by maximal marginal relevance.
"""
search_metadata = self._filter_to_metadata(filter)
prefetchHits = self.table.search(embedding_vector=embedding, top_k=
fetch_k, metric='cos', metric_threshold=None, metadata=search_metadata)
mmrChosenIndices = maximal_marginal_relevance(np.array(embedding, dtype
=np.float32), [pfHit['embedding_vector'] for pfHit in prefetchHits],
k=k, lambda_mult=lambda_mult)
mmrHits = [pfHit for pfIndex, pfHit in enumerate(prefetchHits) if
pfIndex in mmrChosenIndices]
return [Document(page_content=hit['document'], metadata=hit['metadata']
) for hit in mmrHits]
|
Return docs selected using the maximal marginal relevance.
Maximal marginal relevance optimizes for similarity to query AND diversity
among selected documents.
Args:
embedding: Embedding to look up documents similar to.
k: Number of Documents to return.
fetch_k: Number of Documents to fetch to pass to MMR algorithm.
lambda_mult: Number between 0 and 1 that determines the degree
of diversity among the results with 0 corresponding
to maximum diversity and 1 to minimum diversity.
Returns:
List of Documents selected by maximal marginal relevance.
|
_index_doc
|
request: dict[str, Any] = {}
request['customer_id'] = self._vectara_customer_id
request['corpus_id'] = self._vectara_corpus_id
request['document'] = doc
api_endpoint = ('https://api.vectara.io/v1/core/index' if use_core_api else
'https://api.vectara.io/v1/index')
response = self._session.post(headers=self._get_post_headers(), url=
api_endpoint, data=json.dumps(request), timeout=self.
vectara_api_timeout, verify=True)
status_code = response.status_code
result = response.json()
status_str = result['status']['code'] if 'status' in result else None
if status_code == 409 or status_str and status_str == 'ALREADY_EXISTS':
return 'E_ALREADY_EXISTS'
elif status_str and status_str == 'FORBIDDEN':
return 'E_NO_PERMISSIONS'
else:
return 'E_SUCCEEDED'
|
def _index_doc(self, doc: dict, use_core_api: bool=False) ->str:
request: dict[str, Any] = {}
request['customer_id'] = self._vectara_customer_id
request['corpus_id'] = self._vectara_corpus_id
request['document'] = doc
api_endpoint = ('https://api.vectara.io/v1/core/index' if use_core_api else
'https://api.vectara.io/v1/index')
response = self._session.post(headers=self._get_post_headers(), url=
api_endpoint, data=json.dumps(request), timeout=self.
vectara_api_timeout, verify=True)
status_code = response.status_code
result = response.json()
status_str = result['status']['code'] if 'status' in result else None
if status_code == 409 or status_str and status_str == 'ALREADY_EXISTS':
return 'E_ALREADY_EXISTS'
elif status_str and status_str == 'FORBIDDEN':
return 'E_NO_PERMISSIONS'
else:
return 'E_SUCCEEDED'
| null |
_document_from_scored_point
|
return Document(page_content=scored_point.payload.get(content_payload_key),
metadata=scored_point.payload.get(metadata_payload_key) or {})
|
@classmethod
def _document_from_scored_point(cls, scored_point: Any, content_payload_key:
str, metadata_payload_key: str) ->Document:
return Document(page_content=scored_point.payload.get(
content_payload_key), metadata=scored_point.payload.get(
metadata_payload_key) or {})
| null |
py_anext
|
"""Pure-Python implementation of anext() for testing purposes.
Closely matches the builtin anext() C implementation.
Can be used to compare the built-in implementation of the inner
coroutines machinery to C-implementation of __anext__() and send()
or throw() on the returned generator.
"""
try:
__anext__ = cast(Callable[[AsyncIterator[T]], Awaitable[T]], type(
iterator).__anext__)
except AttributeError:
raise TypeError(f'{iterator!r} is not an async iterator')
if default is _no_default:
return __anext__(iterator)
async def anext_impl() ->Union[T, Any]:
try:
return await __anext__(iterator)
except StopAsyncIteration:
return default
return anext_impl()
|
def py_anext(iterator: AsyncIterator[T], default: Union[T, Any]=_no_default
) ->Awaitable[Union[T, None, Any]]:
"""Pure-Python implementation of anext() for testing purposes.
Closely matches the builtin anext() C implementation.
Can be used to compare the built-in implementation of the inner
coroutines machinery to C-implementation of __anext__() and send()
or throw() on the returned generator.
"""
try:
__anext__ = cast(Callable[[AsyncIterator[T]], Awaitable[T]], type(
iterator).__anext__)
except AttributeError:
raise TypeError(f'{iterator!r} is not an async iterator')
if default is _no_default:
return __anext__(iterator)
async def anext_impl() ->Union[T, Any]:
try:
return await __anext__(iterator)
except StopAsyncIteration:
return default
return anext_impl()
|
Pure-Python implementation of anext() for testing purposes.
Closely matches the builtin anext() C implementation.
Can be used to compare the built-in implementation of the inner
coroutines machinery to C-implementation of __anext__() and send()
or throw() on the returned generator.
|
_format_attribute_info
|
info_dicts = {}
for i in info:
i_dict = dict(i)
info_dicts[i_dict.pop('name')] = i_dict
return json.dumps(info_dicts, indent=4).replace('{', '{{').replace('}', '}}')
|
def _format_attribute_info(info: Sequence[Union[AttributeInfo, dict]]) ->str:
info_dicts = {}
for i in info:
i_dict = dict(i)
info_dicts[i_dict.pop('name')] = i_dict
return json.dumps(info_dicts, indent=4).replace('{', '{{').replace('}',
'}}')
| null |
_identifying_params
|
return {}
|
@property
def _identifying_params(self) ->Dict[str, Any]:
return {}
| null |
_get_all_table_names
|
rows = self._spark.sql('SHOW TABLES').select('tableName').collect()
return list(map(lambda row: row.tableName, rows))
|
def _get_all_table_names(self) ->Iterable[str]:
rows = self._spark.sql('SHOW TABLES').select('tableName').collect()
return list(map(lambda row: row.tableName, rows))
| null |
semantic_hybrid_search
|
"""
Returns the most similar indexed documents to the query text.
Args:
query (str): The query text for which to find similar documents.
k (int): The number of documents to return. Default is 4.
Returns:
List[Document]: A list of documents that are most similar to the query text.
"""
docs_and_scores = self.semantic_hybrid_search_with_score_and_rerank(query,
k=k, filters=kwargs.get('filters', None))
return [doc for doc, _, _ in docs_and_scores]
|
def semantic_hybrid_search(self, query: str, k: int=4, **kwargs: Any) ->List[
Document]:
"""
Returns the most similar indexed documents to the query text.
Args:
query (str): The query text for which to find similar documents.
k (int): The number of documents to return. Default is 4.
Returns:
List[Document]: A list of documents that are most similar to the query text.
"""
docs_and_scores = self.semantic_hybrid_search_with_score_and_rerank(query,
k=k, filters=kwargs.get('filters', None))
return [doc for doc, _, _ in docs_and_scores]
|
Returns the most similar indexed documents to the query text.
Args:
query (str): The query text for which to find similar documents.
k (int): The number of documents to return. Default is 4.
Returns:
List[Document]: A list of documents that are most similar to the query text.
|
lazy_load
|
"""A lazy loader for Documents."""
for file_pattern in self.patterns:
for p in self.file_path.rglob(file_pattern):
if p.is_dir():
continue
with open(p, encoding=self.encoding, errors=self.errors) as f:
text = self._clean_data(f.read())
yield Document(page_content=text, metadata={'source': str(p)})
|
def lazy_load(self) ->Iterator[Document]:
"""A lazy loader for Documents."""
for file_pattern in self.patterns:
for p in self.file_path.rglob(file_pattern):
if p.is_dir():
continue
with open(p, encoding=self.encoding, errors=self.errors) as f:
text = self._clean_data(f.read())
yield Document(page_content=text, metadata={'source': str(p)})
|
A lazy loader for Documents.
|
validate_environment
|
"""Validate that api key and python package exists in environment."""
values['anthropic_api_key'] = convert_to_secret_str(get_from_dict_or_env(
values, 'anthropic_api_key', 'ANTHROPIC_API_KEY'))
values['anthropic_api_url'] = get_from_dict_or_env(values,
'anthropic_api_url', 'ANTHROPIC_API_URL', default=
'https://api.anthropic.com')
try:
import anthropic
check_package_version('anthropic', gte_version='0.3')
values['client'] = anthropic.Anthropic(base_url=values[
'anthropic_api_url'], api_key=values['anthropic_api_key'].
get_secret_value(), timeout=values['default_request_timeout'])
values['async_client'] = anthropic.AsyncAnthropic(base_url=values[
'anthropic_api_url'], api_key=values['anthropic_api_key'].
get_secret_value(), timeout=values['default_request_timeout'])
values['HUMAN_PROMPT'] = anthropic.HUMAN_PROMPT
values['AI_PROMPT'] = anthropic.AI_PROMPT
values['count_tokens'] = values['client'].count_tokens
except ImportError:
raise ImportError(
'Could not import anthropic python package. Please it install it with `pip install anthropic`.'
)
return values
|
@root_validator()
def validate_environment(cls, values: Dict) ->Dict:
"""Validate that api key and python package exists in environment."""
values['anthropic_api_key'] = convert_to_secret_str(get_from_dict_or_env
(values, 'anthropic_api_key', 'ANTHROPIC_API_KEY'))
values['anthropic_api_url'] = get_from_dict_or_env(values,
'anthropic_api_url', 'ANTHROPIC_API_URL', default=
'https://api.anthropic.com')
try:
import anthropic
check_package_version('anthropic', gte_version='0.3')
values['client'] = anthropic.Anthropic(base_url=values[
'anthropic_api_url'], api_key=values['anthropic_api_key'].
get_secret_value(), timeout=values['default_request_timeout'])
values['async_client'] = anthropic.AsyncAnthropic(base_url=values[
'anthropic_api_url'], api_key=values['anthropic_api_key'].
get_secret_value(), timeout=values['default_request_timeout'])
values['HUMAN_PROMPT'] = anthropic.HUMAN_PROMPT
values['AI_PROMPT'] = anthropic.AI_PROMPT
values['count_tokens'] = values['client'].count_tokens
except ImportError:
raise ImportError(
'Could not import anthropic python package. Please it install it with `pip install anthropic`.'
)
return values
|
Validate that api key and python package exists in environment.
|
on_tool_start
|
"""Run when tool starts running."""
self.step += 1
self.tool_starts += 1
self.starts += 1
resp: Dict[str, Any] = {}
resp.update({'action': 'on_tool_start', 'input_str': input_str})
resp.update(flatten_dict(serialized))
resp.update(self.get_custom_callback_meta())
self.deck.append(self.markdown_renderer().to_html('### Tool Start'))
self.deck.append(self.table_renderer().to_html(self.pandas.DataFrame([resp]
)) + '\n')
|
def on_tool_start(self, serialized: Dict[str, Any], input_str: str, **
kwargs: Any) ->None:
"""Run when tool starts running."""
self.step += 1
self.tool_starts += 1
self.starts += 1
resp: Dict[str, Any] = {}
resp.update({'action': 'on_tool_start', 'input_str': input_str})
resp.update(flatten_dict(serialized))
resp.update(self.get_custom_callback_meta())
self.deck.append(self.markdown_renderer().to_html('### Tool Start'))
self.deck.append(self.table_renderer().to_html(self.pandas.DataFrame([
resp])) + '\n')
|
Run when tool starts running.
|
test_loader_detect_encoding_text
|
"""Test text loader."""
path = Path(__file__).parent.parent / 'examples'
files = path.glob('**/*.txt')
loader = DirectoryLoader(str(path), glob='**/*.txt', loader_cls=TextLoader)
loader_detect_encoding = DirectoryLoader(str(path), glob='**/*.txt',
loader_kwargs={'autodetect_encoding': True}, loader_cls=TextLoader)
with pytest.raises((UnicodeDecodeError, RuntimeError)):
loader.load()
docs = loader_detect_encoding.load()
assert len(docs) == len(list(files))
|
@pytest.mark.requires('chardet')
def test_loader_detect_encoding_text() -> None:
    """Test text loader."""
    examples_dir = Path(__file__).parent.parent / 'examples'
    txt_files = list(examples_dir.glob('**/*.txt'))
    plain_loader = DirectoryLoader(str(examples_dir), glob='**/*.txt',
        loader_cls=TextLoader)
    detecting_loader = DirectoryLoader(str(examples_dir), glob='**/*.txt',
        loader_kwargs={'autodetect_encoding': True}, loader_cls=TextLoader)
    # Without encoding detection at least one example file fails to decode.
    with pytest.raises((UnicodeDecodeError, RuntimeError)):
        plain_loader.load()
    loaded = detecting_loader.load()
    assert len(loaded) == len(txt_files)
|
Test text loader.
|
_ideate
|
"""Generate n_ideas ideas as response to user prompt."""
llm = self.ideation_llm if self.ideation_llm else self.llm
prompt = self.ideation_prompt().format_prompt(**self.history.
ideation_prompt_inputs())
callbacks = run_manager.get_child() if run_manager else None
if llm:
ideas = [self._get_text_from_llm_result(llm.generate_prompt([prompt],
stop, callbacks), step='ideate') for _ in range(self.n_ideas)]
for i, idea in enumerate(ideas):
_colored_text = get_colored_text(idea, 'blue')
_text = f'Idea {i + 1}:\n' + _colored_text
if run_manager:
run_manager.on_text(_text, end='\n', verbose=self.verbose)
return ideas
else:
raise ValueError('llm is none, which should never happen')
|
def _ideate(self, stop: Optional[List[str]]=None, run_manager: Optional[
    CallbackManagerForChainRun]=None) -> List[str]:
    """Generate n_ideas ideas as response to user prompt."""
    llm = self.ideation_llm or self.llm
    prompt = self.ideation_prompt().format_prompt(**self.history.
        ideation_prompt_inputs())
    callbacks = run_manager.get_child() if run_manager else None
    if not llm:
        raise ValueError('llm is none, which should never happen')
    ideas: List[str] = []
    for _ in range(self.n_ideas):
        result = llm.generate_prompt([prompt], stop, callbacks)
        ideas.append(self._get_text_from_llm_result(result, step='ideate'))
    # Echo each idea (colored) through the run manager when one is attached.
    for index, idea in enumerate(ideas):
        colored = get_colored_text(idea, 'blue')
        labelled = f'Idea {index + 1}:\n' + colored
        if run_manager:
            run_manager.on_text(labelled, end='\n', verbose=self.verbose)
    return ideas
|
Generate n_ideas ideas as response to user prompt.
|
analyze_text
|
"""Analyze text using textstat and spacy.
Parameters:
text (str): The text to analyze.
nlp (spacy.lang): The spacy language model to use for visualization.
Returns:
(dict): A dictionary containing the complexity metrics and visualization
files serialized to HTML string.
"""
resp: Dict[str, Any] = {}
if textstat is not None:
text_complexity_metrics = {'flesch_reading_ease': textstat.
flesch_reading_ease(text), 'flesch_kincaid_grade': textstat.
flesch_kincaid_grade(text), 'smog_index': textstat.smog_index(text),
'coleman_liau_index': textstat.coleman_liau_index(text),
'automated_readability_index': textstat.automated_readability_index
(text), 'dale_chall_readability_score': textstat.
dale_chall_readability_score(text), 'difficult_words': textstat.
difficult_words(text), 'linsear_write_formula': textstat.
linsear_write_formula(text), 'gunning_fog': textstat.gunning_fog(
text), 'fernandez_huerta': textstat.fernandez_huerta(text),
'szigriszt_pazos': textstat.szigriszt_pazos(text),
'gutierrez_polini': textstat.gutierrez_polini(text), 'crawford':
textstat.crawford(text), 'gulpease_index': textstat.gulpease_index(
text), 'osman': textstat.osman(text)}
resp.update({'text_complexity_metrics': text_complexity_metrics})
resp.update(text_complexity_metrics)
if nlp is not None:
spacy = import_spacy()
doc = nlp(text)
dep_out = spacy.displacy.render(doc, style='dep', jupyter=False, page=True)
ent_out = spacy.displacy.render(doc, style='ent', jupyter=False, page=True)
text_visualizations = {'dependency_tree': dep_out, 'entities': ent_out}
resp.update(text_visualizations)
return resp
|
def analyze_text(text: str, nlp: Any=None, textstat: Any=None) -> dict:
    """Analyze text using textstat and spacy.
    Parameters:
        text (str): The text to analyze.
        nlp (spacy.lang): The spacy language model to use for visualization.
    Returns:
        (dict): A dictionary containing the complexity metrics and visualization
            files serialized to HTML string.
    """
    resp: Dict[str, Any] = {}
    if textstat is not None:
        # Evaluate every readability metric via a name -> callable table so
        # the key order of the result stays fixed.
        metric_fns = [('flesch_reading_ease', textstat.flesch_reading_ease),
            ('flesch_kincaid_grade', textstat.flesch_kincaid_grade),
            ('smog_index', textstat.smog_index),
            ('coleman_liau_index', textstat.coleman_liau_index),
            ('automated_readability_index', textstat.
            automated_readability_index),
            ('dale_chall_readability_score', textstat.
            dale_chall_readability_score),
            ('difficult_words', textstat.difficult_words),
            ('linsear_write_formula', textstat.linsear_write_formula),
            ('gunning_fog', textstat.gunning_fog),
            ('fernandez_huerta', textstat.fernandez_huerta),
            ('szigriszt_pazos', textstat.szigriszt_pazos),
            ('gutierrez_polini', textstat.gutierrez_polini),
            ('crawford', textstat.crawford),
            ('gulpease_index', textstat.gulpease_index),
            ('osman', textstat.osman)]
        text_complexity_metrics = {name: fn(text) for name, fn in metric_fns}
        resp['text_complexity_metrics'] = text_complexity_metrics
        resp.update(text_complexity_metrics)
    if nlp is not None:
        spacy = import_spacy()
        doc = nlp(text)
        dep_out = spacy.displacy.render(doc, style='dep', jupyter=False,
            page=True)
        ent_out = spacy.displacy.render(doc, style='ent', jupyter=False,
            page=True)
        resp.update({'dependency_tree': dep_out, 'entities': ent_out})
    return resp
|
Analyze text using textstat and spacy.
Parameters:
text (str): The text to analyze.
nlp (spacy.lang): The spacy language model to use for visualization.
Returns:
(dict): A dictionary containing the complexity metrics and visualization
files serialized to HTML string.
|
get
|
res = self.redis_client.getex(f'{self.full_key_prefix}:{key}', ex=self.
recall_ttl) or default or ''
logger.debug(f"Upstash Redis MEM get '{self.full_key_prefix}:{key}': '{res}'")
return res
|
def get(self, key: str, default: Optional[str]=None) -> Optional[str]:
    """Fetch ``key`` (refreshing its TTL via GETEX), falling back to ``default`` or ''."""
    full_key = f'{self.full_key_prefix}:{key}'
    stored = self.redis_client.getex(full_key, ex=self.recall_ttl)
    res = stored or default or ''
    logger.debug(f"Upstash Redis MEM get '{full_key}': '{res}'")
    return res
| null |
on_chat_model_start
|
"""Run when LLM starts running."""
|
def on_chat_model_start(self, serialized: Dict[str, Any], messages: List[
    List[BaseMessage]], **kwargs: Any) ->None:
    """Run when LLM starts running.

    Intentionally a no-op: this handler does not act on chat-model start
    events.
    """
|
Run when LLM starts running.
|
test_nlpcloud_call
|
"""Test valid call to nlpcloud."""
llm = NLPCloud(max_length=10)
output = llm('Say foo:')
assert isinstance(output, str)
|
def test_nlpcloud_call() -> None:
    """Test valid call to nlpcloud."""
    model = NLPCloud(max_length=10)
    result = model('Say foo:')
    assert isinstance(result, str)
|
Test valid call to nlpcloud.
|
__init__
|
"""Initialize callback handler."""
super().__init__()
arthurai = _lazy_load_arthur()
Stage = arthurai.common.constants.Stage
ValueType = arthurai.common.constants.ValueType
self.arthur_model = arthur_model
self.attr_names = set([a.name for a in self.arthur_model.get_attributes()])
self.input_attr = [x for x in self.arthur_model.get_attributes() if x.stage ==
Stage.ModelPipelineInput and x.value_type == ValueType.Unstructured_Text][0
].name
self.output_attr = [x for x in self.arthur_model.get_attributes() if x.
stage == Stage.PredictedValue and x.value_type == ValueType.
Unstructured_Text][0].name
self.token_likelihood_attr = None
if len([x for x in self.arthur_model.get_attributes() if x.value_type ==
ValueType.TokenLikelihoods]) > 0:
self.token_likelihood_attr = [x for x in self.arthur_model.
get_attributes() if x.value_type == ValueType.TokenLikelihoods][0].name
self.run_map: DefaultDict[str, Any] = defaultdict(dict)
|
def __init__(self, arthur_model: ArthurModel) ->None:
    """Initialize callback handler.

    Resolves the model's unstructured-text input/output attribute names (and
    the optional token-likelihood attribute) once up front so inference
    payloads can be keyed by name later.

    Args:
        arthur_model: The Arthur model this handler logs inferences to.
    """
    super().__init__()
    arthurai = _lazy_load_arthur()
    Stage = arthurai.common.constants.Stage
    ValueType = arthurai.common.constants.ValueType
    self.arthur_model = arthur_model
    # Fetch the attribute list exactly once: the original code called
    # get_attributes() five times, and each call may be an API round-trip.
    attributes = self.arthur_model.get_attributes()
    self.attr_names = {a.name for a in attributes}
    self.input_attr = [x for x in attributes if x.stage == Stage.
        ModelPipelineInput and x.value_type == ValueType.Unstructured_Text][0
        ].name
    self.output_attr = [x for x in attributes if x.stage == Stage.
        PredictedValue and x.value_type == ValueType.Unstructured_Text][0].name
    # Token likelihoods are optional; record the attribute name if present.
    token_attrs = [x for x in attributes if x.value_type == ValueType.
        TokenLikelihoods]
    self.token_likelihood_attr = token_attrs[0].name if token_attrs else None
    self.run_map: DefaultDict[str, Any] = defaultdict(dict)
|
Initialize callback handler.
|
_search_content_by_cql
|
url = 'rest/api/content/search'
params: Dict[str, Any] = {'cql': cql}
params.update(kwargs)
if include_archived_spaces is not None:
params['includeArchivedSpaces'] = include_archived_spaces
response = self.confluence.get(url, params=params)
return response.get('results', [])
|
def _search_content_by_cql(self, cql: str, include_archived_spaces:
    Optional[bool]=None, **kwargs: Any) -> List[dict]:
    """Query Confluence's content-search REST endpoint with a CQL expression."""
    # Extra kwargs may deliberately override 'cql', matching the original
    # params.update(kwargs) semantics.
    search_params: Dict[str, Any] = {'cql': cql, **kwargs}
    if include_archived_spaces is not None:
        search_params['includeArchivedSpaces'] = include_archived_spaces
    response = self.confluence.get('rest/api/content/search',
        params=search_params)
    return response.get('results', [])
| null |
test_parse_response_format
|
mock_response = MagicMock()
mock_response.status_code = 200
mock_response.json.return_value = [{'status': 'success', 'provider':
'microsoft', 'nsfw_likelihood': 5, 'cost': 0.001, 'label': ['offensive',
'hate_speech'], 'likelihood': [4, 5]}]
mock_post.return_value = mock_response
result = tool('some query')
assert result == """nsfw_likelihood: 5
"offensive": 4
"hate_speech": 5"""
|
def test_parse_response_format(mock_post: MagicMock) -> None:
    """The tool should render the nsfw likelihood plus per-label scores."""
    fake_response = MagicMock()
    fake_response.status_code = 200
    fake_response.json.return_value = [{'status': 'success', 'provider':
        'microsoft', 'nsfw_likelihood': 5, 'cost': 0.001, 'label': [
        'offensive', 'hate_speech'], 'likelihood': [4, 5]}]
    mock_post.return_value = fake_response
    output = tool('some query')
    assert output == 'nsfw_likelihood: 5\n"offensive": 4\n"hate_speech": 5'
| null |
test_multiple_tokens
|
assert len(_get_token_ids_default_method('a b c')) == 3
|
def test_multiple_tokens(self) ->None:
    # 'a b c' is expected to yield exactly three token ids under the
    # default token-id method.
    assert len(_get_token_ids_default_method('a b c')) == 3
| null |
add_texts
|
"""Run more texts through the embeddings and add to the retriever.
Args:
texts: Iterable of strings/URLs to add to the retriever.
Returns:
List of ids from adding the texts into the retriever.
"""
ids = []
for text in texts:
_id = self.client.add(text)
ids.append(_id)
return ids
|
def add_texts(self, texts: Iterable[str]) -> List[str]:
    """Run more texts through the embeddings and add to the retriever.
    Args:
        texts: Iterable of strings/URLs to add to the retriever.
    Returns:
        List of ids from adding the texts into the retriever.
    """
    # One client.add call per text, collecting the returned ids in order.
    return [self.client.add(text) for text in texts]
|
Run more texts through the embeddings and add to the retriever.
Args:
texts: Iterable of strings/URLs to add to the retriever.
Returns:
List of ids from adding the texts into the retriever.
|
get_input_schema
|
runnables = [self.default] + [r for _, r in self.branches] + [r for r, _ in
self.branches]
for runnable in runnables:
if runnable.get_input_schema(config).schema().get('type') is not None:
return runnable.get_input_schema(config)
return super().get_input_schema(config)
|
def get_input_schema(self, config: Optional[RunnableConfig]=None) -> Type[
    BaseModel]:
    """Return the first candidate input schema that declares a 'type'.

    Candidates are checked in order: the default runnable, each branch's
    runnable, then each branch's condition; falls back to the parent schema.
    """
    candidates = [self.default]
    candidates.extend(runnable for _, runnable in self.branches)
    candidates.extend(condition for condition, _ in self.branches)
    for candidate in candidates:
        schema = candidate.get_input_schema(config)
        if schema.schema().get('type') is not None:
            return schema
    return super().get_input_schema(config)
| null |
test_create_action_payload_preview
|
"""Test that the action payload with preview is being created correctly."""
tool = ZapierNLARunAction(action_id='test', zapier_description='test',
params_schema={'test': 'test'}, api_wrapper=ZapierNLAWrapper(
zapier_nla_api_key='test'))
payload = tool.api_wrapper._create_action_payload('some instructions',
preview_only=True)
assert payload['instructions'] == 'some instructions'
assert payload['preview_only'] is True
|
def test_create_action_payload_preview() -> None:
    """Test that the action payload with preview is being created correctly."""
    wrapper = ZapierNLAWrapper(zapier_nla_api_key='test')
    tool = ZapierNLARunAction(action_id='test', zapier_description='test',
        params_schema={'test': 'test'}, api_wrapper=wrapper)
    payload = tool.api_wrapper._create_action_payload('some instructions',
        preview_only=True)
    assert payload['preview_only'] is True
    assert payload['instructions'] == 'some instructions'
|
Test that the action payload with preview is being created correctly.
|
operator
|
return x['a'] == y['a']
|
def operator(x: dict, y: dict) -> bool:
    """Return whether the two dicts agree on their 'a' entries."""
    left, right = x['a'], y['a']
    return left == right
| null |
write
|
"""Append a piece of text to the current line."""
self.f.write(text)
|
def write(self, text):
    """Append a piece of text to the current line."""
    sink = self.f
    sink.write(text)
|
Append a piece of text to the current line.
|
test_yield_keys
|
store = RedisStore(client=redis_client, ttl=None)
redis_client.mset({'key1': b'value1', 'key2': b'value2'})
assert sorted(store.yield_keys()) == ['key1', 'key2']
assert sorted(store.yield_keys(prefix='key*')) == ['key1', 'key2']
assert sorted(store.yield_keys(prefix='lang*')) == []
|
def test_yield_keys(redis_client: Redis) -> None:
    """Exercise yield_keys with and without glob prefixes."""
    store = RedisStore(client=redis_client, ttl=None)
    redis_client.mset({'key1': b'value1', 'key2': b'value2'})
    assert sorted(store.yield_keys()) == ['key1', 'key2']
    assert sorted(store.yield_keys(prefix='key*')) == ['key1', 'key2']
    assert list(store.yield_keys(prefix='lang*')) == []
| null |
__aiter__
|
return self.receive_stream.__aiter__()
|
def __aiter__(self) ->AsyncIterator[RunLogPatch]:
    # Delegate async iteration directly to the underlying receive stream.
    return self.receive_stream.__aiter__()
| null |
from_sql_model
|
return messages_from_dict([json.loads(sql_message.message)])[0]
|
def from_sql_model(self, sql_message: Any) -> BaseMessage:
    """Deserialize a stored SQL row's JSON payload into a message object."""
    payload = json.loads(sql_message.message)
    return messages_from_dict([payload])[0]
| null |
test_all_imports
|
assert set(__all__) == set(EXPECTED_ALL)
|
def test_all_imports() -> None:
    """The module's __all__ must match the expected public API exactly."""
    assert set(EXPECTED_ALL) == set(__all__)
| null |
remove_unnecessary_lines
|
"""
Clean up the content by removing unnecessary lines.
Args:
content: A string, which may contain unnecessary lines or spaces.
Returns:
A cleaned string with unnecessary lines removed.
"""
lines = content.split('\n')
stripped_lines = [line.strip() for line in lines]
non_empty_lines = [line for line in stripped_lines if line]
cleaned_content = ' '.join(non_empty_lines)
return cleaned_content
|
@staticmethod
def remove_unnecessary_lines(content: str) ->str:
    """
    Clean up the content by removing unnecessary lines.
    Args:
        content: A string, which may contain unnecessary lines or spaces.
    Returns:
        A cleaned string with unnecessary lines removed.
    """
    # Strip every line, drop the empty ones, and re-join on single spaces.
    trimmed = (line.strip() for line in content.split('\n'))
    return ' '.join(piece for piece in trimmed if piece)
|
Clean up the content by removing unnecessary lines.
Args:
content: A string, which may contain unnecessary lines or spaces.
Returns:
A cleaned string with unnecessary lines removed.
|
max_marginal_relevance_search
|
"""Return docs selected using the maximal marginal relevance.
Maximal marginal relevance optimizes for similarity to query AND diversity
among selected documents.
Args:
query: Text to look up documents similar to.
k: Number of Documents to return. Defaults to 4.
fetch_k: Number of Documents to fetch to pass to MMR algorithm.
lambda_mult: Number between 0 and 1 that determines the degree
of diversity among the results with 0 corresponding
to maximum diversity and 1 to minimum diversity.
Defaults to 0.5.
Returns:
List of Documents selected by maximal marginal relevance.
"""
embedding = self._embedding.embed_query(query)
return self.max_marginal_relevance_search_by_vector(embedding, k, fetch_k,
lambda_mult, search_params)
|
def max_marginal_relevance_search(self, query: str, k: int=4, fetch_k: int=
    20, lambda_mult: float=0.5, search_params: Optional[dict]=None, **
    kwargs: Any) -> List[Document]:
    """Return docs selected using the maximal marginal relevance.
    Maximal marginal relevance optimizes for similarity to query AND diversity
    among selected documents.
    Args:
        query: Text to look up documents similar to.
        k: Number of Documents to return. Defaults to 4.
        fetch_k: Number of Documents to fetch to pass to MMR algorithm.
        lambda_mult: Number between 0 and 1 that determines the degree
            of diversity among the results with 0 corresponding
            to maximum diversity and 1 to minimum diversity.
            Defaults to 0.5.
    Returns:
        List of Documents selected by maximal marginal relevance.
    """
    # Embed once, then delegate the MMR selection to the by-vector variant.
    query_embedding = self._embedding.embed_query(query)
    return self.max_marginal_relevance_search_by_vector(query_embedding, k,
        fetch_k, lambda_mult, search_params)
|
Return docs selected using the maximal marginal relevance.
Maximal marginal relevance optimizes for similarity to query AND diversity
among selected documents.
Args:
query: Text to look up documents similar to.
k: Number of Documents to return. Defaults to 4.
fetch_k: Number of Documents to fetch to pass to MMR algorithm.
lambda_mult: Number between 0 and 1 that determines the degree
of diversity among the results with 0 corresponding
to maximum diversity and 1 to minimum diversity.
Defaults to 0.5.
Returns:
List of Documents selected by maximal marginal relevance.
|
_stream_response_to_generation_chunk
|
"""Convert a stream response to a generation chunk."""
return GenerationChunk(text=stream_response.choices[0].text,
generation_info=dict(finish_reason=stream_response.choices[0].
finish_reason, logprobs=stream_response.choices[0].logprobs))
|
def _stream_response_to_generation_chunk(stream_response: Any
    ) -> GenerationChunk:
    """Convert a stream response to a generation chunk."""
    choice = stream_response.choices[0]
    info = {'finish_reason': choice.finish_reason, 'logprobs': choice.logprobs}
    return GenerationChunk(text=choice.text, generation_info=info)
|
Convert a stream response to a generation chunk.
|
test_llama_call
|
"""Test valid call to Open Source Foundation Model."""
chat = AzureMLChatOnlineEndpoint(content_formatter=LlamaContentFormatter())
response = chat(messages=[HumanMessage(content='Foo')])
assert isinstance(response, BaseMessage)
assert isinstance(response.content, str)
|
def test_llama_call() -> None:
    """Test valid call to Open Source Foundation Model."""
    endpoint = AzureMLChatOnlineEndpoint(content_formatter=
        LlamaContentFormatter())
    reply = endpoint(messages=[HumanMessage(content='Foo')])
    assert isinstance(reply, BaseMessage)
    assert isinstance(reply.content, str)
|
Test valid call to Open Source Foundation Model.
|
embed_query
|
resp = self.embed_documents([text])
return resp[0]
|
def embed_query(self, text: str) -> List[float]:
    """Embed a single query string via the batch embedding path."""
    return self.embed_documents([text])[0]
| null |
from_llm
|
"""Create a LLMChainFilter from a language model.
Args:
llm: The language model to use for filtering.
prompt: The prompt to use for the filter.
**kwargs: Additional arguments to pass to the constructor.
Returns:
A LLMChainFilter that uses the given language model.
"""
_prompt = prompt if prompt is not None else _get_default_chain_prompt()
llm_chain = LLMChain(llm=llm, prompt=_prompt)
return cls(llm_chain=llm_chain, **kwargs)
|
@classmethod
def from_llm(cls, llm: BaseLanguageModel, prompt: Optional[
    BasePromptTemplate]=None, **kwargs: Any) -> 'LLMChainFilter':
    """Create a LLMChainFilter from a language model.
    Args:
        llm: The language model to use for filtering.
        prompt: The prompt to use for the filter.
        **kwargs: Additional arguments to pass to the constructor.
    Returns:
        A LLMChainFilter that uses the given language model.
    """
    if prompt is None:
        prompt = _get_default_chain_prompt()
    chain = LLMChain(llm=llm, prompt=prompt)
    return cls(llm_chain=chain, **kwargs)
|
Create a LLMChainFilter from a language model.
Args:
llm: The language model to use for filtering.
prompt: The prompt to use for the filter.
**kwargs: Additional arguments to pass to the constructor.
Returns:
A LLMChainFilter that uses the given language model.
|
format
|
"""Format the prompt template.
Args:
**kwargs: Keyword arguments to use for formatting.
Returns:
Formatted message.
"""
text = self.prompt.format(**kwargs)
return HumanMessage(content=text, additional_kwargs=self.additional_kwargs)
|
def format(self, **kwargs: Any) -> BaseMessage:
    """Format the prompt template.
    Args:
        **kwargs: Keyword arguments to use for formatting.
    Returns:
        Formatted message.
    """
    rendered = self.prompt.format(**kwargs)
    return HumanMessage(content=rendered, additional_kwargs=self.
        additional_kwargs)
|
Format the prompt template.
Args:
**kwargs: Keyword arguments to use for formatting.
Returns:
Formatted message.
|
_import_spark_sql_tool_QueryCheckerTool
|
from langchain_community.tools.spark_sql.tool import QueryCheckerTool
return QueryCheckerTool
|
def _import_spark_sql_tool_QueryCheckerTool() ->Any:
    """Lazily import and return the Spark SQL ``QueryCheckerTool`` class."""
    from langchain_community.tools.spark_sql.tool import QueryCheckerTool
    return QueryCheckerTool
| null |
on_chain_end
|
"""Run when chain ends running."""
|
def on_chain_end(self, outputs: Dict[str, Any], **kwargs: Any) ->None:
    """Run when chain ends running.

    Intentionally a no-op in this handler.
    """
|
Run when chain ends running.
|
validate_environment
|
"""Validate that api key and endpoint exists in environment."""
azure_cogs_key = get_from_dict_or_env(values, 'azure_cogs_key',
'AZURE_COGS_KEY')
azure_cogs_endpoint = get_from_dict_or_env(values, 'azure_cogs_endpoint',
'AZURE_COGS_ENDPOINT')
try:
import azure.ai.vision as sdk
values['vision_service'] = sdk.VisionServiceOptions(endpoint=
azure_cogs_endpoint, key=azure_cogs_key)
values['analysis_options'] = sdk.ImageAnalysisOptions()
values['analysis_options'].features = (sdk.ImageAnalysisFeature.CAPTION |
sdk.ImageAnalysisFeature.OBJECTS | sdk.ImageAnalysisFeature.TAGS |
sdk.ImageAnalysisFeature.TEXT)
except ImportError:
raise ImportError(
'azure-ai-vision is not installed. Run `pip install azure-ai-vision` to install.'
)
return values
|
@root_validator(pre=True)
def validate_environment(cls, values: Dict) ->Dict:
    """Validate that api key and endpoint exists in environment.

    Reads ``azure_cogs_key`` / ``azure_cogs_endpoint`` from ``values`` or the
    corresponding environment variables, then builds the azure-ai-vision
    service options and analysis options.

    Raises:
        ImportError: If the ``azure-ai-vision`` package is not installed.
    """
    azure_cogs_key = get_from_dict_or_env(values, 'azure_cogs_key',
        'AZURE_COGS_KEY')
    azure_cogs_endpoint = get_from_dict_or_env(values,
        'azure_cogs_endpoint', 'AZURE_COGS_ENDPOINT')
    # Keep the try minimal: only the import should be treated as a missing
    # package. The original wrapped the SDK object construction too, so any
    # ImportError raised inside the SDK was mislabelled as "not installed".
    try:
        import azure.ai.vision as sdk
    except ImportError:
        raise ImportError(
            'azure-ai-vision is not installed. Run `pip install azure-ai-vision` to install.'
            )
    values['vision_service'] = sdk.VisionServiceOptions(endpoint=
        azure_cogs_endpoint, key=azure_cogs_key)
    analysis_options = sdk.ImageAnalysisOptions()
    analysis_options.features = (sdk.ImageAnalysisFeature.CAPTION | sdk.
        ImageAnalysisFeature.OBJECTS | sdk.ImageAnalysisFeature.TAGS | sdk.
        ImageAnalysisFeature.TEXT)
    values['analysis_options'] = analysis_options
    return values
|
Validate that api key and endpoint exists in environment.
|
test_pgvector_with_metadatas_with_scores
|
"""Test end to end construction and search."""
texts = ['foo', 'bar', 'baz']
metadatas = [{'page': str(i)} for i in range(len(texts))]
docsearch = PGVector.from_texts(texts=texts, collection_name=
'test_collection', embedding=FakeEmbeddingsWithAdaDimension(),
metadatas=metadatas, connection_string=CONNECTION_STRING,
pre_delete_collection=True)
output = docsearch.similarity_search_with_score('foo', k=1)
assert output == [(Document(page_content='foo', metadata={'page': '0'}), 0.0)]
|
def test_pgvector_with_metadatas_with_scores() -> None:
    """Test end to end construction and search."""
    texts = ['foo', 'bar', 'baz']
    metadatas = [{'page': str(index)} for index, _ in enumerate(texts)]
    store = PGVector.from_texts(texts=texts, collection_name=
        'test_collection', embedding=FakeEmbeddingsWithAdaDimension(),
        metadatas=metadatas, connection_string=CONNECTION_STRING,
        pre_delete_collection=True)
    results = store.similarity_search_with_score('foo', k=1)
    expected = Document(page_content='foo', metadata={'page': '0'})
    assert results == [(expected, 0.0)]
|
Test end to end construction and search.
|
test_invalid_html
|
bs_transformer = BeautifulSoupTransformer()
invalid_html_1 = '<html><h1>First heading.'
invalid_html_2 = '<html 1234 xyz'
documents = [Document(page_content=invalid_html_1), Document(page_content=
invalid_html_2)]
docs_transformed = bs_transformer.transform_documents(documents,
tags_to_extract=['h1'])
assert docs_transformed[0].page_content == 'First heading.'
assert docs_transformed[1].page_content == ''
|
@pytest.mark.requires('bs4')
def test_invalid_html() -> None:
    """Malformed HTML should still yield whatever content is parseable."""
    transformer = BeautifulSoupTransformer()
    docs = [Document(page_content='<html><h1>First heading.'), Document(
        page_content='<html 1234 xyz')]
    transformed = transformer.transform_documents(docs, tags_to_extract=['h1'])
    assert transformed[0].page_content == 'First heading.'
    assert transformed[1].page_content == ''
| null |
_import_mlflow
|
from langchain_community.llms.mlflow import Mlflow
return Mlflow
|
def _import_mlflow() ->Any:
    """Lazily import and return the ``Mlflow`` LLM class."""
    from langchain_community.llms.mlflow import Mlflow
    return Mlflow
| null |
_import_playwright_ExtractTextTool
|
from langchain_community.tools.playwright import ExtractTextTool
return ExtractTextTool
|
def _import_playwright_ExtractTextTool() ->Any:
    """Lazily import and return the Playwright ``ExtractTextTool`` class."""
    from langchain_community.tools.playwright import ExtractTextTool
    return ExtractTextTool
| null |
_type
|
return 'list'
|
@property
def _type(self) ->str:
    """Identifier string for this component type ('list')."""
    return 'list'
| null |
foo
|
"""Foo."""
return bar
|
# Minimal @tool fixture. Its docstring ("Foo.") doubles as the tool's
# description at runtime, so it is intentionally left unchanged.
@tool
def foo(bar: str) ->str:
    """Foo."""
    return bar
|
Foo.
|
test_one_thoghts
|
thoughts = [Thought(text='a', validity=ThoughtValidity.VALID_FINAL)]
memory = ToTDFSMemory(thoughts)
self.assertEqual(self.controller(memory), ('a',))
|
def test_one_thoghts(self) -> None:
    """A single VALID_FINAL thought should be returned as-is."""
    memory = ToTDFSMemory([Thought(text='a', validity=ThoughtValidity.
        VALID_FINAL)])
    self.assertEqual(self.controller(memory), ('a',))
| null |
test_clarifai_with_from_texts
|
"""Test end to end construction and search."""
texts = ['foo', 'bar', 'baz']
USER_ID = 'minhajul'
APP_ID = 'test-lang-2'
NUMBER_OF_DOCS = 1
docsearch = Clarifai.from_texts(user_id=USER_ID, app_id=APP_ID, texts=texts,
pat=None, number_of_docs=NUMBER_OF_DOCS)
time.sleep(2.5)
output = docsearch.similarity_search('foo')
assert output == [Document(page_content='foo')]
|
def test_clarifai_with_from_texts() -> None:
    """Test end to end construction and search."""
    texts = ['foo', 'bar', 'baz']
    store = Clarifai.from_texts(user_id='minhajul', app_id='test-lang-2',
        texts=texts, pat=None, number_of_docs=1)
    # Give the hosted index a moment to become searchable.
    time.sleep(2.5)
    assert store.similarity_search('foo') == [Document(page_content='foo')]
|
Test end to end construction and search.
|
__init__
|
"""Initialize with file path."""
validate_unstructured_version(min_unstructured_version='0.10.15')
self.url = url
self.api_key = api_key
super().__init__(file_path=file_path, mode=mode, **unstructured_kwargs)
|
def __init__(self, file_path: Union[str, List[str]]='', mode: str='single',
    url: str='https://api.unstructured.io/general/v0/general', api_key: str
    ='', **unstructured_kwargs: Any):
    """Initialize with file path.

    Args:
        file_path: Path(s) of the file(s) to process.
        mode: Partitioning mode passed through to the base loader.
        url: Hosted Unstructured API endpoint to call.
        api_key: API key for the hosted endpoint.
        **unstructured_kwargs: Extra options forwarded to the base loader.
    """
    # Hosted-API support requires unstructured >= 0.10.15.
    validate_unstructured_version(min_unstructured_version='0.10.15')
    self.url = url
    self.api_key = api_key
    super().__init__(file_path=file_path, mode=mode, **unstructured_kwargs)
|
Initialize with file path.
|
make_fake_llm
|
"""
Fake LLM service for testing CPAL chain and its components chains
on univariate math examples.
"""
class LLMMockData(pydantic.BaseModel):
question: str
completion: str
template: str
data_model: Type[pydantic.BaseModel]
@property
def prompt(self) ->str:
"""Create LLM prompt with the question."""
prompt_template = PromptTemplate(input_variables=[Constant.
narrative_input.value], template=self.template,
partial_variables={'format_instructions': PydanticOutputParser(
pydantic_object=self.data_model).get_format_instructions()})
prompt = prompt_template.format(narrative_input=self.question)
return prompt
narrative = LLMMockData(**{'question':
'jan has three times the number of pets as marcia. marcia has two more pets than cindy.if cindy has ten pets, how many pets does jan have? '
, 'completion': json.dumps({'story_outcome_question':
'how many pets does jan have? ', 'story_hypothetical':
'if cindy has ten pets', 'story_plot':
'jan has three times the number of pets as marcia. marcia has two more pets than cindy.'
}), 'template': narrative_template, 'data_model': NarrativeModel})
causal_model = LLMMockData(**{'question':
'jan has three times the number of pets as marcia. marcia has two more pets than cindy.'
, 'completion':
"""
{
"attribute": "pet_count",
"entities": [
{
"name": "cindy",
"value": 0,
"depends_on": [],
"code": "pass"
},
{
"name": "marcia",
"value": 0,
"depends_on": ["cindy"],
"code": "marcia.value = cindy.value + 2"
},
{
"name": "jan",
"value": 0,
"depends_on": ["marcia"],
"code": "jan.value = marcia.value * 3"
}
]
}"""
, 'template': causal_template, 'data_model': CausalModel})
intervention = LLMMockData(**{'question': 'if cindy has ten pets',
'completion':
"""{
"entity_settings" : [
{ "name": "cindy", "attribute": "pet_count", "value": "10" }
]
}"""
, 'template': intervention_template, 'data_model': InterventionModel})
query = LLMMockData(**{'question': 'how many pets does jan have? ',
'completion':
"""{
"narrative_input": "how many pets does jan have? ",
"llm_error_msg": "",
"expression": "SELECT name, value FROM df WHERE name = 'jan'"
}"""
, 'template': query_template, 'data_model': QueryModel})
fake_llm = FakeLLM()
fake_llm.queries = {}
for mock_data in [narrative, causal_model, intervention, query]:
fake_llm.queries.update({mock_data.prompt: mock_data.completion})
return fake_llm
|
def make_fake_llm(self) ->FakeLLM:
    """
    Fake LLM service for testing CPAL chain and its components chains
    on univariate math examples.
    """
    # Bundles one canned (prompt -> completion) pair for a single CPAL step.
    class LLMMockData(pydantic.BaseModel):
        question: str
        completion: str
        template: str
        data_model: Type[pydantic.BaseModel]

        @property
        def prompt(self) ->str:
            """Create LLM prompt with the question."""
            prompt_template = PromptTemplate(input_variables=[Constant.
                narrative_input.value], template=self.template,
                partial_variables={'format_instructions':
                PydanticOutputParser(pydantic_object=self.data_model).
                get_format_instructions()})
            prompt = prompt_template.format(narrative_input=self.question)
            return prompt
    # Step 1: split the word problem into plot / hypothetical / question.
    narrative = LLMMockData(**{'question':
        'jan has three times the number of pets as marcia. marcia has two more pets than cindy.if cindy has ten pets, how many pets does jan have? '
        , 'completion': json.dumps({'story_outcome_question':
        'how many pets does jan have? ', 'story_hypothetical':
        'if cindy has ten pets', 'story_plot':
        'jan has three times the number of pets as marcia. marcia has two more pets than cindy.'
        }), 'template': narrative_template, 'data_model': NarrativeModel})
    # Step 2: extract the causal dependency graph between entities.
    causal_model = LLMMockData(**{'question':
        'jan has three times the number of pets as marcia. marcia has two more pets than cindy.'
        , 'completion':
        """
{
    "attribute": "pet_count",
    "entities": [
        {
            "name": "cindy",
            "value": 0,
            "depends_on": [],
            "code": "pass"
        },
        {
            "name": "marcia",
            "value": 0,
            "depends_on": ["cindy"],
            "code": "marcia.value = cindy.value + 2"
        },
        {
            "name": "jan",
            "value": 0,
            "depends_on": ["marcia"],
            "code": "jan.value = marcia.value * 3"
        }
    ]
}"""
        , 'template': causal_template, 'data_model': CausalModel})
    # Step 3: translate the hypothetical into entity settings.
    intervention = LLMMockData(**{'question': 'if cindy has ten pets',
        'completion':
        """{
    "entity_settings" : [
        { "name": "cindy", "attribute": "pet_count", "value": "10" }
    ]
}"""
        , 'template': intervention_template, 'data_model': InterventionModel})
    # Step 4: turn the outcome question into a dataframe query.
    query = LLMMockData(**{'question': 'how many pets does jan have? ',
        'completion':
        """{
    "narrative_input": "how many pets does jan have? ",
    "llm_error_msg": "",
    "expression": "SELECT name, value FROM df WHERE name = 'jan'"
}"""
        , 'template': query_template, 'data_model': QueryModel})
    # Key the fake LLM's canned answers by the exact rendered prompt text.
    fake_llm = FakeLLM()
    fake_llm.queries = {}
    for mock_data in [narrative, causal_model, intervention, query]:
        fake_llm.queries.update({mock_data.prompt: mock_data.completion})
    return fake_llm
|
Fake LLM service for testing CPAL chain and its components chains
on univariate math examples.
|
metric
|
"""To log metric to mlflow server."""
with self.mlflow.start_run(run_id=self.run.info.run_id, experiment_id=self.
mlf_expid):
self.mlflow.log_metric(key, value)
|
def metric(self, key: str, value: float) ->None:
    """Record a single metric value on the tracked MLflow run."""
    # Re-open the run by its id so the metric is attached to the correct
    # run/experiment pair rather than whatever run happens to be active.
    run_ctx = self.mlflow.start_run(run_id=self.run.info.run_id,
        experiment_id=self.mlf_expid)
    with run_ctx:
        self.mlflow.log_metric(key, value)
|
To log metric to mlflow server.
|
test_bookend_embedding_documents
|
"""Test Bookend AI embeddings for documents."""
documents = ['foo bar', 'bar foo']
embedding = BookendEmbeddings(domain='<bookend_domain>', api_token=
'<bookend_api_token>', model_id='<bookend_embeddings_model_id>')
output = embedding.embed_documents(documents)
assert len(output) == 2
assert len(output[0]) == 768
|
def test_bookend_embedding_documents() ->None:
    """Embed two documents with Bookend AI and sanity-check the shape."""
    docs = ['foo bar', 'bar foo']
    embedder = BookendEmbeddings(domain='<bookend_domain>', api_token=
        '<bookend_api_token>', model_id='<bookend_embeddings_model_id>')
    vectors = embedder.embed_documents(docs)
    # One 768-dimensional vector per input document.
    assert len(vectors) == 2
    assert len(vectors[0]) == 768
|
Test Bookend AI embeddings for documents.
|
_on_tool_error
|
"""Process the Tool Run upon error."""
self._process_end_trace(run)
|
def _on_tool_error(self, run: 'Run') ->None:
    """Finalize tracing for a Tool run that terminated with an error."""
    # Errors end the trace the same way a normal completion does.
    self._process_end_trace(run)
|
Process the Tool Run upon error.
|
_import_amazon_api_gateway
|
from langchain_community.llms.amazon_api_gateway import AmazonAPIGateway
return AmazonAPIGateway
|
def _import_amazon_api_gateway() ->Any:
    """Lazily import and return the ``AmazonAPIGateway`` LLM class."""
    from langchain_community.llms import amazon_api_gateway
    return amazon_api_gateway.AmazonAPIGateway
| null |
__init__
|
"""Initialize with a file path."""
try:
import pypdf
except ImportError:
raise ImportError(
'pypdf package not found, please install it with `pip install pypdf`')
super().__init__(file_path, headers=headers)
self.parser = PyPDFParser(password=password, extract_images=extract_images)
|
def __init__(self, file_path: str, password: Optional[Union[str, bytes]]=
    None, headers: Optional[Dict]=None, extract_images: bool=False) ->None:
    """Initialize the loader with a file path, verifying pypdf is present."""
    # Fail fast with an actionable message when the backend is missing.
    try:
        import pypdf  # noqa: F401
    except ImportError:
        raise ImportError(
            'pypdf package not found, please install it with `pip install pypdf`'
            )
    super().__init__(file_path, headers=headers)
    self.parser = PyPDFParser(password=password, extract_images=extract_images)
|
Initialize with a file path.
|
create_xml_agent
|
"""Create an agent that uses XML to format its logic.
Examples:
.. code-block:: python
from langchain import hub
from langchain_community.chat_models import ChatAnthropic
from langchain.agents import AgentExecutor, create_xml_agent
prompt = hub.pull("hwchase17/xml-agent-convo")
model = ChatAnthropic()
tools = ...
agent = create_xml_agent(model, tools, prompt)
agent_executor = AgentExecutor(agent=agent, tools=tools)
agent_executor.invoke({"input": "hi"})
# Use with chat history
from langchain_core.messages import AIMessage, HumanMessage
agent_executor.invoke(
{
"input": "what's my name?",
# Notice that chat_history is a string
# since this prompt is aimed at LLMs, not chat models
"chat_history": "Human: My name is Bob
AI: Hello Bob!",
}
)
Args:
llm: LLM to use as the agent.
tools: Tools this agent has access to.
prompt: The prompt to use, must have input keys of
`tools` and `agent_scratchpad`.
Returns:
A runnable sequence representing an agent. It takes as input all the same input
variables as the prompt passed in does. It returns as output either an
AgentAction or AgentFinish.
"""
missing_vars = {'tools', 'agent_scratchpad'}.difference(prompt.input_variables)
if missing_vars:
raise ValueError(f'Prompt missing required variables: {missing_vars}')
prompt = prompt.partial(tools=render_text_description(list(tools)))
llm_with_stop = llm.bind(stop=['</tool_input>'])
agent = RunnablePassthrough.assign(agent_scratchpad=lambda x: format_xml(x[
'intermediate_steps'])) | prompt | llm_with_stop | XMLAgentOutputParser()
return agent
|
def create_xml_agent(llm: BaseLanguageModel, tools: Sequence[BaseTool],
    prompt: BasePromptTemplate) ->Runnable:
    """Create an agent that formats its reasoning as XML.

    Args:
        llm: LLM to use as the agent.
        tools: Tools this agent has access to.
        prompt: The prompt to use; must declare the input variables
            ``tools`` and ``agent_scratchpad``.

    Returns:
        A runnable sequence representing the agent. It accepts the same
        input variables as the prompt and emits an AgentAction or
        AgentFinish.

    Raises:
        ValueError: If the prompt lacks a required input variable.
    """
    # Validate up front that the prompt exposes the two variables the
    # pipeline below fills in.
    required = {'tools', 'agent_scratchpad'}
    missing_vars = required.difference(prompt.input_variables)
    if missing_vars:
        raise ValueError(f'Prompt missing required variables: {missing_vars}')

    def _scratchpad(inputs: dict):
        # Prior (action, observation) steps, rendered as XML.
        return format_xml(inputs['intermediate_steps'])

    # Bake the rendered tool descriptions into the prompt.
    prompt = prompt.partial(tools=render_text_description(list(tools)))
    # Stop generation as soon as the model closes a tool-input tag.
    stopping_llm = llm.bind(stop=['</tool_input>'])
    return RunnablePassthrough.assign(agent_scratchpad=_scratchpad
        ) | prompt | stopping_llm | XMLAgentOutputParser()
|
Create an agent that uses XML to format its logic.
Examples:
.. code-block:: python
from langchain import hub
from langchain_community.chat_models import ChatAnthropic
from langchain.agents import AgentExecutor, create_xml_agent
prompt = hub.pull("hwchase17/xml-agent-convo")
model = ChatAnthropic()
tools = ...
agent = create_xml_agent(model, tools, prompt)
agent_executor = AgentExecutor(agent=agent, tools=tools)
agent_executor.invoke({"input": "hi"})
# Use with chat history
from langchain_core.messages import AIMessage, HumanMessage
agent_executor.invoke(
{
"input": "what's my name?",
# Notice that chat_history is a string
# since this prompt is aimed at LLMs, not chat models
"chat_history": "Human: My name is Bob
AI: Hello Bob!",
}
)
Args:
llm: LLM to use as the agent.
tools: Tools this agent has access to.
prompt: The prompt to use, must have input keys of
`tools` and `agent_scratchpad`.
Returns:
A runnable sequence representing an agent. It takes as input all the same input
variables as the prompt passed in does. It returns as output either an
AgentAction or AgentFinish.
|
embed_documents
|
if self.show_progress_bar:
try:
from tqdm import tqdm
iter_ = tqdm(texts, desc='GooglePalmEmbeddings')
except ImportError:
logger.warning(
'Unable to show progress bar because tqdm could not be imported. Please install with `pip install tqdm`.'
)
iter_ = texts
else:
iter_ = texts
return [self.embed_query(text) for text in iter_]
|
def embed_documents(self, texts: List[str]) ->List[List[float]]:
    """Embed each text one at a time, optionally with a tqdm progress bar."""
    iterable = texts
    if self.show_progress_bar:
        try:
            from tqdm import tqdm
        except ImportError:
            # tqdm is optional: warn and fall back to plain iteration.
            logger.warning(
                'Unable to show progress bar because tqdm could not be imported. Please install with `pip install tqdm`.'
                )
        else:
            iterable = tqdm(texts, desc='GooglePalmEmbeddings')
    return [self.embed_query(text) for text in iterable]
| null |
__init__
|
self._lookup_fn = lookup_fn
|
def __init__(self, lookup_fn: Callable[[str], Union[Document, str]]):
    """Store the callable used to resolve a lookup term to a document."""
    self._lookup_fn = lookup_fn
| null |
build_extra
|
"""Build extra kwargs from additional params that were passed in."""
all_required_field_names = {field.alias for field in cls.__fields__.values()}
extra = values.get('model_kwargs', {})
for field_name in list(values):
if field_name not in all_required_field_names:
if field_name in extra:
raise ValueError(f'Found {field_name} supplied twice.')
logger.warning(
f"""WARNING! {field_name} is not default parameter.
{field_name} was transferred to model_kwargs.
Please confirm that {field_name} is what you intended."""
)
extra[field_name] = values.pop(field_name)
values['model_kwargs'] = extra
return values
|
@root_validator(pre=True)
def build_extra(cls, values: Dict[str, Any]) ->Dict[str, Any]:
    """Build extra kwargs from additional params that were passed in.

    Any keyword the model does not declare is moved into ``model_kwargs``
    (with a warning). Supplying the same key both directly and inside
    ``model_kwargs`` raises ``ValueError``.
    """
    # Aliases of all declared fields; anything else counts as "extra".
    all_required_field_names = {field.alias for field in cls.__fields__.
        values()}
    extra = values.get('model_kwargs', {})
    # Iterate over a snapshot since we pop from `values` while looping.
    for field_name in list(values):
        if field_name not in all_required_field_names:
            if field_name in extra:
                raise ValueError(f'Found {field_name} supplied twice.')
            logger.warning(
                f"""WARNING! {field_name} is not default parameter.
                    {field_name} was transferred to model_kwargs.
                    Please confirm that {field_name} is what you intended."""
                )
            extra[field_name] = values.pop(field_name)
    values['model_kwargs'] = extra
    return values
|
Build extra kwargs from additional params that were passed in.
|
test_repeated_memory_var
|
"""Test raising error when repeated memory variables found"""
with pytest.raises(ValueError):
CombinedMemory(memories=[example_memory[1], example_memory[2]])
|
def test_repeated_memory_var(example_memory: List[ConversationBufferMemory]
    ) ->None:
    """Combining memories that share a variable must raise ValueError."""
    duplicated = [example_memory[1], example_memory[2]]
    with pytest.raises(ValueError):
        CombinedMemory(memories=duplicated)
|
Test raising error when repeated memory variables found
|
as_field
|
from redis.commands.search.field import TagField
return TagField(self.name, separator=self.separator, case_sensitive=self.
case_sensitive, sortable=self.sortable, no_index=self.no_index)
|
def as_field(self) ->TagField:
    """Translate this schema field into a redis-py ``TagField``."""
    from redis.commands.search.field import TagField
    options = dict(separator=self.separator, case_sensitive=self.
        case_sensitive, sortable=self.sortable, no_index=self.no_index)
    return TagField(self.name, **options)
| null |
__repr__
|
"""A string representation of this runnable."""
if hasattr(self, 'func') and isinstance(self.func, itemgetter):
return f"RunnableLambda({str(self.func)[len('operator.'):]})"
elif hasattr(self, 'func'):
return f"RunnableLambda({get_lambda_source(self.func) or '...'})"
elif hasattr(self, 'afunc'):
return f"RunnableLambda(afunc={get_lambda_source(self.afunc) or '...'})"
else:
return 'RunnableLambda(...)'
|
def __repr__(self) ->str:
    """A string representation of this runnable."""
    # A sync function takes priority; itemgetter gets a compact rendering.
    if hasattr(self, 'func'):
        if isinstance(self.func, itemgetter):
            return f"RunnableLambda({str(self.func)[len('operator.'):]})"
        return f"RunnableLambda({get_lambda_source(self.func) or '...'})"
    if hasattr(self, 'afunc'):
        return f"RunnableLambda(afunc={get_lambda_source(self.afunc) or '...'})"
    return 'RunnableLambda(...)'
|
A string representation of this runnable.
|
_format_chat_history
|
buffer = []
for human, ai in chat_history:
buffer.append(HumanMessage(content=human))
buffer.append(AIMessage(content=ai))
return buffer
|
def _format_chat_history(chat_history: List[Tuple[str, str]]):
    """Convert (human, ai) string pairs into alternating message objects."""
    messages = []
    for human_turn, ai_turn in chat_history:
        messages.extend([HumanMessage(content=human_turn), AIMessage(
            content=ai_turn)])
    return messages
| null |
test_get_game_details
|
"""Test for getting game details on Steam"""
steam = SteamWebAPIWrapper()
output = steam.run('get_game_details', 'Terraria')
assert 'id' in output
assert 'link' in output
assert 'detailed description' in output
assert 'supported languages' in output
assert 'price' in output
|
def test_get_game_details() ->None:
    """Test for getting game details on Steam"""
    api = SteamWebAPIWrapper()
    details = api.run('get_game_details', 'Terraria')
    # The rendered details must mention each expected section.
    for expected in ('id', 'link', 'detailed description',
        'supported languages', 'price'):
        assert expected in details
|
Test for getting game details on Steam
|
f
|
args_: map[str] = map(str, args)
return f"{op_name}({','.join(args_)})"
|
def f(*args: Any) ->str:
    """Render a call expression ``op_name(a,b,...)`` from stringified args."""
    joined = ','.join(str(arg) for arg in args)
    return f'{op_name}({joined})'
| null |
greet
|
print(f'Hello, {self.name}!')
|
def greet(self):
    """Print a friendly greeting addressed to this instance's name."""
    message = f'Hello, {self.name}!'
    print(message)
| null |
set_verbose
|
"""Set a new value for the `verbose` global setting."""
import langchain
with warnings.catch_warnings():
warnings.filterwarnings('ignore', message=
'Importing verbose from langchain root module is no longer supported')
langchain.verbose = value
global _verbose
_verbose = value
|
def set_verbose(value: bool) ->None:
    """Set a new value for the `verbose` global setting."""
    import langchain
    # Touching langchain.verbose emits a deprecation warning; suppress just
    # that message so callers of this setter are not spammed.
    with warnings.catch_warnings():
        warnings.filterwarnings('ignore', message=
            'Importing verbose from langchain root module is no longer supported'
            )
        langchain.verbose = value
    # Keep this module's own flag in sync with the langchain root setting.
    global _verbose
    _verbose = value
|
Set a new value for the `verbose` global setting.
|
__init__
|
assert access_token == _GRADIENT_SECRET
assert workspace_id == _GRADIENT_WORKSPACE_ID
assert host == _GRADIENT_BASE_URL
|
def __init__(self, access_token: str, workspace_id, host):
    """Test-double constructor: verify the expected fixture credentials."""
    assert access_token == _GRADIENT_SECRET
    assert workspace_id == _GRADIENT_WORKSPACE_ID
    assert host == _GRADIENT_BASE_URL
| null |
_remove_front_matter
|
"""Remove front matter metadata from the given content."""
if not self.collect_metadata:
return content
return self.FRONT_MATTER_REGEX.sub('', content)
|
def _remove_front_matter(self, content: str) ->str:
    """Strip front matter metadata when metadata collection is enabled."""
    if self.collect_metadata:
        return self.FRONT_MATTER_REGEX.sub('', content)
    # Metadata collection disabled: keep the content verbatim.
    return content
|
Remove front matter metadata from the given content.
|
on_text_common
|
self.text += 1
|
def on_text_common(self) ->None:
    """Bump the running count of text callbacks."""
    self.text = self.text + 1
| null |
_invoke
|
if inspect.isgeneratorfunction(self.func):
output: Optional[Output] = None
for chunk in call_func_with_variable_args(cast(Callable[[Input],
Iterator[Output]], self.func), input, config, run_manager, **kwargs):
if output is None:
output = chunk
else:
try:
output = output + chunk
except TypeError:
output = chunk
else:
output = call_func_with_variable_args(self.func, input, config,
run_manager, **kwargs)
if isinstance(output, Runnable):
recursion_limit = config['recursion_limit']
if recursion_limit <= 0:
raise RecursionError(
f'Recursion limit reached when invoking {self} with input {input}.'
)
output = output.invoke(input, patch_config(config, callbacks=
run_manager.get_child(), recursion_limit=recursion_limit - 1))
return cast(Output, output)
|
def _invoke(self, input: Input, run_manager: CallbackManagerForChainRun,
    config: RunnableConfig, **kwargs: Any) ->Output:
    """Invoke the wrapped callable, folding generator chunks into one
    output and recursing (with a depth limit) when the result is itself
    a Runnable."""
    if inspect.isgeneratorfunction(self.func):
        # Consume the generator eagerly, combining chunks with `+` when
        # they support it; otherwise only the latest chunk is kept.
        output: Optional[Output] = None
        for chunk in call_func_with_variable_args(cast(Callable[[Input],
            Iterator[Output]], self.func), input, config, run_manager, **kwargs
            ):
            if output is None:
                output = chunk
            else:
                try:
                    output = output + chunk
                except TypeError:
                    # Chunks are not additive; fall back to the last one.
                    output = chunk
    else:
        output = call_func_with_variable_args(self.func, input, config,
            run_manager, **kwargs)
    if isinstance(output, Runnable):
        # The callable returned another Runnable: invoke it too, spending
        # one unit of the recursion budget to guard against infinite loops.
        recursion_limit = config['recursion_limit']
        if recursion_limit <= 0:
            raise RecursionError(
                f'Recursion limit reached when invoking {self} with input {input}.'
                )
        output = output.invoke(input, patch_config(config, callbacks=
            run_manager.get_child(), recursion_limit=recursion_limit - 1))
    return cast(Output, output)
| null |
__exit__
|
"""Context manager exit"""
self.upsert_messages()
self._client.__exit__(exc_type, exc_val, traceback)
|
def __exit__(self, exc_type: Optional[Type[BaseException]], exc_val:
    Optional[BaseException], traceback: Optional[TracebackType]) ->None:
    """Context manager exit: flush pending messages, then close the client."""
    # Persist buffered messages before the underlying client shuts down.
    self.upsert_messages()
    self._client.__exit__(exc_type, exc_val, traceback)
|
Context manager exit
|
test_huggingface_embedding_query
|
"""Test huggingface embeddings."""
document = 'foo bar'
embedding = HuggingFaceEmbeddings(encode_kwargs={'batch_size': 16})
output = embedding.embed_query(document)
assert len(output) == 768
|
def test_huggingface_embedding_query() ->None:
    """Embed a single query and sanity-check the vector dimensionality."""
    query = 'foo bar'
    embedder = HuggingFaceEmbeddings(encode_kwargs={'batch_size': 16})
    vector = embedder.embed_query(query)
    assert len(vector) == 768
|
Test huggingface embeddings.
|
test_Config
|
headers = Portkey.Config(api_key='test_api_key', environment=
'test_environment', user='test_user', organisation='test_organisation',
prompt='test_prompt', retry_count=3, trace_id='test_trace_id', cache=
'simple', cache_force_refresh='True', cache_age=3600)
assert headers['x-portkey-api-key'] == 'test_api_key'
assert headers['x-portkey-trace-id'] == 'test_trace_id'
assert headers['x-portkey-retry-count'] == '3'
assert headers['x-portkey-cache'] == 'simple'
assert headers['x-portkey-cache-force-refresh'] == 'True'
assert headers['Cache-Control'] == 'max-age:3600'
metadata = json.loads(headers['x-portkey-metadata'])
assert metadata['_environment'] == 'test_environment'
assert metadata['_user'] == 'test_user'
assert metadata['_organisation'] == 'test_organisation'
assert metadata['_prompt'] == 'test_prompt'
|
def test_Config() ->None:
    """Portkey.Config must map its kwargs onto the expected headers."""
    headers = Portkey.Config(api_key='test_api_key', environment=
        'test_environment', user='test_user', organisation=
        'test_organisation', prompt='test_prompt', retry_count=3, trace_id=
        'test_trace_id', cache='simple', cache_force_refresh='True',
        cache_age=3600)
    expected = {'x-portkey-api-key': 'test_api_key', 'x-portkey-trace-id':
        'test_trace_id', 'x-portkey-retry-count': '3', 'x-portkey-cache':
        'simple', 'x-portkey-cache-force-refresh': 'True', 'Cache-Control':
        'max-age:3600'}
    for header_name, header_value in expected.items():
        assert headers[header_name] == header_value
    # The metadata header is a JSON blob of the remaining fields.
    metadata = json.loads(headers['x-portkey-metadata'])
    assert metadata['_environment'] == 'test_environment'
    assert metadata['_user'] == 'test_user'
    assert metadata['_organisation'] == 'test_organisation'
    assert metadata['_prompt'] == 'test_prompt'
| null |
_import_amazon_api_gateway
|
from langchain_community.llms.amazon_api_gateway import AmazonAPIGateway
return AmazonAPIGateway
|
def _import_amazon_api_gateway() ->Any:
    """Deferred import hook returning the AmazonAPIGateway LLM class."""
    from langchain_community.llms.amazon_api_gateway import AmazonAPIGateway
    return AmazonAPIGateway
| null |
_llm_type
|
"""Return type of model."""
return 'baseten'
|
@property
def _llm_type(self) ->str:
    """Identifier naming this LLM implementation."""
    return 'baseten'
|
Return type of model.
|
_select_relevance_score_fn
|
"""
The 'correct' relevance function
may differ depending on a few things, including:
- the distance / similarity metric used by the VectorStore
- the scale of your embeddings (OpenAI's are unit normed. Many others are not!)
- embedding dimensionality
- etc.
"""
if self.distance_strategy == 'COSINE':
return self._cosine_relevance_score_fn
elif self.distance_strategy == 'DOT':
return self._max_inner_product_relevance_score_fn
elif self.distance_strategy == 'EUCLID':
return self._euclidean_relevance_score_fn
else:
raise ValueError(
'Unknown distance strategy, must be cosine, max_inner_product, or euclidean'
)
|
def _select_relevance_score_fn(self) ->Callable[[float], float]:
    """Pick the relevance-normalization function for this store.

    The right choice depends on the distance/similarity metric in use,
    the scale of the embeddings, their dimensionality, and so on; here
    we dispatch purely on the configured distance strategy.
    """
    selector = {'COSINE': self._cosine_relevance_score_fn, 'DOT': self.
        _max_inner_product_relevance_score_fn, 'EUCLID': self.
        _euclidean_relevance_score_fn}
    score_fn = selector.get(self.distance_strategy)
    if score_fn is None:
        raise ValueError(
            'Unknown distance strategy, must be cosine, max_inner_product, or euclidean'
            )
    return score_fn
|
The 'correct' relevance function
may differ depending on a few things, including:
- the distance / similarity metric used by the VectorStore
- the scale of your embeddings (OpenAI's are unit normed. Many others are not!)
- embedding dimensionality
- etc.
|
on_chain_start_common
|
self.chain_starts += 1
self.starts += 1
|
def on_chain_start_common(self) ->None:
    """Bump both the chain-start counter and the overall start counter."""
    self.chain_starts = self.chain_starts + 1
    self.starts = self.starts + 1
| null |
test_api_key_is_string
|
llm = Predibase(predibase_api_key='secret-api-key')
assert isinstance(llm.predibase_api_key, SecretStr)
|
def test_api_key_is_string() ->None:
    """The Predibase API key must be wrapped in a SecretStr."""
    model = Predibase(predibase_api_key='secret-api-key')
    assert isinstance(model.predibase_api_key, SecretStr)
| null |
test_all_imports
|
assert set(__all__) == set(EXPECTED_ALL)
|
def test_all_imports() ->None:
    """The module's __all__ must match the expected public surface."""
    assert set(EXPECTED_ALL) == set(__all__)
| null |
_import_aphrodite
|
from langchain_community.llms.aphrodite import Aphrodite
return Aphrodite
|
def _import_aphrodite() ->Any:
    """Lazily import and return the ``Aphrodite`` LLM class."""
    from langchain_community.llms import aphrodite
    return aphrodite.Aphrodite
| null |
load
|
"""Load given path as pages."""
return list(self.lazy_load())
|
def load(self) ->List[Document]:
    """Load given path as pages."""
    # Materialize the lazy iterator into a concrete list of documents.
    pages = self.lazy_load()
    return list(pages)
|
Load given path as pages.
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.