method_name
stringlengths 1
78
| method_body
stringlengths 3
9.66k
| full_code
stringlengths 31
10.7k
| docstring
stringlengths 4
4.74k
⌀ |
|---|---|---|---|
_validate_aoss_with_engines
|
"""Validate AOSS with the engine."""
if is_aoss and engine != 'nmslib' and engine != 'faiss':
raise ValueError(
'Amazon OpenSearch Service Serverless only supports `nmslib` or `faiss` engines'
)
|
def _validate_aoss_with_engines(is_aoss: bool, engine: str) ->None:
"""Validate AOSS with the engine."""
if is_aoss and engine != 'nmslib' and engine != 'faiss':
raise ValueError(
'Amazon OpenSearch Service Serverless only supports `nmslib` or `faiss` engines'
)
|
Validate AOSS with the engine.
|
__init__
|
"""Initialize with url and api key."""
self.url = url
self.api_key = api_key
|
def __init__(self, url: str, api_key: str):
"""Initialize with url and api key."""
self.url = url
self.api_key = api_key
|
Initialize with url and api key.
|
_import_gigachat
|
from langchain_community.llms.gigachat import GigaChat
return GigaChat
|
def _import_gigachat() ->Any:
from langchain_community.llms.gigachat import GigaChat
return GigaChat
| null |
_import_replicate
|
from langchain_community.llms.replicate import Replicate
return Replicate
|
def _import_replicate() ->Any:
from langchain_community.llms.replicate import Replicate
return Replicate
| null |
_result_too_large
|
"""Tokenize the output of the query."""
if self.tiktoken_model_name:
tiktoken_ = _import_tiktoken()
encoding = tiktoken_.encoding_for_model(self.tiktoken_model_name)
length = len(encoding.encode(result))
logger.info('Result length: %s', length)
return length > self.output_token_limit, length
return False, 0
|
def _result_too_large(self, result: str) ->Tuple[bool, int]:
"""Tokenize the output of the query."""
if self.tiktoken_model_name:
tiktoken_ = _import_tiktoken()
encoding = tiktoken_.encoding_for_model(self.tiktoken_model_name)
length = len(encoding.encode(result))
logger.info('Result length: %s', length)
return length > self.output_token_limit, length
return False, 0
|
Tokenize the output of the query.
|
call_as_llm
|
return self.predict(message, stop=stop, **kwargs)
|
def call_as_llm(self, message: str, stop: Optional[List[str]]=None, **
kwargs: Any) ->str:
return self.predict(message, stop=stop, **kwargs)
| null |
_request
|
"""Request inferencing from the triton server."""
inputs = self._generate_inputs(stream=False, prompt=prompt, **params)
outputs = self._generate_outputs()
result_queue = self._invoke_triton(self.model_name, inputs, outputs, stop)
result_str = ''
for token in result_queue:
result_str += token
self.client.stop_stream()
return result_str
|
def _request(self, model_name: str, prompt: Sequence[Sequence[str]], stop:
Optional[List[str]]=None, **params: Any) ->str:
"""Request inferencing from the triton server."""
inputs = self._generate_inputs(stream=False, prompt=prompt, **params)
outputs = self._generate_outputs()
result_queue = self._invoke_triton(self.model_name, inputs, outputs, stop)
result_str = ''
for token in result_queue:
result_str += token
self.client.stop_stream()
return result_str
|
Request inferencing from the triton server.
|
embeddings
|
return self._embeddings
|
@property
def embeddings(self) ->Optional[Embeddings]:
return self._embeddings
| null |
__init__
|
"""Initialize with necessary components.
Args:
embedding (Embeddings): A text embedding model.
distance_strategy (DistanceStrategy, optional):
Determines the strategy employed for calculating
the distance between vectors in the embedding space.
Defaults to DOT_PRODUCT.
Available options are:
- DOT_PRODUCT: Computes the scalar product of two vectors.
This is the default behavior
- EUCLIDEAN_DISTANCE: Computes the Euclidean distance between
two vectors. This metric considers the geometric distance in
the vector space, and might be more suitable for embeddings
that rely on spatial relationships.
table_name (str, optional): Specifies the name of the table in use.
Defaults to "embeddings".
content_field (str, optional): Specifies the field to store the content.
Defaults to "content".
metadata_field (str, optional): Specifies the field to store metadata.
Defaults to "metadata".
vector_field (str, optional): Specifies the field to store the vector.
Defaults to "vector".
Following arguments pertain to the connection pool:
pool_size (int, optional): Determines the number of active connections in
the pool. Defaults to 5.
max_overflow (int, optional): Determines the maximum number of connections
allowed beyond the pool_size. Defaults to 10.
timeout (float, optional): Specifies the maximum wait time in seconds for
establishing a connection. Defaults to 30.
Following arguments pertain to the database connection:
host (str, optional): Specifies the hostname, IP address, or URL for the
database connection. The default scheme is "mysql".
user (str, optional): Database username.
password (str, optional): Database password.
port (int, optional): Database port. Defaults to 3306 for non-HTTP
connections, 80 for HTTP connections, and 443 for HTTPS connections.
database (str, optional): Database name.
Additional optional arguments provide further customization over the
database connection:
pure_python (bool, optional): Toggles the connector mode. If True,
operates in pure Python mode.
local_infile (bool, optional): Allows local file uploads.
charset (str, optional): Specifies the character set for string values.
ssl_key (str, optional): Specifies the path of the file containing the SSL
key.
ssl_cert (str, optional): Specifies the path of the file containing the SSL
certificate.
ssl_ca (str, optional): Specifies the path of the file containing the SSL
certificate authority.
ssl_cipher (str, optional): Sets the SSL cipher list.
ssl_disabled (bool, optional): Disables SSL usage.
ssl_verify_cert (bool, optional): Verifies the server's certificate.
Automatically enabled if ``ssl_ca`` is specified.
ssl_verify_identity (bool, optional): Verifies the server's identity.
conv (dict[int, Callable], optional): A dictionary of data conversion
functions.
credential_type (str, optional): Specifies the type of authentication to
use: auth.PASSWORD, auth.JWT, or auth.BROWSER_SSO.
autocommit (bool, optional): Enables autocommits.
results_type (str, optional): Determines the structure of the query results:
tuples, namedtuples, dicts.
results_format (str, optional): Deprecated. This option has been renamed to
results_type.
Examples:
Basic Usage:
.. code-block:: python
from langchain_community.embeddings import OpenAIEmbeddings
from langchain_community.vectorstores import SingleStoreDB
vectorstore = SingleStoreDB(
OpenAIEmbeddings(),
host="https://user:password@127.0.0.1:3306/database"
)
Advanced Usage:
.. code-block:: python
from langchain_community.embeddings import OpenAIEmbeddings
from langchain_community.vectorstores import SingleStoreDB
vectorstore = SingleStoreDB(
OpenAIEmbeddings(),
distance_strategy=DistanceStrategy.EUCLIDEAN_DISTANCE,
host="127.0.0.1",
port=3306,
user="user",
password="password",
database="db",
table_name="my_custom_table",
pool_size=10,
timeout=60,
)
Using environment variables:
.. code-block:: python
from langchain_community.embeddings import OpenAIEmbeddings
from langchain_community.vectorstores import SingleStoreDB
os.environ['SINGLESTOREDB_URL'] = 'me:p455w0rd@s2-host.com/my_db'
vectorstore = SingleStoreDB(OpenAIEmbeddings())
"""
self.embedding = embedding
self.distance_strategy = distance_strategy
self.table_name = self._sanitize_input(table_name)
self.content_field = self._sanitize_input(content_field)
self.metadata_field = self._sanitize_input(metadata_field)
self.vector_field = self._sanitize_input(vector_field)
self.connection_kwargs = kwargs
if 'conn_attrs' not in self.connection_kwargs:
self.connection_kwargs['conn_attrs'] = dict()
self.connection_kwargs['conn_attrs']['_connector_name'
] = 'langchain python sdk'
self.connection_kwargs['conn_attrs']['_connector_version'] = '1.0.1'
self.connection_pool = QueuePool(self._get_connection, max_overflow=
max_overflow, pool_size=pool_size, timeout=timeout)
self._create_table()
|
def __init__(self, embedding: Embeddings, *, distance_strategy:
DistanceStrategy=DEFAULT_DISTANCE_STRATEGY, table_name: str=
'embeddings', content_field: str='content', metadata_field: str=
'metadata', vector_field: str='vector', pool_size: int=5, max_overflow:
int=10, timeout: float=30, **kwargs: Any):
"""Initialize with necessary components.
Args:
embedding (Embeddings): A text embedding model.
distance_strategy (DistanceStrategy, optional):
Determines the strategy employed for calculating
the distance between vectors in the embedding space.
Defaults to DOT_PRODUCT.
Available options are:
- DOT_PRODUCT: Computes the scalar product of two vectors.
This is the default behavior
- EUCLIDEAN_DISTANCE: Computes the Euclidean distance between
two vectors. This metric considers the geometric distance in
the vector space, and might be more suitable for embeddings
that rely on spatial relationships.
table_name (str, optional): Specifies the name of the table in use.
Defaults to "embeddings".
content_field (str, optional): Specifies the field to store the content.
Defaults to "content".
metadata_field (str, optional): Specifies the field to store metadata.
Defaults to "metadata".
vector_field (str, optional): Specifies the field to store the vector.
Defaults to "vector".
Following arguments pertain to the connection pool:
pool_size (int, optional): Determines the number of active connections in
the pool. Defaults to 5.
max_overflow (int, optional): Determines the maximum number of connections
allowed beyond the pool_size. Defaults to 10.
timeout (float, optional): Specifies the maximum wait time in seconds for
establishing a connection. Defaults to 30.
Following arguments pertain to the database connection:
host (str, optional): Specifies the hostname, IP address, or URL for the
database connection. The default scheme is "mysql".
user (str, optional): Database username.
password (str, optional): Database password.
port (int, optional): Database port. Defaults to 3306 for non-HTTP
connections, 80 for HTTP connections, and 443 for HTTPS connections.
database (str, optional): Database name.
Additional optional arguments provide further customization over the
database connection:
pure_python (bool, optional): Toggles the connector mode. If True,
operates in pure Python mode.
local_infile (bool, optional): Allows local file uploads.
charset (str, optional): Specifies the character set for string values.
ssl_key (str, optional): Specifies the path of the file containing the SSL
key.
ssl_cert (str, optional): Specifies the path of the file containing the SSL
certificate.
ssl_ca (str, optional): Specifies the path of the file containing the SSL
certificate authority.
ssl_cipher (str, optional): Sets the SSL cipher list.
ssl_disabled (bool, optional): Disables SSL usage.
ssl_verify_cert (bool, optional): Verifies the server's certificate.
Automatically enabled if ``ssl_ca`` is specified.
ssl_verify_identity (bool, optional): Verifies the server's identity.
conv (dict[int, Callable], optional): A dictionary of data conversion
functions.
credential_type (str, optional): Specifies the type of authentication to
use: auth.PASSWORD, auth.JWT, or auth.BROWSER_SSO.
autocommit (bool, optional): Enables autocommits.
results_type (str, optional): Determines the structure of the query results:
tuples, namedtuples, dicts.
results_format (str, optional): Deprecated. This option has been renamed to
results_type.
Examples:
Basic Usage:
.. code-block:: python
from langchain_community.embeddings import OpenAIEmbeddings
from langchain_community.vectorstores import SingleStoreDB
vectorstore = SingleStoreDB(
OpenAIEmbeddings(),
host="https://user:password@127.0.0.1:3306/database"
)
Advanced Usage:
.. code-block:: python
from langchain_community.embeddings import OpenAIEmbeddings
from langchain_community.vectorstores import SingleStoreDB
vectorstore = SingleStoreDB(
OpenAIEmbeddings(),
distance_strategy=DistanceStrategy.EUCLIDEAN_DISTANCE,
host="127.0.0.1",
port=3306,
user="user",
password="password",
database="db",
table_name="my_custom_table",
pool_size=10,
timeout=60,
)
Using environment variables:
.. code-block:: python
from langchain_community.embeddings import OpenAIEmbeddings
from langchain_community.vectorstores import SingleStoreDB
os.environ['SINGLESTOREDB_URL'] = 'me:p455w0rd@s2-host.com/my_db'
vectorstore = SingleStoreDB(OpenAIEmbeddings())
"""
self.embedding = embedding
self.distance_strategy = distance_strategy
self.table_name = self._sanitize_input(table_name)
self.content_field = self._sanitize_input(content_field)
self.metadata_field = self._sanitize_input(metadata_field)
self.vector_field = self._sanitize_input(vector_field)
self.connection_kwargs = kwargs
if 'conn_attrs' not in self.connection_kwargs:
self.connection_kwargs['conn_attrs'] = dict()
self.connection_kwargs['conn_attrs']['_connector_name'
] = 'langchain python sdk'
self.connection_kwargs['conn_attrs']['_connector_version'] = '1.0.1'
self.connection_pool = QueuePool(self._get_connection, max_overflow=
max_overflow, pool_size=pool_size, timeout=timeout)
self._create_table()
|
Initialize with necessary components.
Args:
embedding (Embeddings): A text embedding model.
distance_strategy (DistanceStrategy, optional):
Determines the strategy employed for calculating
the distance between vectors in the embedding space.
Defaults to DOT_PRODUCT.
Available options are:
- DOT_PRODUCT: Computes the scalar product of two vectors.
This is the default behavior
- EUCLIDEAN_DISTANCE: Computes the Euclidean distance between
two vectors. This metric considers the geometric distance in
the vector space, and might be more suitable for embeddings
that rely on spatial relationships.
table_name (str, optional): Specifies the name of the table in use.
Defaults to "embeddings".
content_field (str, optional): Specifies the field to store the content.
Defaults to "content".
metadata_field (str, optional): Specifies the field to store metadata.
Defaults to "metadata".
vector_field (str, optional): Specifies the field to store the vector.
Defaults to "vector".
Following arguments pertain to the connection pool:
pool_size (int, optional): Determines the number of active connections in
the pool. Defaults to 5.
max_overflow (int, optional): Determines the maximum number of connections
allowed beyond the pool_size. Defaults to 10.
timeout (float, optional): Specifies the maximum wait time in seconds for
establishing a connection. Defaults to 30.
Following arguments pertain to the database connection:
host (str, optional): Specifies the hostname, IP address, or URL for the
database connection. The default scheme is "mysql".
user (str, optional): Database username.
password (str, optional): Database password.
port (int, optional): Database port. Defaults to 3306 for non-HTTP
connections, 80 for HTTP connections, and 443 for HTTPS connections.
database (str, optional): Database name.
Additional optional arguments provide further customization over the
database connection:
pure_python (bool, optional): Toggles the connector mode. If True,
operates in pure Python mode.
local_infile (bool, optional): Allows local file uploads.
charset (str, optional): Specifies the character set for string values.
ssl_key (str, optional): Specifies the path of the file containing the SSL
key.
ssl_cert (str, optional): Specifies the path of the file containing the SSL
certificate.
ssl_ca (str, optional): Specifies the path of the file containing the SSL
certificate authority.
ssl_cipher (str, optional): Sets the SSL cipher list.
ssl_disabled (bool, optional): Disables SSL usage.
ssl_verify_cert (bool, optional): Verifies the server's certificate.
Automatically enabled if ``ssl_ca`` is specified.
ssl_verify_identity (bool, optional): Verifies the server's identity.
conv (dict[int, Callable], optional): A dictionary of data conversion
functions.
credential_type (str, optional): Specifies the type of authentication to
use: auth.PASSWORD, auth.JWT, or auth.BROWSER_SSO.
autocommit (bool, optional): Enables autocommits.
results_type (str, optional): Determines the structure of the query results:
tuples, namedtuples, dicts.
results_format (str, optional): Deprecated. This option has been renamed to
results_type.
Examples:
Basic Usage:
.. code-block:: python
from langchain_community.embeddings import OpenAIEmbeddings
from langchain_community.vectorstores import SingleStoreDB
vectorstore = SingleStoreDB(
OpenAIEmbeddings(),
host="https://user:password@127.0.0.1:3306/database"
)
Advanced Usage:
.. code-block:: python
from langchain_community.embeddings import OpenAIEmbeddings
from langchain_community.vectorstores import SingleStoreDB
vectorstore = SingleStoreDB(
OpenAIEmbeddings(),
distance_strategy=DistanceStrategy.EUCLIDEAN_DISTANCE,
host="127.0.0.1",
port=3306,
user="user",
password="password",
database="db",
table_name="my_custom_table",
pool_size=10,
timeout=60,
)
Using environment variables:
.. code-block:: python
from langchain_community.embeddings import OpenAIEmbeddings
from langchain_community.vectorstores import SingleStoreDB
os.environ['SINGLESTOREDB_URL'] = 'me:p455w0rd@s2-host.com/my_db'
vectorstore = SingleStoreDB(OpenAIEmbeddings())
|
test_dereference_refs_nested_refs_no_skip
|
schema = {'type': 'object', 'properties': {'info': {'$ref': '#/$defs/info'}
}, '$defs': {'name': {'type': 'string'}, 'info': {'type': 'object',
'properties': {'age': 'int', 'name': {'$ref': '#/$defs/name'}}}}}
expected = {'type': 'object', 'properties': {'info': {'type': 'object',
'properties': {'age': 'int', 'name': {'type': 'string'}}}}, '$defs': {
'name': {'type': 'string'}, 'info': {'type': 'object', 'properties': {
'age': 'int', 'name': {'type': 'string'}}}}}
actual = dereference_refs(schema, skip_keys=())
assert actual == expected
|
def test_dereference_refs_nested_refs_no_skip() ->None:
schema = {'type': 'object', 'properties': {'info': {'$ref':
'#/$defs/info'}}, '$defs': {'name': {'type': 'string'}, 'info': {
'type': 'object', 'properties': {'age': 'int', 'name': {'$ref':
'#/$defs/name'}}}}}
expected = {'type': 'object', 'properties': {'info': {'type': 'object',
'properties': {'age': 'int', 'name': {'type': 'string'}}}}, '$defs':
{'name': {'type': 'string'}, 'info': {'type': 'object',
'properties': {'age': 'int', 'name': {'type': 'string'}}}}}
actual = dereference_refs(schema, skip_keys=())
assert actual == expected
| null |
_convert_message_to_dict
|
message_dict: Dict[str, Any]
if isinstance(message, ChatMessage):
message_dict = {'role': message.role, 'content': message.content}
elif isinstance(message, HumanMessage):
message_dict = {'role': 'user', 'content': message.content}
elif isinstance(message, AIMessage):
message_dict = {'role': 'assistant', 'content': message.content}
else:
raise TypeError(f'Got unknown type {message}')
return message_dict
|
def _convert_message_to_dict(message: BaseMessage) ->dict:
message_dict: Dict[str, Any]
if isinstance(message, ChatMessage):
message_dict = {'role': message.role, 'content': message.content}
elif isinstance(message, HumanMessage):
message_dict = {'role': 'user', 'content': message.content}
elif isinstance(message, AIMessage):
message_dict = {'role': 'assistant', 'content': message.content}
else:
raise TypeError(f'Got unknown type {message}')
return message_dict
| null |
_convert_dict_to_message
|
role = _dict['role']
if role == 'user':
return HumanMessage(content=_dict['content'])
elif role == 'assistant':
content = _dict.get('content') or ''
if _dict.get('function_call'):
_dict['function_call']['arguments'] = json.dumps(_dict[
'function_call']['arguments'])
additional_kwargs = {'function_call': dict(_dict['function_call'])}
else:
additional_kwargs = {}
return AIMessage(content=content, additional_kwargs=additional_kwargs)
elif role == 'system':
return SystemMessage(content=_dict['content'])
elif role == 'function':
return FunctionMessage(content=_dict['content'], name=_dict['name'])
else:
return ChatMessage(content=_dict['content'], role=role)
|
def _convert_dict_to_message(_dict: Mapping[str, Any]) ->BaseMessage:
role = _dict['role']
if role == 'user':
return HumanMessage(content=_dict['content'])
elif role == 'assistant':
content = _dict.get('content') or ''
if _dict.get('function_call'):
_dict['function_call']['arguments'] = json.dumps(_dict[
'function_call']['arguments'])
additional_kwargs = {'function_call': dict(_dict['function_call'])}
else:
additional_kwargs = {}
return AIMessage(content=content, additional_kwargs=additional_kwargs)
elif role == 'system':
return SystemMessage(content=_dict['content'])
elif role == 'function':
return FunctionMessage(content=_dict['content'], name=_dict['name'])
else:
return ChatMessage(content=_dict['content'], role=role)
| null |
test_vectara_add_documents
|
"""Test add_documents."""
output1 = vectara1.similarity_search('large language model', k=2,
n_sentence_context=0)
assert len(output1) == 2
assert output1[0].page_content == 'large language model'
assert output1[0].metadata['abbr'] == 'llm'
assert output1[1].page_content == 'grounded generation'
assert output1[1].metadata['abbr'] == 'gg'
output2 = vectara1.similarity_search('large language model', k=1,
n_sentence_context=0, filter='doc.test_num = 1')
assert len(output2) == 1
assert output2[0].page_content == 'grounded generation'
assert output2[0].metadata['abbr'] == 'gg'
output3 = vectara1.similarity_search_with_score('large language model', k=2,
score_threshold=0.8, n_sentence_context=0)
assert len(output3) == 1
assert output3[0][0].page_content == 'large language model'
assert output3[0][0].metadata['abbr'] == 'llm'
|
def test_vectara_add_documents(vectara1) ->None:
"""Test add_documents."""
output1 = vectara1.similarity_search('large language model', k=2,
n_sentence_context=0)
assert len(output1) == 2
assert output1[0].page_content == 'large language model'
assert output1[0].metadata['abbr'] == 'llm'
assert output1[1].page_content == 'grounded generation'
assert output1[1].metadata['abbr'] == 'gg'
output2 = vectara1.similarity_search('large language model', k=1,
n_sentence_context=0, filter='doc.test_num = 1')
assert len(output2) == 1
assert output2[0].page_content == 'grounded generation'
assert output2[0].metadata['abbr'] == 'gg'
output3 = vectara1.similarity_search_with_score('large language model',
k=2, score_threshold=0.8, n_sentence_context=0)
assert len(output3) == 1
assert output3[0][0].page_content == 'large language model'
assert output3[0][0].metadata['abbr'] == 'llm'
|
Test add_documents.
|
test_raises_error
|
parser = SimpleJsonOutputParser()
with pytest.raises(Exception):
parser.invoke('hi')
|
def test_raises_error() ->None:
parser = SimpleJsonOutputParser()
with pytest.raises(Exception):
parser.invoke('hi')
| null |
add_documents
|
"""Upload documents to Weaviate."""
from weaviate.util import get_valid_uuid
with self.client.batch as batch:
ids = []
for i, doc in enumerate(docs):
metadata = doc.metadata or {}
data_properties = {self.text_key: doc.page_content, **metadata}
if 'uuids' in kwargs:
_id = kwargs['uuids'][i]
else:
_id = get_valid_uuid(uuid4())
batch.add_data_object(data_properties, self.index_name, _id)
ids.append(_id)
return ids
|
def add_documents(self, docs: List[Document], **kwargs: Any) ->List[str]:
"""Upload documents to Weaviate."""
from weaviate.util import get_valid_uuid
with self.client.batch as batch:
ids = []
for i, doc in enumerate(docs):
metadata = doc.metadata or {}
data_properties = {self.text_key: doc.page_content, **metadata}
if 'uuids' in kwargs:
_id = kwargs['uuids'][i]
else:
_id = get_valid_uuid(uuid4())
batch.add_data_object(data_properties, self.index_name, _id)
ids.append(_id)
return ids
|
Upload documents to Weaviate.
|
lazy_parse
|
"""Lazily parse the blob."""
yield Document(page_content=blob.as_string(), metadata={'source': blob.source})
|
def lazy_parse(self, blob: Blob) ->Iterator[Document]:
"""Lazily parse the blob."""
yield Document(page_content=blob.as_string(), metadata={'source': blob.
source})
|
Lazily parse the blob.
|
test_load_no_result
|
loader = WikipediaLoader(
'NORESULTCALL_NORESULTCALL_NORESULTCALL_NORESULTCALL_NORESULTCALL_NORESULTCALL'
)
docs = loader.load()
assert not docs
|
def test_load_no_result() ->None:
loader = WikipediaLoader(
'NORESULTCALL_NORESULTCALL_NORESULTCALL_NORESULTCALL_NORESULTCALL_NORESULTCALL'
)
docs = loader.load()
assert not docs
| null |
embed_query
|
"""
Convert input text to a 'vector' (list of floats).
If the text is a number, use it as the angle for the
unit vector in units of pi.
Any other input text becomes the singular result [0, 0] !
"""
try:
angle = float(text)
return [math.cos(angle * math.pi), math.sin(angle * math.pi)]
except ValueError:
return [0.0, 0.0]
|
def embed_query(self, text: str) ->List[float]:
"""
Convert input text to a 'vector' (list of floats).
If the text is a number, use it as the angle for the
unit vector in units of pi.
Any other input text becomes the singular result [0, 0] !
"""
try:
angle = float(text)
return [math.cos(angle * math.pi), math.sin(angle * math.pi)]
except ValueError:
return [0.0, 0.0]
|
Convert input text to a 'vector' (list of floats).
If the text is a number, use it as the angle for the
unit vector in units of pi.
Any other input text becomes the singular result [0, 0] !
|
_run
|
"""Use the tool."""
return self.api_spec
|
def _run(self, tool_input: Optional[str]='', run_manager: Optional[
CallbackManagerForToolRun]=None) ->str:
"""Use the tool."""
return self.api_spec
|
Use the tool.
|
_import_pubmed
|
from langchain_community.utilities.pubmed import PubMedAPIWrapper
return PubMedAPIWrapper
|
def _import_pubmed() ->Any:
from langchain_community.utilities.pubmed import PubMedAPIWrapper
return PubMedAPIWrapper
| null |
__init__
|
"""
Args:
schemas: list of schemas
"""
self.schemas = schemas
|
def __init__(self, schemas: List[Schema]):
"""
Args:
schemas: list of schemas
"""
self.schemas = schemas
|
Args:
schemas: list of schemas
|
test_does_not_allow_args
|
"""Test formatting raises error when args are provided."""
template = 'This is a {} test.'
with pytest.raises(ValueError):
formatter.format(template, 'good')
|
def test_does_not_allow_args() ->None:
"""Test formatting raises error when args are provided."""
template = 'This is a {} test.'
with pytest.raises(ValueError):
formatter.format(template, 'good')
|
Test formatting raises error when args are provided.
|
test_sequential_usage_memory
|
"""Test sequential usage with memory."""
memory = SimpleMemory(memories={'zab': 'rab'})
chain_1 = FakeChain(input_variables=['foo'], output_variables=['bar'])
chain_2 = FakeChain(input_variables=['bar'], output_variables=['baz'])
chain = SequentialChain(memory=memory, chains=[chain_1, chain_2],
input_variables=['foo'])
output = chain({'foo': '123'})
expected_output = {'baz': '123foofoo', 'foo': '123', 'zab': 'rab'}
assert output == expected_output
memory = SimpleMemory(memories={'zab': 'rab', 'foo': 'rab'})
chain_1 = FakeChain(input_variables=['foo'], output_variables=['bar'])
chain_2 = FakeChain(input_variables=['bar'], output_variables=['baz'])
with pytest.raises(ValueError):
SequentialChain(memory=memory, chains=[chain_1, chain_2],
input_variables=['foo'])
|
def test_sequential_usage_memory() ->None:
"""Test sequential usage with memory."""
memory = SimpleMemory(memories={'zab': 'rab'})
chain_1 = FakeChain(input_variables=['foo'], output_variables=['bar'])
chain_2 = FakeChain(input_variables=['bar'], output_variables=['baz'])
chain = SequentialChain(memory=memory, chains=[chain_1, chain_2],
input_variables=['foo'])
output = chain({'foo': '123'})
expected_output = {'baz': '123foofoo', 'foo': '123', 'zab': 'rab'}
assert output == expected_output
memory = SimpleMemory(memories={'zab': 'rab', 'foo': 'rab'})
chain_1 = FakeChain(input_variables=['foo'], output_variables=['bar'])
chain_2 = FakeChain(input_variables=['bar'], output_variables=['baz'])
with pytest.raises(ValueError):
SequentialChain(memory=memory, chains=[chain_1, chain_2],
input_variables=['foo'])
|
Test sequential usage with memory.
|
visit_operation
|
args = [arg.accept(self) for arg in operation.arguments]
return self._format_func(operation.operator).join(args)
|
def visit_operation(self, operation: Operation) ->str:
args = [arg.accept(self) for arg in operation.arguments]
return self._format_func(operation.operator).join(args)
| null |
test_annoy_vector_sim_by_index
|
"""Test vector similarity."""
texts = ['foo', 'bar', 'baz']
docsearch = Annoy.from_texts(texts, FakeEmbeddings())
index_to_id = docsearch.index_to_docstore_id
expected_docstore = InMemoryDocstore({index_to_id[0]: Document(page_content
='foo'), index_to_id[1]: Document(page_content='bar'), index_to_id[2]:
Document(page_content='baz')})
assert docsearch.docstore.__dict__ == expected_docstore.__dict__
output = docsearch.similarity_search_by_index(2, k=1)
assert output == [Document(page_content='baz')]
|
def test_annoy_vector_sim_by_index() ->None:
"""Test vector similarity."""
texts = ['foo', 'bar', 'baz']
docsearch = Annoy.from_texts(texts, FakeEmbeddings())
index_to_id = docsearch.index_to_docstore_id
expected_docstore = InMemoryDocstore({index_to_id[0]: Document(
page_content='foo'), index_to_id[1]: Document(page_content='bar'),
index_to_id[2]: Document(page_content='baz')})
assert docsearch.docstore.__dict__ == expected_docstore.__dict__
output = docsearch.similarity_search_by_index(2, k=1)
assert output == [Document(page_content='baz')]
|
Test vector similarity.
|
_import_supabase
|
from langchain_community.vectorstores.supabase import SupabaseVectorStore
return SupabaseVectorStore
|
def _import_supabase() ->Any:
from langchain_community.vectorstores.supabase import SupabaseVectorStore
return SupabaseVectorStore
| null |
_call
|
if self.sequential_responses:
return self._get_next_response_in_sequence
if self.queries is not None:
return self.queries[prompt]
if stop is None:
return 'foo'
else:
return 'bar'
|
def _call(self, prompt: str, stop: Optional[List[str]]=None, run_manager:
Optional[CallbackManagerForLLMRun]=None, **kwargs: Any) ->str:
if self.sequential_responses:
return self._get_next_response_in_sequence
if self.queries is not None:
return self.queries[prompt]
if stop is None:
return 'foo'
else:
return 'bar'
| null |
return_values
|
return ['output']
|
@property
def return_values(self) ->List[str]:
return ['output']
| null |
_summarize_metrics_for_generated_outputs
|
pd = import_pandas()
metrics_df = pd.DataFrame(metrics)
metrics_summary = metrics_df.describe()
return metrics_summary.to_dict()
|
def _summarize_metrics_for_generated_outputs(metrics: Sequence) ->dict:
pd = import_pandas()
metrics_df = pd.DataFrame(metrics)
metrics_summary = metrics_df.describe()
return metrics_summary.to_dict()
| null |
_format_response_payload
|
"""Formats response"""
try:
text = json.loads(output)['response']
if stop_sequences:
text = enforce_stop_tokens(text, stop_sequences)
return text
except Exception as e:
if isinstance(e, json.decoder.JSONDecodeError):
return output.decode('utf-8')
raise e
|
def _format_response_payload(self, output: bytes, stop_sequences: Optional[
List[str]]) ->str:
"""Formats response"""
try:
text = json.loads(output)['response']
if stop_sequences:
text = enforce_stop_tokens(text, stop_sequences)
return text
except Exception as e:
if isinstance(e, json.decoder.JSONDecodeError):
return output.decode('utf-8')
raise e
|
Formats response
|
lazy_load
|
"""A lazy loader for Documents."""
try:
from azureml.fsspec import AzureMachineLearningFileSystem
except ImportError as exc:
raise ImportError(
'Could not import azureml-fspec package.Please install it with `pip install azureml-fsspec`.'
) from exc
fs = AzureMachineLearningFileSystem(self.url)
if self.glob_pattern:
remote_paths_list = fs.glob(self.glob_pattern)
else:
remote_paths_list = fs.ls()
for remote_path in remote_paths_list:
with fs.open(remote_path) as f:
loader = UnstructuredFileIOLoader(file=f)
yield from loader.load()
|
def lazy_load(self) ->Iterator[Document]:
    """Lazily yield Documents parsed from files on the AzureML datastore.

    Files are enumerated via ``glob_pattern`` when set, otherwise every file
    at ``self.url`` is loaded.

    Raises:
        ImportError: If the ``azureml-fsspec`` package is not installed.
    """
    try:
        from azureml.fsspec import AzureMachineLearningFileSystem
    except ImportError as exc:
        # Fixed message: package name was misspelled ("azureml-fspec") and a
        # space was missing after the period.
        raise ImportError(
            'Could not import azureml-fsspec package. Please install it with `pip install azureml-fsspec`.'
            ) from exc
    fs = AzureMachineLearningFileSystem(self.url)
    if self.glob_pattern:
        remote_paths_list = fs.glob(self.glob_pattern)
    else:
        remote_paths_list = fs.ls()
    for remote_path in remote_paths_list:
        with fs.open(remote_path) as f:
            loader = UnstructuredFileIOLoader(file=f)
            yield from loader.load()
|
A lazy loader for Documents.
|
run_coroutine_in_new_loop
|
new_loop = asyncio.new_event_loop()
try:
asyncio.set_event_loop(new_loop)
return new_loop.run_until_complete(coroutine_func(*args, **kwargs))
finally:
new_loop.close()
|
def run_coroutine_in_new_loop(coroutine_func: Any, *args: Dict, **kwargs: Dict
    ) ->Any:
    """Run ``coroutine_func(*args, **kwargs)`` to completion on a fresh event loop.

    A dedicated loop is created so this works even when called from code that
    already owns (or has closed) the thread's event loop.

    Args:
        coroutine_func: Coroutine function to invoke.
        *args: Positional arguments forwarded to ``coroutine_func``.
        **kwargs: Keyword arguments forwarded to ``coroutine_func``.

    Returns:
        Whatever the coroutine returns.
    """
    new_loop = asyncio.new_event_loop()
    try:
        asyncio.set_event_loop(new_loop)
        return new_loop.run_until_complete(coroutine_func(*args, **kwargs))
    finally:
        new_loop.close()
        # Bug fix: don't leave the closed loop installed as the thread's
        # current event loop — later asyncio calls would see a closed loop.
        asyncio.set_event_loop(None)
| null |
query_params
|
"""Create query parameters for GitHub API."""
labels = ','.join(self.labels) if self.labels else self.labels
query_params_dict = {'milestone': self.milestone, 'state': self.state,
'assignee': self.assignee, 'creator': self.creator, 'mentioned': self.
mentioned, 'labels': labels, 'sort': self.sort, 'direction': self.
direction, 'since': self.since}
query_params_list = [f'{k}={v}' for k, v in query_params_dict.items() if v
is not None]
query_params = '&'.join(query_params_list)
return query_params
|
@property
def query_params(self) ->str:
    """Build the query string for the GitHub issues API from the configured filters."""
    label_filter = ','.join(self.labels) if self.labels else self.labels
    filters = {'milestone': self.milestone, 'state': self.state,
        'assignee': self.assignee, 'creator': self.creator, 'mentioned':
        self.mentioned, 'labels': label_filter, 'sort': self.sort,
        'direction': self.direction, 'since': self.since}
    # Only filters that were actually configured make it into the string.
    return '&'.join(f'{name}={value}' for name, value in filters.items() if
        value is not None)
|
Create query parameters for GitHub API.
|
put
|
"""PUT the URL and return the text."""
return requests.put(url, json=data, headers=self.headers, auth=self.auth,
**kwargs)
|
def put(self, url: str, data: Dict[str, Any], **kwargs: Any
    ) ->requests.Response:
    """Send a PUT request with ``data`` as the JSON body and return the response."""
    response = requests.put(url, json=data, headers=self.headers, auth=
        self.auth, **kwargs)
    return response
|
PUT the URL and return the text.
|
on_retriever_error
|
self.on_retriever_error_common()
|
def on_retriever_error(self, *args: Any, **kwargs: Any) ->Any:
    """Record a retriever error by delegating to the shared bookkeeping helper."""
    self.on_retriever_error_common()
| null |
_run
|
"""Use the tool."""
from bs4 import BeautifulSoup
if self.sync_browser is None:
raise ValueError(f'Synchronous browser not provided to {self.name}')
page = get_current_page(self.sync_browser)
html_content = page.content()
soup = BeautifulSoup(html_content, 'lxml')
return ' '.join(text for text in soup.stripped_strings)
|
def _run(self, run_manager: Optional[CallbackManagerForToolRun]=None) ->str:
    """Extract all visible text from the current page of the sync browser."""
    from bs4 import BeautifulSoup
    if self.sync_browser is None:
        raise ValueError(f'Synchronous browser not provided to {self.name}')
    current_page = get_current_page(self.sync_browser)
    soup = BeautifulSoup(current_page.content(), 'lxml')
    # stripped_strings yields whitespace-trimmed text fragments.
    return ' '.join(soup.stripped_strings)
|
Use the tool.
|
_stream
|
message_dicts, params = self._create_message_dicts(messages, stop)
params = {**params, **kwargs, 'stream': True}
default_chunk_class = AIMessageChunk
for chunk in self.completion_with_retry(messages=message_dicts, run_manager
=run_manager, **params):
if len(chunk.choices) == 0:
continue
delta = chunk.choices[0].delta
if not delta.content:
continue
chunk = _convert_delta_to_message_chunk(delta, default_chunk_class)
default_chunk_class = chunk.__class__
yield ChatGenerationChunk(message=chunk)
if run_manager:
run_manager.on_llm_new_token(chunk.content)
|
def _stream(self, messages: List[BaseMessage], stop: Optional[List[str]]=
    None, run_manager: Optional[CallbackManagerForLLMRun]=None, **kwargs: Any
    ) ->Iterator[ChatGenerationChunk]:
    """Stream chat completion chunks from the underlying API.

    Args:
        messages: Conversation so far.
        stop: Optional stop sequences forwarded to the API.
        run_manager: If given, notified of every new token yielded.

    Yields:
        A ``ChatGenerationChunk`` for each non-empty delta received.
    """
    message_dicts, params = self._create_message_dicts(messages, stop)
    params = {**params, **kwargs, 'stream': True}
    default_chunk_class = AIMessageChunk
    for chunk in self.completion_with_retry(messages=message_dicts,
        run_manager=run_manager, **params):
        # Skip frames with no choices and deltas without content.
        if len(chunk.choices) == 0:
            continue
        delta = chunk.choices[0].delta
        if not delta.content:
            continue
        # NOTE: `chunk` is rebound here from the raw API frame to a
        # message chunk; the subsequent lines use the rebound value.
        chunk = _convert_delta_to_message_chunk(delta, default_chunk_class)
        default_chunk_class = chunk.__class__
        yield ChatGenerationChunk(message=chunk)
        if run_manager:
            run_manager.on_llm_new_token(chunk.content)
| null |
default_preprocessing_func
|
return text.split()
|
def default_preprocessing_func(text: str) ->List[str]:
    """Split ``text`` into whitespace-delimited tokens."""
    tokens = text.split()
    return tokens
| null |
apply_and_parse
|
"""Call apply and then parse the results."""
warnings.warn(
'The apply_and_parse method is deprecated, instead pass an output parser directly to LLMChain.'
)
result = self.apply(input_list, callbacks=callbacks)
return self._parse_generation(result)
|
def apply_and_parse(self, input_list: List[Dict[str, Any]], callbacks:
    Callbacks=None) ->Sequence[Union[str, List[str], Dict[str, str]]]:
    """Run ``apply`` over the inputs, then parse each generation.

    Deprecated: pass an output parser directly to ``LLMChain`` instead.
    """
    warnings.warn(
        'The apply_and_parse method is deprecated, instead pass an output parser directly to LLMChain.'
        )
    generations = self.apply(input_list, callbacks=callbacks)
    return self._parse_generation(generations)
|
Call apply and then parse the results.
|
__copy__
|
"""Return a copy of the callback handler."""
return self
|
def __copy__(self) ->'OpenAICallbackHandler':
    """Return a copy of the callback handler."""
    # NOTE(review): intentionally returns the same instance, so shallow
    # copies share accumulated state — presumably deliberate; confirm.
    return self
|
Return a copy of the callback handler.
|
clear
|
"""
Clear cache. If `asynchronous` is True, flush asynchronously.
This flushes the *whole* db.
"""
asynchronous = kwargs.get('asynchronous', False)
if asynchronous:
asynchronous = 'ASYNC'
else:
asynchronous = 'SYNC'
self.redis.flushdb(flush_type=asynchronous)
|
def clear(self, **kwargs: Any) ->None:
    """Flush the entire Redis database backing this cache.

    Pass ``asynchronous=True`` to request a non-blocking (ASYNC) flush;
    the default is a blocking (SYNC) flush.
    """
    flush_type = 'ASYNC' if kwargs.get('asynchronous', False) else 'SYNC'
    self.redis.flushdb(flush_type=flush_type)
|
Clear cache. If `asynchronous` is True, flush asynchronously.
This flushes the *whole* db.
|
embed_query
|
return self.embed_documents([text])[0]
|
def embed_query(self, text: str) ->List[float]:
    """Embed a single query string by delegating to ``embed_documents``."""
    embeddings = self.embed_documents([text])
    return embeddings[0]
| null |
resize_base64_image
|
"""
Resize an image encoded as a Base64 string.
:param base64_string: A Base64 encoded string of the image to be resized.
:param size: A tuple representing the new size (width, height) for the image.
:return: A Base64 encoded string of the resized image.
"""
img_data = base64.b64decode(base64_string)
img = Image.open(io.BytesIO(img_data))
resized_img = img.resize(size, Image.LANCZOS)
buffered = io.BytesIO()
resized_img.save(buffered, format=img.format)
return base64.b64encode(buffered.getvalue()).decode('utf-8')
|
def resize_base64_image(base64_string, size=(128, 128)):
    """
    Resize an image encoded as a Base64 string.
    :param base64_string: A Base64 encoded string of the image to be resized.
    :param size: A tuple representing the new size (width, height) for the image.
    :return: A Base64 encoded string of the resized image.
    """
    decoded = base64.b64decode(base64_string)
    original = Image.open(io.BytesIO(decoded))
    # LANCZOS gives high-quality downsampling; keep the original format.
    shrunk = original.resize(size, Image.LANCZOS)
    out_buffer = io.BytesIO()
    shrunk.save(out_buffer, format=original.format)
    return base64.b64encode(out_buffer.getvalue()).decode('utf-8')
|
Resize an image encoded as a Base64 string.
:param base64_string: A Base64 encoded string of the image to be resized.
:param size: A tuple representing the new size (width, height) for the image.
:return: A Base64 encoded string of the resized image.
|
test_cpal_chain
|
"""
patch required since `networkx` package is not part of unit test environment
"""
with mock.patch('langchain_experimental.cpal.models.NetworkxEntityGraph'
) as mock_networkx:
graph_instance = mock_networkx.return_value
graph_instance.get_topological_sort.return_value = ['cindy', 'marcia',
'jan']
cpal_chain = CPALChain.from_univariate_prompt(llm=self.fake_llm,
verbose=True)
cpal_chain.run(
'jan has three times the number of pets as marcia. marcia has two more pets than cindy.if cindy has ten pets, how many pets does jan have? '
)
|
def test_cpal_chain(self) ->None:
    """
    patch required since `networkx` package is not part of unit test environment
    """
    # Replace the real graph class so no `networkx` import is attempted.
    with mock.patch('langchain_experimental.cpal.models.NetworkxEntityGraph'
        ) as mock_networkx:
        graph_instance = mock_networkx.return_value
        # Canned causal ordering the chain would otherwise compute itself.
        graph_instance.get_topological_sort.return_value = ['cindy',
            'marcia', 'jan']
        cpal_chain = CPALChain.from_univariate_prompt(llm=self.fake_llm,
            verbose=True)
        cpal_chain.run(
            'jan has three times the number of pets as marcia. marcia has two more pets than cindy.if cindy has ten pets, how many pets does jan have? '
            )
|
patch required since `networkx` package is not part of unit test environment
|
_texts_to_documents
|
"""Return list of Documents from list of texts and metadatas."""
if metadatas is None:
metadatas = repeat({})
docs = [Document(page_content=text, metadata=metadata) for text, metadata in
zip(texts, metadatas)]
return docs
|
@staticmethod
def _texts_to_documents(texts: Iterable[str], metadatas: Optional[Iterable[
    Dict[Any, Any]]]=None) ->List[Document]:
    """Pair each text with its metadata (``{}`` when none given) as a Document."""
    metadata_iter = metadatas if metadatas is not None else repeat({})
    return [Document(page_content=content, metadata=meta) for content,
        meta in zip(texts, metadata_iter)]
|
Return list of Documents from list of texts and metadatas.
|
get_connection_string
|
connection_string: str = get_from_dict_or_env(data=kwargs, key=
'connection_string', env_key='PGVECTOR_CONNECTION_STRING')
if not connection_string:
raise ValueError(
'Postgres connection string is requiredEither pass it as a parameteror set the PGVECTOR_CONNECTION_STRING environment variable.'
)
return connection_string
|
@classmethod
def get_connection_string(cls, kwargs: Dict[str, Any]) ->str:
    """Resolve the Postgres connection string.

    Looks in ``kwargs['connection_string']`` first, then falls back to the
    ``PGVECTOR_CONNECTION_STRING`` environment variable.

    Raises:
        ValueError: If no connection string can be found either way.
    """
    connection_string: str = get_from_dict_or_env(data=kwargs, key=
        'connection_string', env_key='PGVECTOR_CONNECTION_STRING')
    if not connection_string:
        # Fixed message: the original ran words together
        # ("requiredEither ... parameteror set").
        raise ValueError(
            'Postgres connection string is required. Either pass it as a parameter or set the PGVECTOR_CONNECTION_STRING environment variable.'
            )
    return connection_string
| null |
test_litellm_generate
|
"""Test generate method of anthropic."""
chat = ChatLiteLLM(model='test')
chat_messages: List[List[BaseMessage]] = [[HumanMessage(content=
'How many toes do dogs have?')]]
messages_copy = [messages.copy() for messages in chat_messages]
result: LLMResult = chat.generate(chat_messages)
assert isinstance(result, LLMResult)
for response in result.generations[0]:
assert isinstance(response, ChatGeneration)
assert isinstance(response.text, str)
assert response.text == response.message.content
assert chat_messages == messages_copy
|
def test_litellm_generate() ->None:
    """Test the generate method of ChatLiteLLM."""
    chat = ChatLiteLLM(model='test')
    chat_messages: List[List[BaseMessage]] = [[HumanMessage(content=
        'How many toes do dogs have?')]]
    # Keep a copy so we can verify generate() does not mutate its input.
    messages_copy = [messages.copy() for messages in chat_messages]
    result: LLMResult = chat.generate(chat_messages)
    assert isinstance(result, LLMResult)
    for response in result.generations[0]:
        assert isinstance(response, ChatGeneration)
        assert isinstance(response.text, str)
        assert response.text == response.message.content
    assert chat_messages == messages_copy
|
Test generate method of ChatLiteLLM.
|
partial
|
"""Get a new ChatPromptTemplate with some input variables already filled in.
Args:
**kwargs: keyword arguments to use for filling in template variables. Ought
to be a subset of the input variables.
Returns:
A new ChatPromptTemplate.
Example:
.. code-block:: python
from langchain_core.prompts import ChatPromptTemplate
template = ChatPromptTemplate.from_messages(
[
("system", "You are an AI assistant named {name}."),
("human", "Hi I'm {user}"),
("ai", "Hi there, {user}, I'm {name}."),
("human", "{input}"),
]
)
template2 = template.partial(user="Lucy", name="R2D2")
template2.format_messages(input="hello")
"""
prompt_dict = self.__dict__.copy()
prompt_dict['input_variables'] = list(set(self.input_variables).difference(
kwargs))
prompt_dict['partial_variables'] = {**self.partial_variables, **kwargs}
return type(self)(**prompt_dict)
|
def partial(self, **kwargs: Union[str, Callable[[], str]]
    ) ->ChatPromptTemplate:
    """Return a copy of this template with some input variables pre-filled.

    Args:
        **kwargs: Values (or zero-argument callables producing values) for a
            subset of the template's input variables.

    Returns:
        A new ChatPromptTemplate whose ``input_variables`` exclude the
        pre-filled names and whose ``partial_variables`` include them.

    Example:
        .. code-block:: python

            from langchain_core.prompts import ChatPromptTemplate
            template = ChatPromptTemplate.from_messages(
                [
                    ("system", "You are an AI assistant named {name}."),
                    ("human", "Hi I'm {user}"),
                    ("ai", "Hi there, {user}, I'm {name}."),
                    ("human", "{input}"),
                ]
            )
            template2 = template.partial(user="Lucy", name="R2D2")
            template2.format_messages(input="hello")
    """
    new_fields = dict(self.__dict__)
    remaining = set(self.input_variables) - set(kwargs)
    new_fields['input_variables'] = list(remaining)
    new_fields['partial_variables'] = {**self.partial_variables, **kwargs}
    return type(self)(**new_fields)
|
Get a new ChatPromptTemplate with some input variables already filled in.
Args:
**kwargs: keyword arguments to use for filling in template variables. Ought
to be a subset of the input variables.
Returns:
A new ChatPromptTemplate.
Example:
.. code-block:: python
from langchain_core.prompts import ChatPromptTemplate
template = ChatPromptTemplate.from_messages(
[
("system", "You are an AI assistant named {name}."),
("human", "Hi I'm {user}"),
("ai", "Hi there, {user}, I'm {name}."),
("human", "{input}"),
]
)
template2 = template.partial(user="Lucy", name="R2D2")
template2.format_messages(input="hello")
|
_run
|
"""Use the Stack Exchange tool."""
return self.api_wrapper.run(query)
|
def _run(self, query: str, run_manager: Optional[CallbackManagerForToolRun]
    =None) ->str:
    """Use the Stack Exchange tool."""
    # Delegate the search to the wrapped API client.
    return self.api_wrapper.run(query)
|
Use the Stack Exchange tool.
|
_format_params
|
system, formatted_messages = _format_messages(messages)
rtn = {'model': self.model, 'max_tokens': self.max_tokens, 'messages':
formatted_messages, 'temperature': self.temperature, 'top_k': self.
top_k, 'top_p': self.top_p, 'stop_sequences': stop, 'system': system}
rtn = {k: v for k, v in rtn.items() if v is not None}
return rtn
|
def _format_params(self, *, messages: List[BaseMessage], stop: Optional[
    List[str]]=None, **kwargs: Dict) ->Dict:
    """Assemble the request payload, dropping parameters that are unset."""
    system, formatted_messages = _format_messages(messages)
    candidate_params = {'model': self.model, 'max_tokens': self.max_tokens,
        'messages': formatted_messages, 'temperature': self.temperature,
        'top_k': self.top_k, 'top_p': self.top_p, 'stop_sequences': stop,
        'system': system}
    return {key: value for key, value in candidate_params.items() if 
        value is not None}
| null |
_get_relevant_documents
|
from zep_python.memory import MemorySearchPayload
if not self.zep_client:
raise RuntimeError('Zep client not initialized.')
payload = MemorySearchPayload(text=query, metadata=metadata, search_scope=
self.search_scope, search_type=self.search_type, mmr_lambda=self.mmr_lambda
)
results: List[MemorySearchResult] = self.zep_client.memory.search_memory(self
.session_id, payload, limit=self.top_k)
if self.search_scope == SearchScope.summary:
return self._summary_search_result_to_doc(results)
return self._messages_search_result_to_doc(results)
|
def _get_relevant_documents(self, query: str, *, run_manager:
    CallbackManagerForRetrieverRun, metadata: Optional[Dict[str, Any]]=None
    ) ->List[Document]:
    """Search the Zep session memory and return matching Documents.

    Depending on ``search_scope``, results are converted from summary
    matches or from message matches.

    Raises:
        RuntimeError: If the Zep client was never initialized.
    """
    from zep_python.memory import MemorySearchPayload
    if not self.zep_client:
        raise RuntimeError('Zep client not initialized.')
    payload = MemorySearchPayload(text=query, metadata=metadata,
        search_scope=self.search_scope, search_type=self.search_type,
        mmr_lambda=self.mmr_lambda)
    # top_k caps how many results Zep returns for this session.
    results: List[MemorySearchResult] = self.zep_client.memory.search_memory(
        self.session_id, payload, limit=self.top_k)
    if self.search_scope == SearchScope.summary:
        return self._summary_search_result_to_doc(results)
    return self._messages_search_result_to_doc(results)
| null |
clear
|
"""Clear cache. This is for all LLMs at once."""
self.astra_db.truncate_collection(self.collection_name)
|
def clear(self, **kwargs: Any) ->None:
    """Empty the cache by truncating its Astra DB collection (affects all LLMs)."""
    collection = self.collection_name
    self.astra_db.truncate_collection(collection)
|
Clear cache. This is for all LLMs at once.
|
test_agent_stopped_early
|
"""Test react chain when max iterations or max execution time is exceeded."""
agent = _get_agent(max_iterations=0)
output = agent.run('when was langchain made')
assert output == 'Agent stopped due to iteration limit or time limit.'
agent = _get_agent(max_execution_time=0.0)
output = agent.run('when was langchain made')
assert output == 'Agent stopped due to iteration limit or time limit.'
|
def test_agent_stopped_early() ->None:
    """Test react chain when max iterations or max execution time is exceeded."""
    expected = 'Agent stopped due to iteration limit or time limit.'
    # Zero iterations and a zero-second budget must both stop the agent.
    for limit_kwargs in ({'max_iterations': 0}, {'max_execution_time': 0.0}):
        agent = _get_agent(**limit_kwargs)
        assert agent.run('when was langchain made') == expected
|
Test react chain when max iterations or max execution time is exceeded.
|
test_llm_with_callbacks
|
"""Test LLM callbacks."""
handler = FakeCallbackHandler()
llm = FakeListLLM(callbacks=[handler], verbose=True, responses=['foo'])
output = llm('foo')
assert output == 'foo'
assert handler.starts == 1
assert handler.ends == 1
assert handler.errors == 0
|
def test_llm_with_callbacks() ->None:
    """Test LLM callbacks."""
    handler = FakeCallbackHandler()
    llm = FakeListLLM(callbacks=[handler], verbose=True, responses=['foo'])
    output = llm('foo')
    assert output == 'foo'
    # One successful call records exactly one start/end pair and no errors.
    assert handler.starts == 1
    assert handler.ends == 1
    assert handler.errors == 0
|
Test LLM callbacks.
|
format_chat_history
|
messages = format_messages(chain_input)
return {'chat_history': messages, 'text': chain_input.get('text')}
|
def format_chat_history(chain_input: dict) ->dict:
    """Normalize chain input into chat-history messages plus the raw text."""
    history = format_messages(chain_input)
    return {'chat_history': history, 'text': chain_input.get('text')}
| null |
test_from_texts_with_metadatas_and_pre_filter
|
texts = ['Dogs are tough.', 'Cats have fluff.', 'What is a sandwich?',
'The fence is purple.']
metadatas = [{'a': 1}, {'b': 1}, {'c': 1}, {'d': 1, 'e': 2}]
vectorstore = MongoDBAtlasVectorSearch.from_texts(texts, embedding_openai,
metadatas=metadatas, collection=collection, index_name=INDEX_NAME)
sleep(1)
output = vectorstore.similarity_search('Sandwich', k=1, pre_filter={'range':
{'lte': 0, 'path': 'c'}})
assert output == []
|
def test_from_texts_with_metadatas_and_pre_filter(self, embedding_openai:
    Embeddings, collection: Any) ->None:
    """A pre_filter that matches no stored document should return no results."""
    texts = ['Dogs are tough.', 'Cats have fluff.', 'What is a sandwich?',
        'The fence is purple.']
    metadatas = [{'a': 1}, {'b': 1}, {'c': 1}, {'d': 1, 'e': 2}]
    vectorstore = MongoDBAtlasVectorSearch.from_texts(texts,
        embedding_openai, metadatas=metadatas, collection=collection,
        index_name=INDEX_NAME)
    # Give the Atlas index a moment to pick up the inserted documents.
    sleep(1)
    output = vectorstore.similarity_search('Sandwich', k=1, pre_filter={
        'range': {'lte': 0, 'path': 'c'}})
    assert output == []
| null |
create_tagging_chain_pydantic
|
"""Creates a chain that extracts information from a passage
based on a pydantic schema.
Args:
pydantic_schema: The pydantic schema of the entities to extract.
llm: The language model to use.
Returns:
Chain (LLMChain) that can be used to extract information from a passage.
"""
openai_schema = pydantic_schema.schema()
function = _get_tagging_function(openai_schema)
prompt = prompt or ChatPromptTemplate.from_template(_TAGGING_TEMPLATE)
output_parser = PydanticOutputFunctionsParser(pydantic_schema=pydantic_schema)
llm_kwargs = get_llm_kwargs(function)
chain = LLMChain(llm=llm, prompt=prompt, llm_kwargs=llm_kwargs,
output_parser=output_parser, **kwargs)
return chain
|
def create_tagging_chain_pydantic(pydantic_schema: Any, llm:
    BaseLanguageModel, prompt: Optional[ChatPromptTemplate]=None, **kwargs: Any
    ) ->Chain:
    """Build an LLMChain that tags a passage according to a pydantic schema.

    Args:
        pydantic_schema: Pydantic model describing the fields to extract.
        llm: The language model to use.
        prompt: Optional custom prompt; defaults to the standard tagging
            template.

    Returns:
        Chain (LLMChain) whose output is parsed back into the pydantic model.
    """
    tagging_prompt = prompt or ChatPromptTemplate.from_template(
        _TAGGING_TEMPLATE)
    function = _get_tagging_function(pydantic_schema.schema())
    parser = PydanticOutputFunctionsParser(pydantic_schema=pydantic_schema)
    return LLMChain(llm=llm, prompt=tagging_prompt, llm_kwargs=
        get_llm_kwargs(function), output_parser=parser, **kwargs)
|
Creates a chain that extracts information from a passage
based on a pydantic schema.
Args:
pydantic_schema: The pydantic schema of the entities to extract.
llm: The language model to use.
Returns:
Chain (LLMChain) that can be used to extract information from a passage.
|
test_wasm_chat_without_service_url
|
chat = WasmChatService()
system_message = SystemMessage(content='You are an AI assistant')
user_message = HumanMessage(content='What is the capital of France?')
messages = [system_message, user_message]
with pytest.raises(ValueError) as e:
chat(messages)
assert 'Error code: 503' in str(e)
assert 'reason: The IP address or port of the chat service is incorrect.' in str(
e)
|
def test_wasm_chat_without_service_url() ->None:
    """Calling the chat service with a bad/missing URL should surface a 503 error."""
    chat = WasmChatService()
    system_message = SystemMessage(content='You are an AI assistant')
    user_message = HumanMessage(content='What is the capital of France?')
    messages = [system_message, user_message]
    with pytest.raises(ValueError) as e:
        chat(messages)
    assert 'Error code: 503' in str(e)
    assert 'reason: The IP address or port of the chat service is incorrect.' in str(
        e)
| null |
_generate
|
"""Call out to OpenAI's endpoint with k unique prompts.
Args:
prompts: The prompts to pass into the model.
stop: Optional list of stop words to use when generating.
Returns:
The full LLM output.
Example:
.. code-block:: python
response = openai.generate(["Tell me a joke."])
"""
params = self._invocation_params
params = {**params, **kwargs}
sub_prompts = self.get_sub_prompts(params, prompts, stop)
choices = []
token_usage: Dict[str, int] = {}
_keys = {'completion_tokens', 'prompt_tokens', 'total_tokens'}
system_fingerprint: Optional[str] = None
for _prompts in sub_prompts:
if self.streaming:
if len(_prompts) > 1:
raise ValueError('Cannot stream results with multiple prompts.')
generation: Optional[GenerationChunk] = None
for chunk in self._stream(_prompts[0], stop, run_manager, **kwargs):
if generation is None:
generation = chunk
else:
generation += chunk
assert generation is not None
choices.append({'text': generation.text, 'finish_reason':
generation.generation_info.get('finish_reason') if generation.
generation_info else None, 'logprobs': generation.
generation_info.get('logprobs') if generation.generation_info else
None})
else:
response = completion_with_retry(self, prompt=_prompts, run_manager
=run_manager, **params)
if not isinstance(response, dict):
response = response.dict()
choices.extend(response['choices'])
update_token_usage(_keys, response, token_usage)
if not system_fingerprint:
system_fingerprint = response.get('system_fingerprint')
return self.create_llm_result(choices, prompts, params, token_usage,
system_fingerprint=system_fingerprint)
|
def _generate(self, prompts: List[str], stop: Optional[List[str]]=None,
    run_manager: Optional[CallbackManagerForLLMRun]=None, **kwargs: Any
    ) ->LLMResult:
    """Call out to OpenAI's endpoint with k unique prompts.
    Args:
        prompts: The prompts to pass into the model.
        stop: Optional list of stop words to use when generating.
    Returns:
        The full LLM output.
    Example:
        .. code-block:: python
            response = openai.generate(["Tell me a joke."])
    """
    params = self._invocation_params
    params = {**params, **kwargs}
    sub_prompts = self.get_sub_prompts(params, prompts, stop)
    choices = []
    token_usage: Dict[str, int] = {}
    # Usage keys accumulated across sub-prompt batches.
    _keys = {'completion_tokens', 'prompt_tokens', 'total_tokens'}
    system_fingerprint: Optional[str] = None
    for _prompts in sub_prompts:
        if self.streaming:
            if len(_prompts) > 1:
                raise ValueError('Cannot stream results with multiple prompts.'
                    )
            # Fold all streamed chunks into a single generation.
            generation: Optional[GenerationChunk] = None
            for chunk in self._stream(_prompts[0], stop, run_manager, **kwargs
                ):
                if generation is None:
                    generation = chunk
                else:
                    generation += chunk
            assert generation is not None
            choices.append({'text': generation.text, 'finish_reason':
                generation.generation_info.get('finish_reason') if
                generation.generation_info else None, 'logprobs':
                generation.generation_info.get('logprobs') if generation.
                generation_info else None})
        else:
            response = completion_with_retry(self, prompt=_prompts,
                run_manager=run_manager, **params)
            if not isinstance(response, dict):
                # Convert an SDK response object into a plain dict.
                response = response.dict()
            choices.extend(response['choices'])
            update_token_usage(_keys, response, token_usage)
            # Keep the first fingerprint seen across batches.
            if not system_fingerprint:
                system_fingerprint = response.get('system_fingerprint')
    return self.create_llm_result(choices, prompts, params, token_usage,
        system_fingerprint=system_fingerprint)
|
Call out to OpenAI's endpoint with k unique prompts.
Args:
prompts: The prompts to pass into the model.
stop: Optional list of stop words to use when generating.
Returns:
The full LLM output.
Example:
.. code-block:: python
response = openai.generate(["Tell me a joke."])
|
get_sync
|
"""Get the equivalent sync RunManager.
Returns:
CallbackManagerForRetrieverRun: The sync RunManager.
"""
return CallbackManagerForRetrieverRun(run_id=self.run_id, handlers=self.
handlers, inheritable_handlers=self.inheritable_handlers, parent_run_id
=self.parent_run_id, tags=self.tags, inheritable_tags=self.
inheritable_tags, metadata=self.metadata, inheritable_metadata=self.
inheritable_metadata)
|
def get_sync(self) ->CallbackManagerForRetrieverRun:
    """Build the synchronous counterpart of this run manager.

    Returns:
        CallbackManagerForRetrieverRun: A sync manager sharing this one's
        identifiers, handlers, tags, and metadata.
    """
    shared_state = {'run_id': self.run_id, 'handlers': self.handlers,
        'inheritable_handlers': self.inheritable_handlers,
        'parent_run_id': self.parent_run_id, 'tags': self.tags,
        'inheritable_tags': self.inheritable_tags, 'metadata': self.
        metadata, 'inheritable_metadata': self.inheritable_metadata}
    return CallbackManagerForRetrieverRun(**shared_state)
|
Get the equivalent sync RunManager.
Returns:
CallbackManagerForRetrieverRun: The sync RunManager.
|
test_events_call
|
search = DataForSeoAPIWrapper(params={'location_name': 'Spain',
'language_code': 'es', 'se_type': 'events'})
output = search.results('concerts')
assert any('Madrid' in ((i['location_info'] or dict())['address'] or '') for
i in output)
|
def test_events_call() ->None:
    """Searching Spanish events for 'concerts' should include a Madrid result."""
    search = DataForSeoAPIWrapper(params={'location_name': 'Spain',
        'language_code': 'es', 'se_type': 'events'})
    output = search.results('concerts')
    # `or` fallbacks guard against missing location_info/address fields.
    assert any('Madrid' in ((i['location_info'] or dict())['address'] or ''
        ) for i in output)
| null |
from_llm
|
"""Load the necessary chains."""
sql_chain = SQLDatabaseChain.from_llm(llm, db, prompt=query_prompt, **kwargs)
decider_chain = LLMChain(llm=llm, prompt=decider_prompt, output_key=
'table_names')
return cls(sql_chain=sql_chain, decider_chain=decider_chain, **kwargs)
|
@classmethod
def from_llm(cls, llm: BaseLanguageModel, db: SQLDatabase, query_prompt:
    BasePromptTemplate=PROMPT, decider_prompt: BasePromptTemplate=
    DECIDER_PROMPT, **kwargs: Any) ->SQLDatabaseSequentialChain:
    """Construct the sequential chain from its decider and SQL sub-chains."""
    decider_chain = LLMChain(llm=llm, prompt=decider_prompt, output_key=
        'table_names')
    sql_chain = SQLDatabaseChain.from_llm(llm, db, prompt=query_prompt,
        **kwargs)
    return cls(sql_chain=sql_chain, decider_chain=decider_chain, **kwargs)
|
Load the necessary chains.
|
invoke
|
"""First evaluates the condition, then delegate to true or false branch."""
config = ensure_config(config)
callback_manager = get_callback_manager_for_config(config)
run_manager = callback_manager.on_chain_start(dumpd(self), input, name=
config.get('run_name'))
try:
for idx, branch in enumerate(self.branches):
condition, runnable = branch
expression_value = condition.invoke(input, config=patch_config(
config, callbacks=run_manager.get_child(tag=
f'condition:{idx + 1}')))
if expression_value:
output = runnable.invoke(input, config=patch_config(config,
callbacks=run_manager.get_child(tag=f'branch:{idx + 1}')),
**kwargs)
break
else:
output = self.default.invoke(input, config=patch_config(config,
callbacks=run_manager.get_child(tag='branch:default')), **kwargs)
except BaseException as e:
run_manager.on_chain_error(e)
raise
run_manager.on_chain_end(dumpd(output))
return output
|
def invoke(self, input: Input, config: Optional[RunnableConfig]=None, **
    kwargs: Any) ->Output:
    """First evaluates the condition, then delegate to true or false branch."""
    config = ensure_config(config)
    callback_manager = get_callback_manager_for_config(config)
    run_manager = callback_manager.on_chain_start(dumpd(self), input, name=
        config.get('run_name'))
    try:
        # Evaluate branch conditions in order; the first truthy one selects
        # its runnable. Each condition/branch gets its own tagged child
        # callback manager.
        for idx, branch in enumerate(self.branches):
            condition, runnable = branch
            expression_value = condition.invoke(input, config=patch_config(
                config, callbacks=run_manager.get_child(tag=
                f'condition:{idx + 1}')))
            if expression_value:
                output = runnable.invoke(input, config=patch_config(config,
                    callbacks=run_manager.get_child(tag=f'branch:{idx + 1}'
                    )), **kwargs)
                break
        else:
            # No condition matched: run the default branch.
            output = self.default.invoke(input, config=patch_config(config,
                callbacks=run_manager.get_child(tag='branch:default')), **
                kwargs)
    except BaseException as e:
        # Report the failure to callbacks, then re-raise unchanged.
        run_manager.on_chain_error(e)
        raise
    run_manager.on_chain_end(dumpd(output))
    return output
|
First evaluates the condition, then delegate to true or false branch.
|
test_fake_retriever_v2
|
callbacks = FakeCallbackHandler()
assert fake_retriever_v2._new_arg_supported is True
results = fake_retriever_v2.get_relevant_documents('Foo', callbacks=[callbacks]
)
assert results[0].page_content == 'Foo'
assert callbacks.retriever_starts == 1
assert callbacks.retriever_ends == 1
assert callbacks.retriever_errors == 0
fake_retriever_v2.get_relevant_documents('Foo', callbacks=[callbacks])
with pytest.raises(ValueError, match='Test error'):
fake_erroring_retriever_v2.get_relevant_documents('Foo', callbacks=[
callbacks])
assert callbacks.retriever_errors == 1
|
def test_fake_retriever_v2(fake_retriever_v2: BaseRetriever,
    fake_erroring_retriever_v2: BaseRetriever) ->None:
    """Callbacks fire on start/end and errors are counted for v2 retrievers."""
    callbacks = FakeCallbackHandler()
    assert fake_retriever_v2._new_arg_supported is True
    results = fake_retriever_v2.get_relevant_documents('Foo', callbacks=[
        callbacks])
    assert results[0].page_content == 'Foo'
    assert callbacks.retriever_starts == 1
    assert callbacks.retriever_ends == 1
    assert callbacks.retriever_errors == 0
    fake_retriever_v2.get_relevant_documents('Foo', callbacks=[callbacks])
    with pytest.raises(ValueError, match='Test error'):
        fake_erroring_retriever_v2.get_relevant_documents('Foo', callbacks=
            [callbacks])
    # The erroring retriever should have bumped the error counter.
    assert callbacks.retriever_errors == 1
| null |
test_pypdfium2_parser
|
"""Test PyPDFium2 parser."""
_assert_with_parser(PyPDFium2Parser())
|
def test_pypdfium2_parser() ->None:
    """Test PyPDFium2 parser."""
    # Shared helper runs the standard parser assertions against this parser.
    _assert_with_parser(PyPDFium2Parser())
|
Test PyPDFium2 parser.
|
_load_template
|
"""Load template from the path if applicable."""
if f'{var_name}_path' in config:
if var_name in config:
raise ValueError(
f'Both `{var_name}_path` and `{var_name}` cannot be provided.')
template_path = Path(config.pop(f'{var_name}_path'))
if template_path.suffix == '.txt':
with open(template_path) as f:
template = f.read()
else:
raise ValueError
config[var_name] = template
return config
|
def _load_template(var_name: str, config: dict) ->dict:
"""Load template from the path if applicable."""
if f'{var_name}_path' in config:
if var_name in config:
raise ValueError(
f'Both `{var_name}_path` and `{var_name}` cannot be provided.')
template_path = Path(config.pop(f'{var_name}_path'))
if template_path.suffix == '.txt':
with open(template_path) as f:
template = f.read()
else:
raise ValueError
config[var_name] = template
return config
|
Load template from the path if applicable.
|
test__get_prompts_valid
|
_get_prompt(inputs)
|
@pytest.mark.parametrize('inputs', _VALID_PROMPTS)
def test__get_prompts_valid(inputs: Dict[str, Any]) ->None:
    """_get_prompt should accept every known-valid input shape without raising."""
    _get_prompt(inputs)
| null |
validate_environment
|
"""Validate that python package exists in environment."""
if not values.get('client'):
values['client'] = grpcclient.InferenceServerClient(values['server_url'])
return values
|
@root_validator(pre=True, allow_reuse=True)
def validate_environment(cls, values: Dict[str, Any]) ->Dict[str, Any]:
    """Validate that python package exists in environment."""
    # Build a Triton inference client lazily when the caller did not
    # supply one; requires 'server_url' to be present in values.
    if not values.get('client'):
        values['client'] = grpcclient.InferenceServerClient(values[
            'server_url'])
    return values
|
Validate that python package exists in environment.
|
_identifying_params
|
"""Get the identifying parameters."""
return {'temperature': self.temperature, 'model': self.model, 'profanity':
self.profanity, 'streaming': self.streaming, 'max_tokens': self.max_tokens}
|
@property
def _identifying_params(self) ->Dict[str, Any]:
"""Get the identifying parameters."""
return {'temperature': self.temperature, 'model': self.model,
'profanity': self.profanity, 'streaming': self.streaming,
'max_tokens': self.max_tokens}
|
Get the identifying parameters.
|
predict_messages
|
if stop is None:
_stop = None
else:
_stop = list(stop)
return self(messages, stop=_stop, **kwargs)
|
def predict_messages(self, messages: List[BaseMessage], *, stop: Optional[
    Sequence[str]]=None, **kwargs: Any) ->BaseMessage:
    """Invoke the model on ``messages`` via ``self.__call__``.

    ``stop`` is normalized to a plain list (or left as None) before
    delegation.
    """
    _stop = None if stop is None else list(stop)
    return self(messages, stop=_stop, **kwargs)
| null |
test_evaluate_run
|
run_mapper = ChainStringRunMapper()
example_mapper = MagicMock()
string_evaluator = criteria.CriteriaEvalChain.from_llm(fake_llm.FakeLLM())
evaluator = StringRunEvaluatorChain(run_mapper=run_mapper, example_mapper=
example_mapper, name='test_evaluator', string_evaluator=string_evaluator)
run = MagicMock()
example = MagicMock()
res = evaluator.evaluate_run(run, example)
assert str(res.comment).startswith('Error evaluating run ')
assert res.key == string_evaluator.evaluation_name
|
def test_evaluate_run() ->None:
    """Evaluating a mocked run/example should surface the mapping failure as
    a comment on the result rather than raising."""
    run_mapper = ChainStringRunMapper()
    example_mapper = MagicMock()
    string_evaluator = criteria.CriteriaEvalChain.from_llm(fake_llm.FakeLLM())
    evaluator = StringRunEvaluatorChain(run_mapper=run_mapper,
        example_mapper=example_mapper, name='test_evaluator',
        string_evaluator=string_evaluator)
    run = MagicMock()
    example = MagicMock()
    res = evaluator.evaluate_run(run, example)
    # MagicMock inputs cannot be mapped, so the chain records the error.
    assert str(res.comment).startswith('Error evaluating run ')
    assert res.key == string_evaluator.evaluation_name
| null |
test_from_texts
|
input_texts = ['I have a pen.', 'Do you have a pen?', 'I have a bag.']
knn_retriever = KNNRetriever.from_texts(texts=input_texts, embeddings=
FakeEmbeddings(size=100))
assert len(knn_retriever.texts) == 3
|
def test_from_texts(self) ->None:
    """KNNRetriever.from_texts should index every input text."""
    input_texts = ['I have a pen.', 'Do you have a pen?', 'I have a bag.']
    knn_retriever = KNNRetriever.from_texts(texts=input_texts, embeddings=
        FakeEmbeddings(size=100))
    assert len(knn_retriever.texts) == 3
| null |
embeddings
|
return None
|
@property
def embeddings(self) ->Optional[Embeddings]:
    """Return the underlying embeddings object (always None here)."""
    return None
| null |
_unique_documents
|
return [doc for i, doc in enumerate(documents) if doc not in documents[:i]]
|
def _unique_documents(documents: Sequence[Document]) ->List[Document]:
    """Drop duplicate documents, keeping the first occurrence of each.

    Uses equality (not hashing) so it works for unhashable documents;
    quadratic in the number of documents.
    """
    unique: List[Document] = []
    for document in documents:
        if document not in unique:
            unique.append(document)
    return unique
| null |
test_invalid_sparse_vector_name
|
with pytest.raises(QdrantException) as e:
QdrantSparseVectorRetriever(client=retriever.client, collection_name=
retriever.collection_name, sparse_vector_name=
'invalid sparse vector', sparse_encoder=consistent_fake_sparse_encoder)
assert 'does not contain sparse vector' in str(e.value)
|
def test_invalid_sparse_vector_name(retriever: QdrantSparseVectorRetriever
    ) ->None:
    """Constructing a retriever with an unknown sparse vector name must fail."""
    with pytest.raises(QdrantException) as e:
        QdrantSparseVectorRetriever(client=retriever.client,
            collection_name=retriever.collection_name, sparse_vector_name=
            'invalid sparse vector', sparse_encoder=
            consistent_fake_sparse_encoder)
    assert 'does not contain sparse vector' in str(e.value)
| null |
scroll
|
if direction == 'up':
self.page.evaluate(
'(document.scrollingElement || document.body).scrollTop = (document.scrollingElement || document.body).scrollTop - window.innerHeight;'
)
elif direction == 'down':
self.page.evaluate(
'(document.scrollingElement || document.body).scrollTop = (document.scrollingElement || document.body).scrollTop + window.innerHeight;'
)
|
def scroll(self, direction: str) ->None:
    """Scroll the page by one viewport height.

    Only 'up' and 'down' are recognized; any other direction is a no-op.
    """
    signs = {'up': '-', 'down': '+'}
    sign = signs.get(direction)
    if sign is not None:
        script = (
            '(document.scrollingElement || document.body).scrollTop = (document.scrollingElement || document.body).scrollTop '
             + sign + ' window.innerHeight;')
        self.page.evaluate(script)
| null |
test_configured_system_message
|
messages = [HumanMessage(content='usr-msg-1')]
actual = model_cfg_sys_msg.predict_messages(messages).content
expected = """<s>[INST] <<SYS>>
sys-msg
<</SYS>>
usr-msg-1 [/INST]"""
assert actual == expected
|
def test_configured_system_message(model_cfg_sys_msg: Llama2Chat) ->None:
    """The model-configured system message is injected into the Llama-2
    chat prompt."""
    messages = [HumanMessage(content='usr-msg-1')]
    actual = model_cfg_sys_msg.predict_messages(messages).content
    # Llama-2 chat format: <<SYS>> block followed by the user turn.
    expected = '<s>[INST] <<SYS>>\nsys-msg\n<</SYS>>\n\nusr-msg-1 [/INST]'
    assert actual == expected
| null |
test_embed_query
|
text = 'query_text'
vector = cache_embeddings.embed_query(text)
expected_vector = [5.0, 6.0]
assert vector == expected_vector
|
def test_embed_query(cache_embeddings: CacheBackedEmbeddings) ->None:
    """Query embedding returns the fixture's fake embedder output."""
    text = 'query_text'
    vector = cache_embeddings.embed_query(text)
    # Expected value comes from the fake embedder behind the fixture.
    expected_vector = [5.0, 6.0]
    assert vector == expected_vector
| null |
deserialize_json_input
|
"""Use the serialized typescript dictionary.
Resolve the path, query params dict, and optional requestBody dict.
"""
args: dict = json.loads(serialized_args)
path = self._construct_path(args)
body_params = self._extract_body_params(args)
query_params = self._extract_query_params(args)
return {'url': path, 'data': body_params, 'params': query_params}
|
def deserialize_json_input(self, serialized_args: str) ->dict:
    """Use the serialized typescript dictionary.
    Resolve the path, query params dict, and optional requestBody dict.
    """
    payload: dict = json.loads(serialized_args)
    url = self._construct_path(payload)
    data = self._extract_body_params(payload)
    params = self._extract_query_params(payload)
    return {'url': url, 'data': data, 'params': params}
|
Use the serialized typescript dictionary.
Resolve the path, query params dict, and optional requestBody dict.
|
_display_prompt
|
"""Displays the given prompt to the user."""
print(f'\n{prompt}')
|
def _display_prompt(prompt: str) ->None:
"""Displays the given prompt to the user."""
print(f'\n{prompt}')
|
Displays the given prompt to the user.
|
test_call
|
"""Test that call gives the correct answer."""
search = GoogleSearchAPIWrapper()
output = search.run("What was Obama's first name?")
assert 'Barack Hussein Obama II' in output
|
def test_call() ->None:
    """Test that call gives the correct answer."""
    # Live integration test: requires Google Search API credentials and
    # network access.
    search = GoogleSearchAPIWrapper()
    output = search.run("What was Obama's first name?")
    assert 'Barack Hussein Obama II' in output
|
Test that call gives the correct answer.
|
validate_environment
|
"""Validate that api key in environment."""
try:
import fireworks.client
except ImportError as e:
raise ImportError(
'Could not import fireworks-ai python package. Please install it with `pip install fireworks-ai`.'
) from e
fireworks_api_key = convert_to_secret_str(get_from_dict_or_env(values,
'fireworks_api_key', 'FIREWORKS_API_KEY'))
fireworks.client.api_key = fireworks_api_key.get_secret_value()
return values
|
@root_validator()
def validate_environment(cls, values: Dict) ->Dict:
    """Validate that api key in environment.

    Reads the key from the values dict or the FIREWORKS_API_KEY env var and
    installs it on the fireworks client module.
    """
    try:
        import fireworks.client
    except ImportError as e:
        raise ImportError(
            'Could not import fireworks-ai python package. Please install it with `pip install fireworks-ai`.'
        ) from e
    api_key = convert_to_secret_str(get_from_dict_or_env(values,
        'fireworks_api_key', 'FIREWORKS_API_KEY'))
    fireworks.client.api_key = api_key.get_secret_value()
    return values
|
Validate that api key in environment.
|
_import_openlm
|
from langchain_community.llms.openlm import OpenLM
return OpenLM
|
def _import_openlm() ->Any:
    """Lazily import and return the OpenLM LLM class."""
    from langchain_community.llms.openlm import OpenLM as _OpenLM
    return _OpenLM
| null |
_create_search_params
|
"""Generate search params based on the current index type"""
from pymilvus import Collection
if isinstance(self.col, Collection) and self.search_params is None:
index = self._get_index()
if index is not None:
index_type: str = index['index_param']['index_type']
metric_type: str = index['index_param']['metric_type']
self.search_params = self.default_search_params[index_type]
self.search_params['metric_type'] = metric_type
|
def _create_search_params(self) ->None:
    """Generate search params based on the current index type"""
    from pymilvus import Collection
    # Only derive params once, and only when a real collection exists.
    if isinstance(self.col, Collection) and self.search_params is None:
        index = self._get_index()
        if index is not None:
            index_type: str = index['index_param']['index_type']
            metric_type: str = index['index_param']['metric_type']
            # Start from the per-index-type defaults, then pin the metric
            # actually used by the existing index.
            self.search_params = self.default_search_params[index_type]
            self.search_params['metric_type'] = metric_type
|
Generate search params based on the current index type
|
_import_slack_get_channel
|
from langchain_community.tools.slack.get_channel import SlackGetChannel
return SlackGetChannel
|
def _import_slack_get_channel() ->Any:
    """Lazily import and return the SlackGetChannel tool class."""
    from langchain_community.tools.slack.get_channel import SlackGetChannel as _Tool
    return _Tool
| null |
validate_browser_provided
|
"""Check that the arguments are valid."""
lazy_import_playwright_browsers()
if values.get('async_browser') is None and values.get('sync_browser') is None:
raise ValueError('Either async_browser or sync_browser must be specified.')
return values
|
@root_validator
def validate_browser_provided(cls, values: dict) ->dict:
    """Check that the arguments are valid."""
    lazy_import_playwright_browsers()
    async_browser = values.get('async_browser')
    sync_browser = values.get('sync_browser')
    # At least one browser handle (sync or async) is required.
    if async_browser is None and sync_browser is None:
        raise ValueError(
            'Either async_browser or sync_browser must be specified.')
    return values
|
Check that the arguments are valid.
|
_create_index_if_not_exists
|
"""Create the Elasticsearch index if it doesn't already exist.
Args:
index_name: Name of the Elasticsearch index to create.
dims_length: Length of the embedding vectors.
"""
if self.client.indices.exists(index=index_name):
logger.debug(f'Index {index_name} already exists. Skipping creation.')
else:
if dims_length is None and self.strategy.require_inference():
raise ValueError(
"Cannot create index without specifying dims_length when the index doesn't already exist. We infer dims_length from the first embedding. Check that you have provided an embedding function."
)
self.strategy.before_index_setup(client=self.client, text_field=self.
query_field, vector_query_field=self.vector_query_field)
indexSettings = self.strategy.index(vector_query_field=self.
vector_query_field, dims_length=dims_length, similarity=self.
distance_strategy)
logger.debug(
f"Creating index {index_name} with mappings {indexSettings['mappings']}"
)
self.client.indices.create(index=index_name, **indexSettings)
|
def _create_index_if_not_exists(self, index_name: str, dims_length:
    Optional[int]=None) ->None:
    """Create the Elasticsearch index if it doesn't already exist.

    Args:
        index_name: Name of the Elasticsearch index to create.
        dims_length: Length of the embedding vectors.
    """
    # Nothing to do when the index already exists.
    if self.client.indices.exists(index=index_name):
        logger.debug(f'Index {index_name} already exists. Skipping creation.')
        return
    if dims_length is None and self.strategy.require_inference():
        raise ValueError(
            "Cannot create index without specifying dims_length when the index doesn't already exist. We infer dims_length from the first embedding. Check that you have provided an embedding function."
            )
    # Let the strategy perform any setup it needs before index creation.
    self.strategy.before_index_setup(client=self.client, text_field=self.
        query_field, vector_query_field=self.vector_query_field)
    index_settings = self.strategy.index(vector_query_field=self.
        vector_query_field, dims_length=dims_length, similarity=self.
        distance_strategy)
    logger.debug(
        f"Creating index {index_name} with mappings {index_settings['mappings']}"
        )
    self.client.indices.create(index=index_name, **index_settings)
|
Create the Elasticsearch index if it doesn't already exist.
Args:
index_name: Name of the Elasticsearch index to create.
dims_length: Length of the embedding vectors.
|
_identifying_params
|
return self._default_params
|
@property
def _identifying_params(self) ->Mapping[str, Any]:
    """Parameters that uniquely identify this LLM configuration."""
    return self._default_params
| null |
test_find_all_links_multiple
|
html = (
'<div><a class="blah" href="https://foobar.com">hullo</a></div><div><a class="bleh" href="/baz/cool">buhbye</a></div>'
)
actual = find_all_links(html)
assert sorted(actual) == ['/baz/cool', 'https://foobar.com']
|
def test_find_all_links_multiple() ->None:
    """find_all_links collects hrefs from every anchor, absolute or relative."""
    html = (
        '<div><a class="blah" href="https://foobar.com">hullo</a></div>'
        '<div><a class="bleh" href="/baz/cool">buhbye</a></div>')
    links = find_all_links(html)
    assert sorted(links) == ['/baz/cool', 'https://foobar.com']
| null |
_replace_secrets
|
result = root.copy()
for path, secret_id in secrets_map.items():
[*parts, last] = path.split('.')
current = result
for part in parts:
if part not in current:
break
current[part] = current[part].copy()
current = current[part]
if last in current:
current[last] = {'lc': 1, 'type': 'secret', 'id': [secret_id]}
return result
|
def _replace_secrets(root: Dict[Any, Any], secrets_map: Dict[str, str]) ->Dict[
Any, Any]:
result = root.copy()
for path, secret_id in secrets_map.items():
[*parts, last] = path.split('.')
current = result
for part in parts:
if part not in current:
break
current[part] = current[part].copy()
current = current[part]
if last in current:
current[last] = {'lc': 1, 'type': 'secret', 'id': [secret_id]}
return result
| null |
file_store
|
with tempfile.TemporaryDirectory() as temp_dir:
store = LocalFileStore(temp_dir)
yield store
|
@pytest.fixture
def file_store() ->Generator[LocalFileStore, None, None]:
    """Yield a LocalFileStore rooted in a temporary directory; the directory
    is removed when the test finishes."""
    with tempfile.TemporaryDirectory() as temp_dir:
        store = LocalFileStore(temp_dir)
        yield store
| null |
test_embedding_query
|
document = 'foo bar'
model = VertexAIEmbeddings()
output = model.embed_query(document)
assert len(output) == 768
|
def test_embedding_query() ->None:
    """Vertex AI query embeddings have the expected dimensionality."""
    document = 'foo bar'
    model = VertexAIEmbeddings()
    output = model.embed_query(document)
    # The default Vertex AI embedding model returns 768-dim vectors.
    assert len(output) == 768
| null |
_call
|
"""Call out to Nebula Service endpoint.
Args:
prompt: The prompt to pass into the model.
stop: Optional list of stop words to use when generating.
Returns:
The string generated by the model.
Example:
.. code-block:: python
response = nebula("Tell me a joke.")
"""
params = self._invocation_params(stop, **kwargs)
prompt = prompt.strip()
response = completion_with_retry(self, prompt=prompt, params=params, url=
f'{self.nebula_service_url}{self.nebula_service_path}')
_stop = params.get('stop_sequences')
return self._process_response(response, _stop)
|
def _call(self, prompt: str, stop: Optional[List[str]]=None, run_manager:
    Optional[CallbackManagerForLLMRun]=None, **kwargs: Any) ->str:
    """Call out to Nebula Service endpoint.
    Args:
        prompt: The prompt to pass into the model.
        stop: Optional list of stop words to use when generating.
    Returns:
        The string generated by the model.
    Example:
        .. code-block:: python
            response = nebula("Tell me a joke.")
    """
    params = self._invocation_params(stop, **kwargs)
    # Leading/trailing whitespace is stripped before sending to the service.
    prompt = prompt.strip()
    response = completion_with_retry(self, prompt=prompt, params=params,
        url=f'{self.nebula_service_url}{self.nebula_service_path}')
    # Post-process using the stop sequences resolved by _invocation_params
    # (NOTE(review): presumably merges call-level and instance-level stops).
    _stop = params.get('stop_sequences')
    return self._process_response(response, _stop)
|
Call out to Nebula Service endpoint.
Args:
prompt: The prompt to pass into the model.
stop: Optional list of stop words to use when generating.
Returns:
The string generated by the model.
Example:
.. code-block:: python
response = nebula("Tell me a joke.")
|
parse_log
|
"""
Create Document objects from Datadog log items.
"""
attributes = log.get('attributes', {})
metadata = {'id': log.get('id', ''), 'status': attributes.get('status'),
'service': attributes.get('service', ''), 'tags': attributes.get('tags',
[]), 'timestamp': attributes.get('timestamp', '')}
message = attributes.get('message', '')
inside_attributes = attributes.get('attributes', {})
content_dict = {**inside_attributes, 'message': message}
content = ', '.join(f'{k}: {v}' for k, v in content_dict.items())
return Document(page_content=content, metadata=metadata)
|
def parse_log(self, log: dict) ->Document:
    """
    Create Document objects from Datadog log items.
    """
    attrs = log.get('attributes', {})
    metadata = {
        'id': log.get('id', ''),
        'status': attrs.get('status'),
        'service': attrs.get('service', ''),
        'tags': attrs.get('tags', []),
        'timestamp': attrs.get('timestamp', ''),
    }
    # Flatten the nested attributes plus the message into "key: value" text.
    content_dict = dict(attrs.get('attributes', {}))
    content_dict['message'] = attrs.get('message', '')
    content = ', '.join(f'{key}: {value}' for key, value in content_dict.
        items())
    return Document(page_content=content, metadata=metadata)
|
Create Document objects from Datadog log items.
|
_import_office365_utils
|
from langchain_community.tools.office365.utils import authenticate
return authenticate
|
def _import_office365_utils() ->Any:
    """Lazily import and return the Office365 ``authenticate`` helper."""
    from langchain_community.tools.office365.utils import authenticate as _auth
    return _auth
| null |
test_deepsparse_call
|
"""Test valid call to DeepSparse."""
config = {'max_generated_tokens': 5, 'use_deepsparse_cache': False}
llm = DeepSparse(model=
'zoo:nlg/text_generation/codegen_mono-350m/pytorch/huggingface/bigpython_bigquery_thepile/base-none'
, config=config)
output = llm('def ')
assert isinstance(output, str)
assert len(output) > 1
assert output == 'ids_to_names'
|
def test_deepsparse_call() ->None:
    """Test valid call to DeepSparse."""
    # Cap generation so the test stays fast and deterministic.
    config = {'max_generated_tokens': 5, 'use_deepsparse_cache': False}
    llm = DeepSparse(model=
        'zoo:nlg/text_generation/codegen_mono-350m/pytorch/huggingface/bigpython_bigquery_thepile/base-none'
        , config=config)
    output = llm('def ')
    assert isinstance(output, str)
    assert len(output) > 1
    # Expected deterministic completion for this pinned checkpoint.
    assert output == 'ids_to_names'
|
Test valid call to DeepSparse.
|
_format_func
|
self._validate_func(func)
if isinstance(func, Operator):
value = OPERATOR_TO_TQL[func.value]
elif isinstance(func, Comparator):
value = COMPARATOR_TO_TQL[func.value]
return f'{value}'
|
def _format_func(self, func: Union[Operator, Comparator]) ->str:
    """Translate an Operator/Comparator into its TQL token string."""
    self._validate_func(func)
    if isinstance(func, Operator):
        value = OPERATOR_TO_TQL[func.value]
    elif isinstance(func, Comparator):
        value = COMPARATOR_TO_TQL[func.value]
    # NOTE(review): if func were neither type, `value` would be unbound
    # here — presumably _validate_func guarantees one of the two; confirm.
    return f'{value}'
| null |
similarity_search
|
"""Return documents most similar to query.
Args:
query: Text to look up documents similar to.
k: Number of Documents to return. Defaults to 4.
filter: Array of Elasticsearch filter clauses to apply to the query.
Returns:
List of Documents most similar to the query,
in descending order of similarity.
"""
results = self.similarity_search_with_score(query=query, k=k, filter=filter,
**kwargs)
return [doc for doc, _ in results]
|
def similarity_search(self, query: str, k: int=4, filter: Optional[dict]=
    None, **kwargs: Any) ->List[Document]:
    """Return documents most similar to query.

    Args:
        query: Text to look up documents similar to.
        k: Number of Documents to return. Defaults to 4.
        filter: Array of Elasticsearch filter clauses to apply to the query.

    Returns:
        List of Documents most similar to the query,
        in descending order of similarity.
    """
    docs_and_scores = self.similarity_search_with_score(query=query, k=k,
        filter=filter, **kwargs)
    return [document for document, _score in docs_and_scores]
|
Return documents most similar to query.
Args:
query: Text to look up documents similar to.
k: Number of Documents to return. Defaults to 4.
filter: Array of Elasticsearch filter clauses to apply to the query.
Returns:
List of Documents most similar to the query,
in descending order of similarity.
|
test_uses_actual_secret_value_from_secretstr
|
"""Test that actual secret is retrieved using `.get_secret_value()`."""
llm = Minimax(minimax_api_key='secret-api-key', minimax_group_id='group_id')
assert cast(SecretStr, llm.minimax_api_key).get_secret_value(
) == 'secret-api-key'
|
def test_uses_actual_secret_value_from_secretstr() ->None:
    """Test that actual secret is retrieved using `.get_secret_value()`."""
    llm = Minimax(minimax_api_key='secret-api-key', minimax_group_id='group_id'
        )
    # The key is stored as a SecretStr; only get_secret_value() reveals it.
    assert cast(SecretStr, llm.minimax_api_key).get_secret_value(
        ) == 'secret-api-key'
|
Test that actual secret is retrieved using `.get_secret_value()`.
|
_embedding_func
|
"""Call out to LocalAI's embedding endpoint."""
if self.model.endswith('001'):
text = text.replace('\n', ' ')
return embed_with_retry(self, input=[text], **self._invocation_params)['data'][
0]['embedding']
|
def _embedding_func(self, text: str, *, engine: str) ->List[float]:
    """Call out to LocalAI's embedding endpoint."""
    # Models ending in '001' get newlines replaced before embedding
    # (NOTE(review): mirrors the OpenAI-style handling — confirm).
    if self.model.endswith('001'):
        text = text.replace('\n', ' ')
    response = embed_with_retry(self, input=[text], **self._invocation_params)
    return response['data'][0]['embedding']
|
Call out to LocalAI's embedding endpoint.
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.