method_name
stringlengths 1
78
| method_body
stringlengths 3
9.66k
| full_code
stringlengths 31
10.7k
| docstring
stringlengths 4
4.74k
⌀ |
|---|---|---|---|
__init__
|
try:
import streamlit as st
except ImportError as e:
raise ImportError(
'Unable to import streamlit, please run `pip install streamlit`.'
) from e
if key not in st.session_state:
st.session_state[key] = []
self._messages = st.session_state[key]
|
def __init__(self, key: str='langchain_messages'):
try:
import streamlit as st
except ImportError as e:
raise ImportError(
'Unable to import streamlit, please run `pip install streamlit`.'
) from e
if key not in st.session_state:
st.session_state[key] = []
self._messages = st.session_state[key]
| null |
test_metadata_without_frontmatter
|
"""Verify docs without frontmatter, still have basic metadata."""
doc = next(doc for doc in docs if doc.metadata['source'] == 'no_metadata.md')
assert set(doc.metadata) == STANDARD_METADATA_FIELDS
|
def test_metadata_without_frontmatter() ->None:
"""Verify docs without frontmatter, still have basic metadata."""
doc = next(doc for doc in docs if doc.metadata['source'] ==
'no_metadata.md')
assert set(doc.metadata) == STANDARD_METADATA_FIELDS
|
Verify docs without frontmatter, still have basic metadata.
|
_get_local_name
|
if '#' in iri:
local_name = iri.split('#')[-1]
elif '/' in iri:
local_name = iri.split('/')[-1]
else:
raise ValueError(f"Unexpected IRI '{iri}', contains neither '#' nor '/'.")
return local_name
|
@staticmethod
def _get_local_name(iri: str) ->str:
if '#' in iri:
local_name = iri.split('#')[-1]
elif '/' in iri:
local_name = iri.split('/')[-1]
else:
raise ValueError(
f"Unexpected IRI '{iri}', contains neither '#' nor '/'.")
return local_name
| null |
test_get_session_pool
|
mock_session_pool.return_value = MagicMock()
nebula_graph = NebulaGraph(self.space, self.username, self.password, self.
address, self.port, self.session_pool_size)
session_pool = nebula_graph._get_session_pool()
self.assertIsInstance(session_pool, MagicMock)
|
@patch('nebula3.gclient.net.SessionPool.SessionPool')
def test_get_session_pool(self, mock_session_pool: Any) ->None:
mock_session_pool.return_value = MagicMock()
nebula_graph = NebulaGraph(self.space, self.username, self.password,
self.address, self.port, self.session_pool_size)
session_pool = nebula_graph._get_session_pool()
self.assertIsInstance(session_pool, MagicMock)
| null |
test_openai_streaming_n_error
|
"""Test validation for streaming fails if n is not 1."""
with pytest.raises(ValueError):
OpenAI(n=2, streaming=True)
|
def test_openai_streaming_n_error() ->None:
"""Test validation for streaming fails if n is not 1."""
with pytest.raises(ValueError):
OpenAI(n=2, streaming=True)
|
Test validation for streaming fails if n is not 1.
|
mget
|
"""Get the values associated with the given keys."""
keys = [self._get_prefixed_key(key) for key in keys]
return cast(List[Optional[str]], self.client.mget(*keys))
|
def mget(self, keys: Sequence[str]) ->List[Optional[str]]:
"""Get the values associated with the given keys."""
keys = [self._get_prefixed_key(key) for key in keys]
return cast(List[Optional[str]], self.client.mget(*keys))
|
Get the values associated with the given keys.
|
_type
|
return 'json_functions'
|
@property
def _type(self) ->str:
return 'json_functions'
| null |
lookup
|
"""Look up based on prompt and llm_string."""
item = self.kv_cache.get(llm_string=_hash(llm_string), prompt=_hash(prompt))
if item is not None:
generations = _loads_generations(item['body_blob'])
if generations is not None:
return generations
else:
return None
else:
return None
|
def lookup(self, prompt: str, llm_string: str) ->Optional[RETURN_VAL_TYPE]:
"""Look up based on prompt and llm_string."""
item = self.kv_cache.get(llm_string=_hash(llm_string), prompt=_hash(prompt)
)
if item is not None:
generations = _loads_generations(item['body_blob'])
if generations is not None:
return generations
else:
return None
else:
return None
|
Look up based on prompt and llm_string.
|
similarity_search
|
"""Return docs most similar to query.
Args:
query: The string that will be used to search for similar documents.
k: The amount of neighbors that will be retrieved.
filter: Optional. A list of Namespaces for filtering the matching results.
For example:
[Namespace("color", ["red"], []), Namespace("shape", [], ["squared"])]
will match datapoints that satisfy "red color" but not include
datapoints with "squared shape". Please refer to
https://cloud.google.com/vertex-ai/docs/matching-engine/filtering#json
for more detail.
Returns:
A list of k matching documents.
"""
docs_and_scores = self.similarity_search_with_score(query, k=k, filter=
filter, **kwargs)
return [doc for doc, _ in docs_and_scores]
|
def similarity_search(self, query: str, k: int=4, filter: Optional[List[
Namespace]]=None, **kwargs: Any) ->List[Document]:
"""Return docs most similar to query.
Args:
query: The string that will be used to search for similar documents.
k: The amount of neighbors that will be retrieved.
filter: Optional. A list of Namespaces for filtering the matching results.
For example:
[Namespace("color", ["red"], []), Namespace("shape", [], ["squared"])]
will match datapoints that satisfy "red color" but not include
datapoints with "squared shape". Please refer to
https://cloud.google.com/vertex-ai/docs/matching-engine/filtering#json
for more detail.
Returns:
A list of k matching documents.
"""
docs_and_scores = self.similarity_search_with_score(query, k=k, filter=
filter, **kwargs)
return [doc for doc, _ in docs_and_scores]
|
Return docs most similar to query.
Args:
query: The string that will be used to search for similar documents.
k: The amount of neighbors that will be retrieved.
filter: Optional. A list of Namespaces for filtering the matching results.
For example:
[Namespace("color", ["red"], []), Namespace("shape", [], ["squared"])]
will match datapoints that satisfy "red color" but not include
datapoints with "squared shape". Please refer to
https://cloud.google.com/vertex-ai/docs/matching-engine/filtering#json
for more detail.
Returns:
A list of k matching documents.
|
__init__
|
"""Initialize the OBSFileLoader with the specified settings.
Args:
bucket (str): The name of the OBS bucket to be used.
key (str): The name of the object in the OBS bucket.
client (ObsClient, optional): An instance of the ObsClient to connect to OBS.
endpoint (str, optional): The endpoint URL of your OBS bucket. This parameter is mandatory if `client` is not provided.
config (dict, optional): The parameters for connecting to OBS, provided as a dictionary. This parameter is ignored if `client` is provided. The dictionary could have the following keys:
- "ak" (str, optional): Your OBS access key (required if `get_token_from_ecs` is False and bucket policy is not public read).
- "sk" (str, optional): Your OBS secret key (required if `get_token_from_ecs` is False and bucket policy is not public read).
- "token" (str, optional): Your security token (required if using temporary credentials).
- "get_token_from_ecs" (bool, optional): Whether to retrieve the security token from ECS. Defaults to False if not provided. If set to True, `ak`, `sk`, and `token` will be ignored.
Raises:
ValueError: If the `esdk-obs-python` package is not installed.
TypeError: If the provided `client` is not an instance of ObsClient.
ValueError: If `client` is not provided, but `endpoint` is missing.
Note:
Before using this class, make sure you have registered with OBS and have the necessary credentials. The `ak`, `sk`, and `endpoint` values are mandatory unless `get_token_from_ecs` is True or the bucket policy is public read. `token` is required when using temporary credentials.
Example:
To create a new OBSFileLoader with a new client:
```
config = {
"ak": "your-access-key",
"sk": "your-secret-key"
}
obs_loader = OBSFileLoader("your-bucket-name", "your-object-key", config=config)
```
To create a new OBSFileLoader with an existing client:
```
from obs import ObsClient
# Assuming you have an existing ObsClient object 'obs_client'
obs_loader = OBSFileLoader("your-bucket-name", "your-object-key", client=obs_client)
```
To create a new OBSFileLoader without an existing client:
```
obs_loader = OBSFileLoader("your-bucket-name", "your-object-key", endpoint="your-endpoint-url")
```
"""
try:
from obs import ObsClient
except ImportError:
raise ImportError(
'Could not import esdk-obs-python python package. Please install it with `pip install esdk-obs-python`.'
)
if not client:
if not endpoint:
raise ValueError('Either OBSClient or endpoint must be provided.')
if not config:
config = dict()
if config.get('get_token_from_ecs'):
client = ObsClient(server=endpoint, security_provider_policy='ECS')
else:
client = ObsClient(access_key_id=config.get('ak'),
secret_access_key=config.get('sk'), security_token=config.get(
'token'), server=endpoint)
if not isinstance(client, ObsClient):
raise TypeError('Client must be ObsClient type')
self.client = client
self.bucket = bucket
self.key = key
|
def __init__(self, bucket: str, key: str, client: Any=None, endpoint: str=
'', config: Optional[dict]=None) ->None:
"""Initialize the OBSFileLoader with the specified settings.
Args:
bucket (str): The name of the OBS bucket to be used.
key (str): The name of the object in the OBS bucket.
client (ObsClient, optional): An instance of the ObsClient to connect to OBS.
endpoint (str, optional): The endpoint URL of your OBS bucket. This parameter is mandatory if `client` is not provided.
config (dict, optional): The parameters for connecting to OBS, provided as a dictionary. This parameter is ignored if `client` is provided. The dictionary could have the following keys:
- "ak" (str, optional): Your OBS access key (required if `get_token_from_ecs` is False and bucket policy is not public read).
- "sk" (str, optional): Your OBS secret key (required if `get_token_from_ecs` is False and bucket policy is not public read).
- "token" (str, optional): Your security token (required if using temporary credentials).
- "get_token_from_ecs" (bool, optional): Whether to retrieve the security token from ECS. Defaults to False if not provided. If set to True, `ak`, `sk`, and `token` will be ignored.
Raises:
ValueError: If the `esdk-obs-python` package is not installed.
TypeError: If the provided `client` is not an instance of ObsClient.
ValueError: If `client` is not provided, but `endpoint` is missing.
Note:
Before using this class, make sure you have registered with OBS and have the necessary credentials. The `ak`, `sk`, and `endpoint` values are mandatory unless `get_token_from_ecs` is True or the bucket policy is public read. `token` is required when using temporary credentials.
Example:
To create a new OBSFileLoader with a new client:
```
config = {
"ak": "your-access-key",
"sk": "your-secret-key"
}
obs_loader = OBSFileLoader("your-bucket-name", "your-object-key", config=config)
```
To create a new OBSFileLoader with an existing client:
```
from obs import ObsClient
# Assuming you have an existing ObsClient object 'obs_client'
obs_loader = OBSFileLoader("your-bucket-name", "your-object-key", client=obs_client)
```
To create a new OBSFileLoader without an existing client:
```
obs_loader = OBSFileLoader("your-bucket-name", "your-object-key", endpoint="your-endpoint-url")
```
"""
try:
from obs import ObsClient
except ImportError:
raise ImportError(
'Could not import esdk-obs-python python package. Please install it with `pip install esdk-obs-python`.'
)
if not client:
if not endpoint:
raise ValueError('Either OBSClient or endpoint must be provided.')
if not config:
config = dict()
if config.get('get_token_from_ecs'):
client = ObsClient(server=endpoint, security_provider_policy='ECS')
else:
client = ObsClient(access_key_id=config.get('ak'),
secret_access_key=config.get('sk'), security_token=config.
get('token'), server=endpoint)
if not isinstance(client, ObsClient):
raise TypeError('Client must be ObsClient type')
self.client = client
self.bucket = bucket
self.key = key
|
Initialize the OBSFileLoader with the specified settings.
Args:
bucket (str): The name of the OBS bucket to be used.
key (str): The name of the object in the OBS bucket.
client (ObsClient, optional): An instance of the ObsClient to connect to OBS.
endpoint (str, optional): The endpoint URL of your OBS bucket. This parameter is mandatory if `client` is not provided.
config (dict, optional): The parameters for connecting to OBS, provided as a dictionary. This parameter is ignored if `client` is provided. The dictionary could have the following keys:
- "ak" (str, optional): Your OBS access key (required if `get_token_from_ecs` is False and bucket policy is not public read).
- "sk" (str, optional): Your OBS secret key (required if `get_token_from_ecs` is False and bucket policy is not public read).
- "token" (str, optional): Your security token (required if using temporary credentials).
- "get_token_from_ecs" (bool, optional): Whether to retrieve the security token from ECS. Defaults to False if not provided. If set to True, `ak`, `sk`, and `token` will be ignored.
Raises:
ValueError: If the `esdk-obs-python` package is not installed.
TypeError: If the provided `client` is not an instance of ObsClient.
ValueError: If `client` is not provided, but `endpoint` is missing.
Note:
Before using this class, make sure you have registered with OBS and have the necessary credentials. The `ak`, `sk`, and `endpoint` values are mandatory unless `get_token_from_ecs` is True or the bucket policy is public read. `token` is required when using temporary credentials.
Example:
To create a new OBSFileLoader with a new client:
```
config = {
"ak": "your-access-key",
"sk": "your-secret-key"
}
obs_loader = OBSFileLoader("your-bucket-name", "your-object-key", config=config)
```
To create a new OBSFileLoader with an existing client:
```
from obs import ObsClient
# Assuming you have an existing ObsClient object 'obs_client'
obs_loader = OBSFileLoader("your-bucket-name", "your-object-key", client=obs_client)
```
To create a new OBSFileLoader without an existing client:
```
obs_loader = OBSFileLoader("your-bucket-name", "your-object-key", endpoint="your-endpoint-url")
```
|
get_sql_model_class
|
return Model
|
def get_sql_model_class(self) ->Any:
return Model
| null |
_import_wolfram_alpha
|
from langchain_community.utilities.wolfram_alpha import WolframAlphaAPIWrapper
return WolframAlphaAPIWrapper
|
def _import_wolfram_alpha() ->Any:
from langchain_community.utilities.wolfram_alpha import WolframAlphaAPIWrapper
return WolframAlphaAPIWrapper
| null |
_raise_functions_not_supported
|
raise ValueError(
'Function messages are not supported by the MLflow AI Gateway. Please create a feature request at https://github.com/mlflow/mlflow/issues.'
)
|
@staticmethod
def _raise_functions_not_supported() ->None:
raise ValueError(
'Function messages are not supported by the MLflow AI Gateway. Please create a feature request at https://github.com/mlflow/mlflow/issues.'
)
| null |
predict
|
if stop is None:
_stop = None
else:
_stop = list(stop)
result = self([HumanMessage(content=text)], stop=_stop, **kwargs)
if isinstance(result.content, str):
return result.content
else:
raise ValueError('Cannot use predict when output is not a string.')
|
def predict(self, text: str, *, stop: Optional[Sequence[str]]=None, **
kwargs: Any) ->str:
if stop is None:
_stop = None
else:
_stop = list(stop)
result = self([HumanMessage(content=text)], stop=_stop, **kwargs)
if isinstance(result.content, str):
return result.content
else:
raise ValueError('Cannot use predict when output is not a string.')
| null |
_import_human
|
from langchain_community.llms.human import HumanInputLLM
return HumanInputLLM
|
def _import_human() ->Any:
from langchain_community.llms.human import HumanInputLLM
return HumanInputLLM
| null |
completion_with_retry
|
"""Use tenacity to retry the completion call."""
retry_decorator = _create_retry_decorator(llm)
@retry_decorator
def _completion_with_retry(**kwargs: Any) ->Any:
return llm.client.generate(**kwargs)
return _completion_with_retry(**kwargs)
|
def completion_with_retry(llm: Cohere, **kwargs: Any) ->Any:
"""Use tenacity to retry the completion call."""
retry_decorator = _create_retry_decorator(llm)
@retry_decorator
def _completion_with_retry(**kwargs: Any) ->Any:
return llm.client.generate(**kwargs)
return _completion_with_retry(**kwargs)
|
Use tenacity to retry the completion call.
|
_llm_type
|
"""Return type of llm."""
return 'forefrontai'
|
@property
def _llm_type(self) ->str:
"""Return type of llm."""
return 'forefrontai'
|
Return type of llm.
|
lc_secrets
|
return {'cohere_api_key': 'COHERE_API_KEY'}
|
@property
def lc_secrets(self) ->Dict[str, str]:
return {'cohere_api_key': 'COHERE_API_KEY'}
| null |
test_shell_tool_run_str
|
placeholder = PlaceholderProcess(output='hello')
shell_tool = ShellTool(process=placeholder)
result = shell_tool._run(commands="echo 'Hello, World!'")
assert result.strip() == 'hello'
|
def test_shell_tool_run_str() ->None:
placeholder = PlaceholderProcess(output='hello')
shell_tool = ShellTool(process=placeholder)
result = shell_tool._run(commands="echo 'Hello, World!'")
assert result.strip() == 'hello'
| null |
on_tool_start
|
"""Run when tool starts running."""
|
def on_tool_start(self, serialized: Dict[str, Any], input_str: str, **
kwargs: Any) ->None:
"""Run when tool starts running."""
|
Run when tool starts running.
|
_import_office365_create_draft_message
|
from langchain_community.tools.office365.create_draft_message import O365CreateDraftMessage
return O365CreateDraftMessage
|
def _import_office365_create_draft_message() ->Any:
from langchain_community.tools.office365.create_draft_message import O365CreateDraftMessage
return O365CreateDraftMessage
| null |
from_texts
|
"""Return VectorStore initialized from texts and embeddings."""
vs_obj = BigQueryVectorSearch(embedding=embedding, **kwargs)
vs_obj.add_texts(texts, metadatas)
return vs_obj
|
@classmethod
def from_texts(cls: Type['BigQueryVectorSearch'], texts: List[str],
embedding: Embeddings, metadatas: Optional[List[dict]]=None, **kwargs: Any
) ->'BigQueryVectorSearch':
"""Return VectorStore initialized from texts and embeddings."""
vs_obj = BigQueryVectorSearch(embedding=embedding, **kwargs)
vs_obj.add_texts(texts, metadatas)
return vs_obj
|
Return VectorStore initialized from texts and embeddings.
|
test_huggingface_pipeline_device_map
|
"""Test pipelines specifying the device map parameter."""
llm = HuggingFacePipeline.from_model_id(model_id='gpt2', task=
'text-generation', device_map='auto', pipeline_kwargs={'max_new_tokens':
10})
output = llm('Say foo:')
assert isinstance(output, str)
|
def test_huggingface_pipeline_device_map() ->None:
"""Test pipelines specifying the device map parameter."""
llm = HuggingFacePipeline.from_model_id(model_id='gpt2', task=
'text-generation', device_map='auto', pipeline_kwargs={
'max_new_tokens': 10})
output = llm('Say foo:')
assert isinstance(output, str)
|
Test pipelines specifying the device map parameter.
|
visit_comparison
|
field = f'metadata.{comparison.attribute}'
is_range_comparator = comparison.comparator in [Comparator.GT, Comparator.
GTE, Comparator.LT, Comparator.LTE]
if is_range_comparator:
return {'range': {field: {self._format_func(comparison.comparator):
comparison.value}}}
if comparison.comparator == Comparator.CONTAIN:
return {self._format_func(comparison.comparator): {field: {'query':
comparison.value}}}
if comparison.comparator == Comparator.LIKE:
return {self._format_func(comparison.comparator): {field: {'query':
comparison.value, 'fuzziness': 'AUTO'}}}
field = f'{field}.keyword' if isinstance(comparison.value, str) else field
return {self._format_func(comparison.comparator): {field: comparison.value}}
|
def visit_comparison(self, comparison: Comparison) ->Dict:
field = f'metadata.{comparison.attribute}'
is_range_comparator = comparison.comparator in [Comparator.GT,
Comparator.GTE, Comparator.LT, Comparator.LTE]
if is_range_comparator:
return {'range': {field: {self._format_func(comparison.comparator):
comparison.value}}}
if comparison.comparator == Comparator.CONTAIN:
return {self._format_func(comparison.comparator): {field: {'query':
comparison.value}}}
if comparison.comparator == Comparator.LIKE:
return {self._format_func(comparison.comparator): {field: {'query':
comparison.value, 'fuzziness': 'AUTO'}}}
field = f'{field}.keyword' if isinstance(comparison.value, str) else field
return {self._format_func(comparison.comparator): {field: comparison.value}
}
| null |
_get_last_completed_thought
|
"""Return our most recent completed LLMThought, or None if we don't have one."""
if len(self._completed_thoughts) > 0:
return self._completed_thoughts[len(self._completed_thoughts) - 1]
return None
|
def _get_last_completed_thought(self) ->Optional[LLMThought]:
"""Return our most recent completed LLMThought, or None if we don't have one."""
if len(self._completed_thoughts) > 0:
return self._completed_thoughts[len(self._completed_thoughts) - 1]
return None
|
Return our most recent completed LLMThought, or None if we don't have one.
|
client
|
import meilisearch
return meilisearch.Client(TEST_MEILI_HTTP_ADDR, TEST_MEILI_MASTER_KEY)
|
def client(self) ->'meilisearch.Client':
import meilisearch
return meilisearch.Client(TEST_MEILI_HTTP_ADDR, TEST_MEILI_MASTER_KEY)
| null |
on_tool_end
|
"""Run when tool ends running."""
|
def on_tool_end(self, output: str, **kwargs: Any) ->None:
"""Run when tool ends running."""
|
Run when tool ends running.
|
_is_vision_model
|
return 'vision' in model
|
def _is_vision_model(model: str) ->bool:
return 'vision' in model
| null |
_prompt_type
|
"""Name of prompt type."""
return 'chat'
|
@property
def _prompt_type(self) ->str:
"""Name of prompt type."""
return 'chat'
|
Name of prompt type.
|
add_documents
|
"""Adds documents to the docstore and vectorstores.
Args:
documents: List of documents to add
ids: Optional list of ids for documents. If provided should be the same
length as the list of documents. Can provided if parent documents
are already in the document store and you don't want to re-add
to the docstore. If not provided, random UUIDs will be used as
ids.
add_to_docstore: Boolean of whether to add documents to docstore.
This can be false if and only if `ids` are provided. You may want
to set this to False if the documents are already in the docstore
and you don't want to re-add them.
"""
if self.parent_splitter is not None:
documents = self.parent_splitter.split_documents(documents)
if ids is None:
doc_ids = [str(uuid.uuid4()) for _ in documents]
if not add_to_docstore:
raise ValueError(
'If ids are not passed in, `add_to_docstore` MUST be True')
else:
if len(documents) != len(ids):
raise ValueError(
'Got uneven list of documents and ids. If `ids` is provided, should be same length as `documents`.'
)
doc_ids = ids
docs = []
full_docs = []
for i, doc in enumerate(documents):
_id = doc_ids[i]
sub_docs = self.child_splitter.split_documents([doc])
for _doc in sub_docs:
_doc.metadata[self.id_key] = _id
docs.extend(sub_docs)
full_docs.append((_id, doc))
self.vectorstore.add_documents(docs)
if add_to_docstore:
self.docstore.mset(full_docs)
|
def add_documents(self, documents: List[Document], ids: Optional[List[str]]
=None, add_to_docstore: bool=True) ->None:
"""Adds documents to the docstore and vectorstores.
Args:
documents: List of documents to add
ids: Optional list of ids for documents. If provided should be the same
length as the list of documents. Can provided if parent documents
are already in the document store and you don't want to re-add
to the docstore. If not provided, random UUIDs will be used as
ids.
add_to_docstore: Boolean of whether to add documents to docstore.
This can be false if and only if `ids` are provided. You may want
to set this to False if the documents are already in the docstore
and you don't want to re-add them.
"""
if self.parent_splitter is not None:
documents = self.parent_splitter.split_documents(documents)
if ids is None:
doc_ids = [str(uuid.uuid4()) for _ in documents]
if not add_to_docstore:
raise ValueError(
'If ids are not passed in, `add_to_docstore` MUST be True')
else:
if len(documents) != len(ids):
raise ValueError(
'Got uneven list of documents and ids. If `ids` is provided, should be same length as `documents`.'
)
doc_ids = ids
docs = []
full_docs = []
for i, doc in enumerate(documents):
_id = doc_ids[i]
sub_docs = self.child_splitter.split_documents([doc])
for _doc in sub_docs:
_doc.metadata[self.id_key] = _id
docs.extend(sub_docs)
full_docs.append((_id, doc))
self.vectorstore.add_documents(docs)
if add_to_docstore:
self.docstore.mset(full_docs)
|
Adds documents to the docstore and vectorstores.
Args:
documents: List of documents to add
ids: Optional list of ids for documents. If provided should be the same
length as the list of documents. Can provided if parent documents
are already in the document store and you don't want to re-add
to the docstore. If not provided, random UUIDs will be used as
ids.
add_to_docstore: Boolean of whether to add documents to docstore.
This can be false if and only if `ids` are provided. You may want
to set this to False if the documents are already in the docstore
and you don't want to re-add them.
|
_select_relevance_score_fn
|
"""
The 'correct' relevance function
may differ depending on a few things, including:
- the distance / similarity metric used by the VectorStore
- the scale of your embeddings (OpenAI's are unit normed. Many others are not!)
- embedding dimensionality
- etc.
"""
if self.override_relevance_score_fn is not None:
return self.override_relevance_score_fn
if self._distance_strategy == DistanceStrategy.COSINE:
return self._cosine_relevance_score_fn
elif self._distance_strategy == DistanceStrategy.EUCLIDEAN:
return self._euclidean_relevance_score_fn
elif self._distance_strategy == DistanceStrategy.MAX_INNER_PRODUCT:
return self._max_inner_product_relevance_score_fn
else:
raise ValueError(
f'No supported normalization function for distance_strategy of {self._distance_strategy}.Consider providing relevance_score_fn to PGVector constructor.'
)
|
def _select_relevance_score_fn(self) ->Callable[[float], float]:
"""
The 'correct' relevance function
may differ depending on a few things, including:
- the distance / similarity metric used by the VectorStore
- the scale of your embeddings (OpenAI's are unit normed. Many others are not!)
- embedding dimensionality
- etc.
"""
if self.override_relevance_score_fn is not None:
return self.override_relevance_score_fn
if self._distance_strategy == DistanceStrategy.COSINE:
return self._cosine_relevance_score_fn
elif self._distance_strategy == DistanceStrategy.EUCLIDEAN:
return self._euclidean_relevance_score_fn
elif self._distance_strategy == DistanceStrategy.MAX_INNER_PRODUCT:
return self._max_inner_product_relevance_score_fn
else:
raise ValueError(
f'No supported normalization function for distance_strategy of {self._distance_strategy}.Consider providing relevance_score_fn to PGVector constructor.'
)
|
The 'correct' relevance function
may differ depending on a few things, including:
- the distance / similarity metric used by the VectorStore
- the scale of your embeddings (OpenAI's are unit normed. Many others are not!)
- embedding dimensionality
- etc.
|
__init__
|
"""Initialize with parameters."""
if not integration_token:
raise ValueError('integration_token must be provided')
if not database_id:
raise ValueError('database_id must be provided')
self.token = integration_token
self.database_id = database_id
self.headers = {'Authorization': 'Bearer ' + self.token, 'Content-Type':
'application/json', 'Notion-Version': '2022-06-28'}
self.request_timeout_sec = request_timeout_sec
|
def __init__(self, integration_token: str, database_id: str,
request_timeout_sec: Optional[int]=10) ->None:
"""Initialize with parameters."""
if not integration_token:
raise ValueError('integration_token must be provided')
if not database_id:
raise ValueError('database_id must be provided')
self.token = integration_token
self.database_id = database_id
self.headers = {'Authorization': 'Bearer ' + self.token, 'Content-Type':
'application/json', 'Notion-Version': '2022-06-28'}
self.request_timeout_sec = request_timeout_sec
|
Initialize with parameters.
|
_validate_tools
|
validate_tools_single_input(cls.__name__, tools)
super()._validate_tools(tools)
if len(tools) != 2:
raise ValueError(f'Exactly two tools must be specified, but got {tools}')
tool_names = {tool.name for tool in tools}
if tool_names != {'Lookup', 'Search'}:
raise ValueError(
f'Tool names should be Lookup and Search, got {tool_names}')
|
@classmethod
def _validate_tools(cls, tools: Sequence[BaseTool]) ->None:
validate_tools_single_input(cls.__name__, tools)
super()._validate_tools(tools)
if len(tools) != 2:
raise ValueError(
f'Exactly two tools must be specified, but got {tools}')
tool_names = {tool.name for tool in tools}
if tool_names != {'Lookup', 'Search'}:
raise ValueError(
f'Tool names should be Lookup and Search, got {tool_names}')
| null |
get_output_schema
|
runnable, config = self._prepare(config)
return runnable.get_output_schema(config)
|
def get_output_schema(self, config: Optional[RunnableConfig]=None) ->Type[
BaseModel]:
runnable, config = self._prepare(config)
return runnable.get_output_schema(config)
| null |
configure
|
"""Configure the async callback manager.
Args:
inheritable_callbacks (Optional[Callbacks], optional): The inheritable
callbacks. Defaults to None.
local_callbacks (Optional[Callbacks], optional): The local callbacks.
Defaults to None.
verbose (bool, optional): Whether to enable verbose mode. Defaults to False.
inheritable_tags (Optional[List[str]], optional): The inheritable tags.
Defaults to None.
local_tags (Optional[List[str]], optional): The local tags.
Defaults to None.
inheritable_metadata (Optional[Dict[str, Any]], optional): The inheritable
metadata. Defaults to None.
local_metadata (Optional[Dict[str, Any]], optional): The local metadata.
Defaults to None.
Returns:
AsyncCallbackManager: The configured async callback manager.
"""
return _configure(cls, inheritable_callbacks, local_callbacks, verbose,
inheritable_tags, local_tags, inheritable_metadata, local_metadata)
|
@classmethod
def configure(cls, inheritable_callbacks: Callbacks=None, local_callbacks:
Callbacks=None, verbose: bool=False, inheritable_tags: Optional[List[
str]]=None, local_tags: Optional[List[str]]=None, inheritable_metadata:
Optional[Dict[str, Any]]=None, local_metadata: Optional[Dict[str, Any]]
=None) ->AsyncCallbackManager:
"""Configure the async callback manager.
Args:
inheritable_callbacks (Optional[Callbacks], optional): The inheritable
callbacks. Defaults to None.
local_callbacks (Optional[Callbacks], optional): The local callbacks.
Defaults to None.
verbose (bool, optional): Whether to enable verbose mode. Defaults to False.
inheritable_tags (Optional[List[str]], optional): The inheritable tags.
Defaults to None.
local_tags (Optional[List[str]], optional): The local tags.
Defaults to None.
inheritable_metadata (Optional[Dict[str, Any]], optional): The inheritable
metadata. Defaults to None.
local_metadata (Optional[Dict[str, Any]], optional): The local metadata.
Defaults to None.
Returns:
AsyncCallbackManager: The configured async callback manager.
"""
return _configure(cls, inheritable_callbacks, local_callbacks, verbose,
inheritable_tags, local_tags, inheritable_metadata, local_metadata)
|
Configure the async callback manager.
Args:
inheritable_callbacks (Optional[Callbacks], optional): The inheritable
callbacks. Defaults to None.
local_callbacks (Optional[Callbacks], optional): The local callbacks.
Defaults to None.
verbose (bool, optional): Whether to enable verbose mode. Defaults to False.
inheritable_tags (Optional[List[str]], optional): The inheritable tags.
Defaults to None.
local_tags (Optional[List[str]], optional): The local tags.
Defaults to None.
inheritable_metadata (Optional[Dict[str, Any]], optional): The inheritable
metadata. Defaults to None.
local_metadata (Optional[Dict[str, Any]], optional): The local metadata.
Defaults to None.
Returns:
AsyncCallbackManager: The configured async callback manager.
|
_get_arn
|
region_name = self.client.meta.region_name
service = 'comprehend'
prompt_safety_endpoint = 'document-classifier-endpoint/prompt-safety'
return f'arn:aws:{service}:{region_name}:aws:{prompt_safety_endpoint}'
|
def _get_arn(self) ->str:
region_name = self.client.meta.region_name
service = 'comprehend'
prompt_safety_endpoint = 'document-classifier-endpoint/prompt-safety'
return f'arn:aws:{service}:{region_name}:aws:{prompt_safety_endpoint}'
| null |
_call
|
_run_manager = run_manager or CallbackManagerForChainRun.get_noop_manager()
question = inputs['question']
get_chat_history = self.get_chat_history or _get_chat_history
chat_history_str = get_chat_history(inputs['chat_history'])
if chat_history_str:
callbacks = _run_manager.get_child()
new_question = self.question_generator.run(question=question,
chat_history=chat_history_str, callbacks=callbacks)
else:
new_question = question
accepts_run_manager = 'run_manager' in inspect.signature(self._get_docs
).parameters
if accepts_run_manager:
docs = self._get_docs(new_question, inputs, run_manager=_run_manager)
else:
docs = self._get_docs(new_question, inputs)
output: Dict[str, Any] = {}
if self.response_if_no_docs_found is not None and len(docs) == 0:
output[self.output_key] = self.response_if_no_docs_found
else:
new_inputs = inputs.copy()
if self.rephrase_question:
new_inputs['question'] = new_question
new_inputs['chat_history'] = chat_history_str
answer = self.combine_docs_chain.run(input_documents=docs, callbacks=
_run_manager.get_child(), **new_inputs)
output[self.output_key] = answer
if self.return_source_documents:
output['source_documents'] = docs
if self.return_generated_question:
output['generated_question'] = new_question
return output
|
def _call(self, inputs: Dict[str, Any], run_manager: Optional[
    CallbackManagerForChainRun]=None) ->Dict[str, Any]:
    """Answer ``inputs['question']`` using retrieved docs plus chat history.

    When chat history is present, the question is first condensed into a
    standalone question before retrieval; the combine-docs chain then
    produces the answer.  Extra output keys are added per instance flags.
    """
    _run_manager = run_manager or CallbackManagerForChainRun.get_noop_manager()
    question = inputs['question']
    get_chat_history = self.get_chat_history or _get_chat_history
    chat_history_str = get_chat_history(inputs['chat_history'])
    if chat_history_str:
        # Condense question + history into a standalone question.
        callbacks = _run_manager.get_child()
        new_question = self.question_generator.run(question=question,
            chat_history=chat_history_str, callbacks=callbacks)
    else:
        new_question = question
    # Older _get_docs overrides may not accept run_manager; dispatch accordingly.
    accepts_run_manager = 'run_manager' in inspect.signature(self._get_docs
        ).parameters
    if accepts_run_manager:
        docs = self._get_docs(new_question, inputs, run_manager=_run_manager)
    else:
        docs = self._get_docs(new_question, inputs)
    output: Dict[str, Any] = {}
    if self.response_if_no_docs_found is not None and len(docs) == 0:
        # Short-circuit with the configured fallback answer.
        output[self.output_key] = self.response_if_no_docs_found
    else:
        new_inputs = inputs.copy()
        if self.rephrase_question:
            # Feed the condensed question (not the raw one) to the answer chain.
            new_inputs['question'] = new_question
        new_inputs['chat_history'] = chat_history_str
        answer = self.combine_docs_chain.run(input_documents=docs,
            callbacks=_run_manager.get_child(), **new_inputs)
        output[self.output_key] = answer
    if self.return_source_documents:
        output['source_documents'] = docs
    if self.return_generated_question:
        output['generated_question'] = new_question
    return output
| null |
_similarity_search_with_relevance_scores
|
"""
Default similarity search with relevance scores. Modify if necessary
in subclass.
Return docs and relevance scores in the range [0, 1].
0 is dissimilar, 1 is most similar.
Args:
query: input text
k: Number of Documents to return. Defaults to 4.
metadata: Optional, metadata filter
**kwargs: kwargs to be passed to similarity search. Should include:
score_threshold: Optional, a floating point value between 0 to 1 and
filter the resulting set of retrieved docs
Returns:
List of Tuples of (doc, similarity_score)
"""
if not self._collection:
raise ValueError(
'collection should be an instance of a Zep DocumentCollection')
if not self._collection.is_auto_embedded and self._embedding:
query_vector = self._embedding.embed_query(query)
results = self._collection.search(embedding=query_vector, limit=k,
metadata=metadata, **kwargs)
else:
results = self._collection.search(query, limit=k, metadata=metadata, **
kwargs)
return [(Document(page_content=doc.content, metadata=doc.metadata), doc.
score or 0.0) for doc in results]
|
def _similarity_search_with_relevance_scores(self, query: str, k: int=4,
    metadata: Optional[Dict[str, Any]]=None, **kwargs: Any) ->List[Tuple[
    Document, float]]:
    """
    Default similarity search with relevance scores. Modify if necessary
    in subclass.
    Return docs and relevance scores in the range [0, 1].
    0 is dissimilar, 1 is most similar.
    Args:
        query: input text
        k: Number of Documents to return. Defaults to 4.
        metadata: Optional, metadata filter
        **kwargs: kwargs to be passed to similarity search. Should include:
            score_threshold: Optional, a floating point value between 0 to 1 and
                filter the resulting set of retrieved docs
    Returns:
        List of Tuples of (doc, similarity_score)
    """
    if not self._collection:
        raise ValueError(
            'collection should be an instance of a Zep DocumentCollection')
    if not self._collection.is_auto_embedded and self._embedding:
        # Collection is not auto-embedded: embed the query locally and
        # search by vector.
        query_vector = self._embedding.embed_query(query)
        results = self._collection.search(embedding=query_vector, limit=k,
            metadata=metadata, **kwargs)
    else:
        # Let the Zep service embed the query server-side.
        results = self._collection.search(query, limit=k, metadata=metadata,
            **kwargs)
    # Zep may return a null score; substitute 0.0 in that case.
    return [(Document(page_content=doc.content, metadata=doc.metadata), doc
        .score or 0.0) for doc in results]
|
Default similarity search with relevance scores. Modify if necessary
in subclass.
Return docs and relevance scores in the range [0, 1].
0 is dissimilar, 1 is most similar.
Args:
query: input text
k: Number of Documents to return. Defaults to 4.
metadata: Optional, metadata filter
**kwargs: kwargs to be passed to similarity search. Should include:
score_threshold: Optional, a floating point value between 0 to 1 and
filter the resulting set of retrieved docs
Returns:
List of Tuples of (doc, similarity_score)
|
similarity_search_by_vector_with_relevance_scores
|
"""Accepts a query_embedding (vector), and returns documents with
similar embeddings along with their relevance scores."""
q_str = self._build_query_sql(embedding, distance_func, k, where_str)
try:
query_response = self._client.Queries.query(sql={'query': q_str})
except Exception as e:
logger.error('Exception when querying Rockset: %s\n', e)
return []
finalResult: list[Tuple[Document, float]] = []
for document in query_response.results:
metadata = {}
assert isinstance(document, dict
), 'document should be of type `dict[str,Any]`. But found: `{}`'.format(
type(document))
for k, v in document.items():
if k == self._text_key:
assert isinstance(v, str
), 'page content stored in column `{}` must be of type `str`. But found: `{}`'.format(
self._text_key, type(v))
page_content = v
elif k == 'dist':
assert isinstance(v, float
), 'Computed distance between vectors must of type `float`. But found {}'.format(
type(v))
score = v
elif k not in ['_id', '_event_time', '_meta']:
metadata[k] = v
finalResult.append((Document(page_content=page_content, metadata=
metadata), score))
return finalResult
|
def similarity_search_by_vector_with_relevance_scores(self, embedding: List
    [float], k: int=4, distance_func: DistanceFunction=DistanceFunction.
    COSINE_SIM, where_str: Optional[str]=None, **kwargs: Any) ->List[Tuple[
    Document, float]]:
    """Accepts a query_embedding (vector), and returns documents with
    similar embeddings along with their relevance scores.

    Args:
        embedding: query vector to compare against stored embeddings.
        k: maximum number of documents to return. Defaults to 4.
        distance_func: distance metric used in the generated SQL.
        where_str: optional SQL predicate to restrict the search.

    Returns:
        List of (Document, distance) tuples; empty list on query failure.
    """
    q_str = self._build_query_sql(embedding, distance_func, k, where_str)
    try:
        query_response = self._client.Queries.query(sql={'query': q_str})
    except Exception as e:
        # Best-effort: log and return no results rather than propagate.
        logger.error('Exception when querying Rockset: %s\n', e)
        return []
    results: List[Tuple[Document, float]] = []
    for document in query_response.results:
        metadata = {}
        assert isinstance(document, dict
            ), 'document should be of type `dict[str,Any]`. But found: `{}`'.format(
            type(document))
        # Bug fix: the loop variable was previously named `k`, shadowing the
        # `k` parameter above; renamed to avoid the collision.
        for field, value in document.items():
            if field == self._text_key:
                assert isinstance(value, str
                    ), 'page content stored in column `{}` must be of type `str`. But found: `{}`'.format(
                    self._text_key, type(value))
                page_content = value
            elif field == 'dist':
                assert isinstance(value, float
                    ), 'Computed distance between vectors must of type `float`. But found {}'.format(
                    type(value))
                score = value
            elif field not in ['_id', '_event_time', '_meta']:
                # Everything except Rockset-internal fields is user metadata.
                metadata[field] = value
        results.append((Document(page_content=page_content, metadata=
            metadata), score))
    return results
|
Accepts a query_embedding (vector), and returns documents with
similar embeddings along with their relevance scores.
|
embeddings
|
return self._embedding
|
@property
def embeddings(self) ->Embeddings:
    """Access the :class:`Embeddings` object used by this vector store."""
    return self._embedding
| null |
setter
|
return ContextSet(_key, _value, prefix=self.prefix, **kwargs)
|
def setter(self, _key: Optional[str]=None, _value: Optional[SetValue]=None,
    /, **kwargs: SetValue) ->ContextSet:
    """Create a :class:`ContextSet` under this context's prefix.

    Accepts either a single positional key/value pair or keyword pairs.
    """
    return ContextSet(_key, _value, prefix=self.prefix, **kwargs)
| null |
_default_api_url
|
return f'https://api-inference.huggingface.co/pipeline/feature-extraction/{self.model_name}'
|
@property
def _default_api_url(self) ->str:
    """Default HuggingFace feature-extraction inference URL for this model."""
    return (
        f'https://api-inference.huggingface.co/pipeline/feature-extraction/{self.model_name}'
        )
| null |
_parse_note
|
note_dict: Dict[str, Any] = {}
resources = []
def add_prefix(element_tag: str) ->str:
if prefix is None:
return element_tag
return f'{prefix}.{element_tag}'
for elem in note:
if elem.tag == 'content':
note_dict[elem.tag] = EverNoteLoader._parse_content(elem.text)
note_dict['content-raw'] = elem.text
elif elem.tag == 'resource':
resources.append(EverNoteLoader._parse_resource(elem))
elif elem.tag == 'created' or elem.tag == 'updated':
note_dict[elem.tag] = strptime(elem.text, '%Y%m%dT%H%M%SZ')
elif elem.tag == 'note-attributes':
additional_attributes = EverNoteLoader._parse_note(elem, elem.tag)
note_dict.update(additional_attributes)
else:
note_dict[elem.tag] = elem.text
if len(resources) > 0:
note_dict['resource'] = resources
return {add_prefix(key): value for key, value in note_dict.items()}
|
@staticmethod
def _parse_note(note: List, prefix: Optional[str]=None) ->dict:
    """Convert an ENEX ``<note>`` element into a flat dict.

    Child elements become keys; ``resource`` children are collected into a
    list, ``created``/``updated`` timestamps are parsed, and nested
    ``note-attributes`` are merged with a dotted prefix.
    """
    parsed: Dict[str, Any] = {}
    attachments = []
    for child in note:
        tag = child.tag
        if tag == 'content':
            parsed[tag] = EverNoteLoader._parse_content(child.text)
            # Keep the raw ENML alongside the parsed text.
            parsed['content-raw'] = child.text
        elif tag == 'resource':
            attachments.append(EverNoteLoader._parse_resource(child))
        elif tag in ('created', 'updated'):
            parsed[tag] = strptime(child.text, '%Y%m%dT%H%M%SZ')
        elif tag == 'note-attributes':
            # Recurse; nested keys come back prefixed with 'note-attributes.'.
            parsed.update(EverNoteLoader._parse_note(child, tag))
        else:
            parsed[tag] = child.text
    if attachments:
        parsed['resource'] = attachments
    if prefix is None:
        return dict(parsed)
    return {f'{prefix}.{key}': value for key, value in parsed.items()}
| null |
_import_databricks
|
from langchain_community.llms.databricks import Databricks
return Databricks
|
def _import_databricks() ->Any:
    """Lazily import and return the Databricks LLM class."""
    from langchain_community.llms.databricks import Databricks
    return Databricks
| null |
parse_triples
|
"""Parse knowledge triples from the knowledge string."""
knowledge_str = knowledge_str.strip()
if not knowledge_str or knowledge_str == 'NONE':
return []
triple_strs = knowledge_str.split(KG_TRIPLE_DELIMITER)
results = []
for triple_str in triple_strs:
try:
kg_triple = KnowledgeTriple.from_string(triple_str)
except ValueError:
continue
results.append(kg_triple)
return results
|
def parse_triples(knowledge_str: str) ->List[KnowledgeTriple]:
    """Parse knowledge triples from the knowledge string."""
    cleaned = knowledge_str.strip()
    # An empty string or the literal sentinel 'NONE' yields no triples.
    if not cleaned or cleaned == 'NONE':
        return []
    triples: List[KnowledgeTriple] = []
    for chunk in cleaned.split(KG_TRIPLE_DELIMITER):
        try:
            triples.append(KnowledgeTriple.from_string(chunk))
        except ValueError:
            # Skip fragments that do not form a well-formed triple.
            continue
    return triples
|
Parse knowledge triples from the knowledge string.
|
similarity_search_with_score_by_vector
|
"""Return docs most similar to query.
Args:
embedding: Embedding vector to look up documents similar to.
k: Number of Documents to return. Defaults to 4.
filter (Optional[Dict[str, Any]]): Filter by metadata. Defaults to None.
fetch_k: (Optional[int]) Number of Documents to fetch before filtering.
Defaults to 20.
**kwargs: kwargs to be passed to similarity search. Can include:
score_threshold: Optional, a floating point value between 0 to 1 to
filter the resulting set of retrieved docs
Returns:
List of documents most similar to the query text and L2 distance
in float for each. Lower score represents more similarity.
"""
faiss = dependable_faiss_import()
vector = np.array([embedding], dtype=np.float32)
if self._normalize_L2:
faiss.normalize_L2(vector)
scores, indices = self.index.search(vector, k if filter is None else fetch_k)
docs = []
for j, i in enumerate(indices[0]):
if i == -1:
continue
_id = self.index_to_docstore_id[i]
doc = self.docstore.search(_id)
if not isinstance(doc, Document):
raise ValueError(f'Could not find document for id {_id}, got {doc}')
if filter is not None:
filter = {key: ([value] if not isinstance(value, list) else value) for
key, value in filter.items()}
if all(doc.metadata.get(key) in value for key, value in filter.items()
):
docs.append((doc, scores[0][j]))
else:
docs.append((doc, scores[0][j]))
score_threshold = kwargs.get('score_threshold')
if score_threshold is not None:
cmp = operator.ge if self.distance_strategy in (DistanceStrategy.
MAX_INNER_PRODUCT, DistanceStrategy.JACCARD) else operator.le
docs = [(doc, similarity) for doc, similarity in docs if cmp(similarity,
score_threshold)]
return docs[:k]
|
def similarity_search_with_score_by_vector(self, embedding: List[float], k:
    int=4, filter: Optional[Dict[str, Any]]=None, fetch_k: int=20, **kwargs:
    Any) ->List[Tuple[Document, float]]:
    """Return docs most similar to query.
    Args:
        embedding: Embedding vector to look up documents similar to.
        k: Number of Documents to return. Defaults to 4.
        filter (Optional[Dict[str, Any]]): Filter by metadata. Defaults to None.
        fetch_k: (Optional[int]) Number of Documents to fetch before filtering.
            Defaults to 20.
        **kwargs: kwargs to be passed to similarity search. Can include:
            score_threshold: Optional, a floating point value between 0 to 1 to
                filter the resulting set of retrieved docs
    Returns:
        List of documents most similar to the query text and L2 distance
        in float for each. Lower score represents more similarity.
    """
    faiss = dependable_faiss_import()
    vector = np.array([embedding], dtype=np.float32)
    if self._normalize_L2:
        # Normalize the query the same way indexed vectors were normalized.
        faiss.normalize_L2(vector)
    # Over-fetch when a metadata filter is set so k results can survive it.
    scores, indices = self.index.search(vector, k if filter is None else
        fetch_k)
    docs = []
    for j, i in enumerate(indices[0]):
        if i == -1:
            # FAISS pads with -1 when fewer than the requested hits exist.
            continue
        _id = self.index_to_docstore_id[i]
        doc = self.docstore.search(_id)
        if not isinstance(doc, Document):
            raise ValueError(f'Could not find document for id {_id}, got {doc}'
                )
        if filter is not None:
            # Wrap scalar filter values in lists so membership tests are uniform.
            filter = {key: ([value] if not isinstance(value, list) else
                value) for key, value in filter.items()}
            if all(doc.metadata.get(key) in value for key, value in filter.
                items()):
                docs.append((doc, scores[0][j]))
        else:
            docs.append((doc, scores[0][j]))
    score_threshold = kwargs.get('score_threshold')
    if score_threshold is not None:
        # Similarity-like metrics keep scores >= threshold; distance-like <=.
        cmp = operator.ge if self.distance_strategy in (DistanceStrategy.
            MAX_INNER_PRODUCT, DistanceStrategy.JACCARD) else operator.le
        docs = [(doc, similarity) for doc, similarity in docs if cmp(
            similarity, score_threshold)]
    return docs[:k]
|
Return docs most similar to query.
Args:
embedding: Embedding vector to look up documents similar to.
k: Number of Documents to return. Defaults to 4.
filter (Optional[Dict[str, Any]]): Filter by metadata. Defaults to None.
fetch_k: (Optional[int]) Number of Documents to fetch before filtering.
Defaults to 20.
**kwargs: kwargs to be passed to similarity search. Can include:
score_threshold: Optional, a floating point value between 0 to 1 to
filter the resulting set of retrieved docs
Returns:
List of documents most similar to the query text and L2 distance
in float for each. Lower score represents more similarity.
|
init_in_memory
|
from docarray import BaseDoc
from docarray.index import InMemoryExactNNIndex
class MyDoc(BaseDoc):
title: str
title_embedding: NdArray[32]
other_emb: NdArray[32]
year: int
embeddings = FakeEmbeddings(size=32)
in_memory_db = InMemoryExactNNIndex[MyDoc]()
in_memory_db.index([MyDoc(title=f'My document {i}', title_embedding=np.
array(embeddings.embed_query(f'fake emb {i}')), other_emb=np.array(
embeddings.embed_query(f'other fake emb {i}')), year=i) for i in range(
100)])
filter_query = {'year': {'$lte': 90}}
return in_memory_db, filter_query, embeddings
|
@pytest.fixture
def init_in_memory() ->Tuple[InMemoryExactNNIndex, Dict[str, Any],
    FakeEmbeddings]:
    """Pytest fixture: in-memory docarray index seeded with 100 fake docs.

    Returns the index, a filter query matching documents with year <= 90,
    and the FakeEmbeddings instance used to embed the documents.
    """
    from docarray import BaseDoc
    from docarray.index import InMemoryExactNNIndex
    class MyDoc(BaseDoc):
        # Schema: two 32-dim embedding fields plus a filterable year.
        title: str
        title_embedding: NdArray[32]
        other_emb: NdArray[32]
        year: int
    embeddings = FakeEmbeddings(size=32)
    in_memory_db = InMemoryExactNNIndex[MyDoc]()
    in_memory_db.index([MyDoc(title=f'My document {i}', title_embedding=np.
        array(embeddings.embed_query(f'fake emb {i}')), other_emb=np.array(
        embeddings.embed_query(f'other fake emb {i}')), year=i) for i in
        range(100)])
    filter_query = {'year': {'$lte': 90}}
    return in_memory_db, filter_query, embeddings
| null |
__init__
|
super().__init__()
warnings.warn(
"""`OSSContentFormatter` will be deprecated in the future.
Please use `GPT2ContentFormatter` instead.
"""
)
|
def __init__(self) ->None:
    """Emit a deprecation warning, then initialize as the parent formatter."""
    super().__init__()
    warnings.warn(
        """`OSSContentFormatter` will be deprecated in the future.
        Please use `GPT2ContentFormatter` instead.
        """
        )
| null |
test_initialize_watsonxllm_cpd_bad_path_password_without_username
|
try:
WatsonxLLM(model_id='google/flan-ul2', url=
'https://cpd-zen.apps.cpd48.cp.fyre.ibm.com', password='test_password')
except ValueError as e:
assert 'WATSONX_USERNAME' in e.__str__()
|
def test_initialize_watsonxllm_cpd_bad_path_password_without_username() ->None:
    """A CPD password without a username must raise a ValueError that
    mentions the missing WATSONX_USERNAME variable."""
    try:
        WatsonxLLM(model_id='google/flan-ul2', url=
            'https://cpd-zen.apps.cpd48.cp.fyre.ibm.com', password=
            'test_password')
    except ValueError as e:
        assert 'WATSONX_USERNAME' in e.__str__()
    else:
        # Bug fix: the test previously passed silently when no error was raised.
        raise AssertionError('expected ValueError was not raised')
| null |
test_chat_ernie_bot_with_model_name
|
chat = ErnieBotChat(model_name='ERNIE-Bot')
message = HumanMessage(content='Hello')
response = chat([message])
assert isinstance(response, AIMessage)
assert isinstance(response.content, str)
|
def test_chat_ernie_bot_with_model_name() ->None:
    """Smoke test: an explicit model name still yields a string AIMessage."""
    chat = ErnieBotChat(model_name='ERNIE-Bot')
    prompt = HumanMessage(content='Hello')
    result = chat([prompt])
    assert isinstance(result, AIMessage)
    assert isinstance(result.content, str)
| null |
test_anthropic_streaming
|
"""Test streaming tokens from anthropic."""
chat = ChatAnthropic(model='test', streaming=True)
message = HumanMessage(content='Hello')
response = chat([message])
assert isinstance(response, AIMessage)
assert isinstance(response.content, str)
|
@pytest.mark.scheduled
def test_anthropic_streaming() ->None:
    """Test streaming tokens from anthropic."""
    model = ChatAnthropic(model='test', streaming=True)
    prompt = HumanMessage(content='Hello')
    result = model([prompt])
    assert isinstance(result, AIMessage)
    assert isinstance(result.content, str)
|
Test streaming tokens from anthropic.
|
get_num_tokens
|
return len(text.split())
|
def get_num_tokens(self, text: str) ->int:
    """Approximate the token count as the number of whitespace-separated words."""
    words = text.split()
    return len(words)
| null |
_should_continue
|
if self.max_iterations is not None and iterations >= self.max_iterations:
return False
if self.max_execution_time is not None and time_elapsed >= self.max_execution_time:
return False
return True
|
def _should_continue(self, iterations: int, time_elapsed: float) ->bool:
if self.max_iterations is not None and iterations >= self.max_iterations:
return False
if (self.max_execution_time is not None and time_elapsed >= self.
max_execution_time):
return False
return True
| null |
__init__
|
"""Initialize the record manager.
Args:
namespace (str): The namespace for the record manager.
"""
self.namespace = namespace
|
def __init__(self, namespace: str) ->None:
    """Initialize the record manager.
    Args:
        namespace (str): The namespace for the record manager.
    """
    # Namespace scopes all records managed by this instance.
    self.namespace = namespace
|
Initialize the record manager.
Args:
namespace (str): The namespace for the record manager.
|
test_vectara_from_files
|
"""Test end to end construction and search."""
urls = [
'https://papers.nips.cc/paper_files/paper/2017/file/3f5ee243547dee91fbd053c1c4a845aa-Paper.pdf'
,
'https://www.microsoft.com/en-us/research/wp-content/uploads/2016/02/Final-DengYu-NOW-Book-DeepLearn2013-ForLecturesJuly2.docx'
]
files_list = []
for url in urls:
name = tempfile.NamedTemporaryFile().name
urllib.request.urlretrieve(url, name)
files_list.append(name)
docsearch: Vectara = Vectara()
doc_ids = docsearch.add_files(files_list=files_list, embedding=
FakeEmbeddings(), metadatas=[{'url': url, 'test_num': '2'} for url in urls]
)
output = docsearch.similarity_search(
'By the commonly adopted machine learning tradition', k=1,
n_sentence_context=0, filter='doc.test_num = 2')
assert output[0
].page_content == 'By the commonly adopted machine learning tradition (e.g., Chapter 28 in Murphy, 2012; Deng and Li, 2013), it may be natural to just classify deep learning techniques into deep discriminative models (e.g., DNNs) and deep probabilistic generative models (e.g., DBN, Deep Boltzmann Machine (DBM)).'
output = docsearch.similarity_search(
'By the commonly adopted machine learning tradition', k=1,
n_sentence_context=1, filter='doc.test_num = 2')
assert output[0
].page_content == 'Note the use of “hybrid” in 3) above is different from that used sometimes in the literature, which for example refers to the hybrid systems for speech recognition feeding the output probabilities of a neural network into an HMM (Bengio et al., 1991; Bourlard and Morgan, 1993; Morgan, 2012). By the commonly adopted machine learning tradition (e.g., Chapter 28 in Murphy, 2012; Deng and Li, 2013), it may be natural to just classify deep learning techniques into deep discriminative models (e.g., DNNs) and deep probabilistic generative models (e.g., DBN, Deep Boltzmann Machine (DBM)). This classification scheme, however, misses a key insight gained in deep learning research about how generative models can greatly improve the training of DNNs and other deep discriminative models via better regularization.'
for doc_id in doc_ids:
docsearch._delete_doc(doc_id)
|
def test_vectara_from_files() ->None:
    """Test end to end construction and search."""
    urls = [
        'https://papers.nips.cc/paper_files/paper/2017/file/3f5ee243547dee91fbd053c1c4a845aa-Paper.pdf'
        ,
        'https://www.microsoft.com/en-us/research/wp-content/uploads/2016/02/Final-DengYu-NOW-Book-DeepLearn2013-ForLecturesJuly2.docx'
        ]
    # Download both source files into temporary paths.
    files_list = []
    for url in urls:
        name = tempfile.NamedTemporaryFile().name
        urllib.request.urlretrieve(url, name)
        files_list.append(name)
    docsearch: Vectara = Vectara()
    # Upload with metadata marking this test run (test_num=2) so the
    # filter below only matches documents indexed here.
    doc_ids = docsearch.add_files(files_list=files_list, embedding=
        FakeEmbeddings(), metadatas=[{'url': url, 'test_num': '2'} for url in
        urls])
    # n_sentence_context=0: expect only the matching sentence back.
    output = docsearch.similarity_search(
        'By the commonly adopted machine learning tradition', k=1,
        n_sentence_context=0, filter='doc.test_num = 2')
    assert output[0
        ].page_content == 'By the commonly adopted machine learning tradition (e.g., Chapter 28 in Murphy, 2012; Deng and Li, 2013), it may be natural to just classify deep learning techniques into deep discriminative models (e.g., DNNs) and deep probabilistic generative models (e.g., DBN, Deep Boltzmann Machine (DBM)).'
    # n_sentence_context=1: expect one sentence of context on each side.
    output = docsearch.similarity_search(
        'By the commonly adopted machine learning tradition', k=1,
        n_sentence_context=1, filter='doc.test_num = 2')
    assert output[0
        ].page_content == 'Note the use of “hybrid” in 3) above is different from that used sometimes in the literature, which for example refers to the hybrid systems for speech recognition feeding the output probabilities of a neural network into an HMM (Bengio et al., 1991; Bourlard and Morgan, 1993; Morgan, 2012). By the commonly adopted machine learning tradition (e.g., Chapter 28 in Murphy, 2012; Deng and Li, 2013), it may be natural to just classify deep learning techniques into deep discriminative models (e.g., DNNs) and deep probabilistic generative models (e.g., DBN, Deep Boltzmann Machine (DBM)). This classification scheme, however, misses a key insight gained in deep learning research about how generative models can greatly improve the training of DNNs and other deep discriminative models via better regularization.'
    # Clean up the uploaded documents.
    for doc_id in doc_ids:
        docsearch._delete_doc(doc_id)
|
Test end to end construction and search.
|
_identifying_params
|
"""Get the identifying parameters."""
_model_kwargs = self.model_kwargs or {}
return {**{'model_kwargs': _model_kwargs}}
|
@property
def _identifying_params(self) ->Dict[str, Any]:
"""Get the identifying parameters."""
_model_kwargs = self.model_kwargs or {}
return {**{'model_kwargs': _model_kwargs}}
|
Get the identifying parameters.
|
test_import_storage
|
from langchain_community.storage.upstash_redis import UpstashRedisStore
|
@pytest.mark.requires('upstash_redis')
def test_import_storage() ->None:
    """Importing UpstashRedisStore succeeds when upstash_redis is installed."""
    from langchain_community.storage.upstash_redis import UpstashRedisStore
| null |
convert_bytes
|
if isinstance(data, bytes):
return data.decode('ascii')
if isinstance(data, dict):
return dict(map(convert_bytes, data.items()))
if isinstance(data, list):
return list(map(convert_bytes, data))
if isinstance(data, tuple):
return map(convert_bytes, data)
return data
|
def convert_bytes(data: Any) ->Any:
    """Recursively decode ``bytes`` values to ASCII ``str``.

    Containers (dict, list, tuple) are rebuilt with their contents
    converted; any other value is returned unchanged.
    """
    if isinstance(data, bytes):
        return data.decode('ascii')
    if isinstance(data, dict):
        return dict(map(convert_bytes, data.items()))
    if isinstance(data, list):
        return list(map(convert_bytes, data))
    if isinstance(data, tuple):
        # Bug fix: previously returned a bare ``map`` iterator, not a tuple.
        return tuple(map(convert_bytes, data))
    return data
| null |
_default_true
|
return True
|
def _default_true(_: Dict[str, Any]) ->bool:
return True
| null |
_parse_input
|
"""Parse input of the form data["key1"][0]["key2"] into a list of keys."""
_res = re.findall('\\[.*?]', text)
res = [i[1:-1].replace('"', '').replace("'", '') for i in _res]
res = [(int(i) if i.isdigit() else i) for i in res]
return res
|
def _parse_input(text: str) ->List[Union[str, int]]:
"""Parse input of the form data["key1"][0]["key2"] into a list of keys."""
_res = re.findall('\\[.*?]', text)
res = [i[1:-1].replace('"', '').replace("'", '') for i in _res]
res = [(int(i) if i.isdigit() else i) for i in res]
return res
|
Parse input of the form data["key1"][0]["key2"] into a list of keys.
|
test_anthropic_model_name_param
|
llm = Anthropic(model_name='foo')
assert llm.model == 'foo'
|
@pytest.mark.requires('anthropic')
def test_anthropic_model_name_param() ->None:
    """The `model_name` constructor argument populates the `model` attribute."""
    client = Anthropic(model_name='foo')
    assert client.model == 'foo'
| null |
lc_secrets
|
return {'url': 'WATSONX_URL', 'apikey': 'WATSONX_APIKEY', 'token':
'WATSONX_TOKEN', 'password': 'WATSONX_PASSWORD', 'username':
'WATSONX_USERNAME', 'instance_id': 'WATSONX_INSTANCE_ID'}
|
@property
def lc_secrets(self) ->Dict[str, str]:
    """Map constructor argument names to the env vars that hold their secrets."""
    return {'url': 'WATSONX_URL', 'apikey': 'WATSONX_APIKEY', 'token':
        'WATSONX_TOKEN', 'password': 'WATSONX_PASSWORD', 'username':
        'WATSONX_USERNAME', 'instance_id': 'WATSONX_INSTANCE_ID'}
| null |
test_huggingface_endpoint_summarization
|
"""Test valid call to HuggingFace summarization model."""
llm = HuggingFaceEndpoint(endpoint_url='', task='summarization')
output = llm('Say foo:')
assert isinstance(output, str)
|
@unittest.skip(
    'This test requires an inference endpoint. Tested with Hugging Face endpoints'
    )
def test_huggingface_endpoint_summarization() ->None:
    """Test valid call to HuggingFace summarization model."""
    llm = HuggingFaceEndpoint(endpoint_url='', task='summarization')
    result = llm('Say foo:')
    assert isinstance(result, str)
|
Test valid call to HuggingFace summarization model.
|
get_tools
|
"""Get the tools in the toolkit."""
tools: List[BaseTool] = []
for vectorstore_info in self.vectorstores:
description = VectorStoreQATool.get_description(vectorstore_info.name,
vectorstore_info.description)
qa_tool = VectorStoreQATool(name=vectorstore_info.name, description=
description, vectorstore=vectorstore_info.vectorstore, llm=self.llm)
tools.append(qa_tool)
return tools
|
def get_tools(self) ->List[BaseTool]:
    """Get the tools in the toolkit."""
    # One QA tool per registered vector store, described by its metadata.
    return [VectorStoreQATool(name=info.name, description=VectorStoreQATool
        .get_description(info.name, info.description), vectorstore=info.
        vectorstore, llm=self.llm) for info in self.vectorstores]
|
Get the tools in the toolkit.
|
_load_map_reduce_chain
|
_question_prompt = (question_prompt or map_reduce_prompt.
QUESTION_PROMPT_SELECTOR.get_prompt(llm))
_combine_prompt = (combine_prompt or map_reduce_prompt.
COMBINE_PROMPT_SELECTOR.get_prompt(llm))
map_chain = LLMChain(llm=llm, prompt=_question_prompt, verbose=verbose,
callback_manager=callback_manager, callbacks=callbacks)
_reduce_llm = reduce_llm or llm
reduce_chain = LLMChain(llm=_reduce_llm, prompt=_combine_prompt, verbose=
verbose, callback_manager=callback_manager, callbacks=callbacks)
combine_documents_chain = StuffDocumentsChain(llm_chain=reduce_chain,
document_variable_name=combine_document_variable_name, verbose=verbose,
callback_manager=callback_manager, callbacks=callbacks)
if collapse_prompt is None:
collapse_chain = None
if collapse_llm is not None:
raise ValueError(
'collapse_llm provided, but collapse_prompt was not: please provide one or stop providing collapse_llm.'
)
else:
_collapse_llm = collapse_llm or llm
collapse_chain = StuffDocumentsChain(llm_chain=LLMChain(llm=
_collapse_llm, prompt=collapse_prompt, verbose=verbose,
callback_manager=callback_manager, callbacks=callbacks),
document_variable_name=combine_document_variable_name, verbose=
verbose, callback_manager=callback_manager)
reduce_documents_chain = ReduceDocumentsChain(combine_documents_chain=
combine_documents_chain, collapse_documents_chain=collapse_chain,
token_max=token_max, verbose=verbose)
return MapReduceDocumentsChain(llm_chain=map_chain, document_variable_name=
map_reduce_document_variable_name, reduce_documents_chain=
reduce_documents_chain, verbose=verbose, callback_manager=
callback_manager, callbacks=callbacks, **kwargs)
|
def _load_map_reduce_chain(llm: BaseLanguageModel, question_prompt:
    Optional[BasePromptTemplate]=None, combine_prompt: Optional[
    BasePromptTemplate]=None, combine_document_variable_name: str=
    'summaries', map_reduce_document_variable_name: str='context',
    collapse_prompt: Optional[BasePromptTemplate]=None, reduce_llm:
    Optional[BaseLanguageModel]=None, collapse_llm: Optional[
    BaseLanguageModel]=None, verbose: Optional[bool]=None, callback_manager:
    Optional[BaseCallbackManager]=None, callbacks: Callbacks=None,
    token_max: int=3000, **kwargs: Any) ->MapReduceDocumentsChain:
    """Assemble a map-reduce documents chain.

    Builds a map step (per-document question answering), a reduce step
    (combining answers), and an optional collapse step used when combined
    documents exceed ``token_max`` tokens.  ``reduce_llm`` / ``collapse_llm``
    default to ``llm`` when not given.

    Raises:
        ValueError: If ``collapse_llm`` is provided without ``collapse_prompt``.
    """
    # Fall back to model-appropriate default prompts when none are supplied.
    _question_prompt = (question_prompt or map_reduce_prompt.
        QUESTION_PROMPT_SELECTOR.get_prompt(llm))
    _combine_prompt = (combine_prompt or map_reduce_prompt.
        COMBINE_PROMPT_SELECTOR.get_prompt(llm))
    map_chain = LLMChain(llm=llm, prompt=_question_prompt, verbose=verbose,
        callback_manager=callback_manager, callbacks=callbacks)
    _reduce_llm = reduce_llm or llm
    reduce_chain = LLMChain(llm=_reduce_llm, prompt=_combine_prompt,
        verbose=verbose, callback_manager=callback_manager, callbacks=callbacks
        )
    combine_documents_chain = StuffDocumentsChain(llm_chain=reduce_chain,
        document_variable_name=combine_document_variable_name, verbose=
        verbose, callback_manager=callback_manager, callbacks=callbacks)
    if collapse_prompt is None:
        collapse_chain = None
        if collapse_llm is not None:
            raise ValueError(
                'collapse_llm provided, but collapse_prompt was not: please provide one or stop providing collapse_llm.'
                )
    else:
        _collapse_llm = collapse_llm or llm
        collapse_chain = StuffDocumentsChain(llm_chain=LLMChain(llm=
            _collapse_llm, prompt=collapse_prompt, verbose=verbose,
            callback_manager=callback_manager, callbacks=callbacks),
            document_variable_name=combine_document_variable_name, verbose=
            verbose, callback_manager=callback_manager)
    reduce_documents_chain = ReduceDocumentsChain(combine_documents_chain=
        combine_documents_chain, collapse_documents_chain=collapse_chain,
        token_max=token_max, verbose=verbose)
    return MapReduceDocumentsChain(llm_chain=map_chain,
        document_variable_name=map_reduce_document_variable_name,
        reduce_documents_chain=reduce_documents_chain, verbose=verbose,
        callback_manager=callback_manager, callbacks=callbacks, **kwargs)
| null |
parse_result
|
generation = result[0]
if not isinstance(generation, ChatGeneration):
raise OutputParserException(
'This output parser can only be used with a chat generation.')
message = generation.message
try:
func_call = copy.deepcopy(message.additional_kwargs['function_call'])
except KeyError as exc:
raise OutputParserException(f'Could not parse function call: {exc}')
if self.args_only:
return func_call['arguments']
return func_call
|
def parse_result(self, result: List[Generation], *, partial: bool=False) ->Any:
generation = result[0]
if not isinstance(generation, ChatGeneration):
raise OutputParserException(
'This output parser can only be used with a chat generation.')
message = generation.message
try:
func_call = copy.deepcopy(message.additional_kwargs['function_call'])
except KeyError as exc:
raise OutputParserException(f'Could not parse function call: {exc}')
if self.args_only:
return func_call['arguments']
return func_call
| null |
add_texts
|
"""Embed texts and add to the vector store.
Args:
texts: Iterable of strings to add to the vectorstore.
metadatas: Optional list of metadatas associated with the texts.
Returns:
List of ids from adding the texts into the vectorstore.
"""
ids: List[str] = []
embeddings = self.embedding.embed_documents(list(texts))
for i, (t, e) in enumerate(zip(texts, embeddings)):
m = metadatas[i] if metadatas else {}
doc = self.doc_cls(text=t, embedding=e, metadata=m)
self.doc_index.index([doc])
ids.append(str(doc.id))
return ids
|
def add_texts(self, texts: Iterable[str], metadatas: Optional[List[dict]]=
None, **kwargs: Any) ->List[str]:
"""Embed texts and add to the vector store.
Args:
texts: Iterable of strings to add to the vectorstore.
metadatas: Optional list of metadatas associated with the texts.
Returns:
List of ids from adding the texts into the vectorstore.
"""
ids: List[str] = []
embeddings = self.embedding.embed_documents(list(texts))
for i, (t, e) in enumerate(zip(texts, embeddings)):
m = metadatas[i] if metadatas else {}
doc = self.doc_cls(text=t, embedding=e, metadata=m)
self.doc_index.index([doc])
ids.append(str(doc.id))
return ids
|
Embed texts and add to the vector store.
Args:
texts: Iterable of strings to add to the vectorstore.
metadatas: Optional list of metadatas associated with the texts.
Returns:
List of ids from adding the texts into the vectorstore.
|
_prepare_output
|
parsed = {'score': result['score']}
if RUN_KEY in result:
parsed[RUN_KEY] = result[RUN_KEY]
return parsed
|
def _prepare_output(self, result: dict) ->dict:
parsed = {'score': result['score']}
if RUN_KEY in result:
parsed[RUN_KEY] = result[RUN_KEY]
return parsed
| null |
test_marqo_weighted_query
|
"""Test end to end construction and search."""
texts = ['Smartphone', 'Telephone']
marqo_search = Marqo.from_texts(texts=texts, index_name=INDEX_NAME, url=
DEFAULT_MARQO_URL, api_key=DEFAULT_MARQO_API_KEY, verbose=False)
results = marqo_search.similarity_search({'communications device': 1.0,
'Old technology': -5.0}, k=1)
assert results == [Document(page_content='Smartphone')]
|
def test_marqo_weighted_query(client: Marqo) ->None:
"""Test end to end construction and search."""
texts = ['Smartphone', 'Telephone']
marqo_search = Marqo.from_texts(texts=texts, index_name=INDEX_NAME, url
=DEFAULT_MARQO_URL, api_key=DEFAULT_MARQO_API_KEY, verbose=False)
results = marqo_search.similarity_search({'communications device': 1.0,
'Old technology': -5.0}, k=1)
assert results == [Document(page_content='Smartphone')]
|
Test end to end construction and search.
|
test_character_text_splitter_short_words_first
|
"""Test splitting by character count when shorter words are first."""
text = 'a a foo bar baz'
splitter = CharacterTextSplitter(separator=' ', chunk_size=3, chunk_overlap=1)
output = splitter.split_text(text)
expected_output = ['a a', 'foo', 'bar', 'baz']
assert output == expected_output
|
def test_character_text_splitter_short_words_first() ->None:
"""Test splitting by character count when shorter words are first."""
text = 'a a foo bar baz'
splitter = CharacterTextSplitter(separator=' ', chunk_size=3,
chunk_overlap=1)
output = splitter.split_text(text)
expected_output = ['a a', 'foo', 'bar', 'baz']
assert output == expected_output
|
Test splitting by character count when shorter words are first.
|
_Yield
|
self.write('(')
self.write('yield')
if t.value:
self.write(' ')
self.dispatch(t.value)
self.write(')')
|
def _Yield(self, t):
self.write('(')
self.write('yield')
if t.value:
self.write(' ')
self.dispatch(t.value)
self.write(')')
| null |
mdelete
|
"""Delete the given keys."""
_keys = [self._get_prefixed_key(key) for key in keys]
self.client.delete(*_keys)
|
def mdelete(self, keys: Sequence[str]) ->None:
"""Delete the given keys."""
_keys = [self._get_prefixed_key(key) for key in keys]
self.client.delete(*_keys)
|
Delete the given keys.
|
get_prompt_strings
|
role_strings: List[Tuple[Type[BaseMessagePromptTemplate], str]] = []
role_strings.append((HumanMessagePromptTemplate,
"""Question: {question}
Answer: Let's work this out in a step by step way to be sure we have the right answer:"""
))
if stage == 'ideation':
return role_strings
role_strings.extend([*[(AIMessagePromptTemplate, 'Idea ' + str(i + 1) +
': {idea_' + str(i + 1) + '}') for i in range(self.n_ideas)], (
HumanMessagePromptTemplate,
f"You are a researcher tasked with investigating the {self.n_ideas} response options provided. List the flaws and faulty logic of each answer options. Let'w work this out in a step by step way to be sure we have all the errors:"
)])
if stage == 'critique':
return role_strings
role_strings.extend([(AIMessagePromptTemplate, 'Critique: {critique}'), (
HumanMessagePromptTemplate,
f"You are a resolver tasked with 1) finding which of the {self.n_ideas} answer options the researcher thought was best,2) improving that answer and 3) printing the answer in full. Don't output anything for step 1 or 2, only the full answer in 3. Let's work this out in a step by step way to be sure we have the right answer:"
)])
if stage == 'resolve':
return role_strings
raise ValueError(
f"stage should be either 'ideation', 'critique' or 'resolve', but it is '{stage}'. This should never happen."
)
|
def get_prompt_strings(self, stage: str) ->List[Tuple[Type[
BaseMessagePromptTemplate], str]]:
role_strings: List[Tuple[Type[BaseMessagePromptTemplate], str]] = []
role_strings.append((HumanMessagePromptTemplate,
"""Question: {question}
Answer: Let's work this out in a step by step way to be sure we have the right answer:"""
))
if stage == 'ideation':
return role_strings
role_strings.extend([*[(AIMessagePromptTemplate, 'Idea ' + str(i + 1) +
': {idea_' + str(i + 1) + '}') for i in range(self.n_ideas)], (
HumanMessagePromptTemplate,
f"You are a researcher tasked with investigating the {self.n_ideas} response options provided. List the flaws and faulty logic of each answer options. Let'w work this out in a step by step way to be sure we have all the errors:"
)])
if stage == 'critique':
return role_strings
role_strings.extend([(AIMessagePromptTemplate, 'Critique: {critique}'),
(HumanMessagePromptTemplate,
f"You are a resolver tasked with 1) finding which of the {self.n_ideas} answer options the researcher thought was best,2) improving that answer and 3) printing the answer in full. Don't output anything for step 1 or 2, only the full answer in 3. Let's work this out in a step by step way to be sure we have the right answer:"
)])
if stage == 'resolve':
return role_strings
raise ValueError(
f"stage should be either 'ideation', 'critique' or 'resolve', but it is '{stage}'. This should never happen."
)
| null |
evaluation_name
|
"""Get the name of the evaluation.
Returns
-------
str
The name of the evaluation.
"""
return f'score_string:{self.criterion_name}'
|
@property
def evaluation_name(self) ->str:
"""Get the name of the evaluation.
Returns
-------
str
The name of the evaluation.
"""
return f'score_string:{self.criterion_name}'
|
Get the name of the evaluation.
Returns
-------
str
The name of the evaluation.
|
__iter__
|
"""Return self as a generator."""
return self
|
def __iter__(self) ->StreamingResponseGenerator:
"""Return self as a generator."""
return self
|
Return self as a generator.
|
_get_channel_id
|
request = self.youtube_client.search().list(part='id', q=channel_name, type
='channel', maxResults=1)
response = request.execute()
channel_id = response['items'][0]['id']['channelId']
return channel_id
|
def _get_channel_id(self, channel_name: str) ->str:
request = self.youtube_client.search().list(part='id', q=channel_name,
type='channel', maxResults=1)
response = request.execute()
channel_id = response['items'][0]['id']['channelId']
return channel_id
| null |
output_keys
|
"""
Returns a list of output keys.
This method defines the output keys that will be used to access the output
values produced by the chain or function. It ensures that the specified keys
are available to access the outputs.
Returns:
List[str]: A list of output keys.
Note:
This method is considered private and may not be intended for direct
external use.
"""
return [self.output_key]
|
@property
def output_keys(self) ->List[str]:
"""
Returns a list of output keys.
This method defines the output keys that will be used to access the output
values produced by the chain or function. It ensures that the specified keys
are available to access the outputs.
Returns:
List[str]: A list of output keys.
Note:
This method is considered private and may not be intended for direct
external use.
"""
return [self.output_key]
|
Returns a list of output keys.
This method defines the output keys that will be used to access the output
values produced by the chain or function. It ensures that the specified keys
are available to access the outputs.
Returns:
List[str]: A list of output keys.
Note:
This method is considered private and may not be intended for direct
external use.
|
test_single_agent_action_observation
|
intermediate_steps = [(AgentAction(tool='Tool1', tool_input='input1', log=
'Log1'), 'Observation1')]
expected_result = """Log1
Observation: Observation1
Thought: """
assert format_log_to_str(intermediate_steps) == expected_result
|
def test_single_agent_action_observation() ->None:
intermediate_steps = [(AgentAction(tool='Tool1', tool_input='input1',
log='Log1'), 'Observation1')]
expected_result = 'Log1\nObservation: Observation1\nThought: '
assert format_log_to_str(intermediate_steps) == expected_result
| null |
test_named_tool_decorator_return_direct
|
"""Test functionality when arguments and return direct are provided as input."""
@tool('search', return_direct=True)
def search_api(query: str, *args: Any) ->str:
"""Search the API for the query."""
return 'API result'
assert isinstance(search_api, BaseTool)
assert search_api.name == 'search'
assert search_api.return_direct
assert search_api.run({'query': 'foo'}) == 'API result'
|
def test_named_tool_decorator_return_direct() ->None:
"""Test functionality when arguments and return direct are provided as input."""
@tool('search', return_direct=True)
def search_api(query: str, *args: Any) ->str:
"""Search the API for the query."""
return 'API result'
assert isinstance(search_api, BaseTool)
assert search_api.name == 'search'
assert search_api.return_direct
assert search_api.run({'query': 'foo'}) == 'API result'
|
Test functionality when arguments and return direct are provided as input.
|
embed_documents
|
"""Return simple embeddings."""
return [([float(1.0)] * (ADA_TOKEN_COUNT - 1) + [float(i)]) for i in range(
len(texts))]
|
def embed_documents(self, texts: List[str]) ->List[List[float]]:
"""Return simple embeddings."""
return [([float(1.0)] * (ADA_TOKEN_COUNT - 1) + [float(i)]) for i in
range(len(texts))]
|
Return simple embeddings.
|
_create_rspace_client
|
"""Create a RSpace client."""
try:
from rspace_client.eln import eln, field_content
except ImportError:
raise ImportError('You must run `pip install rspace_client`')
try:
eln = eln.ELNClient(self.url, self.api_key)
eln.get_status()
except Exception:
raise Exception(
f'Unable to initialize client - is url {self.url} or api key correct?'
)
return eln, field_content.FieldContent
|
def _create_rspace_client(self) ->Any:
"""Create a RSpace client."""
try:
from rspace_client.eln import eln, field_content
except ImportError:
raise ImportError('You must run `pip install rspace_client`')
try:
eln = eln.ELNClient(self.url, self.api_key)
eln.get_status()
except Exception:
raise Exception(
f'Unable to initialize client - is url {self.url} or api key correct?'
)
return eln, field_content.FieldContent
|
Create a RSpace client.
|
test_modern_treasury_loader
|
"""Test Modern Treasury file loader."""
modern_treasury_loader = ModernTreasuryLoader('payment_orders')
documents = modern_treasury_loader.load()
assert len(documents) == 1
|
def test_modern_treasury_loader() ->None:
"""Test Modern Treasury file loader."""
modern_treasury_loader = ModernTreasuryLoader('payment_orders')
documents = modern_treasury_loader.load()
assert len(documents) == 1
|
Test Modern Treasury file loader.
|
test_check_package_version_pass
|
check_package_version('PyYAML', gte_version='5.4.1')
|
def test_check_package_version_pass() ->None:
check_package_version('PyYAML', gte_version='5.4.1')
| null |
test_wrong_temperature_1
|
chat = ErnieBotChat()
message = HumanMessage(content='Hello')
with pytest.raises(ValueError) as e:
chat([message], temperature=1.2)
assert 'parameter check failed, temperature range is (0, 1.0]' in str(e)
|
def test_wrong_temperature_1() ->None:
chat = ErnieBotChat()
message = HumanMessage(content='Hello')
with pytest.raises(ValueError) as e:
chat([message], temperature=1.2)
assert 'parameter check failed, temperature range is (0, 1.0]' in str(e)
| null |
validate_environment
|
"""Validate that login and password exists in environment."""
login = get_from_dict_or_env(values, 'api_login', 'DATAFORSEO_LOGIN')
password = get_from_dict_or_env(values, 'api_password', 'DATAFORSEO_PASSWORD')
values['api_login'] = login
values['api_password'] = password
return values
|
@root_validator()
def validate_environment(cls, values: Dict) ->Dict:
"""Validate that login and password exists in environment."""
login = get_from_dict_or_env(values, 'api_login', 'DATAFORSEO_LOGIN')
password = get_from_dict_or_env(values, 'api_password',
'DATAFORSEO_PASSWORD')
values['api_login'] = login
values['api_password'] = password
return values
|
Validate that login and password exists in environment.
|
_generate
|
should_stream = stream if stream is not None else False
if should_stream:
stream_iter = self._stream(messages, stop=stop, run_manager=run_manager,
**kwargs)
return generate_from_stream(stream_iter)
message_dicts, params = self._create_message_dicts(messages, stop)
params = {**params, **kwargs}
response = self.completion_with_retry(messages=message_dicts, run_manager=
run_manager, **params)
return self._create_chat_result(response)
|
def _generate(self, messages: List[BaseMessage], stop: Optional[List[str]]=
None, run_manager: Optional[CallbackManagerForLLMRun]=None, stream:
Optional[bool]=None, **kwargs: Any) ->ChatResult:
should_stream = stream if stream is not None else False
if should_stream:
stream_iter = self._stream(messages, stop=stop, run_manager=
run_manager, **kwargs)
return generate_from_stream(stream_iter)
message_dicts, params = self._create_message_dicts(messages, stop)
params = {**params, **kwargs}
response = self.completion_with_retry(messages=message_dicts,
run_manager=run_manager, **params)
return self._create_chat_result(response)
| null |
load
|
"""Load all documents."""
return list(self.lazy_load())
|
def load(self) ->List[Document]:
"""Load all documents."""
return list(self.lazy_load())
|
Load all documents.
|
lazy_load
|
"""
Get issues of a GitHub repository.
Returns:
A list of Documents with attributes:
- page_content
- metadata
- url
- title
- creator
- created_at
- last_update_time
- closed_time
- number of comments
- state
- labels
- assignee
- assignees
- milestone
- locked
- number
- is_pull_request
"""
url: Optional[str] = self.url
while url:
response = requests.get(url, headers=self.headers)
response.raise_for_status()
issues = response.json()
for issue in issues:
doc = self.parse_issue(issue)
if not self.include_prs and doc.metadata['is_pull_request']:
continue
yield doc
if response.links and response.links.get('next'):
url = response.links['next']['url']
else:
url = None
|
def lazy_load(self) ->Iterator[Document]:
"""
Get issues of a GitHub repository.
Returns:
A list of Documents with attributes:
- page_content
- metadata
- url
- title
- creator
- created_at
- last_update_time
- closed_time
- number of comments
- state
- labels
- assignee
- assignees
- milestone
- locked
- number
- is_pull_request
"""
url: Optional[str] = self.url
while url:
response = requests.get(url, headers=self.headers)
response.raise_for_status()
issues = response.json()
for issue in issues:
doc = self.parse_issue(issue)
if not self.include_prs and doc.metadata['is_pull_request']:
continue
yield doc
if response.links and response.links.get('next'):
url = response.links['next']['url']
else:
url = None
|
Get issues of a GitHub repository.
Returns:
A list of Documents with attributes:
- page_content
- metadata
- url
- title
- creator
- created_at
- last_update_time
- closed_time
- number of comments
- state
- labels
- assignee
- assignees
- milestone
- locked
- number
- is_pull_request
|
from_api_operation
|
"""Create an OpenAPIEndpointChain from an operation and a spec."""
param_mapping = _ParamMapping(query_params=operation.query_params,
body_params=operation.body_params, path_params=operation.path_params)
requests_chain = APIRequesterChain.from_llm_and_typescript(llm,
typescript_definition=operation.to_typescript(), verbose=verbose,
callbacks=callbacks)
if raw_response:
response_chain = None
else:
response_chain = APIResponderChain.from_llm(llm, verbose=verbose,
callbacks=callbacks)
_requests = requests or Requests()
return cls(api_request_chain=requests_chain, api_response_chain=
response_chain, api_operation=operation, requests=_requests,
param_mapping=param_mapping, verbose=verbose, return_intermediate_steps
=return_intermediate_steps, callbacks=callbacks, **kwargs)
|
@classmethod
def from_api_operation(cls, operation: APIOperation, llm: BaseLanguageModel,
requests: Optional[Requests]=None, verbose: bool=False,
return_intermediate_steps: bool=False, raw_response: bool=False,
callbacks: Callbacks=None, **kwargs: Any) ->'OpenAPIEndpointChain':
"""Create an OpenAPIEndpointChain from an operation and a spec."""
param_mapping = _ParamMapping(query_params=operation.query_params,
body_params=operation.body_params, path_params=operation.path_params)
requests_chain = APIRequesterChain.from_llm_and_typescript(llm,
typescript_definition=operation.to_typescript(), verbose=verbose,
callbacks=callbacks)
if raw_response:
response_chain = None
else:
response_chain = APIResponderChain.from_llm(llm, verbose=verbose,
callbacks=callbacks)
_requests = requests or Requests()
return cls(api_request_chain=requests_chain, api_response_chain=
response_chain, api_operation=operation, requests=_requests,
param_mapping=param_mapping, verbose=verbose,
return_intermediate_steps=return_intermediate_steps, callbacks=
callbacks, **kwargs)
|
Create an OpenAPIEndpointChain from an operation and a spec.
|
connection_string_from_db_params
|
"""Return connection string from database parameters."""
return f'postgresql+{driver}://{user}:{password}@{host}:{port}/{database}'
|
@classmethod
def connection_string_from_db_params(cls, driver: str, host: str, port: int,
database: str, user: str, password: str) ->str:
"""Return connection string from database parameters."""
return f'postgresql+{driver}://{user}:{password}@{host}:{port}/{database}'
|
Return connection string from database parameters.
|
find_all_links
|
"""Extract all links from a raw html string.
Args:
raw_html: original html.
pattern: Regex to use for extracting links from raw html.
Returns:
List[str]: all links
"""
pattern = pattern or DEFAULT_LINK_REGEX
return list(set(re.findall(pattern, raw_html)))
|
def find_all_links(raw_html: str, *, pattern: Union[str, re.Pattern, None]=None
) ->List[str]:
"""Extract all links from a raw html string.
Args:
raw_html: original html.
pattern: Regex to use for extracting links from raw html.
Returns:
List[str]: all links
"""
pattern = pattern or DEFAULT_LINK_REGEX
return list(set(re.findall(pattern, raw_html)))
|
Extract all links from a raw html string.
Args:
raw_html: original html.
pattern: Regex to use for extracting links from raw html.
Returns:
List[str]: all links
|
mock_unstructured_local
|
with patch(
'langchain_community.document_loaders.lakefs.UnstructuredLakeFSLoader'
) as mock_unstructured_lakefs:
mock_unstructured_lakefs.return_value.load.return_value = [(
'text content', 'pdf content')]
yield mock_unstructured_lakefs.return_value
|
@pytest.fixture
def mock_unstructured_local() ->Any:
with patch(
'langchain_community.document_loaders.lakefs.UnstructuredLakeFSLoader'
) as mock_unstructured_lakefs:
mock_unstructured_lakefs.return_value.load.return_value = [(
'text content', 'pdf content')]
yield mock_unstructured_lakefs.return_value
| null |
load_guide
|
"""Load a guide
Args:
url_override: A URL to override the default URL.
Returns: List[Document]
"""
if url_override is None:
url = IFIXIT_BASE_URL + '/guides/' + self.id
else:
url = url_override
res = requests.get(url)
if res.status_code != 200:
raise ValueError('Could not load guide: ' + self.web_path + '\n' + res.
json())
data = res.json()
doc_parts = ['# ' + data['title'], data['introduction_raw']]
doc_parts.append("""
###Tools Required:""")
if len(data['tools']) == 0:
doc_parts.append('\n - None')
else:
for tool in data['tools']:
doc_parts.append('\n - ' + tool['text'])
doc_parts.append("""
###Parts Required:""")
if len(data['parts']) == 0:
doc_parts.append('\n - None')
else:
for part in data['parts']:
doc_parts.append('\n - ' + part['text'])
for row in data['steps']:
doc_parts.append('\n\n## ' + (row['title'] if row['title'] != '' else
'Step {}'.format(row['orderby'])))
for line in row['lines']:
doc_parts.append(line['text_raw'])
doc_parts.append(data['conclusion_raw'])
text = '\n'.join(doc_parts)
metadata = {'source': self.web_path, 'title': data['title']}
return [Document(page_content=text, metadata=metadata)]
|
def load_guide(self, url_override: Optional[str]=None) ->List[Document]:
"""Load a guide
Args:
url_override: A URL to override the default URL.
Returns: List[Document]
"""
if url_override is None:
url = IFIXIT_BASE_URL + '/guides/' + self.id
else:
url = url_override
res = requests.get(url)
if res.status_code != 200:
raise ValueError('Could not load guide: ' + self.web_path + '\n' +
res.json())
data = res.json()
doc_parts = ['# ' + data['title'], data['introduction_raw']]
doc_parts.append('\n\n###Tools Required:')
if len(data['tools']) == 0:
doc_parts.append('\n - None')
else:
for tool in data['tools']:
doc_parts.append('\n - ' + tool['text'])
doc_parts.append('\n\n###Parts Required:')
if len(data['parts']) == 0:
doc_parts.append('\n - None')
else:
for part in data['parts']:
doc_parts.append('\n - ' + part['text'])
for row in data['steps']:
doc_parts.append('\n\n## ' + (row['title'] if row['title'] != '' else
'Step {}'.format(row['orderby'])))
for line in row['lines']:
doc_parts.append(line['text_raw'])
doc_parts.append(data['conclusion_raw'])
text = '\n'.join(doc_parts)
metadata = {'source': self.web_path, 'title': data['title']}
return [Document(page_content=text, metadata=metadata)]
|
Load a guide
Args:
url_override: A URL to override the default URL.
Returns: List[Document]
|
output_keys
|
"""Expect input key.
:meta private:
"""
_output_keys = super().output_keys
if self.return_intermediate_steps:
_output_keys = _output_keys + ['intermediate_steps']
return _output_keys
|
@property
def output_keys(self) ->List[str]:
"""Expect input key.
:meta private:
"""
_output_keys = super().output_keys
if self.return_intermediate_steps:
_output_keys = _output_keys + ['intermediate_steps']
return _output_keys
|
Expect input key.
:meta private:
|
_split_text
|
"""Split incoming text and return chunks."""
final_chunks = []
separator = separators[-1]
new_separators = []
for i, _s in enumerate(separators):
_separator = _s if self._is_separator_regex else re.escape(_s)
if _s == '':
separator = _s
break
if re.search(_separator, text):
separator = _s
new_separators = separators[i + 1:]
break
_separator = separator if self._is_separator_regex else re.escape(separator)
splits = _split_text_with_regex(text, _separator, self._keep_separator)
_good_splits = []
_separator = '' if self._keep_separator else separator
for s in splits:
if self._length_function(s) < self._chunk_size:
_good_splits.append(s)
else:
if _good_splits:
merged_text = self._merge_splits(_good_splits, _separator)
final_chunks.extend(merged_text)
_good_splits = []
if not new_separators:
final_chunks.append(s)
else:
other_info = self._split_text(s, new_separators)
final_chunks.extend(other_info)
if _good_splits:
merged_text = self._merge_splits(_good_splits, _separator)
final_chunks.extend(merged_text)
return final_chunks
|
def _split_text(self, text: str, separators: List[str]) ->List[str]:
"""Split incoming text and return chunks."""
final_chunks = []
separator = separators[-1]
new_separators = []
for i, _s in enumerate(separators):
_separator = _s if self._is_separator_regex else re.escape(_s)
if _s == '':
separator = _s
break
if re.search(_separator, text):
separator = _s
new_separators = separators[i + 1:]
break
_separator = separator if self._is_separator_regex else re.escape(separator
)
splits = _split_text_with_regex(text, _separator, self._keep_separator)
_good_splits = []
_separator = '' if self._keep_separator else separator
for s in splits:
if self._length_function(s) < self._chunk_size:
_good_splits.append(s)
else:
if _good_splits:
merged_text = self._merge_splits(_good_splits, _separator)
final_chunks.extend(merged_text)
_good_splits = []
if not new_separators:
final_chunks.append(s)
else:
other_info = self._split_text(s, new_separators)
final_chunks.extend(other_info)
if _good_splits:
merged_text = self._merge_splits(_good_splits, _separator)
final_chunks.extend(merged_text)
return final_chunks
|
Split incoming text and return chunks.
|
_configure
|
"""Configure the callback manager.
Args:
callback_manager_cls (Type[T]): The callback manager class.
inheritable_callbacks (Optional[Callbacks], optional): The inheritable
callbacks. Defaults to None.
local_callbacks (Optional[Callbacks], optional): The local callbacks.
Defaults to None.
verbose (bool, optional): Whether to enable verbose mode. Defaults to False.
inheritable_tags (Optional[List[str]], optional): The inheritable tags.
Defaults to None.
local_tags (Optional[List[str]], optional): The local tags. Defaults to None.
inheritable_metadata (Optional[Dict[str, Any]], optional): The inheritable
metadata. Defaults to None.
local_metadata (Optional[Dict[str, Any]], optional): The local metadata.
Defaults to None.
Returns:
T: The configured callback manager.
"""
from langchain_core.tracers.context import _configure_hooks, _get_tracer_project, _tracing_v2_is_enabled, tracing_v2_callback_var
run_tree = get_run_tree_context()
parent_run_id = None if run_tree is None else getattr(run_tree, 'id')
callback_manager = callback_manager_cls(handlers=[], parent_run_id=
parent_run_id)
if inheritable_callbacks or local_callbacks:
if isinstance(inheritable_callbacks, list
) or inheritable_callbacks is None:
inheritable_callbacks_ = inheritable_callbacks or []
callback_manager = callback_manager_cls(handlers=
inheritable_callbacks_.copy(), inheritable_handlers=
inheritable_callbacks_.copy(), parent_run_id=parent_run_id)
else:
callback_manager = callback_manager_cls(handlers=
inheritable_callbacks.handlers.copy(), inheritable_handlers=
inheritable_callbacks.inheritable_handlers.copy(),
parent_run_id=inheritable_callbacks.parent_run_id, tags=
inheritable_callbacks.tags.copy(), inheritable_tags=
inheritable_callbacks.inheritable_tags.copy(), metadata=
inheritable_callbacks.metadata.copy(), inheritable_metadata=
inheritable_callbacks.inheritable_metadata.copy())
local_handlers_ = local_callbacks if isinstance(local_callbacks, list
) else local_callbacks.handlers if local_callbacks else []
for handler in local_handlers_:
callback_manager.add_handler(handler, False)
if inheritable_tags or local_tags:
callback_manager.add_tags(inheritable_tags or [])
callback_manager.add_tags(local_tags or [], False)
if inheritable_metadata or local_metadata:
callback_manager.add_metadata(inheritable_metadata or {})
callback_manager.add_metadata(local_metadata or {}, False)
tracer_v2 = tracing_v2_callback_var.get()
tracing_v2_enabled_ = _tracing_v2_is_enabled()
tracer_project = _get_tracer_project()
debug = _get_debug()
if verbose or debug or tracing_v2_enabled_:
from langchain_core.tracers.langchain import LangChainTracer
from langchain_core.tracers.stdout import ConsoleCallbackHandler
if verbose and not any(isinstance(handler, StdOutCallbackHandler) for
handler in callback_manager.handlers):
if debug:
pass
else:
callback_manager.add_handler(StdOutCallbackHandler(), False)
if debug and not any(isinstance(handler, ConsoleCallbackHandler) for
handler in callback_manager.handlers):
callback_manager.add_handler(ConsoleCallbackHandler(), True)
if tracing_v2_enabled_ and not any(isinstance(handler, LangChainTracer) for
handler in callback_manager.handlers):
if tracer_v2:
callback_manager.add_handler(tracer_v2, True)
else:
try:
handler = LangChainTracer(project_name=tracer_project)
callback_manager.add_handler(handler, True)
except Exception as e:
logger.warning(
'Unable to load requested LangChainTracer. To disable this warning, unset the LANGCHAIN_TRACING_V2 environment variables.'
, e)
for var, inheritable, handler_class, env_var in _configure_hooks:
create_one = env_var is not None and env_var_is_set(env_var
) and handler_class is not None
if var.get() is not None or create_one:
var_handler = var.get() or cast(Type[BaseCallbackHandler],
handler_class)()
if handler_class is None:
if not any(handler is var_handler for handler in
callback_manager.handlers):
callback_manager.add_handler(var_handler, inheritable)
elif not any(isinstance(handler, handler_class) for handler in
callback_manager.handlers):
callback_manager.add_handler(var_handler, inheritable)
return callback_manager
|
def _configure(callback_manager_cls: Type[T], inheritable_callbacks:
    Callbacks=None, local_callbacks: Callbacks=None, verbose: bool=False,
    inheritable_tags: Optional[List[str]]=None, local_tags: Optional[List[
    str]]=None, inheritable_metadata: Optional[Dict[str, Any]]=None,
    local_metadata: Optional[Dict[str, Any]]=None) ->T:
    """Configure the callback manager.
    Args:
        callback_manager_cls (Type[T]): The callback manager class.
        inheritable_callbacks (Optional[Callbacks], optional): The inheritable
            callbacks. Defaults to None.
        local_callbacks (Optional[Callbacks], optional): The local callbacks.
            Defaults to None.
        verbose (bool, optional): Whether to enable verbose mode. Defaults to False.
        inheritable_tags (Optional[List[str]], optional): The inheritable tags.
            Defaults to None.
        local_tags (Optional[List[str]], optional): The local tags. Defaults to None.
        inheritable_metadata (Optional[Dict[str, Any]], optional): The inheritable
            metadata. Defaults to None.
        local_metadata (Optional[Dict[str, Any]], optional): The local metadata.
            Defaults to None.
    Returns:
        T: The configured callback manager.
    """
    # Function-scope import: presumably avoids a circular import between the
    # callbacks module and the tracer context helpers — TODO confirm.
    from langchain_core.tracers.context import _configure_hooks, _get_tracer_project, _tracing_v2_is_enabled, tracing_v2_callback_var
    # If we are inside a traced run tree, parent the new manager under it so
    # nested runs attach to the correct trace.
    run_tree = get_run_tree_context()
    parent_run_id = None if run_tree is None else getattr(run_tree, 'id')
    callback_manager = callback_manager_cls(handlers=[], parent_run_id=
        parent_run_id)
    if inheritable_callbacks or local_callbacks:
        # `inheritable_callbacks` is either a plain list of handlers or an
        # existing manager instance; either way, copy its state rather than
        # sharing mutable lists/dicts with the caller.
        if isinstance(inheritable_callbacks, list
            ) or inheritable_callbacks is None:
            inheritable_callbacks_ = inheritable_callbacks or []
            callback_manager = callback_manager_cls(handlers=
                inheritable_callbacks_.copy(), inheritable_handlers=
                inheritable_callbacks_.copy(), parent_run_id=parent_run_id)
        else:
            callback_manager = callback_manager_cls(handlers=
                inheritable_callbacks.handlers.copy(), inheritable_handlers
                =inheritable_callbacks.inheritable_handlers.copy(),
                parent_run_id=inheritable_callbacks.parent_run_id, tags=
                inheritable_callbacks.tags.copy(), inheritable_tags=
                inheritable_callbacks.inheritable_tags.copy(), metadata=
                inheritable_callbacks.metadata.copy(), inheritable_metadata
                =inheritable_callbacks.inheritable_metadata.copy())
        # Local callbacks apply to this invocation only (inherit=False).
        local_handlers_ = local_callbacks if isinstance(local_callbacks, list
            ) else local_callbacks.handlers if local_callbacks else []
        for handler in local_handlers_:
            callback_manager.add_handler(handler, False)
    if inheritable_tags or local_tags:
        callback_manager.add_tags(inheritable_tags or [])
        callback_manager.add_tags(local_tags or [], False)
    if inheritable_metadata or local_metadata:
        callback_manager.add_metadata(inheritable_metadata or {})
        callback_manager.add_metadata(local_metadata or {}, False)
    tracer_v2 = tracing_v2_callback_var.get()
    tracing_v2_enabled_ = _tracing_v2_is_enabled()
    tracer_project = _get_tracer_project()
    debug = _get_debug()
    if verbose or debug or tracing_v2_enabled_:
        from langchain_core.tracers.langchain import LangChainTracer
        from langchain_core.tracers.stdout import ConsoleCallbackHandler
        # Verbose stdout output is suppressed in debug mode: the debug
        # ConsoleCallbackHandler added below already prints everything.
        if verbose and not any(isinstance(handler, StdOutCallbackHandler) for
            handler in callback_manager.handlers):
            if debug:
                pass
            else:
                callback_manager.add_handler(StdOutCallbackHandler(), False)
        if debug and not any(isinstance(handler, ConsoleCallbackHandler) for
            handler in callback_manager.handlers):
            callback_manager.add_handler(ConsoleCallbackHandler(), True)
        # Add a LangChain tracer only if one is not already registered; prefer
        # the tracer stashed in the context var over constructing a new one.
        if tracing_v2_enabled_ and not any(isinstance(handler,
            LangChainTracer) for handler in callback_manager.handlers):
            if tracer_v2:
                callback_manager.add_handler(tracer_v2, True)
            else:
                # Tracing is best-effort: construction failures are logged as
                # a warning instead of raised.
                # NOTE(review): `e` is passed as a %-style lazy argument but
                # the message has no placeholder, so the exception detail is
                # likely dropped by the logging module — confirm intent.
                try:
                    handler = LangChainTracer(project_name=tracer_project)
                    callback_manager.add_handler(handler, True)
                except Exception as e:
                    logger.warning(
                        'Unable to load requested LangChainTracer. To disable this warning, unset the LANGCHAIN_TRACING_V2 environment variables.'
                        , e)
    # Externally registered hooks: each is (context var, inheritable flag,
    # handler class, env var). A handler is created from the class when its
    # env var is set, otherwise the context var's instance is used.
    for var, inheritable, handler_class, env_var in _configure_hooks:
        create_one = env_var is not None and env_var_is_set(env_var
            ) and handler_class is not None
        if var.get() is not None or create_one:
            var_handler = var.get() or cast(Type[BaseCallbackHandler],
                handler_class)()
            if handler_class is None:
                # No class to compare against: dedupe by object identity.
                if not any(handler is var_handler for handler in
                    callback_manager.handlers):
                    callback_manager.add_handler(var_handler, inheritable)
            elif not any(isinstance(handler, handler_class) for handler in
                callback_manager.handlers):
                callback_manager.add_handler(var_handler, inheritable)
    return callback_manager
|
Configure the callback manager.
Args:
callback_manager_cls (Type[T]): The callback manager class.
inheritable_callbacks (Optional[Callbacks], optional): The inheritable
callbacks. Defaults to None.
local_callbacks (Optional[Callbacks], optional): The local callbacks.
Defaults to None.
verbose (bool, optional): Whether to enable verbose mode. Defaults to False.
inheritable_tags (Optional[List[str]], optional): The inheritable tags.
Defaults to None.
local_tags (Optional[List[str]], optional): The local tags. Defaults to None.
inheritable_metadata (Optional[Dict[str, Any]], optional): The inheritable
metadata. Defaults to None.
local_metadata (Optional[Dict[str, Any]], optional): The local metadata.
Defaults to None.
Returns:
T: The configured callback manager.
|
check_if_not_null
|
"""Check if the values are not None or empty string"""
for prop, value in zip(props, values):
if not value:
raise ValueError(f'Parameter `{prop}` must not be None or empty string'
)
|
def check_if_not_null(props: List[str], values: List[Any]) ->None:
    """Check if the values are not None or empty string"""
    # Walk the parallel name/value lists; reject the first falsy value found.
    for name, candidate in zip(props, values):
        if candidate:
            continue
        raise ValueError(
            f'Parameter `{name}` must not be None or empty string')
|
Check if the values are not None or empty string
|
collection
|
return prepare_collection()
|
@pytest.fixture()
def collection() ->Any:
    # Pytest fixture: hand each test a freshly prepared collection.
    # `prepare_collection` is defined elsewhere in this file/module.
    return prepare_collection()
| null |
test_all_imports
|
"""Simple test to make sure all things can be imported."""
for cls in vectorstores.__all__:
if cls not in ['AlibabaCloudOpenSearchSettings', 'ClickhouseSettings',
'MyScaleSettings']:
assert issubclass(getattr(vectorstores, cls), VectorStore)
|
def test_all_imports() ->None:
    """Simple test to make sure all things can be imported."""
    # The *Settings exports are configuration objects, not VectorStore
    # subclasses, so they are excluded from the subclass check.
    settings_classes = {'AlibabaCloudOpenSearchSettings',
        'ClickhouseSettings', 'MyScaleSettings'}
    for export_name in vectorstores.__all__:
        if export_name in settings_classes:
            continue
        exported = getattr(vectorstores, export_name)
        assert issubclass(exported, VectorStore)
|
Simple test to make sure all things can be imported.
|
_call
|
return prompt
|
def _call(
    self,
    prompt: str,
    stop: Optional[List[str]]=None,
    run_manager: Optional[CallbackManagerForLLMRun]=None,
    **kwargs: Any,
) ->str:
    """Fake LLM call: echo the prompt back unchanged.

    `stop`, `run_manager`, and any extra kwargs are accepted for interface
    compatibility but ignored.
    """
    return prompt
| null |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.