method_name
stringlengths 1
78
| method_body
stringlengths 3
9.66k
| full_code
stringlengths 31
10.7k
| docstring
stringlengths 4
4.74k
⌀ |
|---|---|---|---|
_euclidean_distance
|
"""Compute the Euclidean distance between two vectors.
Args:
a (np.ndarray): The first vector.
b (np.ndarray): The second vector.
Returns:
np.floating: The Euclidean distance.
"""
return np.linalg.norm(a - b)
|
@staticmethod
def _euclidean_distance(a: np.ndarray, b: np.ndarray) ->np.floating:
"""Compute the Euclidean distance between two vectors.
Args:
a (np.ndarray): The first vector.
b (np.ndarray): The second vector.
Returns:
np.floating: The Euclidean distance.
"""
return np.linalg.norm(a - b)
|
Compute the Euclidean distance between two vectors.
Args:
a (np.ndarray): The first vector.
b (np.ndarray): The second vector.
Returns:
np.floating: The Euclidean distance.
|
visit_comparison
|
"""Translate a Comparison."""
|
@abstractmethod
def visit_comparison(self, comparison: Comparison) ->Any:
"""Translate a Comparison."""
|
Translate a Comparison.
|
test_exception_handling_non_tool_exception
|
_tool = _FakeExceptionTool(exception=ValueError())
with pytest.raises(ValueError):
_tool.run({})
|
def test_exception_handling_non_tool_exception() ->None:
_tool = _FakeExceptionTool(exception=ValueError())
with pytest.raises(ValueError):
_tool.run({})
| null |
__add__
|
"""Combine two prompt templates.
Args:
other: Another prompt template.
Returns:
Combined prompt template.
"""
prompt = ChatPromptTemplate(messages=[self])
return prompt + other
|
def __add__(self, other: Any) ->ChatPromptTemplate:
"""Combine two prompt templates.
Args:
other: Another prompt template.
Returns:
Combined prompt template.
"""
prompt = ChatPromptTemplate(messages=[self])
return prompt + other
|
Combine two prompt templates.
Args:
other: Another prompt template.
Returns:
Combined prompt template.
|
test_public_api
|
"""Hard-code public API to help determine if we have broken it."""
assert sorted(__all__) == ['Blob', 'BlobLoader', 'FileSystemBlobLoader',
'YoutubeAudioLoader']
|
def test_public_api() ->None:
"""Hard-code public API to help determine if we have broken it."""
assert sorted(__all__) == ['Blob', 'BlobLoader', 'FileSystemBlobLoader',
'YoutubeAudioLoader']
|
Hard-code public API to help determine if we have broken it.
|
_llm_type
|
"""Returns the type of LLM."""
return 'human-input-chat-model'
|
@property
def _llm_type(self) ->str:
"""Returns the type of LLM."""
return 'human-input-chat-model'
|
Returns the type of LLM.
|
lazy_load
|
"""A lazy loader for document content."""
for row in self.rdd_df.toLocalIterator():
metadata = {self.column_names[i]: row[i] for i in range(len(row))}
text = metadata[self.page_content_column]
metadata.pop(self.page_content_column)
yield Document(page_content=text, metadata=metadata)
|
def lazy_load(self) ->Iterator[Document]:
"""A lazy loader for document content."""
for row in self.rdd_df.toLocalIterator():
metadata = {self.column_names[i]: row[i] for i in range(len(row))}
text = metadata[self.page_content_column]
metadata.pop(self.page_content_column)
yield Document(page_content=text, metadata=metadata)
|
A lazy loader for document content.
|
input_iter
|
for token in STREAMED_TOKENS:
yield token
|
def input_iter(_: Any) ->Iterator[str]:
for token in STREAMED_TOKENS:
yield token
| null |
__getattr__
|
if name == 'AnalyticDB':
return _import_analyticdb()
elif name == 'AlibabaCloudOpenSearch':
return _import_alibaba_cloud_open_search()
elif name == 'AlibabaCloudOpenSearchSettings':
return _import_alibaba_cloud_open_search_settings()
elif name == 'AzureCosmosDBVectorSearch':
return _import_azure_cosmos_db()
elif name == 'ElasticKnnSearch':
return _import_elastic_knn_search()
elif name == 'ElasticVectorSearch':
return _import_elastic_vector_search()
elif name == 'Annoy':
return _import_annoy()
elif name == 'AtlasDB':
return _import_atlas()
elif name == 'AwaDB':
return _import_awadb()
elif name == 'AzureSearch':
return _import_azuresearch()
elif name == 'Bagel':
return _import_bageldb()
elif name == 'BESVectorStore':
return _import_baiducloud_vector_search()
elif name == 'Cassandra':
return _import_cassandra()
elif name == 'AstraDB':
return _import_astradb()
elif name == 'Chroma':
return _import_chroma()
elif name == 'Clarifai':
return _import_clarifai()
elif name == 'ClickhouseSettings':
return _import_clickhouse_settings()
elif name == 'Clickhouse':
return _import_clickhouse()
elif name == 'DashVector':
return _import_dashvector()
elif name == 'DatabricksVectorSearch':
return _import_databricks_vector_search()
elif name == 'DeepLake':
return _import_deeplake()
elif name == 'Dingo':
return _import_dingo()
elif name == 'DocArrayInMemorySearch':
return _import_docarray_inmemory()
elif name == 'DocArrayHnswSearch':
return _import_docarray_hnsw()
elif name == 'ElasticsearchStore':
return _import_elasticsearch()
elif name == 'Epsilla':
return _import_epsilla()
elif name == 'FAISS':
return _import_faiss()
elif name == 'Hologres':
return _import_hologres()
elif name == 'LanceDB':
return _import_lancedb()
elif name == 'LLMRails':
return _import_llm_rails()
elif name == 'Marqo':
return _import_marqo()
elif name == 'MatchingEngine':
return _import_matching_engine()
elif name == 'Meilisearch':
return _import_meilisearch()
elif name == 'Milvus':
return _import_milvus()
elif name == 'MomentoVectorIndex':
return _import_momento_vector_index()
elif name == 'MongoDBAtlasVectorSearch':
return _import_mongodb_atlas()
elif name == 'MyScaleSettings':
return _import_myscale_settings()
elif name == 'MyScale':
return _import_myscale()
elif name == 'Neo4jVector':
return _import_neo4j_vector()
elif name == 'OpenSearchVectorSearch':
return _import_opensearch_vector_search()
elif name == 'PGEmbedding':
return _import_pgembedding()
elif name == 'PGVector':
return _import_pgvector()
elif name == 'Pinecone':
return _import_pinecone()
elif name == 'Qdrant':
return _import_qdrant()
elif name == 'Redis':
return _import_redis()
elif name == 'Rockset':
return _import_rocksetdb()
elif name == 'ScaNN':
return _import_scann()
elif name == 'SemaDB':
return _import_semadb()
elif name == 'SingleStoreDB':
return _import_singlestoredb()
elif name == 'SKLearnVectorStore':
return _import_sklearn()
elif name == 'SQLiteVSS':
return _import_sqlitevss()
elif name == 'StarRocks':
return _import_starrocks()
elif name == 'SupabaseVectorStore':
return _import_supabase()
elif name == 'Tair':
return _import_tair()
elif name == 'TencentVectorDB':
return _import_tencentvectordb()
elif name == 'TileDB':
return _import_tiledb()
elif name == 'Tigris':
return _import_tigris()
elif name == 'TimescaleVector':
return _import_timescalevector()
elif name == 'Typesense':
return _import_typesense()
elif name == 'USearch':
return _import_usearch()
elif name == 'Vald':
return _import_vald()
elif name == 'Vearch':
return _import_vearch()
elif name == 'Vectara':
return _import_vectara()
elif name == 'Weaviate':
return _import_weaviate()
elif name == 'Yellowbrick':
return _import_yellowbrick()
elif name == 'ZepVectorStore':
return _import_zep()
elif name == 'Zilliz':
return _import_zilliz()
elif name == 'VespaStore':
return _import_vespa()
else:
raise AttributeError(f'Could not find: {name}')
|
def __getattr__(name: str) ->Any:
if name == 'AnalyticDB':
return _import_analyticdb()
elif name == 'AlibabaCloudOpenSearch':
return _import_alibaba_cloud_open_search()
elif name == 'AlibabaCloudOpenSearchSettings':
return _import_alibaba_cloud_open_search_settings()
elif name == 'AzureCosmosDBVectorSearch':
return _import_azure_cosmos_db()
elif name == 'ElasticKnnSearch':
return _import_elastic_knn_search()
elif name == 'ElasticVectorSearch':
return _import_elastic_vector_search()
elif name == 'Annoy':
return _import_annoy()
elif name == 'AtlasDB':
return _import_atlas()
elif name == 'AwaDB':
return _import_awadb()
elif name == 'AzureSearch':
return _import_azuresearch()
elif name == 'Bagel':
return _import_bageldb()
elif name == 'BESVectorStore':
return _import_baiducloud_vector_search()
elif name == 'Cassandra':
return _import_cassandra()
elif name == 'AstraDB':
return _import_astradb()
elif name == 'Chroma':
return _import_chroma()
elif name == 'Clarifai':
return _import_clarifai()
elif name == 'ClickhouseSettings':
return _import_clickhouse_settings()
elif name == 'Clickhouse':
return _import_clickhouse()
elif name == 'DashVector':
return _import_dashvector()
elif name == 'DatabricksVectorSearch':
return _import_databricks_vector_search()
elif name == 'DeepLake':
return _import_deeplake()
elif name == 'Dingo':
return _import_dingo()
elif name == 'DocArrayInMemorySearch':
return _import_docarray_inmemory()
elif name == 'DocArrayHnswSearch':
return _import_docarray_hnsw()
elif name == 'ElasticsearchStore':
return _import_elasticsearch()
elif name == 'Epsilla':
return _import_epsilla()
elif name == 'FAISS':
return _import_faiss()
elif name == 'Hologres':
return _import_hologres()
elif name == 'LanceDB':
return _import_lancedb()
elif name == 'LLMRails':
return _import_llm_rails()
elif name == 'Marqo':
return _import_marqo()
elif name == 'MatchingEngine':
return _import_matching_engine()
elif name == 'Meilisearch':
return _import_meilisearch()
elif name == 'Milvus':
return _import_milvus()
elif name == 'MomentoVectorIndex':
return _import_momento_vector_index()
elif name == 'MongoDBAtlasVectorSearch':
return _import_mongodb_atlas()
elif name == 'MyScaleSettings':
return _import_myscale_settings()
elif name == 'MyScale':
return _import_myscale()
elif name == 'Neo4jVector':
return _import_neo4j_vector()
elif name == 'OpenSearchVectorSearch':
return _import_opensearch_vector_search()
elif name == 'PGEmbedding':
return _import_pgembedding()
elif name == 'PGVector':
return _import_pgvector()
elif name == 'Pinecone':
return _import_pinecone()
elif name == 'Qdrant':
return _import_qdrant()
elif name == 'Redis':
return _import_redis()
elif name == 'Rockset':
return _import_rocksetdb()
elif name == 'ScaNN':
return _import_scann()
elif name == 'SemaDB':
return _import_semadb()
elif name == 'SingleStoreDB':
return _import_singlestoredb()
elif name == 'SKLearnVectorStore':
return _import_sklearn()
elif name == 'SQLiteVSS':
return _import_sqlitevss()
elif name == 'StarRocks':
return _import_starrocks()
elif name == 'SupabaseVectorStore':
return _import_supabase()
elif name == 'Tair':
return _import_tair()
elif name == 'TencentVectorDB':
return _import_tencentvectordb()
elif name == 'TileDB':
return _import_tiledb()
elif name == 'Tigris':
return _import_tigris()
elif name == 'TimescaleVector':
return _import_timescalevector()
elif name == 'Typesense':
return _import_typesense()
elif name == 'USearch':
return _import_usearch()
elif name == 'Vald':
return _import_vald()
elif name == 'Vearch':
return _import_vearch()
elif name == 'Vectara':
return _import_vectara()
elif name == 'Weaviate':
return _import_weaviate()
elif name == 'Yellowbrick':
return _import_yellowbrick()
elif name == 'ZepVectorStore':
return _import_zep()
elif name == 'Zilliz':
return _import_zilliz()
elif name == 'VespaStore':
return _import_vespa()
else:
raise AttributeError(f'Could not find: {name}')
| null |
__getattr__
|
"""Get attr name."""
if name == 'create_python_agent':
HERE = Path(__file__).parents[3]
here = as_import_path(Path(__file__).parent, relative_to=HERE)
old_path = 'langchain.' + here + '.' + name
new_path = 'langchain_experimental.' + here + '.' + name
raise ImportError(
f"""This agent has been moved to langchain experiment. This agent relies on python REPL tool under the hood, so to use it safely please sandbox the python REPL. Read https://github.com/langchain-ai/langchain/blob/master/SECURITY.md and https://github.com/langchain-ai/langchain/discussions/11680To keep using this code as is, install langchain experimental and update your import statement from:
`{old_path}` to `{new_path}`."""
)
raise AttributeError(f'{name} does not exist')
|
def __getattr__(name: str) ->Any:
"""Get attr name."""
if name == 'create_python_agent':
HERE = Path(__file__).parents[3]
here = as_import_path(Path(__file__).parent, relative_to=HERE)
old_path = 'langchain.' + here + '.' + name
new_path = 'langchain_experimental.' + here + '.' + name
raise ImportError(
f"""This agent has been moved to langchain experiment. This agent relies on python REPL tool under the hood, so to use it safely please sandbox the python REPL. Read https://github.com/langchain-ai/langchain/blob/master/SECURITY.md and https://github.com/langchain-ai/langchain/discussions/11680To keep using this code as is, install langchain experimental and update your import statement from:
`{old_path}` to `{new_path}`."""
)
raise AttributeError(f'{name} does not exist')
|
Get attr name.
|
test_simple_question
|
"""Test simple question that should not need python."""
question = 'What is 1 plus 1?'
output = fake_llm_symbolic_math_chain.run(question)
assert output == 'Answer: 2'
|
def test_simple_question(fake_llm_symbolic_math_chain: LLMSymbolicMathChain
) ->None:
"""Test simple question that should not need python."""
question = 'What is 1 plus 1?'
output = fake_llm_symbolic_math_chain.run(question)
assert output == 'Answer: 2'
|
Test simple question that should not need python.
|
from_texts
|
"""Create an DocArrayInMemorySearch store and insert data.
Args:
texts (List[str]): Text data.
embedding (Embeddings): Embedding function.
metadatas (Optional[List[Dict[Any, Any]]]): Metadata for each text
if it exists. Defaults to None.
metric (str): metric for exact nearest-neighbor search.
Can be one of: "cosine_sim", "euclidean_dist" and "sqeuclidean_dist".
Defaults to "cosine_sim".
Returns:
DocArrayInMemorySearch Vector Store
"""
store = cls.from_params(embedding, **kwargs)
store.add_texts(texts=texts, metadatas=metadatas)
return store
|
@classmethod
def from_texts(cls, texts: List[str], embedding: Embeddings, metadatas:
Optional[List[Dict[Any, Any]]]=None, **kwargs: Any
) ->DocArrayInMemorySearch:
"""Create an DocArrayInMemorySearch store and insert data.
Args:
texts (List[str]): Text data.
embedding (Embeddings): Embedding function.
metadatas (Optional[List[Dict[Any, Any]]]): Metadata for each text
if it exists. Defaults to None.
metric (str): metric for exact nearest-neighbor search.
Can be one of: "cosine_sim", "euclidean_dist" and "sqeuclidean_dist".
Defaults to "cosine_sim".
Returns:
DocArrayInMemorySearch Vector Store
"""
store = cls.from_params(embedding, **kwargs)
store.add_texts(texts=texts, metadatas=metadatas)
return store
|
Create an DocArrayInMemorySearch store and insert data.
Args:
texts (List[str]): Text data.
embedding (Embeddings): Embedding function.
metadatas (Optional[List[Dict[Any, Any]]]): Metadata for each text
if it exists. Defaults to None.
metric (str): metric for exact nearest-neighbor search.
Can be one of: "cosine_sim", "euclidean_dist" and "sqeuclidean_dist".
Defaults to "cosine_sim".
Returns:
DocArrayInMemorySearch Vector Store
|
messages
|
"""Retrieve all session messages from DB"""
message_blobs = [doc['body_blob'] for doc in sorted(self.collection.
paginated_find(filter={'session_id': self.session_id}, projection={
'timestamp': 1, 'body_blob': 1}), key=lambda _doc: _doc['timestamp'])]
items = [json.loads(message_blob) for message_blob in message_blobs]
messages = messages_from_dict(items)
return messages
|
@property
def messages(self) ->List[BaseMessage]:
"""Retrieve all session messages from DB"""
message_blobs = [doc['body_blob'] for doc in sorted(self.collection.
paginated_find(filter={'session_id': self.session_id}, projection={
'timestamp': 1, 'body_blob': 1}), key=lambda _doc: _doc['timestamp'])]
items = [json.loads(message_blob) for message_blob in message_blobs]
messages = messages_from_dict(items)
return messages
|
Retrieve all session messages from DB
|
test_confirm_full_coverage
|
self.assertEqual(list(AgentType), list(AGENT_TO_CLASS.keys()))
|
def test_confirm_full_coverage(self) ->None:
self.assertEqual(list(AgentType), list(AGENT_TO_CLASS.keys()))
| null |
similarity_search_by_vector
|
"""Perform a similarity search against the query string."""
res = self.similarity_search_with_score_by_vector(embedding=embedding, k=k,
param=param, expr=expr, timeout=timeout, **kwargs)
return [doc for doc, _ in res]
|
def similarity_search_by_vector(self, embedding: List[float], k: int=4,
param: Optional[dict]=None, expr: Optional[str]=None, timeout: Optional
[int]=None, **kwargs: Any) ->List[Document]:
"""Perform a similarity search against the query string."""
res = self.similarity_search_with_score_by_vector(embedding=embedding,
k=k, param=param, expr=expr, timeout=timeout, **kwargs)
return [doc for doc, _ in res]
|
Perform a similarity search against the query string.
|
test_milvus_with_score
|
"""Test end to end construction and search with scores and IDs."""
texts = ['foo', 'bar', 'baz']
metadatas = [{'page': i} for i in range(len(texts))]
docsearch = _milvus_from_texts(metadatas=metadatas)
output = docsearch.similarity_search_with_score('foo', k=3)
docs = [o[0] for o in output]
scores = [o[1] for o in output]
assert docs == [Document(page_content='foo', metadata={'page': 0}),
Document(page_content='bar', metadata={'page': 1}), Document(
page_content='baz', metadata={'page': 2})]
assert scores[0] < scores[1] < scores[2]
|
def test_milvus_with_score() ->None:
"""Test end to end construction and search with scores and IDs."""
texts = ['foo', 'bar', 'baz']
metadatas = [{'page': i} for i in range(len(texts))]
docsearch = _milvus_from_texts(metadatas=metadatas)
output = docsearch.similarity_search_with_score('foo', k=3)
docs = [o[0] for o in output]
scores = [o[1] for o in output]
assert docs == [Document(page_content='foo', metadata={'page': 0}),
Document(page_content='bar', metadata={'page': 1}), Document(
page_content='baz', metadata={'page': 2})]
assert scores[0] < scores[1] < scores[2]
|
Test end to end construction and search with scores and IDs.
|
test_all_imports
|
assert set(__all__) == set(EXPECTED_ALL)
|
def test_all_imports() ->None:
assert set(__all__) == set(EXPECTED_ALL)
| null |
__ne__
|
"""Create a RedisTag inequality filter expression.
Args:
other (Union[List[str], Set[str], Tuple[str], str]):
The tag(s) to filter on.
Example:
>>> from langchain_community.vectorstores.redis import RedisTag
>>> filter = RedisTag("brand") != "nike"
"""
self._set_tag_value(other, RedisFilterOperator.NE)
return RedisFilterExpression(str(self))
|
@check_operator_misuse
def __ne__(self, other: Union[List[str], Set[str], Tuple[str], str]
) ->'RedisFilterExpression':
"""Create a RedisTag inequality filter expression.
Args:
other (Union[List[str], Set[str], Tuple[str], str]):
The tag(s) to filter on.
Example:
>>> from langchain_community.vectorstores.redis import RedisTag
>>> filter = RedisTag("brand") != "nike"
"""
self._set_tag_value(other, RedisFilterOperator.NE)
return RedisFilterExpression(str(self))
|
Create a RedisTag inequality filter expression.
Args:
other (Union[List[str], Set[str], Tuple[str], str]):
The tag(s) to filter on.
Example:
>>> from langchain_community.vectorstores.redis import RedisTag
>>> filter = RedisTag("brand") != "nike"
|
_create_message_dicts
|
params = self._client_params
if stop is not None:
if 'stop' in params:
raise ValueError('`stop` found in both the input and default params.')
params['stop'] = stop
message_dicts = [convert_message_to_dict(m) for m in messages]
return message_dicts, params
|
def _create_message_dicts(self, messages: List[BaseMessage], stop: Optional
[List[str]]) ->Tuple[List[Dict[str, Any]], Dict[str, Any]]:
params = self._client_params
if stop is not None:
if 'stop' in params:
raise ValueError(
'`stop` found in both the input and default params.')
params['stop'] = stop
message_dicts = [convert_message_to_dict(m) for m in messages]
return message_dicts, params
| null |
_get_elements
|
from unstructured.partition.xlsx import partition_xlsx
return partition_xlsx(filename=self.file_path, **self.unstructured_kwargs)
|
def _get_elements(self) ->List:
from unstructured.partition.xlsx import partition_xlsx
return partition_xlsx(filename=self.file_path, **self.unstructured_kwargs)
| null |
_parse_json_multilevel
|
for section, subsections in extracted_data.items():
indentation = ' ' * level
if isinstance(subsections, str):
subsections = subsections.replace('\n', ',')
formatted_list.append(f'{indentation}{section} : {subsections}')
elif isinstance(subsections, list):
formatted_list.append(f'{indentation}{section} : ')
self._list_handling(subsections, formatted_list, level + 1)
elif isinstance(subsections, dict):
formatted_list.append(f'{indentation}{section} : ')
self._parse_json_multilevel(subsections, formatted_list, level + 1)
|
def _parse_json_multilevel(self, extracted_data: dict, formatted_list: list,
level: int=0) ->None:
for section, subsections in extracted_data.items():
indentation = ' ' * level
if isinstance(subsections, str):
subsections = subsections.replace('\n', ',')
formatted_list.append(f'{indentation}{section} : {subsections}')
elif isinstance(subsections, list):
formatted_list.append(f'{indentation}{section} : ')
self._list_handling(subsections, formatted_list, level + 1)
elif isinstance(subsections, dict):
formatted_list.append(f'{indentation}{section} : ')
self._parse_json_multilevel(subsections, formatted_list, level + 1)
| null |
get_llm_cache
|
"""Get the value of the `llm_cache` global setting."""
try:
import langchain
with warnings.catch_warnings():
warnings.filterwarnings('ignore', message=
'Importing llm_cache from langchain root module is no longer supported'
)
old_llm_cache = langchain.llm_cache
except ImportError:
old_llm_cache = None
global _llm_cache
return _llm_cache or old_llm_cache
|
def get_llm_cache() ->'BaseCache':
"""Get the value of the `llm_cache` global setting."""
try:
import langchain
with warnings.catch_warnings():
warnings.filterwarnings('ignore', message=
'Importing llm_cache from langchain root module is no longer supported'
)
old_llm_cache = langchain.llm_cache
except ImportError:
old_llm_cache = None
global _llm_cache
return _llm_cache or old_llm_cache
|
Get the value of the `llm_cache` global setting.
|
is_lc_serializable
|
return True
|
@classmethod
def is_lc_serializable(cls) ->bool:
return True
| null |
return_stopped_response
|
"""Return response when agent has been stopped due to max iterations."""
if early_stopping_method == 'force':
return AgentFinish({'output':
'Agent stopped due to iteration limit or time limit.'}, '')
else:
raise ValueError(
f'Got unsupported early_stopping_method `{early_stopping_method}`')
|
def return_stopped_response(self, early_stopping_method: str,
intermediate_steps: List[Tuple[AgentAction, str]], **kwargs: Any
) ->AgentFinish:
"""Return response when agent has been stopped due to max iterations."""
if early_stopping_method == 'force':
return AgentFinish({'output':
'Agent stopped due to iteration limit or time limit.'}, '')
else:
raise ValueError(
f'Got unsupported early_stopping_method `{early_stopping_method}`')
|
Return response when agent has been stopped due to max iterations.
|
_type
|
return 'pydantic'
|
@property
def _type(self) ->str:
return 'pydantic'
| null |
from_llm_and_ai_plugin
|
"""Instantiate the toolkit from an OpenAPI Spec URL"""
spec = OpenAPISpec.from_url(ai_plugin.api.url)
return cls.from_llm_and_spec(llm=llm, spec=spec, requests=requests, verbose
=verbose, **kwargs)
|
@classmethod
def from_llm_and_ai_plugin(cls, llm: BaseLanguageModel, ai_plugin: AIPlugin,
requests: Optional[Requests]=None, verbose: bool=False, **kwargs: Any
) ->NLAToolkit:
"""Instantiate the toolkit from an OpenAPI Spec URL"""
spec = OpenAPISpec.from_url(ai_plugin.api.url)
return cls.from_llm_and_spec(llm=llm, spec=spec, requests=requests,
verbose=verbose, **kwargs)
|
Instantiate the toolkit from an OpenAPI Spec URL
|
test_all_imports
|
assert sorted(EXPECTED_ALL) == sorted(__all__)
|
def test_all_imports() ->None:
assert sorted(EXPECTED_ALL) == sorted(__all__)
| null |
test_load_success
|
docs = retriever.get_relevant_documents('HUNTER X HUNTER')
assert len(docs) > 1
assert len(docs) <= 3
assert_docs(docs, all_meta=False)
|
def test_load_success(retriever: WikipediaRetriever) ->None:
docs = retriever.get_relevant_documents('HUNTER X HUNTER')
assert len(docs) > 1
assert len(docs) <= 3
assert_docs(docs, all_meta=False)
| null |
test_tag_filter_varied
|
if operation == '==':
tf = Tag('tag_field') == tags
elif operation == '!=':
tf = Tag('tag_field') != tags
else:
raise ValueError(f'Unsupported operation: {operation}')
assert str(tf) == expected
|
@pytest.mark.parametrize('operation,tags,expected', [('==', 'simpletag',
'@tag_field:{simpletag}'), ('==', 'tag with space',
'@tag_field:{tag\\ with\\ space}'), ('==', 'special$char',
'@tag_field:{special\\$char}'), ('!=', 'negated',
'(-@tag_field:{negated})'), ('==', ['tag1', 'tag2'],
'@tag_field:{tag1|tag2}'), ('==', ['alpha', 'beta with space',
'gamma$special'],
'@tag_field:{alpha|beta\\ with\\ space|gamma\\$special}'), ('!=', [
'tagA', 'tagB'], '(-@tag_field:{tagA|tagB})'), ('==', 'weird:tag',
'@tag_field:{weird\\:tag}'), ('==', 'tag&another',
'@tag_field:{tag\\&another}'), ('==', 'tag/with/slashes',
'@tag_field:{tag\\/with\\/slashes}'), ('==', ['hyphen-tag',
'under_score', 'dot.tag'],
'@tag_field:{hyphen\\-tag|under_score|dot\\.tag}')])
def test_tag_filter_varied(operation: str, tags: str, expected: str) ->None:
if operation == '==':
tf = Tag('tag_field') == tags
elif operation == '!=':
tf = Tag('tag_field') != tags
else:
raise ValueError(f'Unsupported operation: {operation}')
assert str(tf) == expected
| null |
from_documents
|
"""Create alibaba cloud opensearch vector store instance.
Args:
documents: Documents to be inserted into the vector storage,
should not be empty.
embedding: Embedding function, Embedding function.
config: Alibaba OpenSearch instance configuration.
ids: Specify the ID for the inserted document. If left empty, the ID will be
automatically generated based on the text content.
Returns:
AlibabaCloudOpenSearch: Alibaba cloud opensearch vector store instance.
"""
if documents is None or len(documents) == 0:
raise Exception('the inserted documents, should not be empty.')
if embedding is None:
raise Exception('the embeddings should not be empty.')
if config is None:
raise Exception("config can't be none")
texts = [d.page_content for d in documents]
metadatas = [d.metadata for d in documents]
return cls.from_texts(texts=texts, embedding=embedding, metadatas=metadatas,
config=config, **kwargs)
|
@classmethod
def from_documents(cls, documents: List[Document], embedding: Embeddings,
config: Optional[AlibabaCloudOpenSearchSettings]=None, **kwargs: Any
) ->'AlibabaCloudOpenSearch':
"""Create alibaba cloud opensearch vector store instance.
Args:
documents: Documents to be inserted into the vector storage,
should not be empty.
embedding: Embedding function, Embedding function.
config: Alibaba OpenSearch instance configuration.
ids: Specify the ID for the inserted document. If left empty, the ID will be
automatically generated based on the text content.
Returns:
AlibabaCloudOpenSearch: Alibaba cloud opensearch vector store instance.
"""
if documents is None or len(documents) == 0:
raise Exception('the inserted documents, should not be empty.')
if embedding is None:
raise Exception('the embeddings should not be empty.')
if config is None:
raise Exception("config can't be none")
texts = [d.page_content for d in documents]
metadatas = [d.metadata for d in documents]
return cls.from_texts(texts=texts, embedding=embedding, metadatas=
metadatas, config=config, **kwargs)
|
Create alibaba cloud opensearch vector store instance.
Args:
documents: Documents to be inserted into the vector storage,
should not be empty.
embedding: Embedding function, Embedding function.
config: Alibaba OpenSearch instance configuration.
ids: Specify the ID for the inserted document. If left empty, the ID will be
automatically generated based on the text content.
Returns:
AlibabaCloudOpenSearch: Alibaba cloud opensearch vector store instance.
|
_delete_doc
|
"""
Delete a document from the Vectara corpus.
Args:
url (str): URL of the page to delete.
doc_id (str): ID of the document to delete.
Returns:
bool: True if deletion was successful, False otherwise.
"""
body = {'customer_id': self._vectara_customer_id, 'corpus_id': self.
_vectara_corpus_id, 'document_id': doc_id}
response = self._session.post('https://api.vectara.io/v1/delete-doc', data=
json.dumps(body), verify=True, headers=self._get_post_headers(),
timeout=self.vectara_api_timeout)
if response.status_code != 200:
logger.error(
f'Delete request failed for doc_id = {doc_id} with status code {response.status_code}, reason {response.reason}, text {response.text}'
)
return False
return True
|
def _delete_doc(self, doc_id: str) ->bool:
"""
Delete a document from the Vectara corpus.
Args:
url (str): URL of the page to delete.
doc_id (str): ID of the document to delete.
Returns:
bool: True if deletion was successful, False otherwise.
"""
body = {'customer_id': self._vectara_customer_id, 'corpus_id': self.
_vectara_corpus_id, 'document_id': doc_id}
response = self._session.post('https://api.vectara.io/v1/delete-doc',
data=json.dumps(body), verify=True, headers=self._get_post_headers(
), timeout=self.vectara_api_timeout)
if response.status_code != 200:
logger.error(
f'Delete request failed for doc_id = {doc_id} with status code {response.status_code}, reason {response.reason}, text {response.text}'
)
return False
return True
|
Delete a document from the Vectara corpus.
Args:
url (str): URL of the page to delete.
doc_id (str): ID of the document to delete.
Returns:
bool: True if deletion was successful, False otherwise.
|
warning_emitting_wrapper
|
"""Wrapper for the original wrapped callable that emits a warning.
Args:
*args: The positional arguments to the function.
**kwargs: The keyword arguments to the function.
Returns:
The return value of the function being wrapped.
"""
emit_warning()
return wrapped(*args, **kwargs)
|
def warning_emitting_wrapper(*args: Any, **kwargs: Any) ->Any:
"""Wrapper for the original wrapped callable that emits a warning.
Args:
*args: The positional arguments to the function.
**kwargs: The keyword arguments to the function.
Returns:
The return value of the function being wrapped.
"""
emit_warning()
return wrapped(*args, **kwargs)
|
Wrapper for the original wrapped callable that emits a warning.
Args:
*args: The positional arguments to the function.
**kwargs: The keyword arguments to the function.
Returns:
The return value of the function being wrapped.
|
test_vald_delete
|
texts = ['foo', 'bar', 'baz']
metadatas = [{'page': i} for i in range(len(texts))]
docsearch = _vald_from_texts(metadatas=metadatas)
time.sleep(WAIT_TIME)
output = docsearch.similarity_search('foo', k=10)
assert len(output) == 3
docsearch.delete(['foo'])
time.sleep(WAIT_TIME)
output = docsearch.similarity_search('foo', k=10)
assert len(output) == 2
|
def test_vald_delete() ->None:
texts = ['foo', 'bar', 'baz']
metadatas = [{'page': i} for i in range(len(texts))]
docsearch = _vald_from_texts(metadatas=metadatas)
time.sleep(WAIT_TIME)
output = docsearch.similarity_search('foo', k=10)
assert len(output) == 3
docsearch.delete(['foo'])
time.sleep(WAIT_TIME)
output = docsearch.similarity_search('foo', k=10)
assert len(output) == 2
| null |
_load
|
"""
load vearch engine for standalone vearch
"""
self.vearch.load()
|
def _load(self) ->None:
"""
load vearch engine for standalone vearch
"""
self.vearch.load()
|
load vearch engine for standalone vearch
|
test_generativeai_stream
|
llm = GooglePalm(temperature=0, model_name='gemini-pro')
outputs = list(llm.stream('Please say foo:'))
assert isinstance(outputs[0], str)
|
def test_generativeai_stream() ->None:
llm = GooglePalm(temperature=0, model_name='gemini-pro')
outputs = list(llm.stream('Please say foo:'))
assert isinstance(outputs[0], str)
| null |
_import_meilisearch
|
from langchain_community.vectorstores.meilisearch import Meilisearch
return Meilisearch
|
def _import_meilisearch() ->Any:
    """Lazily import and return the ``Meilisearch`` vector store class."""
    from langchain_community.vectorstores.meilisearch import Meilisearch
    return Meilisearch
| null |
__init__
|
from langchain.hub import pull
pulled = pull(owner_repo_commit, api_url=api_url, api_key=api_key)
super_kwargs = {'kwargs': {}, 'config': {}, **kwargs, 'bound': pulled,
'owner_repo_commit': owner_repo_commit}
super().__init__(**super_kwargs)
|
def __init__(self, owner_repo_commit: str, *, api_url: Optional[str]=None,
    api_key: Optional[str]=None, **kwargs: Any) ->None:
    """Pull a runnable from the LangChain Hub and bind this wrapper to it."""
    from langchain.hub import pull
    pulled = pull(owner_repo_commit, api_url=api_url, api_key=api_key)
    merged = dict(kwargs)
    # Caller kwargs may override these empty defaults ...
    merged.setdefault('kwargs', {})
    merged.setdefault('config', {})
    # ... but the pulled runnable and its identifier always win.
    merged['bound'] = pulled
    merged['owner_repo_commit'] = owner_repo_commit
    super().__init__(**merged)
| null |
_generate
|
"""Top Level call"""
|
@abstractmethod
def _generate(self, messages: List[BaseMessage], stop: Optional[List[str]]=
    None, run_manager: Optional[CallbackManagerForLLMRun]=None, **kwargs: Any
    ) ->ChatResult:
    """Top Level call.

    Abstract hook: concrete chat models implement the actual generation
    for the given ``messages`` and return a ``ChatResult``.
    """
|
Top Level call
|
test_trajectory_output_parser_parse
|
trajectory_output_parser = TrajectoryOutputParser()
text = """Judgment: Given the good reasoning in the final answer
but otherwise poor performance, we give the model a score of 2.
Score: 2"""
got = trajectory_output_parser.parse_folder(text)
want = TrajectoryEval(score=0.25, reasoning=
"""Judgment: Given the good reasoning in the final answer
but otherwise poor performance, we give the model a score of 2."""
)
assert got['score'] == want['score']
assert got['reasoning'] == want['reasoning']
with pytest.raises(OutputParserException):
trajectory_output_parser.parse_folder(
"""Judgment: Given the good reasoning in the final answer
but otherwise poor performance, we give the model a score of 2."""
)
with pytest.raises(OutputParserException):
trajectory_output_parser.parse_folder(
"""Judgment: Given the good reasoning in the final answer
but otherwise poor performance, we give the model a score of 2.
Score: 9"""
)
with pytest.raises(OutputParserException):
trajectory_output_parser.parse_folder(
"""Judgment: Given the good reasoning in the final answer
but otherwise poor performance, we give the model a score of 2.
Score: 10"""
)
with pytest.raises(OutputParserException):
trajectory_output_parser.parse_folder(
"""Judgment: Given the good reasoning in the final answer
but otherwise poor performance, we give the model a score of 2.
Score: 0.1"""
)
with pytest.raises(OutputParserException):
trajectory_output_parser.parse_folder(
"""Judgment: Given the good reasoning in the final answer
but otherwise poor performance, we give the model a score of 2.
Score: One"""
)
|
def test_trajectory_output_parser_parse() ->None:
    """A valid 'Score: 2' parses to 0.25; missing/out-of-range scores raise."""
    trajectory_output_parser = TrajectoryOutputParser()
    reasoning = (
        'Judgment: Given the good reasoning in the final answer\n'
        'but otherwise poor performance, we give the model a score of 2.')
    got = trajectory_output_parser.parse_folder(reasoning + '\nScore: 2')
    assert got['score'] == 0.25
    assert got['reasoning'] == reasoning
    # NOTE(review): ``parse_folder`` is an unusual name for an output
    # parser entry point — confirm it is not meant to be ``parse``.
    for bad_suffix in ['', '\nScore: 9', '\nScore: 10', '\nScore: 0.1',
        '\nScore: One']:
        with pytest.raises(OutputParserException):
            trajectory_output_parser.parse_folder(reasoning + bad_suffix)
| null |
add_texts
|
"""
Add texts along with their corresponding embeddings and optional
metadata to the BagelDB cluster.
Args:
texts (Iterable[str]): Texts to be added.
embeddings (Optional[List[float]]): List of embeddingvectors
metadatas (Optional[List[dict]]): Optional list of metadatas.
ids (Optional[List[str]]): List of unique ID for the texts.
Returns:
List[str]: List of unique ID representing the added texts.
"""
if ids is None:
ids = [str(uuid.uuid1()) for _ in texts]
texts = list(texts)
if self._embedding_function and embeddings is None and texts:
embeddings = self._embedding_function.embed_documents(texts)
if metadatas:
length_diff = len(texts) - len(metadatas)
if length_diff:
metadatas = metadatas + [{}] * length_diff
empty_ids = []
non_empty_ids = []
for idx, metadata in enumerate(metadatas):
if metadata:
non_empty_ids.append(idx)
else:
empty_ids.append(idx)
if non_empty_ids:
metadatas = [metadatas[idx] for idx in non_empty_ids]
texts_with_metadatas = [texts[idx] for idx in non_empty_ids]
embeddings_with_metadatas = [embeddings[idx] for idx in non_empty_ids
] if embeddings else None
ids_with_metadata = [ids[idx] for idx in non_empty_ids]
self._cluster.upsert(embeddings=embeddings_with_metadatas,
metadatas=metadatas, documents=texts_with_metadatas, ids=
ids_with_metadata)
if empty_ids:
texts_without_metadatas = [texts[j] for j in empty_ids]
embeddings_without_metadatas = [embeddings[j] for j in empty_ids
] if embeddings else None
ids_without_metadatas = [ids[j] for j in empty_ids]
self._cluster.upsert(embeddings=embeddings_without_metadatas,
documents=texts_without_metadatas, ids=ids_without_metadatas)
else:
metadatas = [{}] * len(texts)
self._cluster.upsert(embeddings=embeddings, documents=texts, metadatas=
metadatas, ids=ids)
return ids
|
def add_texts(self, texts: Iterable[str], metadatas: Optional[List[dict]]=
    None, ids: Optional[List[str]]=None, embeddings: Optional[List[List[
    float]]]=None, **kwargs: Any) ->List[str]:
    """
    Add texts along with their corresponding embeddings and optional
    metadata to the BagelDB cluster.
    Args:
        texts (Iterable[str]): Texts to be added.
        embeddings (Optional[List[List[float]]]): List of embedding vectors.
        metadatas (Optional[List[dict]]): Optional list of metadatas.
        ids (Optional[List[str]]): List of unique ID for the texts.
    Returns:
        List[str]: List of unique ID representing the added texts.
    """
    # Materialize first: ``texts`` may be a one-shot iterator and is
    # consumed below for id generation, embedding and upserting.
    # (Previously ids were generated before list(), which exhausted
    # generator inputs and silently upserted nothing.)
    texts = list(texts)
    if ids is None:
        # Time-based UUIDs keep generated ids unique per call.
        ids = [str(uuid.uuid1()) for _ in texts]
    if self._embedding_function and embeddings is None and texts:
        # Compute embeddings only when the caller did not supply them.
        embeddings = self._embedding_function.embed_documents(texts)
    if metadatas:
        # Pad metadatas with empty dicts so it lines up with texts.
        length_diff = len(texts) - len(metadatas)
        if length_diff:
            metadatas = metadatas + [{}] * length_diff
        # Partition rows by whether their metadata dict is non-empty:
        # the upsert for empty-metadata rows omits ``metadatas``.
        empty_ids = []
        non_empty_ids = []
        for idx, metadata in enumerate(metadatas):
            if metadata:
                non_empty_ids.append(idx)
            else:
                empty_ids.append(idx)
        if non_empty_ids:
            metadatas = [metadatas[idx] for idx in non_empty_ids]
            texts_with_metadatas = [texts[idx] for idx in non_empty_ids]
            embeddings_with_metadatas = [embeddings[idx] for idx in
                non_empty_ids] if embeddings else None
            ids_with_metadata = [ids[idx] for idx in non_empty_ids]
            self._cluster.upsert(embeddings=embeddings_with_metadatas,
                metadatas=metadatas, documents=texts_with_metadatas, ids=
                ids_with_metadata)
        if empty_ids:
            texts_without_metadatas = [texts[j] for j in empty_ids]
            embeddings_without_metadatas = [embeddings[j] for j in empty_ids
                ] if embeddings else None
            ids_without_metadatas = [ids[j] for j in empty_ids]
            self._cluster.upsert(embeddings=embeddings_without_metadatas,
                documents=texts_without_metadatas, ids=ids_without_metadatas)
    else:
        # No metadata at all: single upsert with empty metadata dicts.
        metadatas = [{}] * len(texts)
        self._cluster.upsert(embeddings=embeddings, documents=texts,
            metadatas=metadatas, ids=ids)
    return ids
|
Add texts along with their corresponding embeddings and optional
metadata to the BagelDB cluster.
Args:
texts (Iterable[str]): Texts to be added.
        embeddings (Optional[List[float]]): List of embedding vectors
metadatas (Optional[List[dict]]): Optional list of metadatas.
ids (Optional[List[str]]): List of unique ID for the texts.
Returns:
List[str]: List of unique ID representing the added texts.
|
_convert_messages_to_ollama_messages
|
ollama_messages = []
for message in messages:
role = ''
if isinstance(message, HumanMessage):
role = 'user'
elif isinstance(message, AIMessage):
role = 'assistant'
elif isinstance(message, SystemMessage):
role = 'system'
else:
raise ValueError('Received unsupported message type for Ollama.')
content = ''
images = []
if isinstance(message.content, str):
content = message.content
else:
for content_part in message.content:
if content_part.get('type') == 'text':
content += f"\n{content_part['text']}"
elif content_part.get('type') == 'image_url':
if isinstance(content_part.get('image_url'), str):
image_url_components = content_part['image_url'].split(',')
if len(image_url_components) > 1:
images.append(image_url_components[1])
else:
images.append(image_url_components[0])
else:
raise ValueError(
'Only string image_url content parts are supported.')
else:
raise ValueError(
"Unsupported message content type. Must either have type 'text' or type 'image_url' with a string 'image_url' field."
)
ollama_messages.append({'role': role, 'content': content, 'images': images}
)
return ollama_messages
|
def _convert_messages_to_ollama_messages(self, messages: List[BaseMessage]
    ) ->List[Dict[str, Union[str, List[str]]]]:
    """Convert LangChain messages into Ollama's role/content/images dicts.

    Raises:
        ValueError: On unsupported message types or content part shapes.
    """
    ollama_messages = []
    for message in messages:
        role = ''
        # Map the message class onto Ollama's role vocabulary.
        if isinstance(message, HumanMessage):
            role = 'user'
        elif isinstance(message, AIMessage):
            role = 'assistant'
        elif isinstance(message, SystemMessage):
            role = 'system'
        else:
            raise ValueError('Received unsupported message type for Ollama.')
        content = ''
        images = []
        if isinstance(message.content, str):
            content = message.content
        else:
            # Multi-part content: concatenate text parts, collect images.
            for content_part in message.content:
                if content_part.get('type') == 'text':
                    # NOTE: each text part is prefixed with a newline.
                    content += f"\n{content_part['text']}"
                elif content_part.get('type') == 'image_url':
                    if isinstance(content_part.get('image_url'), str):
                        # Data-URL style "header,payload": keep the payload;
                        # otherwise take the whole string as-is.
                        image_url_components = content_part['image_url'].split(
                            ',')
                        if len(image_url_components) > 1:
                            images.append(image_url_components[1])
                        else:
                            images.append(image_url_components[0])
                    else:
                        raise ValueError(
                            'Only string image_url content parts are supported.'
                            )
                else:
                    raise ValueError(
                        "Unsupported message content type. Must either have type 'text' or type 'image_url' with a string 'image_url' field."
                        )
        ollama_messages.append({'role': role, 'content': content, 'images':
            images})
    return ollama_messages
| null |
_run
|
"""Use the tool."""
query_params = {'file_url': query, 'language': self.language,
'attributes_as_list': False}
return self._call_eden_ai(query_params)
|
def _run(self, query: str, run_manager: Optional[CallbackManagerForToolRun]
    =None) ->str:
    """Run the tool: send ``query`` as a file URL to the EdenAI endpoint."""
    request_params = {'file_url': query, 'language': self.language,
        'attributes_as_list': False}
    return self._call_eden_ai(request_params)
|
Use the tool.
|
similarity_search_with_score_by_vector
|
"""Return docs most similar to query vector, with its score."""
with Session(self._engine) as _session:
real_distance_func = (self._table.embedding.squared_euclidean_distance if
distance_func == 'sqrt_euclid' else self._table.embedding.
negative_dot_product_distance if distance_func == 'neg_dot_prod' else
self._table.embedding.negative_cosine_distance if distance_func ==
'ned_cos' else None)
if real_distance_func is None:
raise ValueError('Invalid distance function')
t = select(self._table, real_distance_func(query_vector).label('score')
).order_by('score').limit(k)
return [(Document(page_content=row[0].text, metadata=row[0].meta), row[
1]) for row in _session.execute(t)]
|
def similarity_search_with_score_by_vector(self, query_vector: List[float],
    k: int=4, distance_func: Literal['sqrt_euclid', 'neg_dot_prod',
    'ned_cos']='sqrt_euclid', **kwargs: Any) ->List[Tuple[Document, float]]:
    """Return docs most similar to query vector, with its score."""
    with Session(self._engine) as session:
        # Resolve the requested distance metric on the embedding column.
        if distance_func == 'sqrt_euclid':
            scorer = self._table.embedding.squared_euclidean_distance
        elif distance_func == 'neg_dot_prod':
            scorer = self._table.embedding.negative_dot_product_distance
        elif distance_func == 'ned_cos':
            scorer = self._table.embedding.negative_cosine_distance
        else:
            raise ValueError('Invalid distance function')
        statement = select(self._table, scorer(query_vector).label('score')
            ).order_by('score').limit(k)
        return [(Document(page_content=row[0].text, metadata=row[0].meta),
            row[1]) for row in session.execute(statement)]
|
Return docs most similar to query vector, with its score.
|
test_all_imports
|
assert set(__all__) == set(EXPECTED_ALL)
|
def test_all_imports() ->None:
    """The module's ``__all__`` matches the expected export list."""
    assert set(EXPECTED_ALL) == set(__all__)
| null |
validate_environment
|
"""Validate that api key and python package exists in environment."""
wolfram_alpha_appid = get_from_dict_or_env(values, 'wolfram_alpha_appid',
'WOLFRAM_ALPHA_APPID')
values['wolfram_alpha_appid'] = wolfram_alpha_appid
try:
import wolframalpha
except ImportError:
raise ImportError(
'wolframalpha is not installed. Please install it with `pip install wolframalpha`'
)
client = wolframalpha.Client(wolfram_alpha_appid)
values['wolfram_client'] = client
return values
|
@root_validator()
def validate_environment(cls, values: Dict) ->Dict:
    """Resolve the Wolfram Alpha app id and build the client, or fail fast."""
    app_id = get_from_dict_or_env(values, 'wolfram_alpha_appid',
        'WOLFRAM_ALPHA_APPID')
    values['wolfram_alpha_appid'] = app_id
    try:
        import wolframalpha
    except ImportError:
        raise ImportError(
            'wolframalpha is not installed. Please install it with `pip install wolframalpha`'
            )
    values['wolfram_client'] = wolframalpha.Client(app_id)
    return values
|
Validate that api key and python package exists in environment.
|
_import_dashvector
|
from langchain_community.vectorstores.dashvector import DashVector
return DashVector
|
def _import_dashvector() ->Any:
    """Lazily import and return the ``DashVector`` vector store class."""
    from langchain_community.vectorstores.dashvector import DashVector
    return DashVector
| null |
test_special_tokens
|
assert len(_get_token_ids_default_method('a:b_c d')) == 6
|
def test_special_tokens(self) ->None:
    """Tokenizing 'a:b_c d' with the default method yields six ids."""
    token_ids = _get_token_ids_default_method('a:b_c d')
    assert len(token_ids) == 6
| null |
test_cleanup_with_different_batchsize
|
"""Check that we can clean up with different batch size."""
docs = [Document(page_content='This is a test document.', metadata={
'source': str(d)}) for d in range(1000)]
assert index(docs, record_manager, vector_store, cleanup='full') == {
'num_added': 1000, 'num_deleted': 0, 'num_skipped': 0, 'num_updated': 0}
docs = [Document(page_content='Different doc', metadata={'source': str(d)}) for
d in range(1001)]
assert index(docs, record_manager, vector_store, cleanup='full',
cleanup_batch_size=17) == {'num_added': 1001, 'num_deleted': 1000,
'num_skipped': 0, 'num_updated': 0}
|
def test_cleanup_with_different_batchsize(record_manager: SQLRecordManager,
    vector_store: VectorStore) ->None:
    """Check that we can clean up with different batch size."""
    initial_docs = [Document(page_content='This is a test document.',
        metadata={'source': str(i)}) for i in range(1000)]
    result = index(initial_docs, record_manager, vector_store, cleanup='full')
    assert result == {'num_added': 1000, 'num_deleted': 0, 'num_skipped': 0,
        'num_updated': 0}
    replacement_docs = [Document(page_content='Different doc', metadata={
        'source': str(i)}) for i in range(1001)]
    result = index(replacement_docs, record_manager, vector_store, cleanup=
        'full', cleanup_batch_size=17)
    assert result == {'num_added': 1001, 'num_deleted': 1000,
        'num_skipped': 0, 'num_updated': 0}
|
Check that we can clean up with different batch size.
|
embed_query
|
"""Return simple embeddings."""
return [float(1.0)] * (ADA_TOKEN_COUNT - 1) + [float(0.0)]
|
def embed_query(self, text: str) ->List[float]:
    """Return a deterministic fake embedding: ones with a trailing zero."""
    return [1.0] * (ADA_TOKEN_COUNT - 1) + [0.0]
|
Return simple embeddings.
|
from_texts
|
"""Return Vearch VectorStore"""
vearch_db = cls(embedding_function=embedding, embedding=embedding,
path_or_url=path_or_url, db_name=db_name, table_name=table_name, flag=flag)
vearch_db.add_texts(texts=texts, metadatas=metadatas)
return vearch_db
|
@classmethod
def from_texts(cls: Type[Vearch], texts: List[str], embedding: Embeddings,
    metadatas: Optional[List[dict]]=None, path_or_url: Optional[str]=None,
    table_name: str=_DEFAULT_TABLE_NAME, db_name: str=
    _DEFAULT_CLUSTER_DB_NAME, flag: int=_DEFAULT_VERSION, **kwargs: Any
    ) ->Vearch:
    """Construct a Vearch store and index the given texts into it."""
    store = cls(embedding_function=embedding, embedding=embedding,
        path_or_url=path_or_url, db_name=db_name, table_name=table_name,
        flag=flag)
    store.add_texts(texts=texts, metadatas=metadatas)
    return store
|
Return Vearch VectorStore
|
test_get_action_and_input_sql_query
|
"""Test getting the action and action input from the text
when the LLM output is a well formed SQL query
"""
llm_output = """
I should query for the largest single shift payment for every unique user.
Action: query_sql_db
Action Input: SELECT "UserName", MAX(totalpayment) FROM user_shifts GROUP BY "UserName" """
action, action_input = get_action_and_input(llm_output)
assert action == 'query_sql_db'
assert action_input == 'SELECT "UserName", MAX(totalpayment) FROM user_shifts GROUP BY "UserName"'
|
def test_get_action_and_input_sql_query() ->None:
    """Test getting the action and action input from the text
    when the LLM output is a well formed SQL query
    """
    raw_output = """
    I should query for the largest single shift payment for every unique user.
    Action: query_sql_db
    Action Input: SELECT "UserName", MAX(totalpayment) FROM user_shifts GROUP BY "UserName" """
    action, action_input = get_action_and_input(raw_output)
    assert (action, action_input) == ('query_sql_db',
        'SELECT "UserName", MAX(totalpayment) FROM user_shifts GROUP BY "UserName"')
|
Test getting the action and action input from the text
when the LLM output is a well formed SQL query
|
test_huggingface_type_check
|
"""Test that type checks are done properly on input."""
with pytest.raises(ValueError):
CharacterTextSplitter.from_huggingface_tokenizer('foo')
|
def test_huggingface_type_check() ->None:
    """Passing a non-tokenizer object raises ``ValueError``."""
    with pytest.raises(ValueError):
        CharacterTextSplitter.from_huggingface_tokenizer('foo')
|
Test that type checks are done properly on input.
|
_get_relevant_documents
|
"""Get documents relevant for a query.
Args:
query: string to find relevant documents for
Returns:
Sequence of relevant documents
"""
docs = self.base_retriever.get_relevant_documents(query, callbacks=
run_manager.get_child(), **kwargs)
if docs:
compressed_docs = self.base_compressor.compress_documents(docs, query,
callbacks=run_manager.get_child())
return list(compressed_docs)
else:
return []
|
def _get_relevant_documents(self, query: str, *, run_manager:
    CallbackManagerForRetrieverRun, **kwargs: Any) ->List[Document]:
    """Retrieve documents for a query, then run them through the compressor.

    Args:
        query: string to find relevant documents for

    Returns:
        Sequence of relevant documents
    """
    retrieved = self.base_retriever.get_relevant_documents(query,
        callbacks=run_manager.get_child(), **kwargs)
    if not retrieved:
        return []
    compressed = self.base_compressor.compress_documents(retrieved, query,
        callbacks=run_manager.get_child())
    return list(compressed)
|
Get documents relevant for a query.
Args:
query: string to find relevant documents for
Returns:
Sequence of relevant documents
|
test_character_text_splitter_discard_separator_regex
|
"""Test splitting by characters discarding the separator
that is a regex special character."""
text = 'foo.bar.baz.123'
splitter = CharacterTextSplitter(separator=separator, chunk_size=1,
chunk_overlap=0, keep_separator=False, is_separator_regex=
is_separator_regex)
output = splitter.split_text(text)
expected_output = ['foo', 'bar', 'baz', '123']
assert output == expected_output
|
@pytest.mark.parametrize('separator, is_separator_regex', [(re.escape('.'),
    True), ('.', False)])
def test_character_text_splitter_discard_separator_regex(separator: str,
    is_separator_regex: bool) ->None:
    """Test splitting by characters discarding the separator
    that is a regex special character."""
    splitter = CharacterTextSplitter(separator=separator, chunk_size=1,
        chunk_overlap=0, keep_separator=False, is_separator_regex=
        is_separator_regex)
    assert splitter.split_text('foo.bar.baz.123') == ['foo', 'bar', 'baz',
        '123']
|
Test splitting by characters discarding the separator
that is a regex special character.
|
__init__
|
"""Initialize the class."""
if not endpoint_api_key or not endpoint_url:
raise ValueError(
"""A key/token and REST endpoint should
be provided to invoke the endpoint"""
)
self.endpoint_url = endpoint_url
self.endpoint_api_key = endpoint_api_key
self.deployment_name = deployment_name
|
def __init__(self, endpoint_url: str, endpoint_api_key: str,
    deployment_name: str='') ->None:
    """Validate and store the endpoint URL, API key and deployment name."""
    if not endpoint_api_key or not endpoint_url:
        raise ValueError(
            """A key/token and REST endpoint should
                be provided to invoke the endpoint"""
            )
    self.deployment_name = deployment_name
    self.endpoint_api_key = endpoint_api_key
    self.endpoint_url = endpoint_url
|
Initialize the class.
|
get_format_instructions
|
return self.parser.get_format_instructions()
|
def get_format_instructions(self) ->str:
    """Delegate to the wrapped parser's format instructions."""
    inner_parser = self.parser
    return inner_parser.get_format_instructions()
| null |
_default_params
|
"""Get the default parameters for calling Cohere API."""
return {'temperature': self.temperature}
|
@property
def _default_params(self) ->Dict[str, Any]:
    """Get the default parameters for calling Cohere API."""
    return dict(temperature=self.temperature)
|
Get the default parameters for calling Cohere API.
|
test_chroma_search_filter
|
"""Test end to end construction and search with metadata filtering."""
texts = ['far', 'bar', 'baz']
metadatas = [{'first_letter': '{}'.format(text[0])} for text in texts]
docsearch = Chroma.from_texts(collection_name='test_collection', texts=
texts, embedding=FakeEmbeddings(), metadatas=metadatas)
output = docsearch.similarity_search('far', k=1, filter={'first_letter': 'f'})
assert output == [Document(page_content='far', metadata={'first_letter': 'f'})]
output = docsearch.similarity_search('far', k=1, filter={'first_letter': 'b'})
assert output == [Document(page_content='bar', metadata={'first_letter': 'b'})]
|
def test_chroma_search_filter() ->None:
    """Test end to end construction and search with metadata filtering."""
    texts = ['far', 'bar', 'baz']
    metadatas = [{'first_letter': '{}'.format(text[0])} for text in texts]
    docsearch = Chroma.from_texts(collection_name='test_collection',
        texts=texts, embedding=FakeEmbeddings(), metadatas=metadatas)
    # Same query, two different filters, should hit two different docs.
    for letter, expected_text in [('f', 'far'), ('b', 'bar')]:
        result = docsearch.similarity_search('far', k=1, filter={
            'first_letter': letter})
        assert result == [Document(page_content=expected_text, metadata={
            'first_letter': letter})]
|
Test end to end construction and search with metadata filtering.
|
_run
|
"""Use the tool."""
try:
text_analysis_result = self._text_analysis(query)
return self._format_text_analysis_result(text_analysis_result)
except Exception as e:
raise RuntimeError(
f'Error while running AzureCogsTextAnalyticsHealthTool: {e}')
|
def _run(self, query: str, run_manager: Optional[CallbackManagerForToolRun]
    =None) ->str:
    """Analyze ``query`` and return the formatted health-analysis result."""
    try:
        analysis = self._text_analysis(query)
        return self._format_text_analysis_result(analysis)
    except Exception as e:
        raise RuntimeError(
            f'Error while running AzureCogsTextAnalyticsHealthTool: {e}')
|
Use the tool.
|
from_texts
|
raise NotImplementedError
|
@classmethod
def from_texts(cls: Type[VST], texts: List[str], embedding: Embeddings,
    metadatas: Optional[List[dict]]=None, **kwargs: Any) ->VST:
    """Not supported for this store."""
    raise NotImplementedError
| null |
test_add_texts_with_given_embedding
|
texts = ['foo', 'bar', 'baz']
embedding = FakeEmbeddings()
docsearch = Weaviate.from_texts(texts, embedding=embedding, weaviate_url=
weaviate_url)
docsearch.add_texts(['foo'])
output = docsearch.similarity_search_by_vector(embedding.embed_query('foo'),
k=2)
assert output == [Document(page_content='foo'), Document(page_content='foo')]
|
def test_add_texts_with_given_embedding(self, weaviate_url: str) ->None:
    """Adding a duplicate text makes it appear twice in a vector search."""
    embedding = FakeEmbeddings()
    docsearch = Weaviate.from_texts(['foo', 'bar', 'baz'], embedding=
        embedding, weaviate_url=weaviate_url)
    docsearch.add_texts(['foo'])
    query_vector = embedding.embed_query('foo')
    output = docsearch.similarity_search_by_vector(query_vector, k=2)
    assert output == [Document(page_content='foo'), Document(page_content=
        'foo')]
| null |
stream
|
"""
Default implementation of stream, which calls invoke.
Subclasses should override this method if they support streaming output.
"""
yield self.invoke(input, config, **kwargs)
|
def stream(self, input: Input, config: Optional[RunnableConfig]=None, **
    kwargs: Optional[Any]) ->Iterator[Output]:
    """
    Default implementation of stream, which calls invoke.
    Subclasses should override this method if they support streaming output.

    Yields exactly one item: the result of ``self.invoke``.
    """
    yield self.invoke(input, config, **kwargs)
|
Default implementation of stream, which calls invoke.
Subclasses should override this method if they support streaming output.
|
_import_gradient_ai
|
from langchain_community.llms.gradient_ai import GradientLLM
return GradientLLM
|
def _import_gradient_ai() ->Any:
    """Lazily import and return the ``GradientLLM`` class."""
    from langchain_community.llms.gradient_ai import GradientLLM
    return GradientLLM
| null |
input_keys
|
return [self.input_key]
|
@property
def input_keys(self) ->List[str]:
    """Expose the single expected input key as a one-element list."""
    return [self.input_key]
| null |
search
|
"""Return the fake document."""
document = Document(page_content=_PAGE_CONTENT)
return document
|
def search(self, search: str) ->Union[str, Document]:
    """Return a fixed fake document, ignoring the query."""
    return Document(page_content=_PAGE_CONTENT)
|
Return the fake document.
|
__init__
|
super().__init__(*args, **kwargs)
if model is None:
from sentence_transformers import SentenceTransformer
model = SentenceTransformer('all-mpnet-base-v2')
self.model = model
self.auto_embed = auto_embed
|
def __init__(self, auto_embed: bool, model: Optional[Any]=None, *args: Any,
    **kwargs: Any):
    """Initialize, defaulting to an ``all-mpnet-base-v2`` sentence encoder."""
    super().__init__(*args, **kwargs)
    self.auto_embed = auto_embed
    if model is not None:
        self.model = model
    else:
        from sentence_transformers import SentenceTransformer
        self.model = SentenceTransformer('all-mpnet-base-v2')
| null |
test_named_tool_decorator
|
"""Test functionality when arguments are provided as input to decorator."""
@tool('search')
def search_api(query: str) ->str:
"""Search the API for the query."""
assert isinstance(query, str)
return f'API result - {query}'
assert isinstance(search_api, BaseTool)
assert search_api.name == 'search'
assert not search_api.return_direct
assert search_api.run({'query': 'foo'}) == 'API result - foo'
|
def test_named_tool_decorator() ->None:
    """Test functionality when arguments are provided as input to decorator."""

    @tool('search')
    def search_api(query: str) ->str:
        """Search the API for the query."""
        assert isinstance(query, str)
        return f'API result - {query}'
    assert isinstance(search_api, BaseTool)
    assert search_api.run({'query': 'foo'}) == 'API result - foo'
    assert search_api.name == 'search'
    assert not search_api.return_direct
|
Test functionality when arguments are provided as input to decorator.
|
_import_bulk
|
"""Import bulk if available, otherwise raise error."""
try:
from opensearchpy.helpers import bulk
except ImportError:
raise ImportError(IMPORT_OPENSEARCH_PY_ERROR)
return bulk
|
def _import_bulk() ->Any:
    """Import bulk if available, otherwise raise error."""
    try:
        from opensearchpy.helpers import bulk
    except ImportError:
        raise ImportError(IMPORT_OPENSEARCH_PY_ERROR)
    else:
        return bulk
|
Import bulk if available, otherwise raise error.
|
history2
|
history2 = AstraDBChatMessageHistory(session_id='session-test-2',
collection_name='langchain_cmh_test', token=os.environ[
'ASTRA_DB_APPLICATION_TOKEN'], api_endpoint=os.environ[
'ASTRA_DB_API_ENDPOINT'], namespace=os.environ.get('ASTRA_DB_KEYSPACE'))
yield history2
history2.astra_db.delete_collection('langchain_cmh_test')
|
@pytest.fixture(scope='function')
def history2() ->Iterable[AstraDBChatMessageHistory]:
    """Yield a second Astra-backed history; drop its collection afterwards."""
    collection = 'langchain_cmh_test'
    message_history = AstraDBChatMessageHistory(session_id='session-test-2',
        collection_name=collection, token=os.environ[
        'ASTRA_DB_APPLICATION_TOKEN'], api_endpoint=os.environ[
        'ASTRA_DB_API_ENDPOINT'], namespace=os.environ.get(
        'ASTRA_DB_KEYSPACE'))
    yield message_history
    message_history.astra_db.delete_collection(collection)
| null |
_on_chain_end
|
"""Process the Chain Run."""
|
def _on_chain_end(self, run: Run) ->None:
    """Process the Chain Run.

    Base implementation does nothing; subclasses override to react to a
    finished chain run.
    """
|
Process the Chain Run.
|
test_similarity_search_with_metadata_and_filter
|
"""Test end to end construction and search with metadata."""
texts = ['foo', 'bar', 'baz']
metadatas = [{'page': i} for i in range(len(texts))]
docsearch = Weaviate.from_texts(texts, embedding_openai, metadatas=
metadatas, weaviate_url=weaviate_url)
output = docsearch.similarity_search('foo', k=2, where_filter={'path': [
'page'], 'operator': 'Equal', 'valueNumber': 0})
assert output == [Document(page_content='foo', metadata={'page': 0})]
|
@pytest.mark.vcr(ignore_localhost=True)
def test_similarity_search_with_metadata_and_filter(self, weaviate_url: str,
    embedding_openai: OpenAIEmbeddings) ->None:
    """Test end to end construction and search with metadata."""
    texts = ['foo', 'bar', 'baz']
    page_metadatas = [{'page': index} for index in range(len(texts))]
    docsearch = Weaviate.from_texts(texts, embedding_openai, metadatas=
        page_metadatas, weaviate_url=weaviate_url)
    page_filter = {'path': ['page'], 'operator': 'Equal', 'valueNumber': 0}
    output = docsearch.similarity_search('foo', k=2, where_filter=page_filter)
    assert output == [Document(page_content='foo', metadata={'page': 0})]
|
Test end to end construction and search with metadata.
|
lc_secrets
|
return {'gpt_router_api_key': 'GPT_ROUTER_API_KEY'}
|
@property
def lc_secrets(self) ->Dict[str, str]:
    """Map the API-key attribute name to its environment variable name."""
    return {'gpt_router_api_key': 'GPT_ROUTER_API_KEY'}
| null |
OutputType
|
return List[self.bound.OutputType]
|
@property
def OutputType(self) ->Type[List[Output]]:
    """A list type parameterized by the bound runnable's output type."""
    return List[self.bound.OutputType]
| null |
test_qdrant_similarity_search
|
"""Test end to end construction and search."""
texts = ['foo', 'bar', 'baz']
docsearch = Qdrant.from_texts(texts, ConsistentFakeEmbeddings(), location=
':memory:', content_payload_key=content_payload_key,
metadata_payload_key=metadata_payload_key, batch_size=batch_size,
vector_name=vector_name)
output = docsearch.similarity_search('foo', k=1)
assert output == [Document(page_content='foo')]
|
@pytest.mark.parametrize('batch_size', [1, 64])
@pytest.mark.parametrize('content_payload_key', [Qdrant.CONTENT_KEY, 'foo'])
@pytest.mark.parametrize('metadata_payload_key', [Qdrant.METADATA_KEY, 'bar'])
@pytest.mark.parametrize('vector_name', [None, 'my-vector'])
def test_qdrant_similarity_search(batch_size: int, content_payload_key: str,
    metadata_payload_key: str, vector_name: Optional[str]) ->None:
    """Test end to end construction and search."""
    store = Qdrant.from_texts(['foo', 'bar', 'baz'],
        ConsistentFakeEmbeddings(), location=':memory:',
        content_payload_key=content_payload_key, metadata_payload_key=
        metadata_payload_key, batch_size=batch_size, vector_name=vector_name)
    assert store.similarity_search('foo', k=1) == [Document(page_content=
        'foo')]
|
Test end to end construction and search.
|
test_baichuan_key_masked_when_passed_via_constructor
|
"""Test initialization with an API key provided via the initializer"""
chat = ChatBaichuan(baichuan_api_key='test-api-key', baichuan_secret_key=
'test-secret-key')
print(chat.baichuan_api_key, end='')
captured = capsys.readouterr()
assert captured.out == '**********'
print(chat.baichuan_secret_key, end='')
captured = capsys.readouterr()
assert captured.out == '**********'
|
def test_baichuan_key_masked_when_passed_via_constructor(capsys: CaptureFixture
    ) ->None:
    """Credentials passed to the constructor must be masked when printed."""
    model = ChatBaichuan(baichuan_api_key='test-api-key',
        baichuan_secret_key='test-secret-key')
    for secret in (model.baichuan_api_key, model.baichuan_secret_key):
        print(secret, end='')
        assert capsys.readouterr().out == '**********'
|
Test initialization with an API key provided via the initializer
|
test_pdfminer_parser
|
"""Test PDFMiner parser."""
_assert_with_parser(PDFMinerParser(), splits_by_page=False)
|
@pytest.mark.requires('pdfminer')
def test_pdfminer_parser() ->None:
    """Smoke-test the PDFMiner parser (emits one document, not per-page splits)."""
    _assert_with_parser(PDFMinerParser(), splits_by_page=False)
|
Test PDFMiner parser.
|
load
|
return list(self.lazy_load())
|
def load(self) ->List[Document]:
    """Eagerly materialize every document produced by ``lazy_load``."""
    return [document for document in self.lazy_load()]
| null |
from_texts
|
try:
from sklearn.feature_extraction.text import TfidfVectorizer
except ImportError:
raise ImportError(
'Could not import scikit-learn, please install with `pip install scikit-learn`.'
)
tfidf_params = tfidf_params or {}
vectorizer = TfidfVectorizer(**tfidf_params)
tfidf_array = vectorizer.fit_transform(texts)
metadatas = metadatas or ({} for _ in texts)
docs = [Document(page_content=t, metadata=m) for t, m in zip(texts, metadatas)]
return cls(vectorizer=vectorizer, docs=docs, tfidf_array=tfidf_array, **kwargs)
|
@classmethod
def from_texts(cls, texts: Iterable[str], metadatas: Optional[Iterable[dict
    ]]=None, tfidf_params: Optional[Dict[str, Any]]=None, **kwargs: Any
    ) ->TFIDFRetriever:
    """Build a TFIDFRetriever by fitting a TF-IDF matrix on ``texts``.

    Args:
        texts: Corpus to index. May be any iterable, including a one-shot
            generator (materialized once below).
        metadatas: Optional per-text metadata dicts, parallel to ``texts``.
        tfidf_params: Keyword arguments forwarded to ``TfidfVectorizer``.
        **kwargs: Extra arguments passed to the retriever constructor.

    Returns:
        A retriever holding the fitted vectorizer, documents, and matrix.

    Raises:
        ImportError: If scikit-learn is not installed.
    """
    try:
        from sklearn.feature_extraction.text import TfidfVectorizer
    except ImportError as e:
        # Chain so the underlying import failure stays in the traceback.
        raise ImportError(
            'Could not import scikit-learn, please install with `pip install scikit-learn`.'
        ) from e
    # Materialize once: a generator would be exhausted by fit_transform,
    # leaving zip(texts, metadatas) empty and docs silently missing.
    texts = list(texts)
    tfidf_params = tfidf_params or {}
    vectorizer = TfidfVectorizer(**tfidf_params)
    tfidf_array = vectorizer.fit_transform(texts)
    metadatas = metadatas or ({} for _ in texts)
    docs = [Document(page_content=t, metadata=m) for t, m in zip(texts,
        metadatas)]
    return cls(vectorizer=vectorizer, docs=docs, tfidf_array=tfidf_array,
        **kwargs)
| null |
file_to_base64
|
"""Convert a file to base64."""
with open(path, 'rb') as f:
return base64.b64encode(f.read()).decode()
|
def file_to_base64(path: str) ->str:
    """Read the file at ``path`` and return its contents as a base64 string."""
    with open(path, 'rb') as handle:
        raw = handle.read()
    return base64.b64encode(raw).decode()
|
Convert a file to base64.
|
test_valid_single_message
|
expected_output = f'Human: {self.human_msg.content}'
self.assertEqual(get_buffer_string([self.human_msg]), expected_output)
|
def test_valid_single_message(self) ->None:
    """A lone human message renders as 'Human: <content>'."""
    self.assertEqual(get_buffer_string([self.human_msg]),
        f'Human: {self.human_msg.content}')
| null |
_import_searx_search
|
from langchain_community.utilities.searx_search import SearxSearchWrapper
return SearxSearchWrapper
|
def _import_searx_search() ->Any:
    """Lazily import and return the SearxSearchWrapper class.

    Deferring the import avoids paying for langchain_community at module
    load time; the wrapper is resolved only when actually requested.
    """
    from langchain_community.utilities.searx_search import SearxSearchWrapper
    return SearxSearchWrapper
| null |
_llm_type
|
return 'llama-2-chat'
|
@property
def _llm_type(self) ->str:
    """Return type of llm."""
    return 'llama-2-chat'
| null |
_parse_input
|
"""Parse the json string into a dict."""
return json.loads(text)
|
def _parse_input(text: str) ->Dict[str, Any]:
"""Parse the json string into a dict."""
return json.loads(text)
|
Parse the json string into a dict.
|
validate_environment
|
values['qianfan_ak'] = convert_to_secret_str(get_from_dict_or_env(values,
'qianfan_ak', 'QIANFAN_AK', default=''))
values['qianfan_sk'] = convert_to_secret_str(get_from_dict_or_env(values,
'qianfan_sk', 'QIANFAN_SK', default=''))
params = {**values.get('init_kwargs', {}), 'model': values['model'],
'stream': values['streaming']}
if values['qianfan_ak'].get_secret_value() != '':
params['ak'] = values['qianfan_ak'].get_secret_value()
if values['qianfan_sk'].get_secret_value() != '':
params['sk'] = values['qianfan_sk'].get_secret_value()
if values['endpoint'] is not None and values['endpoint'] != '':
params['endpoint'] = values['endpoint']
try:
import qianfan
values['client'] = qianfan.ChatCompletion(**params)
except ImportError:
raise ValueError(
'qianfan package not found, please install it with `pip install qianfan`'
)
return values
|
@root_validator()
def validate_environment(cls, values: Dict) ->Dict:
    """Validate credentials and construct the qianfan ChatCompletion client.

    Reads ``qianfan_ak``/``qianfan_sk`` from the values dict or the
    QIANFAN_AK/QIANFAN_SK environment variables, forwards only non-empty
    credentials (and a non-empty endpoint) to the client, and stores the
    client under ``values['client']``.

    Raises:
        ValueError: If the ``qianfan`` package is not installed.
    """
    values['qianfan_ak'] = convert_to_secret_str(get_from_dict_or_env(
        values, 'qianfan_ak', 'QIANFAN_AK', default=''))
    values['qianfan_sk'] = convert_to_secret_str(get_from_dict_or_env(
        values, 'qianfan_sk', 'QIANFAN_SK', default=''))
    params = {**values.get('init_kwargs', {}), 'model': values['model'],
        'stream': values['streaming']}
    # Empty-string secrets mean "not provided": omit them so the SDK can
    # fall back to its own credential resolution.
    if values['qianfan_ak'].get_secret_value() != '':
        params['ak'] = values['qianfan_ak'].get_secret_value()
    if values['qianfan_sk'].get_secret_value() != '':
        params['sk'] = values['qianfan_sk'].get_secret_value()
    if values['endpoint'] is not None and values['endpoint'] != '':
        params['endpoint'] = values['endpoint']
    try:
        import qianfan
        values['client'] = qianfan.ChatCompletion(**params)
    except ImportError as e:
        # Chain the ImportError so the original failure stays visible.
        raise ValueError(
            'qianfan package not found, please install it with `pip install qianfan`'
        ) from e
    return values
| null |
_call
|
"""Call the Llama model and return the output.
Args:
prompt: The prompt to use for generation.
stop: A list of strings to stop generation when encountered.
Returns:
The generated text.
Example:
.. code-block:: python
from langchain_community.llms import LlamaCpp
llm = LlamaCpp(model_path="/path/to/local/llama/model.bin")
llm("This is a prompt.")
"""
if self.streaming:
combined_text_output = ''
for chunk in self._stream(prompt=prompt, stop=stop, run_manager=
run_manager, **kwargs):
combined_text_output += chunk.text
return combined_text_output
else:
params = self._get_parameters(stop)
params = {**params, **kwargs}
result = self.client(prompt=prompt, **params)
return result['choices'][0]['text']
|
def _call(self, prompt: str, stop: Optional[List[str]]=None, run_manager:
Optional[CallbackManagerForLLMRun]=None, **kwargs: Any) ->str:
"""Call the Llama model and return the output.
Args:
prompt: The prompt to use for generation.
stop: A list of strings to stop generation when encountered.
Returns:
The generated text.
Example:
.. code-block:: python
from langchain_community.llms import LlamaCpp
llm = LlamaCpp(model_path="/path/to/local/llama/model.bin")
llm("This is a prompt.")
"""
if self.streaming:
combined_text_output = ''
for chunk in self._stream(prompt=prompt, stop=stop, run_manager=
run_manager, **kwargs):
combined_text_output += chunk.text
return combined_text_output
else:
params = self._get_parameters(stop)
params = {**params, **kwargs}
result = self.client(prompt=prompt, **params)
return result['choices'][0]['text']
|
Call the Llama model and return the output.
Args:
prompt: The prompt to use for generation.
stop: A list of strings to stop generation when encountered.
Returns:
The generated text.
Example:
.. code-block:: python
from langchain_community.llms import LlamaCpp
llm = LlamaCpp(model_path="/path/to/local/llama/model.bin")
llm("This is a prompt.")
|
get_sync
|
"""Get the equivalent sync RunManager.
Returns:
RunManager: The sync RunManager.
"""
|
@abstractmethod
def get_sync(self) ->RunManager:
    """Get the equivalent sync RunManager.

    Implemented by async run managers to expose a synchronous counterpart
    that mirrors this manager's state.

    Returns:
        RunManager: The sync RunManager.
    """
|
Get the equivalent sync RunManager.
Returns:
RunManager: The sync RunManager.
|
test_openai_call
|
"""Test valid call to openai."""
llm = OpenAI()
output = llm('Say something nice:')
assert isinstance(output, str)
|
@pytest.mark.scheduled
def test_openai_call() ->None:
    """Calling the OpenAI LLM with a prompt yields a string."""
    result = OpenAI()('Say something nice:')
    assert isinstance(result, str)
|
Test valid call to openai.
|
from_texts
|
"""Return VectorStore initialized from texts and embeddings."""
if not index_name:
raise ValueError('`index_name` is required')
if not client:
client = TigrisClient()
store = cls(client, embedding, index_name)
store.add_texts(texts=texts, metadatas=metadatas, ids=ids)
return store
|
@classmethod
def from_texts(cls, texts: List[str], embedding: Embeddings, metadatas:
    Optional[List[dict]]=None, ids: Optional[List[str]]=None, client:
    Optional[TigrisClient]=None, index_name: Optional[str]=None, **kwargs: Any
    ) ->Tigris:
    """Return VectorStore initialized from texts and embeddings."""
    if not index_name:
        raise ValueError('`index_name` is required')
    tigris_client = client or TigrisClient()
    store = cls(tigris_client, embedding, index_name)
    store.add_texts(texts=texts, metadatas=metadatas, ids=ids)
    return store
|
Return VectorStore initialized from texts and embeddings.
|
process_image
|
try:
import pytesseract
from PIL import Image
except ImportError:
raise ImportError(
'`pytesseract` or `Pillow` package not found, please run `pip install pytesseract Pillow`'
)
response = self.confluence.request(path=link, absolute=True)
text = ''
if response.status_code != 200 or response.content == b'' or response.content is None:
return text
try:
image = Image.open(BytesIO(response.content))
except OSError:
return text
return pytesseract.image_to_string(image, lang=ocr_languages)
|
def process_image(self, link: str, ocr_languages: Optional[str]=None) ->str:
    """OCR the image at ``link`` (a Confluence attachment URL) into text.

    Returns an empty string when the download fails, the payload is empty,
    or the bytes are not a decodable image; otherwise runs pytesseract.

    Raises:
        ImportError: If pytesseract or Pillow is not installed.
    """
    try:
        import pytesseract
        from PIL import Image
    except ImportError:
        raise ImportError(
            '`pytesseract` or `Pillow` package not found, please run `pip install pytesseract Pillow`'
        )
    # absolute=True: `link` is a full URL, not a path relative to the API base.
    response = self.confluence.request(path=link, absolute=True)
    text = ''
    if (response.status_code != 200 or response.content == b'' or response.
        content is None):
        return text
    try:
        image = Image.open(BytesIO(response.content))
    except OSError:
        # Payload was not a readable image; treat as containing no text.
        return text
    return pytesseract.image_to_string(image, lang=ocr_languages)
| null |
_llm_type
|
"""Return type of llm."""
return 'bananadev'
|
@property
def _llm_type(self) ->str:
    """Return type of llm."""
    # Constant identifier for the Banana (banana.dev) backend.
    return 'bananadev'
|
Return type of llm.
|
_read_json
|
"""Read JSON data from a zip subfile."""
with zip_file.open(file_path, 'r') as f:
data = json.load(f)
return data
|
def _read_json(self, zip_file: zipfile.ZipFile, file_path: str) ->List[dict]:
"""Read JSON data from a zip subfile."""
with zip_file.open(file_path, 'r') as f:
data = json.load(f)
return data
|
Read JSON data from a zip subfile.
|
_validate_func
|
if isinstance(func, Operator) and self.allowed_operators is not None:
if func not in self.allowed_operators:
raise ValueError(
f'Received disallowed operator {func}. Allowed comparators are {self.allowed_operators}'
)
if isinstance(func, Comparator) and self.allowed_comparators is not None:
if func not in self.allowed_comparators:
raise ValueError(
f'Received disallowed comparator {func}. Allowed comparators are {self.allowed_comparators}'
)
|
def _validate_func(self, func: Union[Operator, Comparator]) ->None:
if isinstance(func, Operator) and self.allowed_operators is not None:
if func not in self.allowed_operators:
raise ValueError(
f'Received disallowed operator {func}. Allowed comparators are {self.allowed_operators}'
)
if isinstance(func, Comparator) and self.allowed_comparators is not None:
if func not in self.allowed_comparators:
raise ValueError(
f'Received disallowed comparator {func}. Allowed comparators are {self.allowed_comparators}'
)
| null |
get_lc_namespace
|
"""Get the namespace of the langchain object."""
return ['langchain', 'llms', 'replicate']
|
@classmethod
def get_lc_namespace(cls) ->List[str]:
    """Return the langchain namespace path for this object."""
    namespace = ['langchain', 'llms', 'replicate']
    return namespace
|
Get the namespace of the langchain object.
|
resolve_prompt_inputs
|
return {'question': self.question, **{f'idea_{i + 1}': idea for i, idea in
enumerate(self.ideas)}, 'critique': self.critique}
|
def resolve_prompt_inputs(self) ->Dict[str, Any]:
    """Flatten the question, numbered ideas, and critique into prompt variables."""
    inputs: Dict[str, Any] = {'question': self.question}
    for index, idea in enumerate(self.ideas, start=1):
        inputs[f'idea_{index}'] = idea
    inputs['critique'] = self.critique
    return inputs
| null |
__getitem__
|
return self._children[item]
|
def __getitem__(self, item: Union[int, slice]) ->Union[Iterator[T], Tuple[
    Iterator[T], ...]]:
    """Index or slice into the child iterators.

    An int returns a single child iterator; a slice returns a tuple of
    them, mirroring indexing on ``self._children`` directly.
    """
    return self._children[item]
| null |
test_openai_streaming_best_of_error
|
"""Test validation for streaming fails if best_of is not 1."""
with pytest.raises(ValueError):
_get_llm(best_of=2, streaming=True)
|
def test_openai_streaming_best_of_error() ->None:
    """Streaming combined with best_of != 1 must fail validation."""
    with pytest.raises(ValueError):
        _get_llm(best_of=2, streaming=True)
|
Test validation for streaming fails if best_of is not 1.
|
upsert_messages
|
"""Update the cosmosdb item."""
if not self._container:
raise ValueError('Container not initialized')
self._container.upsert_item(body={'id': self.session_id, 'user_id': self.
user_id, 'messages': messages_to_dict(self.messages)})
|
def upsert_messages(self) ->None:
    """Persist this session's message history to the Cosmos DB container."""
    if not self._container:
        raise ValueError('Container not initialized')
    item_body = {'id': self.session_id, 'user_id': self.user_id,
        'messages': messages_to_dict(self.messages)}
    self._container.upsert_item(body=item_body)
|
Update the cosmosdb item.
|
test_from_existing_index_with_namespaces
|
"""Test that namespaces are properly handled."""
texts_1 = ['foo', 'bar', 'baz']
metadatas = [{'page': i} for i in range(len(texts_1))]
Pinecone.from_texts(texts_1, embedding_openai, index_name=index_name,
metadatas=metadatas, namespace=f'{index_name}-1')
texts_2 = ['foo2', 'bar2', 'baz2']
metadatas = [{'page': i} for i in range(len(texts_2))]
Pinecone.from_texts(texts_2, embedding_openai, index_name=index_name,
metadatas=metadatas, namespace=f'{index_name}-2')
docsearch = Pinecone.from_existing_index(index_name=index_name, embedding=
embedding_openai, namespace=f'{index_name}-1')
output = docsearch.similarity_search('foo', k=20, namespace=f'{index_name}-1')
page_contents = sorted(set([o.page_content for o in output]))
assert all(content in ['foo', 'bar', 'baz'] for content in page_contents)
assert all(content not in ['foo2', 'bar2', 'baz2'] for content in page_contents
)
|
def test_from_existing_index_with_namespaces(self, embedding_openai:
    OpenAIEmbeddings) ->None:
    """Test that namespaces are properly handled."""
    # Seed two disjoint corpora into two namespaces of the same index.
    # NOTE(review): `index_name` is a module-level value — confirm fixture setup.
    texts_1 = ['foo', 'bar', 'baz']
    metadatas = [{'page': i} for i in range(len(texts_1))]
    Pinecone.from_texts(texts_1, embedding_openai, index_name=index_name,
        metadatas=metadatas, namespace=f'{index_name}-1')
    texts_2 = ['foo2', 'bar2', 'baz2']
    metadatas = [{'page': i} for i in range(len(texts_2))]
    Pinecone.from_texts(texts_2, embedding_openai, index_name=index_name,
        metadatas=metadatas, namespace=f'{index_name}-2')
    # Query namespace 1 with a k larger than its corpus; nothing from
    # namespace 2 may leak into the results.
    docsearch = Pinecone.from_existing_index(index_name=index_name,
        embedding=embedding_openai, namespace=f'{index_name}-1')
    output = docsearch.similarity_search('foo', k=20, namespace=
        f'{index_name}-1')
    page_contents = sorted(set([o.page_content for o in output]))
    assert all(content in ['foo', 'bar', 'baz'] for content in page_contents)
    assert all(content not in ['foo2', 'bar2', 'baz2'] for content in
        page_contents)
|
Test that namespaces are properly handled.
|
get_relevant_documents
|
assert isinstance(self, FakeRetrieverV1)
return [Document(page_content=query, metadata=where_filter or {})]
|
def get_relevant_documents(self, query: str, where_filter: Optional[Dict[
    str, object]]=None) ->List[Document]:
    """Fake retrieval: echo the query as one Document, filter as its metadata."""
    assert isinstance(self, FakeRetrieverV1)
    doc = Document(page_content=query, metadata=where_filter or {})
    return [doc]
| null |
save_history
|
input.pop('response')
graph.query(
"""MERGE (u:User {id: $user_id})
WITH u
OPTIONAL MATCH (u)-[:HAS_SESSION]->(s:Session{id: $session_id}),
(s)-[l:LAST_MESSAGE]->(last_message)
FOREACH (_ IN CASE WHEN last_message IS NULL THEN [1] ELSE [] END |
CREATE (u)-[:HAS_SESSION]->(s1:Session {id:$session_id}),
(s1)-[:LAST_MESSAGE]->(q:Question {text:$question, cypher:$query, date:datetime()}),
(q)-[:HAS_ANSWER]->(:Answer {text:$output}))
FOREACH (_ IN CASE WHEN last_message IS NOT NULL THEN [1] ELSE [] END |
CREATE (last_message)-[:NEXT]->(q:Question
{text:$question, cypher:$query, date:datetime()}),
(q)-[:HAS_ANSWER]->(:Answer {text:$output}),
(s)-[:LAST_MESSAGE]->(q)
DELETE l) """
, params=input)
return input['output']
|
def save_history(input):
    """Append the latest Q&A turn to the user's session graph in Neo4j.

    Maintains a per-session linked list of Question nodes: on the first
    message it creates the Session and its LAST_MESSAGE pointer; otherwise
    it chains the new Question off the previous last message via NEXT and
    re-points LAST_MESSAGE.

    Args:
        input: Dict providing $user_id, $session_id, $question, $query and
            $output for the Cypher below, plus a 'response' key that is
            removed before the dict is passed as query parameters.

    Returns:
        The generated answer text (``input['output']``).
    """
    # NOTE(review): `input` shadows the builtin and is mutated in place
    # ('response' is popped) — callers should not reuse the dict afterwards.
    input.pop('response')
    graph.query(
        """MERGE (u:User {id: $user_id})
WITH u
OPTIONAL MATCH (u)-[:HAS_SESSION]->(s:Session{id: $session_id}),
               (s)-[l:LAST_MESSAGE]->(last_message)
FOREACH (_ IN CASE WHEN last_message IS NULL THEN [1] ELSE [] END |
CREATE (u)-[:HAS_SESSION]->(s1:Session {id:$session_id}),
    (s1)-[:LAST_MESSAGE]->(q:Question {text:$question, cypher:$query, date:datetime()}),
                (q)-[:HAS_ANSWER]->(:Answer {text:$output}))
FOREACH (_ IN CASE WHEN last_message IS NOT NULL THEN [1] ELSE [] END |
CREATE (last_message)-[:NEXT]->(q:Question 
    {text:$question, cypher:$query, date:datetime()}),
    (q)-[:HAS_ANSWER]->(:Answer {text:$output}),
    (s)-[:LAST_MESSAGE]->(q)
DELETE l) """
        , params=input)
    return input['output']
| null |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.