method_name
stringlengths 1
78
| method_body
stringlengths 3
9.66k
| full_code
stringlengths 31
10.7k
| docstring
stringlengths 4
4.74k
⌀ |
|---|---|---|---|
compress_documents
|
"""Compress page content of raw documents."""
compressed_docs = []
for doc in documents:
_input = self.get_input(query, doc)
output = self.llm_chain.predict_and_parse(**_input, callbacks=callbacks)
if len(output) == 0:
continue
compressed_docs.append(Document(page_content=output, metadata=doc.metadata)
)
return compressed_docs
|
def compress_documents(self, documents: Sequence[Document], query: str,
    callbacks: Optional[Callbacks]=None) ->Sequence[Document]:
    """Compress the page content of each raw document via the LLM chain.

    Runs the compression chain once per document; documents whose
    compressed output comes back empty are omitted from the result.
    """
    results = []
    for document in documents:
        chain_input = self.get_input(query, document)
        compressed = self.llm_chain.predict_and_parse(**chain_input,
            callbacks=callbacks)
        if len(compressed) == 0:
            # Nothing relevant survived compression: drop the document.
            continue
        results.append(Document(page_content=compressed, metadata=
            document.metadata))
    return results
|
Compress page content of raw documents.
|
__init__
|
"""
Initialize the cache with all relevant parameters.
Args:
collection_name (str): name of the Astra DB collection to create/use.
token (Optional[str]): API token for Astra DB usage.
api_endpoint (Optional[str]): full URL to the API endpoint,
such as "https://<DB-ID>-us-east1.apps.astra.datastax.com".
astra_db_client (Optional[Any]): *alternative to token+api_endpoint*,
you can pass an already-created 'astrapy.db.AstraDB' instance.
namespace (Optional[str]): namespace (aka keyspace) where the
collection is created. Defaults to the database's "default namespace".
embedding (Embeddings): Embedding provider for semantic
encoding and search.
metric: the function to use for evaluating similarity of text embeddings.
Defaults to 'cosine' (alternatives: 'euclidean', 'dot_product')
similarity_threshold (float, optional): the minimum similarity
for accepting a (semantic-search) match.
The default score threshold is tuned to the default metric.
Tune it carefully yourself if switching to another distance metric.
"""
try:
from astrapy.db import AstraDB as LibAstraDB
except (ImportError, ModuleNotFoundError):
raise ImportError(
'Could not import a recent astrapy python package. Please install it with `pip install --upgrade astrapy`.'
)
if astra_db_client is not None:
if token is not None or api_endpoint is not None:
raise ValueError(
"You cannot pass 'astra_db_client' to AstraDB if passing 'token' and 'api_endpoint'."
)
self.embedding = embedding
self.metric = metric
self.similarity_threshold = similarity_threshold
@lru_cache(maxsize=ASTRA_DB_SEMANTIC_CACHE_EMBEDDING_CACHE_SIZE)
def _cache_embedding(text: str) ->List[float]:
return self.embedding.embed_query(text=text)
self._get_embedding = _cache_embedding
self.embedding_dimension = self._get_embedding_dimension()
self.collection_name = collection_name
self.token = token
self.api_endpoint = api_endpoint
self.namespace = namespace
if astra_db_client is not None:
self.astra_db = astra_db_client
else:
self.astra_db = LibAstraDB(token=self.token, api_endpoint=self.
api_endpoint, namespace=self.namespace)
self.collection = self.astra_db.create_collection(collection_name=self.
collection_name, dimension=self.embedding_dimension, metric=self.metric)
|
def __init__(self, *, collection_name: str=
    ASTRA_DB_CACHE_DEFAULT_COLLECTION_NAME, token: Optional[str]=None,
    api_endpoint: Optional[str]=None, astra_db_client: Optional[Any]=None,
    namespace: Optional[str]=None, embedding: Embeddings, metric: Optional[
    str]=None, similarity_threshold: float=
    ASTRA_DB_SEMANTIC_CACHE_DEFAULT_THRESHOLD):
    """
    Initialize the cache with all relevant parameters.
    Args:
        collection_name (str): name of the Astra DB collection to create/use.
        token (Optional[str]): API token for Astra DB usage.
        api_endpoint (Optional[str]): full URL to the API endpoint,
            such as "https://<DB-ID>-us-east1.apps.astra.datastax.com".
        astra_db_client (Optional[Any]): *alternative to token+api_endpoint*,
            you can pass an already-created 'astrapy.db.AstraDB' instance.
        namespace (Optional[str]): namespace (aka keyspace) where the
            collection is created. Defaults to the database's "default namespace".
        embedding (Embeddings): Embedding provider for semantic
            encoding and search.
        metric: the function to use for evaluating similarity of text embeddings.
            Defaults to 'cosine' (alternatives: 'euclidean', 'dot_product')
        similarity_threshold (float, optional): the minimum similarity
            for accepting a (semantic-search) match.
            The default score threshold is tuned to the default metric.
            Tune it carefully yourself if switching to another distance metric.
    Raises:
        ImportError: if the astrapy package is not installed.
        ValueError: if both 'astra_db_client' and 'token'/'api_endpoint'
            are supplied.
    """
    try:
        from astrapy.db import AstraDB as LibAstraDB
    except (ImportError, ModuleNotFoundError):
        raise ImportError(
            'Could not import a recent astrapy python package. Please install it with `pip install --upgrade astrapy`.'
            )
    # A pre-built client is mutually exclusive with token/api_endpoint.
    if astra_db_client is not None:
        if token is not None or api_endpoint is not None:
            raise ValueError(
                "You cannot pass 'astra_db_client' to AstraDB if passing 'token' and 'api_endpoint'."
                )
    # Store embedding settings first: the dimension probe below goes
    # through self._get_embedding (NOTE(review): presumably it embeds a
    # sample text to measure vector length — confirm in _get_embedding_dimension).
    self.embedding = embedding
    self.metric = metric
    self.similarity_threshold = similarity_threshold
    # Per-instance bounded LRU memoization of query embeddings.
    @lru_cache(maxsize=ASTRA_DB_SEMANTIC_CACHE_EMBEDDING_CACHE_SIZE)
    def _cache_embedding(text: str) ->List[float]:
        return self.embedding.embed_query(text=text)
    self._get_embedding = _cache_embedding
    self.embedding_dimension = self._get_embedding_dimension()
    self.collection_name = collection_name
    self.token = token
    self.api_endpoint = api_endpoint
    self.namespace = namespace
    # Reuse the provided client when given; otherwise build one here.
    if astra_db_client is not None:
        self.astra_db = astra_db_client
    else:
        self.astra_db = LibAstraDB(token=self.token, api_endpoint=self.
            api_endpoint, namespace=self.namespace)
    # Creating the collection is idempotent on the Astra side per astrapy's
    # create_collection contract — NOTE(review): confirm for the pinned version.
    self.collection = self.astra_db.create_collection(collection_name=self.
        collection_name, dimension=self.embedding_dimension, metric=self.metric
        )
|
Initialize the cache with all relevant parameters.
Args:
collection_name (str): name of the Astra DB collection to create/use.
token (Optional[str]): API token for Astra DB usage.
api_endpoint (Optional[str]): full URL to the API endpoint,
such as "https://<DB-ID>-us-east1.apps.astra.datastax.com".
astra_db_client (Optional[Any]): *alternative to token+api_endpoint*,
you can pass an already-created 'astrapy.db.AstraDB' instance.
namespace (Optional[str]): namespace (aka keyspace) where the
collection is created. Defaults to the database's "default namespace".
embedding (Embeddings): Embedding provider for semantic
encoding and search.
metric: the function to use for evaluating similarity of text embeddings.
Defaults to 'cosine' (alternatives: 'euclidean', 'dot_product')
similarity_threshold (float, optional): the minimum similarity
for accepting a (semantic-search) match.
The default score threshold is tuned to the default metric.
Tune it carefully yourself if switching to another distance metric.
|
__init__
|
"""Initializes private fields."""
super().__init__(**data)
api_key = cast(SecretStr, self.arcee_api_key)
self._client = ArceeWrapper(arcee_api_key=api_key, arcee_api_url=self.
arcee_api_url, arcee_api_version=self.arcee_api_version, model_kwargs=
self.model_kwargs, model_name=self.model)
|
def __init__(self, **data: Any) ->None:
    """Initialize the model and build the private Arcee API client."""
    super().__init__(**data)
    secret_key = cast(SecretStr, self.arcee_api_key)
    self._client = ArceeWrapper(
        arcee_api_key=secret_key,
        arcee_api_url=self.arcee_api_url,
        arcee_api_version=self.arcee_api_version,
        model_kwargs=self.model_kwargs,
        model_name=self.model,
    )
|
Initializes private fields.
|
_import_pubmed_tool
|
from langchain_community.tools.pubmed.tool import PubmedQueryRun
return PubmedQueryRun
|
def _import_pubmed_tool() ->Any:
    """Deferred import hook for the PubMed query tool."""
    from langchain_community.tools.pubmed import tool as pubmed_tool
    return pubmed_tool.PubmedQueryRun
| null |
lazy_parse
|
"""Lazily parse the blob."""
import pypdfium2
with blob.as_bytes_io() as file_path:
pdf_reader = pypdfium2.PdfDocument(file_path, autoclose=True)
try:
for page_number, page in enumerate(pdf_reader):
text_page = page.get_textpage()
content = text_page.get_text_range()
text_page.close()
content += '\n' + self._extract_images_from_page(page)
page.close()
metadata = {'source': blob.source, 'page': page_number}
yield Document(page_content=content, metadata=metadata)
finally:
pdf_reader.close()
|
def lazy_parse(self, blob: Blob) ->Iterator[Document]:
    """Lazily parse the blob, yielding one Document per PDF page."""
    import pypdfium2
    with blob.as_bytes_io() as stream:
        reader = pypdfium2.PdfDocument(stream, autoclose=True)
        try:
            for index, pdf_page in enumerate(reader):
                # Extract text, then close the text page before image OCR.
                txt_page = pdf_page.get_textpage()
                page_text = txt_page.get_text_range()
                txt_page.close()
                image_text = self._extract_images_from_page(pdf_page)
                pdf_page.close()
                yield Document(page_content=page_text + '\n' + image_text,
                    metadata={'source': blob.source, 'page': index})
        finally:
            # Always release the native document handle.
            reader.close()
|
Lazily parse the blob.
|
chain
|
...
|
@overload
def chain(func: Callable[[Input], Iterator[Output]]) ->Runnable[Input, Output]:
    # Overload stub: a generator function is wrapped into a Runnable.
    ...
| null |
validate_environment
|
"""Validate that api key and python package exists in environment."""
nebula_service_url = get_from_dict_or_env(values, 'nebula_service_url',
'NEBULA_SERVICE_URL', DEFAULT_NEBULA_SERVICE_URL)
nebula_service_path = get_from_dict_or_env(values, 'nebula_service_path',
'NEBULA_SERVICE_PATH', DEFAULT_NEBULA_SERVICE_PATH)
nebula_api_key = convert_to_secret_str(get_from_dict_or_env(values,
'nebula_api_key', 'NEBULA_API_KEY', None))
if nebula_service_url.endswith('/'):
nebula_service_url = nebula_service_url[:-1]
if not nebula_service_path.startswith('/'):
nebula_service_path = '/' + nebula_service_path
values['nebula_service_url'] = nebula_service_url
values['nebula_service_path'] = nebula_service_path
values['nebula_api_key'] = nebula_api_key
return values
|
@root_validator()
def validate_environment(cls, values: Dict) ->Dict:
    """Validate that api key and python package exists in environment."""
    service_url = get_from_dict_or_env(values, 'nebula_service_url',
        'NEBULA_SERVICE_URL', DEFAULT_NEBULA_SERVICE_URL)
    service_path = get_from_dict_or_env(values, 'nebula_service_path',
        'NEBULA_SERVICE_PATH', DEFAULT_NEBULA_SERVICE_PATH)
    api_key = convert_to_secret_str(get_from_dict_or_env(values,
        'nebula_api_key', 'NEBULA_API_KEY', None))
    # Normalize: drop a single trailing slash from the host and ensure
    # the path carries exactly one leading slash.
    if service_url.endswith('/'):
        service_url = service_url[:-1]
    if not service_path.startswith('/'):
        service_path = '/' + service_path
    values['nebula_service_url'] = service_url
    values['nebula_service_path'] = service_path
    values['nebula_api_key'] = api_key
    return values
|
Validate that api key and python package exists in environment.
|
_import_playwright_NavigateBackTool
|
from langchain_community.tools.playwright import NavigateBackTool
return NavigateBackTool
|
def _import_playwright_NavigateBackTool() ->Any:
    """Deferred import hook for the Playwright navigate-back tool."""
    from langchain_community.tools import playwright as playwright_tools
    return playwright_tools.NavigateBackTool
| null |
on_tool_error
|
"""Run when tool errors."""
self.step += 1
self.errors += 1
|
def on_tool_error(self, error: BaseException, **kwargs: Any) ->None:
    """Record a tool failure: advance the step counter and count the error."""
    self.step = self.step + 1
    self.errors = self.errors + 1
|
Run when tool errors.
|
_get_schema_with_defaults
|
from langchain_community.vectorstores.redis.schema import RedisModel, read_schema
schema = RedisModel()
if index_schema:
schema_values = read_schema(index_schema)
schema = RedisModel(**schema_values)
schema.add_content_field()
try:
schema.content_vector
if vector_schema:
logger.warning(
'`vector_schema` is ignored since content_vector is ' +
'overridden in `index_schema`.')
except ValueError:
vector_field = self.DEFAULT_VECTOR_SCHEMA.copy()
if vector_schema:
vector_field.update(vector_schema)
schema.add_vector_field(vector_field)
return schema
|
def _get_schema_with_defaults(self, index_schema: Optional[Union[Dict[str,
    str], str, os.PathLike]]=None, vector_schema: Optional[Dict[str, Union[
    str, int]]]=None) ->'RedisModel':
    """Build a RedisModel, filling in defaults where the user gave none.

    Starts from ``index_schema`` when provided, always ensures a content
    field, and adds a default (optionally user-tuned) vector field unless
    the user schema already defines ``content_vector``.
    """
    from langchain_community.vectorstores.redis.schema import RedisModel, read_schema
    if index_schema:
        model = RedisModel(**read_schema(index_schema))
    else:
        model = RedisModel()
    model.add_content_field()
    try:
        # Accessing content_vector raises ValueError when it is undefined.
        model.content_vector
        if vector_schema:
            logger.warning(
                '`vector_schema` is ignored since content_vector is ' +
                'overridden in `index_schema`.')
    except ValueError:
        # No user-defined content vector: fall back to the default,
        # merged with any caller-provided overrides.
        default_field = self.DEFAULT_VECTOR_SCHEMA.copy()
        if vector_schema:
            default_field.update(vector_schema)
        model.add_vector_field(default_field)
    return model
| null |
critique_prompt
|
return ChatPromptTemplate.from_strings(self.get_prompt_strings('critique'))
|
def critique_prompt(self) ->ChatPromptTemplate:
    """Build the chat prompt used for the critique step."""
    prompt_strings = self.get_prompt_strings('critique')
    return ChatPromptTemplate.from_strings(prompt_strings)
| null |
_lazy_import_pexpect
|
"""Import pexpect only when needed."""
if platform.system() == 'Windows':
raise ValueError(
'Persistent bash processes are not yet supported on Windows.')
try:
import pexpect
except ImportError:
raise ImportError(
'pexpect required for persistent bash processes. To install, run `pip install pexpect`.'
)
return pexpect
|
@staticmethod
def _lazy_import_pexpect() ->pexpect:
    """Import pexpect only when needed."""
    # pexpect relies on Unix ptys, which do not exist on Windows.
    if platform.system() == 'Windows':
        raise ValueError(
            'Persistent bash processes are not yet supported on Windows.')
    try:
        import importlib
        pexpect = importlib.import_module('pexpect')
    except ImportError:
        raise ImportError(
            'pexpect required for persistent bash processes. To install, run `pip install pexpect`.'
            )
    return pexpect
|
Import pexpect only when needed.
|
_send_request
|
"""Sends request to the oci data science model deployment endpoint.
Args:
data (Json serializable):
data need to be sent to the endpoint.
endpoint (str):
The model HTTP endpoint.
header (dict, optional):
A dictionary of HTTP headers to send to the specified url.
Defaults to {}.
kwargs:
Additional ``**kwargs`` to pass to requests.post.
Raises:
Exception:
Raise when invoking fails.
Returns:
A JSON representation of a requests.Response object.
"""
if not header:
header = {}
header['Content-Type'] = header.pop('content_type', DEFAULT_CONTENT_TYPE_JSON
) or DEFAULT_CONTENT_TYPE_JSON
request_kwargs = {'json': data}
request_kwargs['headers'] = header
timeout = kwargs.pop('timeout', DEFAULT_TIME_OUT)
attempts = 0
while attempts < 2:
request_kwargs['auth'] = self.auth.get('signer')
response = requests.post(endpoint, timeout=timeout, **request_kwargs,
**kwargs)
if response.status_code == 401:
self._refresh_signer()
attempts += 1
continue
break
try:
response.raise_for_status()
response_json = response.json()
except Exception:
logger.error('DEBUG INFO: request_kwargs=%s, status_code=%s, content=%s',
request_kwargs, response.status_code, response.content)
raise
return response_json
|
def _send_request(self, data: Any, endpoint: str, header: Optional[dict]=None,
    **kwargs: Any) ->Dict:
    """Sends request to the oci data science model deployment endpoint.
    Args:
        data (Json serializable):
            data need to be sent to the endpoint.
        endpoint (str):
            The model HTTP endpoint.
        header (dict, optional):
            A dictionary of HTTP headers to send to the specified url.
            Defaults to None (an empty header).
        kwargs:
            Additional ``**kwargs`` to pass to requests.post.
    Raises:
        Exception:
            Raise when invoking fails.
    Returns:
        A JSON representation of a requests.Response object.
    """
    # Default to None instead of a mutable `{}` default (shared across
    # calls), and copy the caller's dict so popping 'content_type' below
    # never mutates the argument the caller still holds.
    header = dict(header) if header else {}
    header['Content-Type'] = header.pop('content_type',
        DEFAULT_CONTENT_TYPE_JSON) or DEFAULT_CONTENT_TYPE_JSON
    request_kwargs = {'json': data}
    request_kwargs['headers'] = header
    timeout = kwargs.pop('timeout', DEFAULT_TIME_OUT)
    attempts = 0
    # On a 401, refresh the signer once and retry; any other status
    # (or the second 401) falls through to raise_for_status below.
    while attempts < 2:
        request_kwargs['auth'] = self.auth.get('signer')
        response = requests.post(endpoint, timeout=timeout, **
            request_kwargs, **kwargs)
        if response.status_code == 401:
            self._refresh_signer()
            attempts += 1
            continue
        break
    try:
        response.raise_for_status()
        response_json = response.json()
    except Exception:
        # Log the failing request context before re-raising.
        logger.error(
            'DEBUG INFO: request_kwargs=%s, status_code=%s, content=%s',
            request_kwargs, response.status_code, response.content)
        raise
    return response_json
|
Sends request to the oci data science model deployment endpoint.
Args:
data (Json serializable):
data need to be sent to the endpoint.
endpoint (str):
The model HTTP endpoint.
header (dict, optional):
A dictionary of HTTP headers to send to the specified url.
Defaults to {}.
kwargs:
Additional ``**kwargs`` to pass to requests.post.
Raises:
Exception:
Raise when invoking fails.
Returns:
A JSON representation of a requests.Response object.
|
docai_parse
|
"""Runs Google Document AI PDF Batch Processing on a list of blobs.
Args:
blobs: a list of blobs to be parsed
gcs_output_path: a path (folder) on GCS to store results
processor_name: name of a Document AI processor.
batch_size: amount of documents per batch
enable_native_pdf_parsing: a config option for the parser
field_mask: a comma-separated list of which fields to include in the
Document AI response.
suggested: "text,pages.pageNumber,pages.layout"
Document AI has a 1000 file limit per batch, so batches larger than that need
to be split into multiple requests.
Batch processing is an async long-running operation
and results are stored in a output GCS bucket.
"""
try:
from google.cloud import documentai
from google.cloud.documentai_v1.types import OcrConfig, ProcessOptions
except ImportError as exc:
raise ImportError(
'documentai package not found, please install it with `pip install google-cloud-documentai`'
) from exc
output_path = gcs_output_path or self._gcs_output_path
if output_path is None:
raise ValueError(
'An output path on Google Cloud Storage should be provided.')
processor_name = processor_name or self._processor_name
if processor_name is None:
raise ValueError('A Document AI processor name should be provided.')
operations = []
for batch in batch_iterate(size=batch_size, iterable=blobs):
input_config = documentai.BatchDocumentsInputConfig(gcs_documents=
documentai.GcsDocuments(documents=[documentai.GcsDocument(gcs_uri=
blob.path, mime_type=blob.mimetype or 'application/pdf') for blob in
batch]))
output_config = documentai.DocumentOutputConfig(gcs_output_config=
documentai.DocumentOutputConfig.GcsOutputConfig(gcs_uri=output_path,
field_mask=field_mask))
process_options = ProcessOptions(ocr_config=OcrConfig(
enable_native_pdf_parsing=enable_native_pdf_parsing)
) if enable_native_pdf_parsing else None
operations.append(self._client.batch_process_documents(documentai.
BatchProcessRequest(name=processor_name, input_documents=
input_config, document_output_config=output_config, process_options
=process_options, skip_human_review=True)))
return operations
|
def docai_parse(self, blobs: Sequence[Blob], *, gcs_output_path: Optional[
    str]=None, processor_name: Optional[str]=None, batch_size: int=1000,
    enable_native_pdf_parsing: bool=True, field_mask: Optional[str]=None
    ) ->List['Operation']:
    """Runs Google Document AI PDF Batch Processing on a list of blobs.
    Args:
        blobs: a list of blobs to be parsed
        gcs_output_path: a path (folder) on GCS to store results
        processor_name: name of a Document AI processor.
        batch_size: amount of documents per batch
        enable_native_pdf_parsing: a config option for the parser
        field_mask: a comma-separated list of which fields to include in the
            Document AI response.
            suggested: "text,pages.pageNumber,pages.layout"
    Document AI has a 1000 file limit per batch, so batches larger than that need
    to be split into multiple requests.
    Batch processing is an async long-running operation
    and results are stored in a output GCS bucket.
    Returns:
        A list of started long-running batch operations, one per batch.
    Raises:
        ImportError: if google-cloud-documentai is not installed.
        ValueError: if no output path or processor name is available.
    """
    try:
        from google.cloud import documentai
        from google.cloud.documentai_v1.types import OcrConfig, ProcessOptions
    except ImportError as exc:
        raise ImportError(
            'documentai package not found, please install it with `pip install google-cloud-documentai`'
            ) from exc
    # Explicit arguments take precedence over the values captured at
    # parser construction time.
    output_path = gcs_output_path or self._gcs_output_path
    if output_path is None:
        raise ValueError(
            'An output path on Google Cloud Storage should be provided.')
    processor_name = processor_name or self._processor_name
    if processor_name is None:
        raise ValueError('A Document AI processor name should be provided.')
    operations = []
    for batch in batch_iterate(size=batch_size, iterable=blobs):
        # One GcsDocument per blob; a missing mime type defaults to PDF.
        input_config = documentai.BatchDocumentsInputConfig(gcs_documents=
            documentai.GcsDocuments(documents=[documentai.GcsDocument(
            gcs_uri=blob.path, mime_type=blob.mimetype or 'application/pdf'
            ) for blob in batch]))
        output_config = documentai.DocumentOutputConfig(gcs_output_config=
            documentai.DocumentOutputConfig.GcsOutputConfig(gcs_uri=
            output_path, field_mask=field_mask))
        # OCR options are only attached when native PDF parsing is on.
        process_options = ProcessOptions(ocr_config=OcrConfig(
            enable_native_pdf_parsing=enable_native_pdf_parsing)
            ) if enable_native_pdf_parsing else None
        operations.append(self._client.batch_process_documents(documentai.
            BatchProcessRequest(name=processor_name, input_documents=
            input_config, document_output_config=output_config,
            process_options=process_options, skip_human_review=True)))
    return operations
|
Runs Google Document AI PDF Batch Processing on a list of blobs.
Args:
blobs: a list of blobs to be parsed
gcs_output_path: a path (folder) on GCS to store results
processor_name: name of a Document AI processor.
batch_size: amount of documents per batch
enable_native_pdf_parsing: a config option for the parser
field_mask: a comma-separated list of which fields to include in the
Document AI response.
suggested: "text,pages.pageNumber,pages.layout"
Document AI has a 1000 file limit per batch, so batches larger than that need
to be split into multiple requests.
Batch processing is an async long-running operation
and results are stored in a output GCS bucket.
|
from_texts
|
"""Return VectorStore initialized from texts and embeddings."""
if not client:
raise ValueError('Supabase client is required.')
if not table_name:
raise ValueError('Supabase document table_name is required.')
embeddings = embedding.embed_documents(texts)
ids = [str(uuid.uuid4()) for _ in texts]
docs = cls._texts_to_documents(texts, metadatas)
cls._add_vectors(client, table_name, embeddings, docs, ids, chunk_size)
return cls(client=client, embedding=embedding, table_name=table_name,
query_name=query_name, chunk_size=chunk_size)
|
@classmethod
def from_texts(cls: Type['SupabaseVectorStore'], texts: List[str],
    embedding: Embeddings, metadatas: Optional[List[dict]]=None, client:
    Optional[supabase.client.Client]=None, table_name: Optional[str]=
    'documents', query_name: Union[str, None]='match_documents', chunk_size:
    int=500, ids: Optional[List[str]]=None, **kwargs: Any
    ) ->'SupabaseVectorStore':
    """Return VectorStore initialized from texts and embeddings.

    Args:
        texts: the documents' page contents.
        embedding: provider used to embed the texts.
        metadatas: optional per-text metadata dicts.
        client: an initialized Supabase client (required).
        table_name: target Supabase table (required).
        query_name: name of the match RPC used for similarity search.
        chunk_size: number of rows per insert batch.
        ids: optional explicit document ids; random UUIDs are
            generated when omitted.

    Raises:
        ValueError: if ``client`` or ``table_name`` is missing.
    """
    if not client:
        raise ValueError('Supabase client is required.')
    if not table_name:
        raise ValueError('Supabase document table_name is required.')
    embeddings = embedding.embed_documents(texts)
    # Honor caller-supplied ids; previously they were silently discarded
    # and replaced with fresh UUIDs even when explicitly provided.
    if ids is None:
        ids = [str(uuid.uuid4()) for _ in texts]
    docs = cls._texts_to_documents(texts, metadatas)
    cls._add_vectors(client, table_name, embeddings, docs, ids, chunk_size)
    return cls(client=client, embedding=embedding, table_name=table_name,
        query_name=query_name, chunk_size=chunk_size)
|
Return VectorStore initialized from texts and embeddings.
|
test_astradb_vectorstore_crud
|
"""Basic add/delete/update behaviour."""
res0 = store_someemb.similarity_search('Abc', k=2)
assert res0 == []
store_someemb.add_texts(texts=['aa', 'bb', 'cc'], metadatas=[{'k': 'a',
'ord': 0}, {'k': 'b', 'ord': 1}, {'k': 'c', 'ord': 2}], ids=['a', 'b', 'c']
)
res1 = store_someemb.similarity_search('Abc', k=5)
assert {doc.page_content for doc in res1} == {'aa', 'bb', 'cc'}
store_someemb.add_texts(texts=['cc', 'dd'], metadatas=[{'k': 'c_new', 'ord':
102}, {'k': 'd_new', 'ord': 103}], ids=['c', 'd'])
res2 = store_someemb.similarity_search('Abc', k=10)
assert len(res2) == 4
res3 = store_someemb.similarity_search_with_score_id(query='cc', k=1,
filter={'k': 'c_new'})
print(str(res3))
doc3, score3, id3 = res3[0]
assert doc3.page_content == 'cc'
assert doc3.metadata == {'k': 'c_new', 'ord': 102}
assert score3 > 0.999
assert id3 == 'c'
del1_res = store_someemb.delete(['b'])
assert del1_res is True
del2_res = store_someemb.delete(['a', 'c', 'Z!'])
assert del2_res is True
assert len(store_someemb.similarity_search('xy', k=10)) == 1
store_someemb.clear()
assert store_someemb.similarity_search('Abc', k=2) == []
store_someemb.add_documents([Document(page_content='vv', metadata={'k': 'v',
'ord': 204}), Document(page_content='ww', metadata={'k': 'w', 'ord':
205})], ids=['v', 'w'])
assert len(store_someemb.similarity_search('xy', k=10)) == 2
res4 = store_someemb.similarity_search('ww', k=1, filter={'k': 'w'})
assert res4[0].metadata['ord'] == 205
|
def test_astradb_vectorstore_crud(self, store_someemb: AstraDB) ->None:
    """Basic add/delete/update behaviour."""
    # A fresh store returns no matches.
    res0 = store_someemb.similarity_search('Abc', k=2)
    assert res0 == []
    # Insert three documents with explicit ids.
    store_someemb.add_texts(texts=['aa', 'bb', 'cc'], metadatas=[{'k': 'a',
        'ord': 0}, {'k': 'b', 'ord': 1}, {'k': 'c', 'ord': 2}], ids=['a',
        'b', 'c'])
    res1 = store_someemb.similarity_search('Abc', k=5)
    assert {doc.page_content for doc in res1} == {'aa', 'bb', 'cc'}
    # Re-using id 'c' upserts; only 'd' is new, so 3 + 1 = 4 documents.
    store_someemb.add_texts(texts=['cc', 'dd'], metadatas=[{'k': 'c_new',
        'ord': 102}, {'k': 'd_new', 'ord': 103}], ids=['c', 'd'])
    res2 = store_someemb.similarity_search('Abc', k=10)
    assert len(res2) == 4
    # Metadata-filtered search with score and id; 'cc' matches itself
    # with near-perfect similarity.
    res3 = store_someemb.similarity_search_with_score_id(query='cc', k=1,
        filter={'k': 'c_new'})
    print(str(res3))
    doc3, score3, id3 = res3[0]
    assert doc3.page_content == 'cc'
    assert doc3.metadata == {'k': 'c_new', 'ord': 102}
    assert score3 > 0.999
    assert id3 == 'c'
    # Deletion reports True even when some ids (e.g. 'Z!') do not exist.
    del1_res = store_someemb.delete(['b'])
    assert del1_res is True
    del2_res = store_someemb.delete(['a', 'c', 'Z!'])
    assert del2_res is True
    assert len(store_someemb.similarity_search('xy', k=10)) == 1
    # clear() empties the store completely.
    store_someemb.clear()
    assert store_someemb.similarity_search('Abc', k=2) == []
    # add_documents with explicit ids behaves like add_texts.
    store_someemb.add_documents([Document(page_content='vv', metadata={'k':
        'v', 'ord': 204}), Document(page_content='ww', metadata={'k': 'w',
        'ord': 205})], ids=['v', 'w'])
    assert len(store_someemb.similarity_search('xy', k=10)) == 2
    res4 = store_someemb.similarity_search('ww', k=1, filter={'k': 'w'})
    assert res4[0].metadata['ord'] == 205
|
Basic add/delete/update behaviour.
|
_import_graphql
|
from langchain_community.utilities.graphql import GraphQLAPIWrapper
return GraphQLAPIWrapper
|
def _import_graphql() ->Any:
    """Deferred import hook for the GraphQL API wrapper."""
    from langchain_community.utilities import graphql as graphql_module
    return graphql_module.GraphQLAPIWrapper
| null |
test_tracer_tool_run_on_error
|
"""Test tracer on a Tool run with an error."""
exception = Exception('test')
uuid = uuid4()
compare_run = Run(id=str(uuid), start_time=datetime.now(timezone.utc),
end_time=datetime.now(timezone.utc), events=[{'name': 'start', 'time':
datetime.now(timezone.utc)}, {'name': 'error', 'time': datetime.now(
timezone.utc)}], extra={}, execution_order=1, child_execution_order=1,
serialized={'name': 'tool'}, inputs=dict(input='test'), outputs=None,
action="{'name': 'tool'}", error=repr(exception), run_type='tool',
trace_id=uuid, dotted_order=f'20230101T000000000000Z{uuid}')
tracer = FakeTracer()
tracer.on_tool_start(serialized={'name': 'tool'}, input_str='test', run_id=uuid
)
tracer.on_tool_error(exception, run_id=uuid)
_compare_run_with_error(tracer.runs[0], compare_run)
|
@freeze_time('2023-01-01')
def test_tracer_tool_run_on_error() ->None:
    """Test tracer on a Tool run with an error."""
    exception = Exception('test')
    uuid = uuid4()
    # Expected Run: frozen time makes every timestamp identical, the
    # failure is stored as repr(exception), and outputs remain None.
    compare_run = Run(id=str(uuid), start_time=datetime.now(timezone.utc),
        end_time=datetime.now(timezone.utc), events=[{'name': 'start',
        'time': datetime.now(timezone.utc)}, {'name': 'error', 'time':
        datetime.now(timezone.utc)}], extra={}, execution_order=1,
        child_execution_order=1, serialized={'name': 'tool'}, inputs=dict(
        input='test'), outputs=None, action="{'name': 'tool'}", error=repr(
        exception), run_type='tool', trace_id=uuid, dotted_order=
        f'20230101T000000000000Z{uuid}')
    tracer = FakeTracer()
    # Drive the tracer through a tool start followed by an error
    # for the same run id, then compare the recorded run.
    tracer.on_tool_start(serialized={'name': 'tool'}, input_str='test',
        run_id=uuid)
    tracer.on_tool_error(exception, run_id=uuid)
    _compare_run_with_error(tracer.runs[0], compare_run)
|
Test tracer on a Tool run with an error.
|
_call_with_config
|
"""Helper method to transform an Input value to an Output value,
with callbacks. Use this method to implement invoke() in subclasses."""
config = ensure_config(config)
callback_manager = get_callback_manager_for_config(config)
run_manager = callback_manager.on_chain_start(dumpd(self), input, run_type=
run_type, name=config.get('run_name') or self.get_name())
try:
child_config = patch_config(config, callbacks=run_manager.get_child())
context = copy_context()
context.run(var_child_runnable_config.set, child_config)
output = cast(Output, context.run(call_func_with_variable_args, func,
input, config, run_manager, **kwargs))
except BaseException as e:
run_manager.on_chain_error(e)
raise
else:
run_manager.on_chain_end(dumpd(output))
return output
|
def _call_with_config(self, func: Union[Callable[[Input], Output], Callable
    [[Input, CallbackManagerForChainRun], Output], Callable[[Input,
    CallbackManagerForChainRun, RunnableConfig], Output]], input: Input,
    config: Optional[RunnableConfig], run_type: Optional[str]=None, **
    kwargs: Optional[Any]) ->Output:
    """Helper method to transform an Input value to an Output value,
    with callbacks. Use this method to implement invoke() in subclasses."""
    config = ensure_config(config)
    callback_manager = get_callback_manager_for_config(config)
    # Open a chain run so tracing/callbacks observe this invocation.
    run_manager = callback_manager.on_chain_start(dumpd(self), input,
        run_type=run_type, name=config.get('run_name') or self.get_name())
    try:
        # The child config routes nested runnable calls to this run's
        # callbacks; running inside a copied context sets the context
        # variable without leaking into the caller's context.
        child_config = patch_config(config, callbacks=run_manager.get_child())
        context = copy_context()
        context.run(var_child_runnable_config.set, child_config)
        output = cast(Output, context.run(call_func_with_variable_args,
            func, input, config, run_manager, **kwargs))
    except BaseException as e:
        # Report the failure to callbacks, then re-raise unchanged.
        run_manager.on_chain_error(e)
        raise
    else:
        run_manager.on_chain_end(dumpd(output))
    return output
|
Helper method to transform an Input value to an Output value,
with callbacks. Use this method to implement invoke() in subclasses.
|
test_all_imports
|
assert set(__all__) == set(EXPECTED_ALL)
|
def test_all_imports() ->None:
    """Verify the module's public API matches the expected export list."""
    expected = set(EXPECTED_ALL)
    actual = set(__all__)
    assert actual == expected
| null |
split_text
|
return self._split_text(text, self._separators)
|
def split_text(self, text: str) ->List[str]:
    """Split the text using this splitter's configured separator hierarchy."""
    separators = self._separators
    return self._split_text(text, separators)
| null |
save
|
"""Save the agent.
Args:
file_path: Path to file to save the agent to.
Example:
.. code-block:: python
# If working with agent executor
agent.agent.save(file_path="path/agent.yaml")
"""
if isinstance(file_path, str):
save_path = Path(file_path)
else:
save_path = file_path
agent_dict = self.dict()
if '_type' not in agent_dict:
raise NotImplementedError(f'Agent {self} does not support saving.')
directory_path = save_path.parent
directory_path.mkdir(parents=True, exist_ok=True)
if save_path.suffix == '.json':
with open(file_path, 'w') as f:
json.dump(agent_dict, f, indent=4)
elif save_path.suffix == '.yaml':
with open(file_path, 'w') as f:
yaml.dump(agent_dict, f, default_flow_style=False)
else:
raise ValueError(f'{save_path} must be json or yaml')
|
def save(self, file_path: Union[Path, str]) ->None:
    """Save the agent.
    Args:
        file_path: Path to file to save the agent to; the suffix
            (``.json`` or ``.yaml``) selects the serialization format.
    Raises:
        NotImplementedError: if the agent does not support serialization.
        ValueError: if the path suffix is neither ``.json`` nor ``.yaml``.
    Example:
    .. code-block:: python
        # If working with agent executor
        agent.agent.save(file_path="path/agent.yaml")
    """
    save_path = Path(file_path) if isinstance(file_path, str) else file_path
    agent_dict = self.dict()
    if '_type' not in agent_dict:
        raise NotImplementedError(f'Agent {self} does not support saving.')
    # Validate the format before touching the filesystem, so an
    # unsupported suffix does not leave behind an empty directory.
    if save_path.suffix not in ('.json', '.yaml'):
        raise ValueError(f'{save_path} must be json or yaml')
    save_path.parent.mkdir(parents=True, exist_ok=True)
    # Use save_path consistently (the original opened the raw file_path).
    if save_path.suffix == '.json':
        with open(save_path, 'w') as f:
            json.dump(agent_dict, f, indent=4)
    else:
        with open(save_path, 'w') as f:
            yaml.dump(agent_dict, f, default_flow_style=False)
|
Save the agent.
Args:
file_path: Path to file to save the agent to.
Example:
.. code-block:: python
# If working with agent executor
agent.agent.save(file_path="path/agent.yaml")
|
test_llamacpp_inference
|
"""Test valid llama.cpp inference."""
model_path = get_model()
llm = LlamaCpp(model_path=model_path)
output = llm('Say foo:')
assert isinstance(output, str)
assert len(output) > 1
|
def test_llamacpp_inference() ->None:
    """Test valid llama.cpp inference."""
    llm = LlamaCpp(model_path=get_model())
    result = llm('Say foo:')
    # A successful completion is a non-trivial string.
    assert isinstance(result, str)
    assert len(result) > 1
|
Test valid llama.cpp inference.
|
_get_video_info
|
"""Get important video information.
Components are:
- title
- description
- thumbnail url,
- publish_date
- channel_author
- and more.
"""
try:
from pytube import YouTube
except ImportError:
raise ImportError(
'Could not import pytube python package. Please install it with `pip install pytube`.'
)
yt = YouTube(f'https://www.youtube.com/watch?v={self.video_id}')
video_info = {'title': yt.title or 'Unknown', 'description': yt.description or
'Unknown', 'view_count': yt.views or 0, 'thumbnail_url': yt.
thumbnail_url or 'Unknown', 'publish_date': yt.publish_date.strftime(
'%Y-%m-%d %H:%M:%S') if yt.publish_date else 'Unknown', 'length': yt.
length or 0, 'author': yt.author or 'Unknown'}
return video_info
|
def _get_video_info(self) ->dict:
    """Fetch metadata for the video via pytube.

    Returns a dict with title, description, view count, thumbnail URL,
    publish date, length and author; missing fields fall back to
    'Unknown' (or 0 for the numeric fields).
    """
    try:
        from pytube import YouTube
    except ImportError:
        raise ImportError(
            'Could not import pytube python package. Please install it with `pip install pytube`.'
            )
    video = YouTube(f'https://www.youtube.com/watch?v={self.video_id}')
    # publish_date may be None, so format it defensively.
    if video.publish_date:
        published = video.publish_date.strftime('%Y-%m-%d %H:%M:%S')
    else:
        published = 'Unknown'
    return {
        'title': video.title or 'Unknown',
        'description': video.description or 'Unknown',
        'view_count': video.views or 0,
        'thumbnail_url': video.thumbnail_url or 'Unknown',
        'publish_date': published,
        'length': video.length or 0,
        'author': video.author or 'Unknown',
    }
|
Get important video information.
Components are:
- title
- description
- thumbnail url,
- publish_date
- channel_author
- and more.
|
test_respect_user_specified_user_agent
|
user_specified_user_agent = 'user_specified_user_agent'
header_template = {'User-Agent': user_specified_user_agent}
url = 'https://www.example.com'
loader = WebBaseLoader(url, header_template=header_template)
assert loader.session.headers['User-Agent'] == user_specified_user_agent
|
@pytest.mark.requires('bs4')
def test_respect_user_specified_user_agent(self) ->None:
    """A user-supplied User-Agent header must be kept on the session."""
    custom_agent = 'user_specified_user_agent'
    loader = WebBaseLoader('https://www.example.com', header_template={
        'User-Agent': custom_agent})
    assert loader.session.headers['User-Agent'] == custom_agent
| null |
_run
|
"""Use the tool."""
return str(self.api_wrapper.run(query))
|
def _run(self, query: str, run_manager: Optional[CallbackManagerForToolRun]
    =None) ->str:
    """Run the query through the API wrapper and return the result as text."""
    result = self.api_wrapper.run(query)
    return str(result)
|
Use the tool.
|
_mlflow_extras
|
return ''
|
@property
def _mlflow_extras(self) ->str:
    """Return an empty string: no extra options are needed here.

    NOTE(review): presumably interpreted by the base class as extra
    MLflow requirements/flags — confirm against the parent definition.
    """
    return ''
| null |
test_format_doc_with_metadata
|
"""Test format doc on a valid document."""
doc = Document(page_content='foo', metadata={'bar': 'baz'})
prompt = PromptTemplate(input_variables=['page_content', 'bar'], template=
'{page_content}, {bar}')
expected_output = 'foo, baz'
output = format_document(doc, prompt)
assert output == expected_output
|
def test_format_doc_with_metadata() ->None:
    """Test format doc on a valid document."""
    doc = Document(page_content='foo', metadata={'bar': 'baz'})
    template = PromptTemplate(input_variables=['page_content', 'bar'],
        template='{page_content}, {bar}')
    # Both the page content and the metadata value are interpolated.
    assert format_document(doc, template) == 'foo, baz'
|
Test format doc on a valid document.
|
_chain_type
|
return 'graph_cypher_chain'
|
@property
def _chain_type(self) ->str:
    """Return the chain type identifier, 'graph_cypher_chain'."""
    return 'graph_cypher_chain'
| null |
test_single_input_agent_raises_error_on_structured_tool
|
"""Test that older agents raise errors on older tools."""
@tool
def the_tool(foo: str, bar: str) ->str:
"""Return the concat of foo and bar."""
return foo + bar
with pytest.raises(ValueError, match=
f'{agent_cls.__name__} does not support multi-input tool {the_tool.name}.'
):
agent_cls.from_llm_and_tools(MagicMock(), [the_tool])
|
@pytest.mark.parametrize('agent_cls', [ZeroShotAgent, ChatAgent,
    ConversationalChatAgent, ConversationalAgent, ReActDocstoreAgent,
    ReActTextWorldAgent, SelfAskWithSearchAgent])
def test_single_input_agent_raises_error_on_structured_tool(agent_cls: Type
    [Agent]) ->None:
    """Test that single-input agents reject multi-input (structured) tools."""
    @tool
    def the_tool(foo: str, bar: str) ->str:
        """Return the concat of foo and bar."""
        return foo + bar
    with pytest.raises(ValueError, match=
        f'{agent_cls.__name__} does not support multi-input tool {the_tool.name}.'
        ):
        agent_cls.from_llm_and_tools(MagicMock(), [the_tool])
|
Test that older agents raise errors on older tools.
|
format_request_payload
|
input_str = json.dumps({'incorrect_input': {'input_string': [prompt]},
'parameters': model_kwargs})
return str.encode(input_str)
|
def format_request_payload(self, prompt: str, model_kwargs: Dict) ->bytes:
    """Serialize the prompt and model kwargs into a JSON request body.

    NOTE(review): the top-level key is 'incorrect_input' — presumably a
    deliberately malformed payload for exercising error handling;
    confirm with the callers.
    """
    payload = {'incorrect_input': {'input_string': [prompt]},
        'parameters': model_kwargs}
    return json.dumps(payload).encode()
| null |
_llm_type
|
return 'fake-openai-chat-model'
|
@property
def _llm_type(self) ->str:
    """Return the model type identifier, 'fake-openai-chat-model'."""
    return 'fake-openai-chat-model'
| null |
test_azure_openai_embedding_documents
|
"""Test openai embeddings."""
documents = ['foo bar']
embedding = _get_embeddings()
output = embedding.embed_documents(documents)
assert len(output) == 1
assert len(output[0]) == 1536
|
@pytest.mark.scheduled
def test_azure_openai_embedding_documents() ->None:
    """Test openai embeddings."""
    vectors = _get_embeddings().embed_documents(['foo bar'])
    # One input document yields exactly one 1536-dimensional vector.
    assert len(vectors) == 1
    assert len(vectors[0]) == 1536
|
Test openai embeddings.
|
save
|
if self.example_selector:
raise ValueError('Saving an example selector is not currently supported')
return super().save(file_path)
|
def save(self, file_path: Union[Path, str]) ->None:
    """Save the prompt to *file_path* via the parent implementation.

    Raises:
        ValueError: If an example selector is configured, since
            serializing selectors is not supported.
    """
    if self.example_selector:
        message = 'Saving an example selector is not currently supported'
        raise ValueError(message)
    return super().save(file_path)
| null |
format
|
"""Format the prompt with inputs generating a string.
Use this method to generate a string representation of a prompt consisting
of chat messages.
Useful for feeding into a string based completion language model or debugging.
Args:
**kwargs: keyword arguments to use for formatting.
Returns:
A string representation of the prompt
"""
messages = self.format_messages(**kwargs)
return get_buffer_string(messages)
|
def format(self, **kwargs: Any) ->str:
    """Format the prompt with inputs generating a string.

    Use this method to generate a string representation of a prompt
    consisting of chat messages. Useful for feeding into a string-based
    completion language model or for debugging.

    Args:
        **kwargs: keyword arguments to use for formatting.

    Returns:
        A string representation of the prompt.
    """
    return get_buffer_string(self.format_messages(**kwargs))
|
Format the prompt with inputs generating a string.
Use this method to generate a string representation of a prompt consisting
of chat messages.
Useful for feeding into a string based completion language model or debugging.
Args:
**kwargs: keyword arguments to use for formatting.
Returns:
A string representation of the prompt
|
__init__
|
super().__init__()
try:
import label_studio_sdk as ls
except ImportError:
raise ImportError(
f"You're using {self.__class__.__name__} in your code, but you don't have the LabelStudio SDK Python package installed or upgraded to the latest version. Please run `pip install -U label-studio-sdk` before using this callback."
)
if not api_key:
if os.getenv('LABEL_STUDIO_API_KEY'):
api_key = str(os.getenv('LABEL_STUDIO_API_KEY'))
else:
raise ValueError(
f"You're using {self.__class__.__name__} in your code, Label Studio API key is not provided. Please provide Label Studio API key: go to the Label Studio instance, navigate to Account & Settings -> Access Token and copy the key. Use the key as a parameter for the callback: {self.__class__.__name__}(label_studio_api_key='<your_key_here>', ...) or set the environment variable LABEL_STUDIO_API_KEY=<your_key_here>"
)
self.api_key = api_key
if not url:
if os.getenv('LABEL_STUDIO_URL'):
url = os.getenv('LABEL_STUDIO_URL')
else:
warnings.warn(
f"Label Studio URL is not provided, using default URL: {ls.LABEL_STUDIO_DEFAULT_URL}If you want to provide your own URL, use the parameter: {self.__class__.__name__}(label_studio_url='<your_url_here>', ...) or set the environment variable LABEL_STUDIO_URL=<your_url_here>"
)
url = ls.LABEL_STUDIO_DEFAULT_URL
self.url = url
self.payload: Dict[str, Dict] = {}
self.ls_client = ls.Client(url=self.url, api_key=self.api_key)
self.project_name = project_name
if project_config:
self.project_config = project_config
self.mode = None
else:
self.project_config, self.mode = get_default_label_configs(mode)
self.project_id = project_id or os.getenv('LABEL_STUDIO_PROJECT_ID')
if self.project_id is not None:
self.ls_project = self.ls_client.get_project(int(self.project_id))
else:
project_title = datetime.today().strftime(self.project_name)
existing_projects = self.ls_client.get_projects(title=project_title)
if existing_projects:
self.ls_project = existing_projects[0]
self.project_id = self.ls_project.id
else:
self.ls_project = self.ls_client.create_project(title=project_title,
label_config=self.project_config)
self.project_id = self.ls_project.id
self.parsed_label_config = self.ls_project.parsed_label_config
self.from_name, self.to_name, self.value, self.input_type = (None, None,
None, None)
for tag_name, tag_info in self.parsed_label_config.items():
if tag_info['type'] == 'TextArea':
self.from_name = tag_name
self.to_name = tag_info['to_name'][0]
self.value = tag_info['inputs'][0]['value']
self.input_type = tag_info['inputs'][0]['type']
break
if not self.from_name:
error_message = (
f'Label Studio project "{self.project_name}" does not have a TextArea tag. Please add a TextArea tag to the project.'
)
if self.mode == LabelStudioMode.PROMPT:
error_message += """
HINT: go to project Settings -> Labeling Interface -> Browse Templates and select "Generative AI -> Supervised Language Model Fine-tuning" template."""
else:
error_message += """
HINT: go to project Settings -> Labeling Interface -> Browse Templates and check available templates under "Generative AI" section."""
raise ValueError(error_message)
|
def __init__(self, api_key: Optional[str]=None, url: Optional[str]=None,
    project_id: Optional[int]=None, project_name: str=DEFAULT_PROJECT_NAME,
    project_config: Optional[str]=None, mode: Union[str, LabelStudioMode]=
    LabelStudioMode.PROMPT):
    """Set up the Label Studio callback handler.

    Resolves the API key and URL (falling back to the
    LABEL_STUDIO_API_KEY / LABEL_STUDIO_URL environment variables),
    connects to Label Studio, finds or creates the target project, and
    locates the TextArea tag used to submit model outputs.

    Raises:
        ImportError: If the ``label-studio-sdk`` package is missing.
        ValueError: If no API key can be resolved, or the project's
            labeling config contains no TextArea tag.
    """
    super().__init__()
    try:
        import label_studio_sdk as ls
    except ImportError:
        raise ImportError(
            f"You're using {self.__class__.__name__} in your code, but you don't have the LabelStudio SDK Python package installed or upgraded to the latest version. Please run `pip install -U label-studio-sdk` before using this callback."
            )
    # Resolve the API key: explicit argument first, then the env var.
    if not api_key:
        if os.getenv('LABEL_STUDIO_API_KEY'):
            api_key = str(os.getenv('LABEL_STUDIO_API_KEY'))
        else:
            raise ValueError(
                f"You're using {self.__class__.__name__} in your code, Label Studio API key is not provided. Please provide Label Studio API key: go to the Label Studio instance, navigate to Account & Settings -> Access Token and copy the key. Use the key as a parameter for the callback: {self.__class__.__name__}(label_studio_api_key='<your_key_here>', ...) or set the environment variable LABEL_STUDIO_API_KEY=<your_key_here>"
                )
    self.api_key = api_key
    # Resolve the URL similarly; fall back to the SDK default with a warning.
    if not url:
        if os.getenv('LABEL_STUDIO_URL'):
            url = os.getenv('LABEL_STUDIO_URL')
        else:
            warnings.warn(
                f"Label Studio URL is not provided, using default URL: {ls.LABEL_STUDIO_DEFAULT_URL}If you want to provide your own URL, use the parameter: {self.__class__.__name__}(label_studio_url='<your_url_here>', ...) or set the environment variable LABEL_STUDIO_URL=<your_url_here>"
                )
            url = ls.LABEL_STUDIO_DEFAULT_URL
    self.url = url
    # Per-run accumulated payloads, keyed by run id.
    self.payload: Dict[str, Dict] = {}
    self.ls_client = ls.Client(url=self.url, api_key=self.api_key)
    self.project_name = project_name
    # An explicit project_config overrides the mode-derived default config.
    if project_config:
        self.project_config = project_config
        self.mode = None
    else:
        self.project_config, self.mode = get_default_label_configs(mode)
    self.project_id = project_id or os.getenv('LABEL_STUDIO_PROJECT_ID')
    if self.project_id is not None:
        self.ls_project = self.ls_client.get_project(int(self.project_id))
    else:
        # No explicit project: reuse one with a matching title or create it.
        project_title = datetime.today().strftime(self.project_name)
        existing_projects = self.ls_client.get_projects(title=project_title)
        if existing_projects:
            self.ls_project = existing_projects[0]
            self.project_id = self.ls_project.id
        else:
            self.ls_project = self.ls_client.create_project(title=
                project_title, label_config=self.project_config)
            self.project_id = self.ls_project.id
    self.parsed_label_config = self.ls_project.parsed_label_config
    # Locate the first TextArea tag; model outputs will be written to it.
    self.from_name, self.to_name, self.value, self.input_type = (None, None,
        None, None)
    for tag_name, tag_info in self.parsed_label_config.items():
        if tag_info['type'] == 'TextArea':
            self.from_name = tag_name
            self.to_name = tag_info['to_name'][0]
            self.value = tag_info['inputs'][0]['value']
            self.input_type = tag_info['inputs'][0]['type']
            break
    if not self.from_name:
        error_message = (
            f'Label Studio project "{self.project_name}" does not have a TextArea tag. Please add a TextArea tag to the project.'
            )
        if self.mode == LabelStudioMode.PROMPT:
            error_message += """
HINT: go to project Settings -> Labeling Interface -> Browse Templates and select "Generative AI -> Supervised Language Model Fine-tuning" template."""
        else:
            error_message += """
HINT: go to project Settings -> Labeling Interface -> Browse Templates and check available templates under "Generative AI" section."""
        raise ValueError(error_message)
| null |
embed_documents
|
embeddings: List[List[float]] = []
for txt in _chunk(texts, 20):
resp = self._client.predict(endpoint=self.endpoint, inputs={'input': txt})
embeddings.extend(r['embedding'] for r in resp['data'])
return embeddings
|
def embed_documents(self, texts: List[str]) ->List[List[float]]:
    """Embed *texts* by querying the endpoint in batches of 20."""
    results: List[List[float]] = []
    for batch in _chunk(texts, 20):
        response = self._client.predict(endpoint=self.endpoint, inputs={
            'input': batch})
        for item in response['data']:
            results.append(item['embedding'])
    return results
| null |
test_api_key_masked_when_passed_via_constructor
|
llm = ChatFireworks(fireworks_api_key='secret-api-key')
print(llm.fireworks_api_key, end='')
captured = capsys.readouterr()
assert captured.out == '**********'
|
@pytest.mark.requires('fireworks')
def test_api_key_masked_when_passed_via_constructor(capsys: CaptureFixture
    ) ->None:
    """Printing the API key shows the masked placeholder, not the secret."""
    model = ChatFireworks(fireworks_api_key='secret-api-key')
    print(model.fireworks_api_key, end='')
    assert capsys.readouterr().out == '**********'
| null |
embed_documents
|
"""Return consistent embeddings for each text seen so far."""
out_vectors = []
for text in texts:
if text not in self.known_texts:
self.known_texts.append(text)
vector = [float(1.0)] * (self.dimensionality - 1) + [float(self.
known_texts.index(text))]
out_vectors.append(vector)
return out_vectors
|
def embed_documents(self, texts: List[str]) ->List[List[float]]:
    """Return consistent embeddings for each text seen so far."""
    vectors = []
    for text in texts:
        if text not in self.known_texts:
            self.known_texts.append(text)
        # Deterministic vector: all ones except the last component, which
        # encodes the index of the text's first occurrence.
        index_component = float(self.known_texts.index(text))
        vectors.append([1.0] * (self.dimensionality - 1) + [index_component])
    return vectors
|
Return consistent embeddings for each text seen so far.
|
_identifying_params
|
return {**{'deployment_name': self.deployment_name}, **super().
_identifying_params}
|
@property
def _identifying_params(self) ->Mapping[str, Any]:
    """Return the parent's identifying params plus the deployment name.

    Note: because the parent mapping is merged last, its entries take
    precedence on any key collision.
    """
    return {**{'deployment_name': self.deployment_name}, **super().
        _identifying_params}
| null |
authenticate
|
"""Authenticate using the Microsoft Grah API"""
try:
from O365 import Account
except ImportError as e:
raise ImportError(
'Cannot import 0365. Please install the package with `pip install O365`.'
) from e
if 'CLIENT_ID' in os.environ and 'CLIENT_SECRET' in os.environ:
client_id = os.environ['CLIENT_ID']
client_secret = os.environ['CLIENT_SECRET']
credentials = client_id, client_secret
else:
logger.error(
'Error: The CLIENT_ID and CLIENT_SECRET environmental variables have not been set. Visit the following link on how to acquire these authorization tokens: https://learn.microsoft.com/en-us/graph/auth/'
)
return None
account = Account(credentials)
if account.is_authenticated is False:
if not account.authenticate(scopes=[
'https://graph.microsoft.com/Mail.ReadWrite',
'https://graph.microsoft.com/Mail.Send',
'https://graph.microsoft.com/Calendars.ReadWrite',
'https://graph.microsoft.com/MailboxSettings.ReadWrite']):
print('Error: Could not authenticate')
return None
else:
return account
else:
return account
|
def authenticate() ->Account:
    """Authenticate with the Microsoft Graph API via the O365 package.

    Reads the CLIENT_ID and CLIENT_SECRET environment variables and
    returns an authenticated ``Account``, or ``None`` when credentials
    are missing or authentication fails.

    Raises:
        ImportError: If the ``O365`` package is not installed.
    """
    try:
        from O365 import Account
    except ImportError as e:
        # Fix: the message previously said "0365" (zero) instead of "O365".
        raise ImportError(
            'Cannot import O365. Please install the package with `pip install O365`.'
            ) from e
    if 'CLIENT_ID' in os.environ and 'CLIENT_SECRET' in os.environ:
        credentials = os.environ['CLIENT_ID'], os.environ['CLIENT_SECRET']
    else:
        logger.error(
            'Error: The CLIENT_ID and CLIENT_SECRET environmental variables have not been set. Visit the following link on how to acquire these authorization tokens: https://learn.microsoft.com/en-us/graph/auth/'
            )
        return None
    account = Account(credentials)
    if account.is_authenticated:
        return account
    # Not yet authenticated: request the mail/calendar scopes interactively.
    if not account.authenticate(scopes=[
        'https://graph.microsoft.com/Mail.ReadWrite',
        'https://graph.microsoft.com/Mail.Send',
        'https://graph.microsoft.com/Calendars.ReadWrite',
        'https://graph.microsoft.com/MailboxSettings.ReadWrite']):
        print('Error: Could not authenticate')
        return None
    return account
|
Authenticate using the Microsoft Graph API
|
escape
|
if not isinstance(value, str):
raise TypeError(
f'Value must be a string object for token escaping.Got type {type(value)}'
)
def escape_symbol(match: re.Match) ->str:
value = match.group(0)
return f'\\{value}'
return self.escaped_chars_re.sub(escape_symbol, value)
|
def escape(self, value: str) ->str:
    """Backslash-escape every special character in *value*.

    Characters matched by ``self.escaped_chars_re`` are each prefixed
    with a single backslash.

    Args:
        value: The string to escape.

    Returns:
        The escaped string.

    Raises:
        TypeError: If *value* is not a string.
    """
    if not isinstance(value, str):
        # Fix: message previously lacked a space after the period.
        raise TypeError(
            f'Value must be a string object for token escaping. Got type {type(value)}'
            )

    def escape_symbol(match: re.Match) ->str:
        # Renamed from `value` to avoid shadowing the outer parameter.
        return f'\\{match.group(0)}'
    return self.escaped_chars_re.sub(escape_symbol, value)
| null |
before_record_request
|
for host in skipped_host:
if request.host.startswith(host) or request.host.endswith(host):
return None
return request
|
def before_record_request(request: Request) ->Union[Request, None]:
    """Drop requests whose host matches ``skipped_host``; keep the rest."""
    is_skipped = any(request.host.startswith(host) or request.host.
        endswith(host) for host in skipped_host)
    return None if is_skipped else request
| null |
load_bibtex_entries
|
"""Load bibtex entries from the bibtex file at the given path."""
import bibtexparser
with open(path) as file:
entries = bibtexparser.load(file).entries
return entries
|
def load_bibtex_entries(self, path: str) ->List[Dict[str, Any]]:
    """Load bibtex entries from the bibtex file at the given path.

    Args:
        path: Path to the ``.bib`` file.

    Returns:
        The list of entry dicts parsed by ``bibtexparser``.

    Raises:
        ImportError: If the ``bibtexparser`` package is not installed.
    """
    try:
        import bibtexparser
    except ImportError:
        # Guarded import with an install hint, matching the convention
        # used for optional dependencies elsewhere in this codebase.
        raise ImportError(
            'Could not import bibtexparser python package. Please install it with `pip install bibtexparser`.'
            )
    with open(path) as file:
        entries = bibtexparser.load(file).entries
    return entries
|
Load bibtex entries from the bibtex file at the given path.
|
_get_anthropic_stop
|
if not self.HUMAN_PROMPT or not self.AI_PROMPT:
raise NameError('Please ensure the anthropic package is loaded')
if stop is None:
stop = []
stop.extend([self.HUMAN_PROMPT])
return stop
|
def _get_anthropic_stop(self, stop: Optional[List[str]]=None) ->List[str]:
    """Return the stop sequences with the human prompt always appended.

    A new list is returned; the caller's *stop* list is no longer
    mutated in place (the previous ``stop.extend`` modified it).

    Raises:
        NameError: If the anthropic prompt constants are unset.
    """
    if not self.HUMAN_PROMPT or not self.AI_PROMPT:
        raise NameError('Please ensure the anthropic package is loaded')
    # Copy to avoid mutating a caller-supplied list.
    return list(stop or []) + [self.HUMAN_PROMPT]
| null |
test_neo4jvector_hybrid_deduplicate
|
"""Test result deduplication with hybrid search."""
text_embeddings = FakeEmbeddingsWithOsDimension().embed_documents(texts)
text_embedding_pairs = list(zip(texts, text_embeddings))
docsearch = Neo4jVector.from_embeddings(text_embeddings=
text_embedding_pairs, embedding=FakeEmbeddingsWithOsDimension(), url=
url, username=username, password=password, pre_delete_collection=True,
search_type=SearchType.HYBRID)
output = docsearch.similarity_search('foo', k=3)
assert output == [Document(page_content='foo'), Document(page_content='bar'
), Document(page_content='baz')]
drop_vector_indexes(docsearch)
|
def test_neo4jvector_hybrid_deduplicate() ->None:
    """Test result deduplication with hybrid search."""
    embeddings = FakeEmbeddingsWithOsDimension().embed_documents(texts)
    store = Neo4jVector.from_embeddings(text_embeddings=list(zip(texts,
        embeddings)), embedding=FakeEmbeddingsWithOsDimension(), url=url,
        username=username, password=password, pre_delete_collection=True,
        search_type=SearchType.HYBRID)
    results = store.similarity_search('foo', k=3)
    assert results == [Document(page_content='foo'), Document(page_content=
        'bar'), Document(page_content='baz')]
    drop_vector_indexes(store)
|
Test result deduplication with hybrid search.
|
setup
|
index_stats = self.index.describe_index_stats()
for _namespace_name in index_stats['namespaces'].keys():
self.index.delete(delete_all=True, namespace=_namespace_name)
reset_pinecone()
|
@pytest.fixture(autouse=True)
def setup(self) ->None:
    """Wipe every namespace in the index before each test."""
    stats = self.index.describe_index_stats()
    for namespace in stats['namespaces']:
        self.index.delete(delete_all=True, namespace=namespace)
    reset_pinecone()
| null |
test__validate_example_inputs_for_chain_single_input_multi_expect
|
mock_ = mock.MagicMock()
mock_.inputs = {'foo': 'bar'}
chain = mock.MagicMock()
chain.input_keys = ['def not foo', 'oh here is another']
with pytest.raises(InputFormatError, match='Example inputs missing expected'):
_validate_example_inputs_for_chain(mock_, chain, None)
|
def test__validate_example_inputs_for_chain_single_input_multi_expect() ->None:
    """A single example input against multiple expected keys must fail."""
    example = mock.MagicMock()
    example.inputs = {'foo': 'bar'}
    chain = mock.MagicMock()
    chain.input_keys = ['def not foo', 'oh here is another']
    with pytest.raises(InputFormatError, match=
        'Example inputs missing expected'):
        _validate_example_inputs_for_chain(example, chain, None)
| null |
raise_deprecation
|
if 'llm' in values:
warnings.warn(
'Directly instantiating an SQLDatabaseChain with an llm is deprecated. Please instantiate with llm_chain argument or using the from_llm class method.'
)
if 'llm_chain' not in values and values['llm'] is not None:
database = values['database']
prompt = values.get('prompt') or SQL_PROMPTS.get(database.dialect,
PROMPT)
values['llm_chain'] = LLMChain(llm=values['llm'], prompt=prompt)
return values
|
@root_validator(pre=True)
def raise_deprecation(cls, values: Dict) ->Dict:
    """Warn when the chain is built from a raw ``llm`` and, for backward
    compatibility, construct the ``llm_chain`` from it."""
    if 'llm' in values:
        warnings.warn(
            'Directly instantiating an SQLDatabaseChain with an llm is deprecated. Please instantiate with llm_chain argument or using the from_llm class method.'
            )
        if 'llm_chain' not in values and values['llm'] is not None:
            database = values['database']
            # Prefer an explicit prompt; otherwise pick a dialect-specific
            # default, falling back to the generic PROMPT.
            prompt = values.get('prompt') or SQL_PROMPTS.get(database.
                dialect, PROMPT)
            values['llm_chain'] = LLMChain(llm=values['llm'], prompt=prompt)
    return values
| null |
from_embeddings
|
"""Construct OpenSearchVectorSearch wrapper from pre-vectorized embeddings.
Example:
.. code-block:: python
from langchain_community.vectorstores import OpenSearchVectorSearch
from langchain_community.embeddings import OpenAIEmbeddings
embedder = OpenAIEmbeddings()
embeddings = embedder.embed_documents(["foo", "bar"])
opensearch_vector_search = OpenSearchVectorSearch.from_embeddings(
embeddings,
texts,
embedder,
opensearch_url="http://localhost:9200"
)
OpenSearch by default supports Approximate Search powered by nmslib, faiss
and lucene engines recommended for large datasets. Also supports brute force
search through Script Scoring and Painless Scripting.
Optional Args:
vector_field: Document field embeddings are stored in. Defaults to
"vector_field".
text_field: Document field the text of the document is stored in. Defaults
to "text".
Optional Keyword Args for Approximate Search:
engine: "nmslib", "faiss", "lucene"; default: "nmslib"
space_type: "l2", "l1", "cosinesimil", "linf", "innerproduct"; default: "l2"
ef_search: Size of the dynamic list used during k-NN searches. Higher values
lead to more accurate but slower searches; default: 512
ef_construction: Size of the dynamic list used during k-NN graph creation.
Higher values lead to more accurate graph but slower indexing speed;
default: 512
m: Number of bidirectional links created for each new element. Large impact
on memory consumption. Between 2 and 100; default: 16
Keyword Args for Script Scoring or Painless Scripting:
is_appx_search: False
"""
opensearch_url = get_from_dict_or_env(kwargs, 'opensearch_url',
'OPENSEARCH_URL')
keys_list = ['opensearch_url', 'index_name', 'is_appx_search',
'vector_field', 'text_field', 'engine', 'space_type', 'ef_search',
'ef_construction', 'm', 'max_chunk_bytes', 'is_aoss']
_validate_embeddings_and_bulk_size(len(embeddings), bulk_size)
dim = len(embeddings[0])
index_name = get_from_dict_or_env(kwargs, 'index_name',
'OPENSEARCH_INDEX_NAME', default=uuid.uuid4().hex)
is_appx_search = kwargs.get('is_appx_search', True)
vector_field = kwargs.get('vector_field', 'vector_field')
text_field = kwargs.get('text_field', 'text')
max_chunk_bytes = kwargs.get('max_chunk_bytes', 1 * 1024 * 1024)
http_auth = kwargs.get('http_auth')
is_aoss = _is_aoss_enabled(http_auth=http_auth)
engine = None
if is_aoss and not is_appx_search:
raise ValueError(
'Amazon OpenSearch Service Serverless only supports `approximate_search`'
)
if is_appx_search:
engine = kwargs.get('engine', 'nmslib')
space_type = kwargs.get('space_type', 'l2')
ef_search = kwargs.get('ef_search', 512)
ef_construction = kwargs.get('ef_construction', 512)
m = kwargs.get('m', 16)
_validate_aoss_with_engines(is_aoss, engine)
mapping = _default_text_mapping(dim, engine, space_type, ef_search,
ef_construction, m, vector_field)
else:
mapping = _default_scripting_text_mapping(dim)
[kwargs.pop(key, None) for key in keys_list]
client = _get_opensearch_client(opensearch_url, **kwargs)
_bulk_ingest_embeddings(client, index_name, embeddings, texts, ids=ids,
metadatas=metadatas, vector_field=vector_field, text_field=text_field,
mapping=mapping, max_chunk_bytes=max_chunk_bytes, is_aoss=is_aoss)
kwargs['engine'] = engine
return cls(opensearch_url, index_name, embedding, **kwargs)
|
@classmethod
def from_embeddings(cls, embeddings: List[List[float]], texts: List[str],
    embedding: Embeddings, metadatas: Optional[List[dict]]=None, bulk_size:
    int=500, ids: Optional[List[str]]=None, **kwargs: Any
    ) ->OpenSearchVectorSearch:
    """Construct OpenSearchVectorSearch wrapper from pre-vectorized embeddings.
    Example:
        .. code-block:: python
            from langchain_community.vectorstores import OpenSearchVectorSearch
            from langchain_community.embeddings import OpenAIEmbeddings
            embedder = OpenAIEmbeddings()
            embeddings = embedder.embed_documents(["foo", "bar"])
            opensearch_vector_search = OpenSearchVectorSearch.from_embeddings(
                embeddings,
                texts,
                embedder,
                opensearch_url="http://localhost:9200"
            )
    OpenSearch by default supports Approximate Search powered by nmslib, faiss
    and lucene engines recommended for large datasets. Also supports brute force
    search through Script Scoring and Painless Scripting.
    Optional Args:
        vector_field: Document field embeddings are stored in. Defaults to
        "vector_field".
        text_field: Document field the text of the document is stored in. Defaults
        to "text".
    Optional Keyword Args for Approximate Search:
        engine: "nmslib", "faiss", "lucene"; default: "nmslib"
        space_type: "l2", "l1", "cosinesimil", "linf", "innerproduct"; default: "l2"
        ef_search: Size of the dynamic list used during k-NN searches. Higher values
        lead to more accurate but slower searches; default: 512
        ef_construction: Size of the dynamic list used during k-NN graph creation.
        Higher values lead to more accurate graph but slower indexing speed;
        default: 512
        m: Number of bidirectional links created for each new element. Large impact
        on memory consumption. Between 2 and 100; default: 16
    Keyword Args for Script Scoring or Painless Scripting:
        is_appx_search: False
    """
    opensearch_url = get_from_dict_or_env(kwargs, 'opensearch_url',
        'OPENSEARCH_URL')
    # Configuration-only kwargs: consumed here and stripped before the
    # remaining kwargs are forwarded to the OpenSearch client constructor.
    keys_list = ['opensearch_url', 'index_name', 'is_appx_search',
        'vector_field', 'text_field', 'engine', 'space_type', 'ef_search',
        'ef_construction', 'm', 'max_chunk_bytes', 'is_aoss']
    _validate_embeddings_and_bulk_size(len(embeddings), bulk_size)
    dim = len(embeddings[0])
    index_name = get_from_dict_or_env(kwargs, 'index_name',
        'OPENSEARCH_INDEX_NAME', default=uuid.uuid4().hex)
    is_appx_search = kwargs.get('is_appx_search', True)
    vector_field = kwargs.get('vector_field', 'vector_field')
    text_field = kwargs.get('text_field', 'text')
    max_chunk_bytes = kwargs.get('max_chunk_bytes', 1 * 1024 * 1024)
    http_auth = kwargs.get('http_auth')
    is_aoss = _is_aoss_enabled(http_auth=http_auth)
    engine = None
    if is_aoss and not is_appx_search:
        raise ValueError(
            'Amazon OpenSearch Service Serverless only supports `approximate_search`'
            )
    if is_appx_search:
        engine = kwargs.get('engine', 'nmslib')
        space_type = kwargs.get('space_type', 'l2')
        ef_search = kwargs.get('ef_search', 512)
        ef_construction = kwargs.get('ef_construction', 512)
        m = kwargs.get('m', 16)
        _validate_aoss_with_engines(is_aoss, engine)
        mapping = _default_text_mapping(dim, engine, space_type, ef_search,
            ef_construction, m, vector_field)
    else:
        mapping = _default_scripting_text_mapping(dim)
    # Idiom fix: a plain loop (not a throwaway list comprehension) for the
    # side-effecting removal of config-only keys.
    for key in keys_list:
        kwargs.pop(key, None)
    client = _get_opensearch_client(opensearch_url, **kwargs)
    _bulk_ingest_embeddings(client, index_name, embeddings, texts, ids=ids,
        metadatas=metadatas, vector_field=vector_field, text_field=
        text_field, mapping=mapping, max_chunk_bytes=max_chunk_bytes,
        is_aoss=is_aoss)
    kwargs['engine'] = engine
    return cls(opensearch_url, index_name, embedding, **kwargs)
|
Construct OpenSearchVectorSearch wrapper from pre-vectorized embeddings.
Example:
.. code-block:: python
from langchain_community.vectorstores import OpenSearchVectorSearch
from langchain_community.embeddings import OpenAIEmbeddings
embedder = OpenAIEmbeddings()
embeddings = embedder.embed_documents(["foo", "bar"])
opensearch_vector_search = OpenSearchVectorSearch.from_embeddings(
embeddings,
texts,
embedder,
opensearch_url="http://localhost:9200"
)
OpenSearch by default supports Approximate Search powered by nmslib, faiss
and lucene engines recommended for large datasets. Also supports brute force
search through Script Scoring and Painless Scripting.
Optional Args:
vector_field: Document field embeddings are stored in. Defaults to
"vector_field".
text_field: Document field the text of the document is stored in. Defaults
to "text".
Optional Keyword Args for Approximate Search:
engine: "nmslib", "faiss", "lucene"; default: "nmslib"
space_type: "l2", "l1", "cosinesimil", "linf", "innerproduct"; default: "l2"
ef_search: Size of the dynamic list used during k-NN searches. Higher values
lead to more accurate but slower searches; default: 512
ef_construction: Size of the dynamic list used during k-NN graph creation.
Higher values lead to more accurate graph but slower indexing speed;
default: 512
m: Number of bidirectional links created for each new element. Large impact
on memory consumption. Between 2 and 100; default: 16
Keyword Args for Script Scoring or Painless Scripting:
is_appx_search: False
|
test_async_recursive_url_loader
|
url = 'https://docs.python.org/3.9/'
loader = RecursiveUrlLoader(url, extractor=lambda _: 'placeholder',
use_async=True, max_depth=3, timeout=None, check_response_status=True)
docs = loader.load()
assert len(docs) == 513
assert docs[0].page_content == 'placeholder'
|
def test_async_recursive_url_loader() ->None:
    """Crawl the Python 3.9 docs asynchronously and check the expected page count."""
    root_url = 'https://docs.python.org/3.9/'
    crawler = RecursiveUrlLoader(root_url, extractor=lambda _:
        'placeholder', use_async=True, max_depth=3, timeout=None,
        check_response_status=True)
    pages = crawler.load()
    assert len(pages) == 513
    assert pages[0].page_content == 'placeholder'
| null |
prt
|
with open('/tmp/debugjaguar.log', 'a') as file:
print(f'msg={msg}', file=file, flush=True)
|
def prt(self, msg: str) ->None:
    """Append *msg* to the scratch debug log at /tmp/debugjaguar.log."""
    log_file = open('/tmp/debugjaguar.log', 'a')
    try:
        print(f'msg={msg}', file=log_file, flush=True)
    finally:
        log_file.close()
| null |
test_sequential_valid_outputs
|
"""Test chain runs when valid outputs are specified."""
chain_1 = FakeChain(input_variables=['foo'], output_variables=['bar'])
chain_2 = FakeChain(input_variables=['bar'], output_variables=['baz'])
chain = SequentialChain(chains=[chain_1, chain_2], input_variables=['foo'],
output_variables=['bar', 'baz'])
output = chain({'foo': '123'}, return_only_outputs=True)
expected_output = {'baz': '123foofoo', 'bar': '123foo'}
assert output == expected_output
|
def test_sequential_valid_outputs() ->None:
    """Test chain runs when valid outputs are specified."""
    first = FakeChain(input_variables=['foo'], output_variables=['bar'])
    second = FakeChain(input_variables=['bar'], output_variables=['baz'])
    pipeline = SequentialChain(chains=[first, second], input_variables=[
        'foo'], output_variables=['bar', 'baz'])
    result = pipeline({'foo': '123'}, return_only_outputs=True)
    # FakeChain appends 'foo' per hop: bar == '123foo', baz == '123foofoo'.
    assert result == {'baz': '123foofoo', 'bar': '123foo'}
|
Test chain runs when valid outputs are specified.
|
import_langkit
|
"""Import the langkit python package and raise an error if it is not installed.
Args:
sentiment: Whether to import the langkit.sentiment module. Defaults to False.
toxicity: Whether to import the langkit.toxicity module. Defaults to False.
themes: Whether to import the langkit.themes module. Defaults to False.
Returns:
The imported langkit module.
"""
try:
import langkit
import langkit.regexes
import langkit.textstat
if sentiment:
import langkit.sentiment
if toxicity:
import langkit.toxicity
if themes:
import langkit.themes
except ImportError:
raise ImportError(
'To use the whylabs callback manager you need to have the `langkit` python package installed. Please install it with `pip install langkit`.'
)
return langkit
|
def import_langkit(sentiment: bool=False, toxicity: bool=False, themes:
    bool=False) ->Any:
    """Import the langkit python package and raise an error if it is not installed.
    Args:
        sentiment: Whether to import the langkit.sentiment module. Defaults to False.
        toxicity: Whether to import the langkit.toxicity module. Defaults to False.
        themes: Whether to import the langkit.themes module. Defaults to False.
    Returns:
        The imported langkit module.
    Raises:
        ImportError: If `langkit` (or a requested optional submodule) is not
            installed; the original import failure is chained as the cause.
    """
    try:
        import langkit
        import langkit.regexes
        import langkit.textstat
        if sentiment:
            import langkit.sentiment
        if toxicity:
            import langkit.toxicity
        if themes:
            import langkit.themes
    except ImportError as e:
        # Chain the original error so the exact missing module is preserved
        # in the traceback (`raise ... from e`).
        raise ImportError(
            'To use the whylabs callback manager you need to have the `langkit` python package installed. Please install it with `pip install langkit`.'
            ) from e
    return langkit
|
Import the langkit python package and raise an error if it is not installed.
Args:
sentiment: Whether to import the langkit.sentiment module. Defaults to False.
toxicity: Whether to import the langkit.toxicity module. Defaults to False.
themes: Whether to import the langkit.themes module. Defaults to False.
Returns:
The imported langkit module.
|
__repr__
|
return self.__str__()
|
def __repr__(self) ->str:
    """Use the same text as ``__str__`` for the debug representation."""
    rendered = self.__str__()
    return rendered
| null |
test_vertexai_args_passed
|
response_text = 'Goodbye'
user_prompt = 'Hello'
prompt_params = {'max_output_tokens': 1, 'temperature': 10000.0, 'top_k':
10, 'top_p': 0.5}
with patch('vertexai.language_models._language_models.ChatModel.start_chat'
) as start_chat:
mock_response = MagicMock()
mock_response.candidates = [Mock(text=response_text)]
mock_chat = MagicMock()
start_chat.return_value = mock_chat
mock_send_message = MagicMock(return_value=mock_response)
mock_chat.send_message = mock_send_message
model = ChatVertexAI(**prompt_params)
message = HumanMessage(content=user_prompt)
if stop:
response = model([message], stop=[stop])
else:
response = model([message])
assert response.content == response_text
mock_send_message.assert_called_once_with(user_prompt, candidate_count=1)
expected_stop_sequence = [stop] if stop else None
start_chat.assert_called_once_with(context=None, message_history=[], **
prompt_params, stop_sequences=expected_stop_sequence)
|
@pytest.mark.parametrize('stop', [None, 'stop1'])
def test_vertexai_args_passed(stop: Optional[str]) ->None:
    """Sampling params and stop words must be forwarded to the Vertex SDK."""
    response_text = 'Goodbye'
    user_prompt = 'Hello'
    prompt_params = {'max_output_tokens': 1, 'temperature': 10000.0,
        'top_k': 10, 'top_p': 0.5}
    # Patch the SDK chat entry point so no network call is made.
    with patch('vertexai.language_models._language_models.ChatModel.start_chat'
        ) as start_chat:
        mock_response = MagicMock()
        mock_response.candidates = [Mock(text=response_text)]
        mock_chat = MagicMock()
        start_chat.return_value = mock_chat
        mock_send_message = MagicMock(return_value=mock_response)
        mock_chat.send_message = mock_send_message
        model = ChatVertexAI(**prompt_params)
        message = HumanMessage(content=user_prompt)
        if stop:
            response = model([message], stop=[stop])
        else:
            response = model([message])
        assert response.content == response_text
        mock_send_message.assert_called_once_with(user_prompt,
            candidate_count=1)
        # Stop words are applied at chat-session creation, not per message.
        expected_stop_sequence = [stop] if stop else None
        start_chat.assert_called_once_with(context=None, message_history=[],
            **prompt_params, stop_sequences=expected_stop_sequence)
| null |
_default_params
|
"""Get the default parameters for calling aphrodite."""
return {'n': self.n, 'best_of': self.best_of, 'max_tokens': self.max_tokens,
'top_k': self.top_k, 'top_p': self.top_p, 'top_a': self.top_a, 'min_p':
self.min_p, 'temperature': self.temperature, 'presence_penalty': self.
presence_penalty, 'frequency_penalty': self.frequency_penalty,
'repetition_penalty': self.repetition_penalty, 'tfs': self.tfs,
'eta_cutoff': self.eta_cutoff, 'epsilon_cutoff': self.epsilon_cutoff,
'typical_p': self.typical_p, 'mirostat_mode': self.mirostat_mode,
'mirostat_tau': self.mirostat_tau, 'length_penalty': self.
length_penalty, 'early_stopping': self.early_stopping,
'use_beam_search': self.use_beam_search, 'stop': self.stop,
'ignore_eos': self.ignore_eos, 'logprobs': self.logprobs,
'prompt_logprobs': self.prompt_logprobs, 'custom_token_bans': self.
custom_token_bans, 'skip_special_tokens': self.skip_special_tokens,
'spaces_between_special_tokens': self.spaces_between_special_tokens,
'logit_bias': self.logit_bias}
|
@property
def _default_params(self) ->Dict[str, Any]:
    """Get the default parameters for calling aphrodite."""
    # Every sampling knob is mirrored 1:1 from the instance attributes;
    # insertion order below matches the original literal.
    param_names = ('n', 'best_of', 'max_tokens', 'top_k', 'top_p', 'top_a',
        'min_p', 'temperature', 'presence_penalty', 'frequency_penalty',
        'repetition_penalty', 'tfs', 'eta_cutoff', 'epsilon_cutoff',
        'typical_p', 'mirostat_mode', 'mirostat_tau', 'length_penalty',
        'early_stopping', 'use_beam_search', 'stop', 'ignore_eos',
        'logprobs', 'prompt_logprobs', 'custom_token_bans',
        'skip_special_tokens', 'spaces_between_special_tokens', 'logit_bias')
    return {name: getattr(self, name) for name in param_names}
|
Get the default parameters for calling aphrodite.
|
max_marginal_relevance_search_with_score_by_vector
|
"""Return docs selected using the maximal marginal relevance.
Maximal marginal relevance optimizes for similarity to query AND diversity
among selected documents.
Args:
query: Text to look up documents similar to.
k: Number of Documents to return. Defaults to 4.
fetch_k: Number of Documents to fetch to pass to MMR algorithm.
Defaults to 20.
lambda_mult: Number between 0 and 1 that determines the degree
of diversity among the results with 0 corresponding
to maximum diversity and 1 to minimum diversity.
Defaults to 0.5.
filter: Filter by metadata. Defaults to None.
search_params: Additional search params
score_threshold:
Define a minimal score threshold for the result.
If defined, less similar results will not be returned.
Score of the returned result might be higher or smaller than the
threshold depending on the Distance function used.
E.g. for cosine similarity only higher scores will be returned.
consistency:
Read consistency of the search. Defines how many replicas should be
queried before returning the result.
Values:
- int - number of replicas to query, values should present in all
queried replicas
- 'majority' - query all replicas, but return values present in the
majority of replicas
- 'quorum' - query the majority of replicas, return values present in
all of them
- 'all' - query all replicas, and return values present in all replicas
**kwargs:
Any other named arguments to pass through to QdrantClient.search()
Returns:
List of Documents selected by maximal marginal relevance and distance for
each.
"""
query_vector = embedding
if self.vector_name is not None:
query_vector = self.vector_name, query_vector
results = self.client.search(collection_name=self.collection_name,
query_vector=query_vector, query_filter=filter, search_params=
search_params, limit=fetch_k, with_payload=True, with_vectors=True,
score_threshold=score_threshold, consistency=consistency, **kwargs)
embeddings = [(result.vector.get(self.vector_name) if self.vector_name is not
None else result.vector) for result in results]
mmr_selected = maximal_marginal_relevance(np.array(embedding), embeddings,
k=k, lambda_mult=lambda_mult)
return [(self._document_from_scored_point(results[i], self.
content_payload_key, self.metadata_payload_key), results[i].score) for
i in mmr_selected]
|
def max_marginal_relevance_search_with_score_by_vector(self, embedding:
    List[float], k: int=4, fetch_k: int=20, lambda_mult: float=0.5, filter:
    Optional[MetadataFilter]=None, search_params: Optional[common_types.
    SearchParams]=None, score_threshold: Optional[float]=None, consistency:
    Optional[common_types.ReadConsistency]=None, **kwargs: Any) ->List[Tuple
    [Document, float]]:
    """Return docs selected using the maximal marginal relevance.
    Maximal marginal relevance optimizes for similarity to query AND diversity
    among selected documents.
    Args:
        embedding: Embedding vector to look up documents similar to.
        k: Number of Documents to return. Defaults to 4.
        fetch_k: Number of Documents to fetch to pass to MMR algorithm.
            Defaults to 20.
        lambda_mult: Number between 0 and 1 that determines the degree
            of diversity among the results with 0 corresponding
            to maximum diversity and 1 to minimum diversity.
            Defaults to 0.5.
        filter: Filter by metadata. Defaults to None.
        search_params: Additional search params
        score_threshold:
            Define a minimal score threshold for the result.
            If defined, less similar results will not be returned.
            Score of the returned result might be higher or smaller than the
            threshold depending on the Distance function used.
            E.g. for cosine similarity only higher scores will be returned.
        consistency:
            Read consistency of the search. Defines how many replicas should be
            queried before returning the result.
            Values:
            - int - number of replicas to query, values should present in all
                    queried replicas
            - 'majority' - query all replicas, but return values present in the
                           majority of replicas
            - 'quorum' - query the majority of replicas, return values present in
                         all of them
            - 'all' - query all replicas, and return values present in all replicas
        **kwargs:
            Any other named arguments to pass through to QdrantClient.search()
    Returns:
        List of Documents selected by maximal marginal relevance and distance for
        each.
    """
    query_vector = embedding
    if self.vector_name is not None:
        # Named-vector collections require a (name, vector) tuple for search.
        query_vector = self.vector_name, query_vector
    # Over-fetch ``fetch_k`` candidates with their stored vectors so the MMR
    # re-ranking can happen client-side.
    results = self.client.search(collection_name=self.collection_name,
        query_vector=query_vector, query_filter=filter, search_params=
        search_params, limit=fetch_k, with_payload=True, with_vectors=True,
        score_threshold=score_threshold, consistency=consistency, **kwargs)
    # Pull the raw vectors back out of the hits (per-name for named vectors).
    embeddings = [(result.vector.get(self.vector_name) if self.vector_name
        is not None else result.vector) for result in results]
    mmr_selected = maximal_marginal_relevance(np.array(embedding),
        embeddings, k=k, lambda_mult=lambda_mult)
    # Map the selected candidate indices back to (Document, score) pairs.
    return [(self._document_from_scored_point(results[i], self.
        content_payload_key, self.metadata_payload_key), results[i].score) for
        i in mmr_selected]
|
Return docs selected using the maximal marginal relevance.
Maximal marginal relevance optimizes for similarity to query AND diversity
among selected documents.
Args:
query: Text to look up documents similar to.
k: Number of Documents to return. Defaults to 4.
fetch_k: Number of Documents to fetch to pass to MMR algorithm.
Defaults to 20.
lambda_mult: Number between 0 and 1 that determines the degree
of diversity among the results with 0 corresponding
to maximum diversity and 1 to minimum diversity.
Defaults to 0.5.
filter: Filter by metadata. Defaults to None.
search_params: Additional search params
score_threshold:
Define a minimal score threshold for the result.
If defined, less similar results will not be returned.
Score of the returned result might be higher or smaller than the
threshold depending on the Distance function used.
E.g. for cosine similarity only higher scores will be returned.
consistency:
Read consistency of the search. Defines how many replicas should be
queried before returning the result.
Values:
- int - number of replicas to query, values should present in all
queried replicas
- 'majority' - query all replicas, but return values present in the
majority of replicas
- 'quorum' - query the majority of replicas, return values present in
all of them
- 'all' - query all replicas, and return values present in all replicas
**kwargs:
Any other named arguments to pass through to QdrantClient.search()
Returns:
List of Documents selected by maximal marginal relevance and distance for
each.
|
to_json_not_implemented
|
"""Serialize a "not implemented" object.
Args:
obj: object to serialize
Returns:
SerializedNotImplemented
"""
_id: List[str] = []
try:
if hasattr(obj, '__name__'):
_id = [*obj.__module__.split('.'), obj.__name__]
elif hasattr(obj, '__class__'):
_id = [*obj.__class__.__module__.split('.'), obj.__class__.__name__]
except Exception:
pass
result: SerializedNotImplemented = {'lc': 1, 'type': 'not_implemented',
'id': _id, 'repr': None}
try:
result['repr'] = repr(obj)
except Exception:
pass
return result
|
def to_json_not_implemented(obj: object) ->SerializedNotImplemented:
    """Serialize a "not implemented" object.
    Args:
        obj: object to serialize
    Returns:
        SerializedNotImplemented
    """
    path: List[str] = []
    # Best-effort dotted path: module + name for callables/classes,
    # module + class name for plain instances.
    try:
        if hasattr(obj, '__name__'):
            path = [*obj.__module__.split('.'), obj.__name__]
        elif hasattr(obj, '__class__'):
            cls = obj.__class__
            path = [*cls.__module__.split('.'), cls.__name__]
    except Exception:
        pass
    serialized: SerializedNotImplemented = {'lc': 1, 'type':
        'not_implemented', 'id': path, 'repr': None}
    # repr() may itself raise for exotic objects; keep None in that case.
    try:
        serialized['repr'] = repr(obj)
    except Exception:
        pass
    return serialized
|
Serialize a "not implemented" object.
Args:
obj: object to serialize
Returns:
SerializedNotImplemented
|
test_tiledb_mmr_with_metadatas_and_filter
|
texts = ['foo', 'foo', 'fou', 'foy']
metadatas = [{'page': i} for i in range(len(texts))]
docsearch = TileDB.from_texts(texts=texts, metadatas=metadatas, embedding=
ConsistentFakeEmbeddings(), index_uri=f'{str(tmp_path)}/flat',
index_type='FLAT')
query_vec = ConsistentFakeEmbeddings().embed_query(text='foo')
output = docsearch.max_marginal_relevance_search_with_score_by_vector(query_vec
, k=3, lambda_mult=0.1, filter={'page': 1})
assert len(output) == 1
assert output[0][0] == Document(page_content='foo', metadata={'page': 1})
assert output[0][1] == 0.0
docsearch = TileDB.from_texts(texts=texts, metadatas=metadatas, embedding=
ConsistentFakeEmbeddings(), index_uri=f'{str(tmp_path)}/ivf_flat',
index_type='IVF_FLAT')
query_vec = ConsistentFakeEmbeddings().embed_query(text='foo')
output = docsearch.max_marginal_relevance_search_with_score_by_vector(query_vec
, k=3, lambda_mult=0.1, filter={'page': 1}, nprobe=docsearch.
vector_index.partitions)
assert len(output) == 1
assert output[0][0] == Document(page_content='foo', metadata={'page': 1})
assert output[0][1] == 0.0
|
@pytest.mark.requires('tiledb-vector-search')
def test_tiledb_mmr_with_metadatas_and_filter(tmp_path: Path) ->None:
    """MMR with a metadata filter must return only the matching document."""
    texts = ['foo', 'foo', 'fou', 'foy']
    metadatas = [{'page': i} for i in range(len(texts))]
    # FLAT index: the page==1 filter narrows the MMR candidates to one hit.
    docsearch = TileDB.from_texts(texts=texts, metadatas=metadatas,
        embedding=ConsistentFakeEmbeddings(), index_uri=
        f'{str(tmp_path)}/flat', index_type='FLAT')
    query_vec = ConsistentFakeEmbeddings().embed_query(text='foo')
    output = docsearch.max_marginal_relevance_search_with_score_by_vector(
        query_vec, k=3, lambda_mult=0.1, filter={'page': 1})
    assert len(output) == 1
    assert output[0][0] == Document(page_content='foo', metadata={'page': 1})
    assert output[0][1] == 0.0
    # Same scenario on an IVF_FLAT index; nprobe covers all partitions so the
    # approximate search is exhaustive.
    docsearch = TileDB.from_texts(texts=texts, metadatas=metadatas,
        embedding=ConsistentFakeEmbeddings(), index_uri=
        f'{str(tmp_path)}/ivf_flat', index_type='IVF_FLAT')
    query_vec = ConsistentFakeEmbeddings().embed_query(text='foo')
    output = docsearch.max_marginal_relevance_search_with_score_by_vector(
        query_vec, k=3, lambda_mult=0.1, filter={'page': 1}, nprobe=
        docsearch.vector_index.partitions)
    assert len(output) == 1
    assert output[0][0] == Document(page_content='foo', metadata={'page': 1})
    assert output[0][1] == 0.0
| null |
test_all_imports
|
assert set(__all__) == set(EXPECTED_ALL)
|
def test_all_imports() ->None:
    """Package ``__all__`` must match the expected export list exactly."""
    assert set(EXPECTED_ALL) == set(__all__)
| null |
drop_vector_indexes
|
"""Cleanup all vector indexes"""
all_indexes = store.query(
"""
SHOW INDEXES YIELD name, type
WHERE type IN ["VECTOR", "FULLTEXT"]
RETURN name
"""
)
for index in all_indexes:
store.query(f"DROP INDEX {index['name']}")
|
def drop_vector_indexes(store: Neo4jVector) ->None:
    """Cleanup all vector indexes"""
    # List every vector / fulltext index, then drop each one by name.
    index_rows = store.query(
        """
    SHOW INDEXES YIELD name, type
    WHERE type IN ["VECTOR", "FULLTEXT"]
    RETURN name
    """
        )
    for row in index_rows:
        name = row['name']
        store.query(f'DROP INDEX {name}')
|
Cleanup all vector indexes
|
parse
|
return self.parse_result([Generation(text=text)])
|
def parse(self, text: str) ->Any:
    """Parse a bare string by wrapping it in a ``Generation`` and delegating."""
    wrapped = [Generation(text=text)]
    return self.parse_result(wrapped)
| null |
input_keys
|
"""Return the input keys.
:meta private:
"""
return [self.input_key]
|
@property
def input_keys(self) ->List[str]:
    """Return the input keys.
    :meta private:
    """
    keys = [self.input_key]
    return keys
|
Return the input keys.
:meta private:
|
__init__
|
"""
Initialize the object for file processing with Azure Document Intelligence
(formerly Form Recognizer).
This constructor initializes a AzureAIDocumentIntelligenceParser object to be
used for parsing files using the Azure Document Intelligence API. The load
method generates Documents whose content representations are determined by the
mode parameter.
Parameters:
-----------
api_endpoint: str
The API endpoint to use for DocumentIntelligenceClient construction.
api_key: str
The API key to use for DocumentIntelligenceClient construction.
file_path : Optional[str]
The path to the file that needs to be loaded.
Either file_path or url_path must be specified.
url_path : Optional[str]
The URL to the file that needs to be loaded.
Either file_path or url_path must be specified.
api_version: Optional[str]
The API version for DocumentIntelligenceClient. Setting None to use
the default value from SDK.
api_model: str
The model name or ID to be used for form recognition in Azure.
Examples:
---------
>>> obj = AzureAIDocumentIntelligenceLoader(
... file_path="path/to/file",
... api_endpoint="https://endpoint.azure.com",
... api_key="APIKEY",
... api_version="2023-10-31-preview",
... model="prebuilt-document"
... )
"""
assert file_path is not None or url_path is not None, 'file_path or url_path must be provided'
self.file_path = file_path
self.url_path = url_path
self.parser = AzureAIDocumentIntelligenceParser(api_endpoint=api_endpoint,
api_key=api_key, api_version=api_version, api_model=api_model, mode=mode)
|
def __init__(self, api_endpoint: str, api_key: str, file_path: Optional[str
    ]=None, url_path: Optional[str]=None, api_version: Optional[str]=None,
    api_model: str='prebuilt-layout', mode: str='markdown') ->None:
    """
    Initialize the object for file processing with Azure Document Intelligence
    (formerly Form Recognizer).
    This constructor initializes a AzureAIDocumentIntelligenceParser object to be
    used for parsing files using the Azure Document Intelligence API. The load
    method generates Documents whose content representations are determined by the
    mode parameter.
    Parameters:
    -----------
    api_endpoint: str
        The API endpoint to use for DocumentIntelligenceClient construction.
    api_key: str
        The API key to use for DocumentIntelligenceClient construction.
    file_path : Optional[str]
        The path to the file that needs to be loaded.
        Either file_path or url_path must be specified.
    url_path : Optional[str]
        The URL to the file that needs to be loaded.
        Either file_path or url_path must be specified.
    api_version: Optional[str]
        The API version for DocumentIntelligenceClient. Setting None to use
        the default value from SDK.
    api_model: str
        The model name or ID to be used for form recognition in Azure.
    Raises:
    -------
    ValueError
        If neither file_path nor url_path is provided.
    Examples:
    ---------
    >>> obj = AzureAIDocumentIntelligenceLoader(
    ...     file_path="path/to/file",
    ...     api_endpoint="https://endpoint.azure.com",
    ...     api_key="APIKEY",
    ...     api_version="2023-10-31-preview",
    ...     model="prebuilt-document"
    ... )
    """
    if file_path is None and url_path is None:
        # Raise a real exception instead of ``assert`` so the validation
        # survives running under ``python -O`` (asserts are stripped).
        raise ValueError('file_path or url_path must be provided')
    self.file_path = file_path
    self.url_path = url_path
    self.parser = AzureAIDocumentIntelligenceParser(api_endpoint=
        api_endpoint, api_key=api_key, api_version=api_version, api_model=
        api_model, mode=mode)
|
Initialize the object for file processing with Azure Document Intelligence
(formerly Form Recognizer).
This constructor initializes a AzureAIDocumentIntelligenceParser object to be
used for parsing files using the Azure Document Intelligence API. The load
method generates Documents whose content representations are determined by the
mode parameter.
Parameters:
-----------
api_endpoint: str
The API endpoint to use for DocumentIntelligenceClient construction.
api_key: str
The API key to use for DocumentIntelligenceClient construction.
file_path : Optional[str]
The path to the file that needs to be loaded.
Either file_path or url_path must be specified.
url_path : Optional[str]
The URL to the file that needs to be loaded.
Either file_path or url_path must be specified.
api_version: Optional[str]
The API version for DocumentIntelligenceClient. Setting None to use
the default value from SDK.
api_model: str
The model name or ID to be used for form recognition in Azure.
Examples:
---------
>>> obj = AzureAIDocumentIntelligenceLoader(
... file_path="path/to/file",
... api_endpoint="https://endpoint.azure.com",
... api_key="APIKEY",
... api_version="2023-10-31-preview",
... model="prebuilt-document"
... )
|
_on_chat_model_start
|
"""Persist an LLM run."""
if run.parent_run_id is None:
run.reference_example_id = self.example_id
self._submit(self._persist_run_single, _copy(run))
|
def _on_chat_model_start(self, run: Run) ->None:
    """Persist an LLM run."""
    is_root_run = run.parent_run_id is None
    if is_root_run:
        # Only top-level runs carry the reference example id.
        run.reference_example_id = self.example_id
    self._submit(self._persist_run_single, _copy(run))
|
Persist an LLM run.
|
test_initialize_watsonxllm_cpd_bad_path_apikey_without_username
|
try:
WatsonxLLM(model_id='google/flan-ul2', url=
'https://cpd-zen.apps.cpd48.cp.fyre.ibm.com', apikey='test_apikey')
except ValueError as e:
assert 'WATSONX_USERNAME' in e.__str__()
|
def test_initialize_watsonxllm_cpd_bad_path_apikey_without_username() ->None:
    """An apikey without a username must raise a ValueError naming WATSONX_USERNAME."""
    try:
        WatsonxLLM(model_id='google/flan-ul2', url=
            'https://cpd-zen.apps.cpd48.cp.fyre.ibm.com', apikey='test_apikey')
    except ValueError as e:
        assert 'WATSONX_USERNAME' in e.__str__()
    else:
        # The original test passed silently when no exception was raised;
        # fail explicitly so a missing validation is caught.
        raise AssertionError('expected ValueError when username is missing')
| null |
similarity_search_by_vector
|
"""Return docs most similar to embedding vector.
Args:
embedding: Embedding to look up documents similar to.
k: Number of Documents to return. Defaults to 4.
filter: Doc fields filter conditions that meet the SQL where clause
specification.
Returns:
List of Documents most similar to the query vector.
"""
docs_and_scores = self._similarity_search_with_score_by_vector(embedding, k,
filter)
return [doc for doc, _ in docs_and_scores]
|
def similarity_search_by_vector(self, embedding: List[float], k: int=4,
    filter: Optional[str]=None, **kwargs: Any) ->List[Document]:
    """Return docs most similar to embedding vector.
    Args:
        embedding: Embedding to look up documents similar to.
        k: Number of Documents to return. Defaults to 4.
        filter: Doc fields filter conditions that meet the SQL where clause
            specification.
    Returns:
        List of Documents most similar to the query vector.
    """
    scored_docs = self._similarity_search_with_score_by_vector(embedding,
        k, filter)
    # Drop the scores; callers of this variant only want the documents.
    return [document for document, _score in scored_docs]
|
Return docs most similar to embedding vector.
Args:
embedding: Embedding to look up documents similar to.
k: Number of Documents to return. Defaults to 4.
filter: Doc fields filter conditions that meet the SQL where clause
specification.
Returns:
List of Documents most similar to the query vector.
|
_Break
|
self.fill('break')
|
def _Break(self, t):
    """Emit a ``break`` statement for the given AST node."""
    self.fill('break')
| null |
get_spans
|
for quote in self.substring_quote:
yield from self._get_span(quote, context)
|
def get_spans(self, context: str) ->Iterator[str]:
    """Yield every span produced by locating each stored quote in *context*."""
    for stored_quote in self.substring_quote:
        yield from self._get_span(stored_quote, context)
| null |
remove_dependencies_from_pyproject_toml
|
"""Remove dependencies from pyproject.toml."""
with open(pyproject_toml, encoding='utf-8') as f:
pyproject: Dict[str, Any] = load(f)
dependencies = pyproject['tool']['poetry']['dependencies']
for name in local_editable_dependencies:
try:
del dependencies[name]
except KeyError:
pass
with open(pyproject_toml, 'w', encoding='utf-8') as f:
dump(pyproject, f)
|
def remove_dependencies_from_pyproject_toml(pyproject_toml: Path,
    local_editable_dependencies: Iterable[str]) ->None:
    """Remove dependencies from pyproject.toml."""
    with open(pyproject_toml, encoding='utf-8') as f:
        pyproject: Dict[str, Any] = load(f)
    deps_table = pyproject['tool']['poetry']['dependencies']
    # Silently skip names that are not present in the dependency table.
    for dep_name in local_editable_dependencies:
        try:
            del deps_table[dep_name]
        except KeyError:
            pass
    with open(pyproject_toml, 'w', encoding='utf-8') as f:
        dump(pyproject, f)
|
Remove dependencies from pyproject.toml.
|
args
|
"""The tool's input arguments."""
if self.args_schema is not None:
return self.args_schema.schema()['properties']
return {'tool_input': {'type': 'string'}}
|
@property
def args(self) ->dict:
    """The tool's input arguments."""
    if self.args_schema is None:
        # No schema declared: fall back to a single free-form string input.
        return {'tool_input': {'type': 'string'}}
    return self.args_schema.schema()['properties']
|
The tool's input arguments.
|
test_generate
|
"""Test valid call to volc engine."""
llm = VolcEngineMaasLLM()
output = llm.generate(['tell me a joke'])
assert isinstance(output, LLMResult)
assert isinstance(output.generations, list)
|
def test_generate() ->None:
    """Test valid call to volc engine."""
    model = VolcEngineMaasLLM()
    result = model.generate(['tell me a joke'])
    assert isinstance(result, LLMResult)
    assert isinstance(result.generations, list)
|
Test valid call to volc engine.
|
map_to_base_relationship
|
"""Map the KnowledgeGraph Relationship to the base Relationship."""
source = map_to_base_node(rel.source)
target = map_to_base_node(rel.target)
properties = props_to_dict(rel.properties) if rel.properties else {}
return BaseRelationship(source=source, target=target, type=rel.type,
properties=properties)
|
def map_to_base_relationship(rel: Relationship) ->BaseRelationship:
    """Map the KnowledgeGraph Relationship to the base Relationship."""
    props = props_to_dict(rel.properties) if rel.properties else {}
    return BaseRelationship(source=map_to_base_node(rel.source), target=
        map_to_base_node(rel.target), type=rel.type, properties=props)
|
Map the KnowledgeGraph Relationship to the base Relationship.
|
_identifying_params
|
"""Get the identifying parameters."""
return {**{'model_id': self.model_id}, **{'model_kwargs': self.model_kwargs}}
|
@property
def _identifying_params(self) ->Mapping[str, Any]:
    """Get the identifying parameters."""
    return {'model_id': self.model_id, 'model_kwargs': self.model_kwargs}
|
Get the identifying parameters.
|
test_load_multiple_pages
|
loader = GitbookLoader(web_page, load_all_paths=True)
result = loader.load()
print(len(result))
assert len(result) > 10
|
@pytest.mark.parametrize('web_page', ['https://platform-docs.opentargets.org/']
    )
def test_load_multiple_pages(self, web_page: str) ->None:
    """Crawling a whole Gitbook site should yield many documents."""
    docs = GitbookLoader(web_page, load_all_paths=True).load()
    print(len(docs))
    assert len(docs) > 10
| null |
__init__
|
"""Initialize by creating the engine and all tables."""
engine = create_engine(f'sqlite:///{database_path}')
super().__init__(engine)
|
def __init__(self, database_path: str='.langchain.db'):
    """Initialize by creating the engine and all tables."""
    connection_string = f'sqlite:///{database_path}'
    super().__init__(create_engine(connection_string))
|
Initialize by creating the engine and all tables.
|
_identifying_params
|
"""Get the identifying parameters."""
return {**{}, **self._default_params}
|
@property
def _identifying_params(self) ->Mapping[str, Any]:
    """Get the identifying parameters."""
    # Return a fresh copy so callers cannot mutate the defaults.
    return dict(self._default_params)
|
Get the identifying parameters.
|
lower_case_transform
|
v = v.lower()
return v
|
@validator('name')
def lower_case_transform(cls, v: str) ->str:
    """Normalize the ``name`` field to lower case."""
    return v.lower()
| null |
test_extract_functions_classes
|
"""Test that functions and classes are extracted correctly."""
segmenter = CobolSegmenter(EXAMPLE_CODE)
extracted_code = segmenter.extract_functions_classes()
assert extracted_code == [
"""A000-INITIALIZE-PARA.
DISPLAY 'Initialization Paragraph'.
MOVE 'New Value' TO SAMPLE-VAR."""
, """A100-PROCESS-PARA.
DISPLAY SAMPLE-VAR.
STOP RUN."""]
|
def test_extract_functions_classes() ->None:
    """Test that functions and classes are extracted correctly."""
    segmenter = CobolSegmenter(EXAMPLE_CODE)
    extracted_code = segmenter.extract_functions_classes()
    # Each COBOL paragraph (A000-..., A100-...) is returned as its own snippet.
    assert extracted_code == [
        """A000-INITIALIZE-PARA.
    DISPLAY 'Initialization Paragraph'.
    MOVE 'New Value' TO SAMPLE-VAR."""
        , """A100-PROCESS-PARA.
    DISPLAY SAMPLE-VAR.
    STOP RUN."""]
|
Test that functions and classes are extracted correctly.
|
test__convert_dict_to_message_human
|
message = {'role': 'user', 'content': 'foo'}
result = convert_dict_to_message(message)
expected_output = HumanMessage(content='foo')
assert result == expected_output
|
def test__convert_dict_to_message_human() ->None:
    """A user-role dict converts to an equivalent HumanMessage."""
    payload = {'role': 'user', 'content': 'foo'}
    converted = convert_dict_to_message(payload)
    assert converted == HumanMessage(content='foo')
| null |
test_similarity_search_exact_search
|
"""Test end to end construction and search with metadata."""
texts = ['foo', 'bar', 'baz']
docsearch = ElasticsearchStore.from_texts(texts, FakeEmbeddings(), **
elasticsearch_connection, index_name=index_name, strategy=
ElasticsearchStore.ExactRetrievalStrategy())
expected_query = {'query': {'script_score': {'query': {'match_all': {}},
'script': {'source':
"cosineSimilarity(params.query_vector, 'vector') + 1.0", 'params': {
'query_vector': [1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0]}}}}}
def assert_query(query_body: dict, query: str) ->dict:
assert query_body == expected_query
return query_body
output = docsearch.similarity_search('foo', k=1, custom_query=assert_query)
assert output == [Document(page_content='foo')]
|
def test_similarity_search_exact_search(self, elasticsearch_connection:
    dict, index_name: str) ->None:
    """Test end to end construction and search with metadata."""
    texts = ['foo', 'bar', 'baz']
    docsearch = ElasticsearchStore.from_texts(texts, FakeEmbeddings(), **
        elasticsearch_connection, index_name=index_name, strategy=
        ElasticsearchStore.ExactRetrievalStrategy())
    # Exact retrieval is expected to issue a script_score query over all
    # docs using cosineSimilarity; the 10-dim query_vector presumably comes
    # from FakeEmbeddings — verify against that stub if it changes.
    expected_query = {'query': {'script_score': {'query': {'match_all': {}},
        'script': {'source':
        "cosineSimilarity(params.query_vector, 'vector') + 1.0", 'params':
        {'query_vector': [1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0]}}}}
        }
    def assert_query(query_body: dict, query: str) ->dict:
        # Hook passed as custom_query: intercepts the outgoing request body
        # so the test can verify it before it is sent.
        assert query_body == expected_query
        return query_body
    output = docsearch.similarity_search('foo', k=1, custom_query=assert_query)
    assert output == [Document(page_content='foo')]
|
Test end to end construction and search with metadata.
|
test_api_key_is_string
|
llm = EmbaasEmbeddings(embaas_api_key='secret-api-key')
assert isinstance(llm.embaas_api_key, SecretStr)
|
def test_api_key_is_string() ->None:
    """The API key must be wrapped in a SecretStr, not kept as plain text."""
    embedder = EmbaasEmbeddings(embaas_api_key='secret-api-key')
    assert isinstance(embedder.embaas_api_key, SecretStr)
| null |
_import_openapi_utils_openapi_utils
|
from langchain_community.tools.openapi.utils.openapi_utils import OpenAPISpec
return OpenAPISpec
|
def _import_openapi_utils_openapi_utils() ->Any:
    """Lazily import and return ``OpenAPISpec`` (deferred to keep module import cheap)."""
    from langchain_community.tools.openapi.utils.openapi_utils import OpenAPISpec
    return OpenAPISpec
| null |
embeddings
|
return self.embedding_func
|
@property
def embeddings(self) ->Optional[Embeddings]:
    """Return the configured embedding function, or None if none was set."""
    return self.embedding_func
| null |
_import_rocksetdb
|
from langchain_community.vectorstores.rocksetdb import Rockset
return Rockset
|
def _import_rocksetdb() ->Any:
    """Lazily import and return the ``Rockset`` vector store class."""
    from langchain_community.vectorstores.rocksetdb import Rockset
    return Rockset
| null |
similarity_search_with_score
|
embedding_vector = self.embedding.embed_query(query)
return self.similarity_search_with_score_by_vector(embedding_vector, k,
filter=filter)
|
def similarity_search_with_score(self, query: str, k: int=4, filter:
    Optional[Dict[str, str]]=None) ->List[Tuple[Document, float]]:
    """Embed ``query`` and run a scored vector similarity search.

    Args:
        query: Text to search for.
        k: Number of results to return.
        filter: Optional metadata filter forwarded to the vector search.

    Returns:
        List of (document, score) pairs.
    """
    query_embedding = self.embedding.embed_query(query)
    return self.similarity_search_with_score_by_vector(
        query_embedding, k, filter=filter)
| null |
_search
|
"""Return searched documents result from BES
Args:
query: Text to look up documents similar to.
query_vector: Embedding to look up documents similar to.
filter: Array of Baidu ElasticSearch filter clauses to apply to the query.
custom_query: Function to modify the query body before it is sent to BES.
Returns:
List of Documents most similar to the query and score for each
"""
if self.embedding and query is not None:
query_vector = self.embedding.embed_query(query)
query_body = self._query_body(query_vector=query_vector, filter=filter,
search_params=search_params)
if custom_query is not None:
query_body = custom_query(query_body, query)
logger.debug(f'Calling custom_query, Query body now: {query_body}')
logger.debug(f'Query body: {query_body}')
response = self.client.search(index=self.index_name, body=query_body)
logger.debug(f'response={response}')
hits = [hit for hit in response['hits']['hits']]
docs_and_scores = [(Document(page_content=hit['_source'][self.query_field],
metadata=hit['_source']['metadata']), hit['_score']) for hit in hits]
return docs_and_scores
|
def _search(self, query: Optional[str]=None, query_vector: Union[List[float
    ], None]=None, filter: Optional[dict]=None, custom_query: Optional[
    Callable[[Dict, Union[str, None]], Dict]]=None, search_params:
    Optional[Dict]=None) ->List[Tuple[Document, float]]:
    """Return searched documents result from BES.

    Args:
        query: Text to look up documents similar to.
        query_vector: Embedding to look up documents similar to.
        filter: Array of Baidu ElasticSearch filter clauses to apply to the query.
        custom_query: Function to modify the query body before it is sent to BES.
        search_params: Extra search options forwarded to the query builder.

    Returns:
        List of Documents most similar to the query and score for each.
    """
    # Use a None sentinel instead of a mutable `{}` default, which would be
    # shared (and potentially mutated) across calls.
    if search_params is None:
        search_params = {}
    # A text query with a configured embedder takes precedence over any
    # caller-supplied query_vector.
    if self.embedding and query is not None:
        query_vector = self.embedding.embed_query(query)
    query_body = self._query_body(query_vector=query_vector, filter=filter,
        search_params=search_params)
    if custom_query is not None:
        query_body = custom_query(query_body, query)
        logger.debug(f'Calling custom_query, Query body now: {query_body}')
    logger.debug(f'Query body: {query_body}')
    response = self.client.search(index=self.index_name, body=query_body)
    logger.debug(f'response={response}')
    hits = [hit for hit in response['hits']['hits']]
    docs_and_scores = [(Document(page_content=hit['_source'][self.
        query_field], metadata=hit['_source']['metadata']), hit['_score']) for
        hit in hits]
    return docs_and_scores
|
Return searched documents result from BES
Args:
query: Text to look up documents similar to.
query_vector: Embedding to look up documents similar to.
filter: Array of Baidu ElasticSearch filter clauses to apply to the query.
custom_query: Function to modify the query body before it is sent to BES.
Returns:
List of Documents most similar to the query and score for each
|
create_json_chat_agent
|
"""Create an agent that uses JSON to format its logic, build for Chat Models.
Examples:
.. code-block:: python
from langchain import hub
from langchain_community.chat_models import ChatOpenAI
from langchain.agents import AgentExecutor, create_json_chat_agent
prompt = hub.pull("hwchase17/react-chat-json")
model = ChatOpenAI()
tools = ...
agent = create_json_chat_agent(model, tools, prompt)
agent_executor = AgentExecutor(agent=agent, tools=tools)
agent_executor.invoke({"input": "hi"})
# Using with chat history
from langchain_core.messages import AIMessage, HumanMessage
agent_executor.invoke(
{
"input": "what's my name?",
"chat_history": [
HumanMessage(content="hi! my name is bob"),
AIMessage(content="Hello Bob! How can I assist you today?"),
],
}
)
Args:
llm: LLM to use as the agent.
tools: Tools this agent has access to.
prompt: The prompt to use, must have input keys of
`tools`, `tool_names`, and `agent_scratchpad`.
Returns:
A runnable sequence representing an agent. It takes as input all the same input
variables as the prompt passed in does. It returns as output either an
AgentAction or AgentFinish.
"""
missing_vars = {'tools', 'tool_names', 'agent_scratchpad'}.difference(prompt
.input_variables)
if missing_vars:
raise ValueError(f'Prompt missing required variables: {missing_vars}')
prompt = prompt.partial(tools=render_text_description(list(tools)),
tool_names=', '.join([t.name for t in tools]))
llm_with_stop = llm.bind(stop=['\nObservation'])
agent = RunnablePassthrough.assign(agent_scratchpad=lambda x:
format_log_to_messages(x['intermediate_steps'], template_tool_response=
TEMPLATE_TOOL_RESPONSE)) | prompt | llm_with_stop | JSONAgentOutputParser()
return agent
|
def create_json_chat_agent(llm: BaseLanguageModel, tools: Sequence[BaseTool
    ], prompt: ChatPromptTemplate) ->Runnable:
    """Create an agent that uses JSON to format its logic, build for Chat Models.
    Examples:
        .. code-block:: python
            from langchain import hub
            from langchain_community.chat_models import ChatOpenAI
            from langchain.agents import AgentExecutor, create_json_chat_agent
            prompt = hub.pull("hwchase17/react-chat-json")
            model = ChatOpenAI()
            tools = ...
            agent = create_json_chat_agent(model, tools, prompt)
            agent_executor = AgentExecutor(agent=agent, tools=tools)
            agent_executor.invoke({"input": "hi"})
            # Using with chat history
            from langchain_core.messages import AIMessage, HumanMessage
            agent_executor.invoke(
                {
                    "input": "what's my name?",
                    "chat_history": [
                        HumanMessage(content="hi! my name is bob"),
                        AIMessage(content="Hello Bob! How can I assist you today?"),
                    ],
                }
            )
    Args:
        llm: LLM to use as the agent.
        tools: Tools this agent has access to.
        prompt: The prompt to use, must have input keys of
            `tools`, `tool_names`, and `agent_scratchpad`.
    Returns:
        A runnable sequence representing an agent. It takes as input all the same input
        variables as the prompt passed in does. It returns as output either an
        AgentAction or AgentFinish.
    """
    # Fail fast if the prompt is missing any variable this factory fills in.
    missing_vars = {'tools', 'tool_names', 'agent_scratchpad'}.difference(
        prompt.input_variables)
    if missing_vars:
        raise ValueError(f'Prompt missing required variables: {missing_vars}')
    # Pre-fill tool descriptions and names so the agent prompt is complete.
    prompt = prompt.partial(tools=render_text_description(list(tools)),
        tool_names=', '.join([t.name for t in tools]))
    # Stop generation before the model fabricates a tool observation itself.
    llm_with_stop = llm.bind(stop=['\nObservation'])
    # Pipeline: inject intermediate steps as scratchpad messages -> prompt
    # -> LLM -> parse the JSON action/finish decision.
    agent = RunnablePassthrough.assign(agent_scratchpad=lambda x:
        format_log_to_messages(x['intermediate_steps'],
        template_tool_response=TEMPLATE_TOOL_RESPONSE)
        ) | prompt | llm_with_stop | JSONAgentOutputParser()
    return agent
|
Create an agent that uses JSON to format its logic, build for Chat Models.
Examples:
.. code-block:: python
from langchain import hub
from langchain_community.chat_models import ChatOpenAI
from langchain.agents import AgentExecutor, create_json_chat_agent
prompt = hub.pull("hwchase17/react-chat-json")
model = ChatOpenAI()
tools = ...
agent = create_json_chat_agent(model, tools, prompt)
agent_executor = AgentExecutor(agent=agent, tools=tools)
agent_executor.invoke({"input": "hi"})
# Using with chat history
from langchain_core.messages import AIMessage, HumanMessage
agent_executor.invoke(
{
"input": "what's my name?",
"chat_history": [
HumanMessage(content="hi! my name is bob"),
AIMessage(content="Hello Bob! How can I assist you today?"),
],
}
)
Args:
llm: LLM to use as the agent.
tools: Tools this agent has access to.
prompt: The prompt to use, must have input keys of
`tools`, `tool_names`, and `agent_scratchpad`.
Returns:
A runnable sequence representing an agent. It takes as input all the same input
variables as the prompt passed in does. It returns as output either an
AgentAction or AgentFinish.
|
validate_environment
|
"""Validate that api key and python package exists in environment."""
try:
from bs4 import BeautifulSoup
except ImportError:
raise ImportError(
'Could not import bs4 python package. Please install it with `pip install bs4`.'
)
return values
|
@root_validator()
def validate_environment(cls, values: Dict) ->Dict:
    """Validate that api key and python package exists in environment."""
    try:
        # Import is only an availability check; the name is intentionally unused.
        from bs4 import BeautifulSoup  # noqa: F401
    except ImportError:
        raise ImportError(
            'Could not import bs4 python package. Please install it with `pip install bs4`.'
            )
    return values
|
Validate that api key and python package exists in environment.
|
check_mypy
|
"""Run mypy on a file."""
cmd = (
f"mypy {'--strict' if strict else ''} --follow-imports={follow_imports} {filepath}"
)
subprocess.check_output(cmd, stderr=subprocess.STDOUT, shell=True, text=
True, timeout=3)
|
def check_mypy(filepath: str, strict: bool=True, follow_imports: str='skip'):
    """Run mypy on a file.

    Args:
        filepath: Path of the file (or package) to type-check.
        strict: Whether to pass ``--strict`` to mypy.
        follow_imports: Value for mypy's ``--follow-imports`` option.

    Raises:
        subprocess.CalledProcessError: If mypy exits non-zero (type errors).
        subprocess.TimeoutExpired: If mypy does not finish within 3 seconds.
    """
    # Build an argv list with shell=False so `filepath` can never be
    # interpreted by the shell, and so no empty argument slot is left in the
    # command when strict is False (the old f-string produced a double space).
    cmd = ['mypy']
    if strict:
        cmd.append('--strict')
    cmd.extend([f'--follow-imports={follow_imports}', filepath])
    subprocess.check_output(cmd, stderr=subprocess.STDOUT, text=True, timeout=3)
|
Run mypy on a file.
|
get_current_entities
|
chain = LLMChain(llm=self.llm, prompt=self.entity_extraction_prompt)
buffer_string = get_buffer_string(self.chat_memory.messages[-self.k * 2:],
human_prefix=self.human_prefix, ai_prefix=self.ai_prefix)
output = chain.predict(history=buffer_string, input=input_string)
return get_entities(output)
|
def get_current_entities(self, input_string: str) ->List[str]:
    """Run the entity-extraction chain over recent history plus the new
    input and return the extracted entity strings."""
    chain = LLMChain(llm=self.llm, prompt=self.entity_extraction_prompt)
    # Context window: only the last k exchanges (2 messages per exchange).
    buffer_string = get_buffer_string(self.chat_memory.messages[-self.k * 2
        :], human_prefix=self.human_prefix, ai_prefix=self.ai_prefix)
    output = chain.predict(history=buffer_string, input=input_string)
    return get_entities(output)
| null |
parse_iter
|
"""Parse the output of an LLM call."""
return re.finditer(self.pattern, text, re.MULTILINE)
|
def parse_iter(self, text: str) ->Iterator[re.Match]:
    """Parse the output of an LLM call.

    Yields one ``re.Match`` per occurrence of ``self.pattern`` in ``text``,
    matched line-by-line (``re.MULTILINE``).
    """
    match_iterator = re.finditer(self.pattern, text, re.MULTILINE)
    return match_iterator
|
Parse the output of an LLM call.
|
__init__
|
warnings.warn(
'`MlflowAIGateway` is deprecated. Use `Mlflow` or `Databricks` instead.',
DeprecationWarning)
try:
import mlflow.gateway
except ImportError as e:
raise ImportError(
'Could not import `mlflow.gateway` module. Please install it with `pip install mlflow[gateway]`.'
) from e
super().__init__(**kwargs)
if self.gateway_uri:
mlflow.gateway.set_gateway_uri(self.gateway_uri)
|
def __init__(self, **kwargs: Any):
    """Initialize the deprecated MLflow AI Gateway wrapper.

    Emits a DeprecationWarning, verifies that ``mlflow.gateway`` is
    importable, then delegates field initialization to the parent class and
    configures the gateway URI if one was provided.
    """
    warnings.warn(
        '`MlflowAIGateway` is deprecated. Use `Mlflow` or `Databricks` instead.'
        , DeprecationWarning)
    try:
        import mlflow.gateway
    except ImportError as e:
        raise ImportError(
            'Could not import `mlflow.gateway` module. Please install it with `pip install mlflow[gateway]`.'
            ) from e
    super().__init__(**kwargs)
    # gateway_uri is presumably a field populated by the parent __init__
    # above — confirm against the class definition.
    if self.gateway_uri:
        mlflow.gateway.set_gateway_uri(self.gateway_uri)
| null |
get_package_root
|
package_root = Path.cwd() if cwd is None else cwd
visited: Set[Path] = set()
while package_root not in visited:
visited.add(package_root)
pyproject_path = package_root / 'pyproject.toml'
if pyproject_path.exists():
return package_root
package_root = package_root.parent
raise FileNotFoundError('No pyproject.toml found')
|
def get_package_root(cwd: Optional[Path]=None) ->Path:
    """Return the closest directory (starting at ``cwd``, walking upward)
    that contains a ``pyproject.toml``.

    Args:
        cwd: Directory to start from; defaults to the current working directory.

    Raises:
        FileNotFoundError: If no directory up to the filesystem root contains
            a ``pyproject.toml``.
    """
    start = Path.cwd() if cwd is None else cwd
    for candidate in (start, *start.parents):
        if (candidate / 'pyproject.toml').exists():
            return candidate
    raise FileNotFoundError('No pyproject.toml found')
| null |
on_tool_start
|
self.on_tool_start_common()
|
def on_tool_start(self, *args: Any, **kwargs: Any) ->Any:
    """Callback fired when a tool starts; all arguments are ignored and the
    call is forwarded to the shared tool-start handler."""
    self.on_tool_start_common()
| null |
assert_query
|
assert query_body == {'knn': {'field': 'vector', 'filter': [], 'k': 1,
'num_candidates': 50, 'query_vector': [1.0, 1.0, 1.0, 1.0, 1.0, 1.0,
1.0, 1.0, 1.0, 0.0]}, 'query': {'bool': {'filter': [], 'must': [{
'match': {'text': {'query': 'foo'}}}]}}, 'rank': {'rrf': {}}}
return query_body
|
def assert_query(query_body: dict, query: str) ->dict:
    """Verify the generated kNN + RRF query body matches the expected shape,
    then return it unchanged (used as a ``custom_query`` hook)."""
    expected_body = {
        'knn': {
            'field': 'vector',
            'filter': [],
            'k': 1,
            'num_candidates': 50,
            'query_vector': [1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0],
        },
        'query': {'bool': {'filter': [], 'must': [{'match': {'text': {
            'query': 'foo'}}}]}},
        'rank': {'rrf': {}},
    }
    assert query_body == expected_body
    return query_body
| null |
_llm_type
|
return 'anthropic_functions'
|
@property
def _llm_type(self) ->str:
    """Return the unique string identifying this LLM wrapper type."""
    return 'anthropic_functions'
| null |
run
|
"""Run query through Merriam-Webster API and return a formatted result."""
quoted_query = quote(query)
request_url = (
f'{MERRIAM_WEBSTER_API_URL}/{quoted_query}?key={self.merriam_webster_api_key}'
)
response = requests.get(request_url, timeout=MERRIAM_WEBSTER_TIMEOUT)
if response.status_code != 200:
return response.text
return self._format_response(query, response)
|
def run(self, query: str) ->str:
    """Run query through Merriam-Webster API and return a formatted result."""
    encoded_query = quote(query)
    request_url = (
        f'{MERRIAM_WEBSTER_API_URL}/{encoded_query}?key={self.merriam_webster_api_key}'
        )
    response = requests.get(request_url, timeout=MERRIAM_WEBSTER_TIMEOUT)
    if response.status_code == 200:
        return self._format_response(query, response)
    # Non-200: surface the raw API error text to the caller.
    return response.text
|
Run query through Merriam-Webster API and return a formatted result.
|
_embed
|
if self.show_progress:
try:
from tqdm import tqdm
iter_ = tqdm(input, desc='OllamaEmbeddings')
except ImportError:
logger.warning(
'Unable to show progress bar because tqdm could not be imported. Please install with `pip install tqdm`.'
)
iter_ = input
else:
iter_ = input
return [self._process_emb_response(prompt) for prompt in iter_]
|
def _embed(self, input: List[str]) ->List[List[float]]:
if self.show_progress:
try:
from tqdm import tqdm
iter_ = tqdm(input, desc='OllamaEmbeddings')
except ImportError:
logger.warning(
'Unable to show progress bar because tqdm could not be imported. Please install with `pip install tqdm`.'
)
iter_ = input
else:
iter_ = input
return [self._process_emb_response(prompt) for prompt in iter_]
| null |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.