method_name
stringlengths 1
78
| method_body
stringlengths 3
9.66k
| full_code
stringlengths 31
10.7k
| docstring
stringlengths 4
4.74k
⌀ |
|---|---|---|---|
add_message
|
"""Append the message to the record in Neo4j"""
query = (
f'MATCH (s:`{self._node_label}`) WHERE s.id = $session_id OPTIONAL MATCH (s)-[lm:LAST_MESSAGE]->(last_message) CREATE (s)-[:LAST_MESSAGE]->(new:Message) SET new += {{type:$type, content:$content}} WITH new, lm, last_message WHERE last_message IS NOT NULL CREATE (last_message)-[:NEXT]->(new) DELETE lm'
)
self._driver.execute_query(query, {'type': message.type, 'content': message
.content, 'session_id': self._session_id}).summary
|
def add_message(self, message: BaseMessage) ->None:
"""Append the message to the record in Neo4j"""
query = (
f'MATCH (s:`{self._node_label}`) WHERE s.id = $session_id OPTIONAL MATCH (s)-[lm:LAST_MESSAGE]->(last_message) CREATE (s)-[:LAST_MESSAGE]->(new:Message) SET new += {{type:$type, content:$content}} WITH new, lm, last_message WHERE last_message IS NOT NULL CREATE (last_message)-[:NEXT]->(new) DELETE lm'
)
self._driver.execute_query(query, {'type': message.type, 'content':
message.content, 'session_id': self._session_id}).summary
|
Append the message to the record in Neo4j
|
test_voyagi_embedding_documents
|
"""Test voyage embeddings."""
documents = ['foo bar']
embedding = VoyageEmbeddings(model=MODEL)
output = embedding.embed_documents(documents)
assert len(output) == 1
assert len(output[0]) == 1024
|
def test_voyagi_embedding_documents() ->None:
"""Test voyage embeddings."""
documents = ['foo bar']
embedding = VoyageEmbeddings(model=MODEL)
output = embedding.embed_documents(documents)
assert len(output) == 1
assert len(output[0]) == 1024
|
Test voyage embeddings.
|
anonymize
|
"""Anonymize text"""
return self._anonymize(text, language, allow_list)
|
def anonymize(self, text: str, language: Optional[str]=None, allow_list:
Optional[List[str]]=None) ->str:
"""Anonymize text"""
return self._anonymize(text, language, allow_list)
|
Anonymize text
|
on_retriever_end
|
self.on_retriever_end_common()
|
def on_retriever_end(self, *args: Any, **kwargs: Any) ->Any:
self.on_retriever_end_common()
| null |
results
|
"""Run query through DuckDuckGo and return metadata.
Args:
query: The query to search for.
max_results: The number of results to return.
source: The source to look from.
Returns:
A list of dictionaries with the following keys:
snippet - The description of the result.
title - The title of the result.
link - The link to the result.
"""
source = source or self.source
if source == 'text':
results = [{'snippet': r['body'], 'title': r['title'], 'link': r['href'
]} for r in self._ddgs_text(query, max_results=max_results)]
elif source == 'news':
results = [{'snippet': r['body'], 'title': r['title'], 'link': r['url'],
'date': r['date'], 'source': r['source']} for r in self._ddgs_news(
query, max_results=max_results)]
else:
results = []
if results is None:
results = [{'Result': 'No good DuckDuckGo Search Result was found'}]
return results
|
def results(self, query: str, max_results: int, source: Optional[str]=None
) ->List[Dict[str, str]]:
"""Run query through DuckDuckGo and return metadata.
Args:
query: The query to search for.
max_results: The number of results to return.
source: The source to look from.
Returns:
A list of dictionaries with the following keys:
snippet - The description of the result.
title - The title of the result.
link - The link to the result.
"""
source = source or self.source
if source == 'text':
results = [{'snippet': r['body'], 'title': r['title'], 'link': r[
'href']} for r in self._ddgs_text(query, max_results=max_results)]
elif source == 'news':
results = [{'snippet': r['body'], 'title': r['title'], 'link': r[
'url'], 'date': r['date'], 'source': r['source']} for r in self
._ddgs_news(query, max_results=max_results)]
else:
results = []
if results is None:
results = [{'Result': 'No good DuckDuckGo Search Result was found'}]
return results
|
Run query through DuckDuckGo and return metadata.
Args:
query: The query to search for.
max_results: The number of results to return.
source: The source to look from.
Returns:
A list of dictionaries with the following keys:
snippet - The description of the result.
title - The title of the result.
link - The link to the result.
|
delete
|
"""Delete by vector ID.
Args:
ids: List of ids to delete.
Returns:
True if deletion is successful,
False otherwise.
"""
return bool(self._collection.delete(ids))
|
def delete(self, ids: Optional[List[str]]=None, **kwargs: Any) ->bool:
"""Delete by vector ID.
Args:
ids: List of ids to delete.
Returns:
True if deletion is successful,
False otherwise.
"""
return bool(self._collection.delete(ids))
|
Delete by vector ID.
Args:
ids: List of ids to delete.
Returns:
True if deletion is successful,
False otherwise.
|
lazy_load
|
for row in self.api_wrapper.query(self.query):
if self.page_content_columns:
page_content_data = {k: v for k, v in row.items() if k in self.
page_content_columns}
else:
page_content_data = row
page_content = '\n'.join(f'{k}: {v}' for k, v in page_content_data.items())
if self.metadata_columns:
metadata = {k: v for k, v in row.items() if k in self.metadata_columns}
else:
metadata = {k: v for k, v in row.items() if k not in page_content_data}
yield Document(page_content=page_content, metadata=metadata)
|
def lazy_load(self) ->Iterator[Document]:
for row in self.api_wrapper.query(self.query):
if self.page_content_columns:
page_content_data = {k: v for k, v in row.items() if k in self.
page_content_columns}
else:
page_content_data = row
page_content = '\n'.join(f'{k}: {v}' for k, v in page_content_data.
items())
if self.metadata_columns:
metadata = {k: v for k, v in row.items() if k in self.
metadata_columns}
else:
metadata = {k: v for k, v in row.items() if k not in
page_content_data}
yield Document(page_content=page_content, metadata=metadata)
| null |
_identity
|
"""Return the same object."""
return x
|
def _identity(x: str) ->str:
"""Return the same object."""
return x
|
Return the same object.
|
_get_combined_score
|
"""Return the combined score for a document."""
hours_passed = _get_hours_passed(current_time, self._document_get_date(
'last_accessed_at', document))
score = (1.0 - self.decay_rate) ** hours_passed
for key in self.other_score_keys:
if key in document.metadata:
score += document.metadata[key]
if vector_relevance is not None:
score += vector_relevance
return score
|
def _get_combined_score(self, document: Document, vector_relevance:
Optional[float], current_time: datetime.datetime) ->float:
"""Return the combined score for a document."""
hours_passed = _get_hours_passed(current_time, self._document_get_date(
'last_accessed_at', document))
score = (1.0 - self.decay_rate) ** hours_passed
for key in self.other_score_keys:
if key in document.metadata:
score += document.metadata[key]
if vector_relevance is not None:
score += vector_relevance
return score
|
Return the combined score for a document.
|
test_all_imports
|
assert set(__all__) == set(EXPECTED_ALL)
|
def test_all_imports() ->None:
assert set(__all__) == set(EXPECTED_ALL)
| null |
test_exception_handling_bool
|
_tool = _FakeExceptionTool(handle_tool_error=True)
expected = 'Tool execution error'
actual = _tool.run({})
assert expected == actual
|
def test_exception_handling_bool() ->None:
_tool = _FakeExceptionTool(handle_tool_error=True)
expected = 'Tool execution error'
actual = _tool.run({})
assert expected == actual
| null |
on_llm_end
|
self.data_store['generation'] = args[0]
|
def on_llm_end(self, *args: Any, **kwargs: Any) ->Any:
self.data_store['generation'] = args[0]
| null |
embeddings
|
return self._embedding_function
|
@property
def embeddings(self) ->Optional[Embeddings]:
return self._embedding_function
| null |
_create_llm_result
|
generations = []
for res in response.choices:
finish_reason = res.finish_reason
gen = Generation(text=res.message.content, generation_info={
'finish_reason': finish_reason})
generations.append([gen])
if finish_reason != 'stop':
logger.warning('Giga generation stopped with reason: %s', finish_reason
)
if self.verbose:
logger.info('Giga response: %s', res.message.content)
token_usage = response.usage
llm_output = {'token_usage': token_usage, 'model_name': response.model}
return LLMResult(generations=generations, llm_output=llm_output)
|
def _create_llm_result(self, response: Any) ->LLMResult:
generations = []
for res in response.choices:
finish_reason = res.finish_reason
gen = Generation(text=res.message.content, generation_info={
'finish_reason': finish_reason})
generations.append([gen])
if finish_reason != 'stop':
logger.warning('Giga generation stopped with reason: %s',
finish_reason)
if self.verbose:
logger.info('Giga response: %s', res.message.content)
token_usage = response.usage
llm_output = {'token_usage': token_usage, 'model_name': response.model}
return LLMResult(generations=generations, llm_output=llm_output)
| null |
_get_hours_passed
|
"""Get the hours passed between two datetimes."""
return (time - ref_time).total_seconds() / 3600
|
def _get_hours_passed(time: datetime.datetime, ref_time: datetime.datetime
) ->float:
"""Get the hours passed between two datetimes."""
return (time - ref_time).total_seconds() / 3600
|
Get the hours passed between two datetimes.
|
with_types
|
return self.__class__(bound=self.bound, kwargs=self.kwargs, config=self.
config, custom_input_type=input_type if input_type is not None else
self.custom_input_type, custom_output_type=output_type if output_type
is not None else self.custom_output_type)
|
def with_types(self, input_type: Optional[Union[Type[Input], BaseModel]]=
None, output_type: Optional[Union[Type[Output], BaseModel]]=None
) ->Runnable[Input, Output]:
return self.__class__(bound=self.bound, kwargs=self.kwargs, config=self
.config, custom_input_type=input_type if input_type is not None else
self.custom_input_type, custom_output_type=output_type if
output_type is not None else self.custom_output_type)
| null |
validate_indices
|
if values['include_indices'] and values['ignore_indices']:
raise ValueError(
"Cannot specify both 'include_indices' and 'ignore_indices'.")
return values
|
@root_validator()
def validate_indices(cls, values: dict) ->dict:
if values['include_indices'] and values['ignore_indices']:
raise ValueError(
"Cannot specify both 'include_indices' and 'ignore_indices'.")
return values
| null |
_import_petals
|
from langchain_community.llms.petals import Petals
return Petals
|
def _import_petals() ->Any:
from langchain_community.llms.petals import Petals
return Petals
| null |
__init__
|
"""Initialize the Cloudflare Workers AI class."""
super().__init__(**kwargs)
self.endpoint_url = f'{self.base_url}/{self.account_id}/ai/run/{self.model}'
|
def __init__(self, **kwargs: Any) ->None:
"""Initialize the Cloudflare Workers AI class."""
super().__init__(**kwargs)
self.endpoint_url = (
f'{self.base_url}/{self.account_id}/ai/run/{self.model}')
|
Initialize the Cloudflare Workers AI class.
|
parse
|
"""Parse the response and error tags."""
json_match = re.search('```json(.*?)```', llm_output, re.DOTALL)
if json_match:
return self._load_json_block(json_match.group(1).strip())
else:
raise ValueError(f'No response found in output: {llm_output}.')
|
def parse(self, llm_output: str) ->str:
"""Parse the response and error tags."""
json_match = re.search('```json(.*?)```', llm_output, re.DOTALL)
if json_match:
return self._load_json_block(json_match.group(1).strip())
else:
raise ValueError(f'No response found in output: {llm_output}.')
|
Parse the response and error tags.
|
kv_dataset_name
|
import pandas as pd
client = Client()
df = pd.DataFrame({'some_input': ["What's the capital of California?",
"What's the capital of Nevada?", "What's the capital of Oregon?",
"What's the capital of Washington?"], 'other_input': ['a', 'b', 'c',
'd'], 'some_output': ['Sacramento', 'Carson City', 'Salem', 'Olympia'],
'other_output': ['e', 'f', 'g', 'h']})
uid = str(uuid4())[-8:]
_dataset_name = f'lcp kv dataset integration tests - {uid}'
client.upload_dataframe(df, name=_dataset_name, input_keys=['some_input',
'other_input'], output_keys=['some_output', 'other_output'],
description='Integration test dataset')
yield _dataset_name
|
@pytest.fixture(scope='module')
def kv_dataset_name() ->Iterator[str]:
import pandas as pd
client = Client()
df = pd.DataFrame({'some_input': ["What's the capital of California?",
"What's the capital of Nevada?", "What's the capital of Oregon?",
"What's the capital of Washington?"], 'other_input': ['a', 'b', 'c',
'd'], 'some_output': ['Sacramento', 'Carson City', 'Salem',
'Olympia'], 'other_output': ['e', 'f', 'g', 'h']})
uid = str(uuid4())[-8:]
_dataset_name = f'lcp kv dataset integration tests - {uid}'
client.upload_dataframe(df, name=_dataset_name, input_keys=[
'some_input', 'other_input'], output_keys=['some_output',
'other_output'], description='Integration test dataset')
yield _dataset_name
| null |
test_json_meta_01
|
mocker.patch('builtins.open', mocker.mock_open())
mocker.patch(patch_func, return_value=patch_func_value)
file_path = '/workspaces/langchain/test.json'
expected_docs = [Document(page_content='value1', metadata={'source':
file_path, 'seq_num': 1, 'x': 'value1-meta'}), Document(page_content=
'value2', metadata={'source': file_path, 'seq_num': 2, 'x': 'value2-meta'})
]
def metadata_func(record: Dict, metadata: Dict) ->Dict:
metadata['x'] = f"{record['text']}-meta"
return metadata
loader = JSONLoader(file_path=file_path, metadata_func=metadata_func, **kwargs)
result = loader.load()
assert result == expected_docs
|
@pytest.mark.parametrize('patch_func,patch_func_value,kwargs', ((
'pathlib.Path.read_text', '[{"text": "value1"}, {"text": "value2"}]', {
'jq_schema': '.[]', 'content_key': 'text'}), ('pathlib.Path.open', io.
StringIO(
"""
{"text": "value1"}
{"text": "value2"}
"""
), {'jq_schema': '.', 'content_key': 'text', 'json_lines': True})))
def test_json_meta_01(patch_func: str, patch_func_value: Any, kwargs: Dict,
mocker: MockerFixture) ->None:
mocker.patch('builtins.open', mocker.mock_open())
mocker.patch(patch_func, return_value=patch_func_value)
file_path = '/workspaces/langchain/test.json'
expected_docs = [Document(page_content='value1', metadata={'source':
file_path, 'seq_num': 1, 'x': 'value1-meta'}), Document(
page_content='value2', metadata={'source': file_path, 'seq_num': 2,
'x': 'value2-meta'})]
def metadata_func(record: Dict, metadata: Dict) ->Dict:
metadata['x'] = f"{record['text']}-meta"
return metadata
loader = JSONLoader(file_path=file_path, metadata_func=metadata_func,
**kwargs)
result = loader.load()
assert result == expected_docs
| null |
test_api_key_is_string
|
llm = VolcEngineMaasBase(volc_engine_maas_ak='secret-volc-ak',
volc_engine_maas_sk='secret-volc-sk')
assert isinstance(llm.volc_engine_maas_ak, SecretStr)
assert isinstance(llm.volc_engine_maas_sk, SecretStr)
|
def test_api_key_is_string() ->None:
llm = VolcEngineMaasBase(volc_engine_maas_ak='secret-volc-ak',
volc_engine_maas_sk='secret-volc-sk')
assert isinstance(llm.volc_engine_maas_ak, SecretStr)
assert isinstance(llm.volc_engine_maas_sk, SecretStr)
| null |
from_llms
|
"""Initialize with LLMs to experiment with and optional prompt.
Args:
llms: list of LLMs to experiment with
prompt: Optional prompt to use to prompt the LLMs. Defaults to None.
If a prompt was provided, it should only have one input variable.
"""
if prompt is None:
prompt = PromptTemplate(input_variables=['_input'], template='{_input}')
chains = [LLMChain(llm=llm, prompt=prompt) for llm in llms]
names = [str(llm) for llm in llms]
return cls(chains, names=names)
|
@classmethod
def from_llms(cls, llms: List[BaseLLM], prompt: Optional[PromptTemplate]=None
) ->ModelLaboratory:
"""Initialize with LLMs to experiment with and optional prompt.
Args:
llms: list of LLMs to experiment with
prompt: Optional prompt to use to prompt the LLMs. Defaults to None.
If a prompt was provided, it should only have one input variable.
"""
if prompt is None:
prompt = PromptTemplate(input_variables=['_input'], template='{_input}'
)
chains = [LLMChain(llm=llm, prompt=prompt) for llm in llms]
names = [str(llm) for llm in llms]
return cls(chains, names=names)
|
Initialize with LLMs to experiment with and optional prompt.
Args:
llms: list of LLMs to experiment with
prompt: Optional prompt to use to prompt the LLMs. Defaults to None.
If a prompt was provided, it should only have one input variable.
|
__init__
|
"""Google Vertex AI Vector Search (previously Matching Engine)
implementation of the vector store.
While the embeddings are stored in the Matching Engine, the embedded
documents will be stored in GCS.
An existing Index and corresponding Endpoint are preconditions for
using this module.
See usage in
docs/integrations/vectorstores/google_vertex_ai_vector_search.ipynb.
Note that this implementation is mostly meant for reading if you are
planning to do a real time implementation. While reading is a real time
operation, updating the index takes close to one hour.
Attributes:
project_id: The GCS project id.
index: The created index class. See
~:func:`MatchingEngine.from_components`.
endpoint: The created endpoint class. See
~:func:`MatchingEngine.from_components`.
embedding: A :class:`Embeddings` that will be used for
embedding the text sent. If none is sent, then the
multilingual Tensorflow Universal Sentence Encoder will be used.
gcs_client: The GCS client.
gcs_bucket_name: The GCS bucket name.
credentials (Optional): Created GCP credentials.
document_id_key (Optional): Key for storing document ID in document
metadata. If None, document ID will not be returned in document
metadata.
"""
super().__init__()
self._validate_google_libraries_installation()
self.project_id = project_id
self.index = index
self.endpoint = endpoint
self.embedding = embedding
self.gcs_client = gcs_client
self.credentials = credentials
self.gcs_bucket_name = gcs_bucket_name
self.document_id_key = document_id_key
|
def __init__(self, project_id: str, index: MatchingEngineIndex, endpoint:
MatchingEngineIndexEndpoint, embedding: Embeddings, gcs_client: storage
.Client, gcs_bucket_name: str, credentials: Optional[Credentials]=None,
*, document_id_key: Optional[str]=None):
"""Google Vertex AI Vector Search (previously Matching Engine)
implementation of the vector store.
While the embeddings are stored in the Matching Engine, the embedded
documents will be stored in GCS.
An existing Index and corresponding Endpoint are preconditions for
using this module.
See usage in
docs/integrations/vectorstores/google_vertex_ai_vector_search.ipynb.
Note that this implementation is mostly meant for reading if you are
planning to do a real time implementation. While reading is a real time
operation, updating the index takes close to one hour.
Attributes:
project_id: The GCS project id.
index: The created index class. See
~:func:`MatchingEngine.from_components`.
endpoint: The created endpoint class. See
~:func:`MatchingEngine.from_components`.
embedding: A :class:`Embeddings` that will be used for
embedding the text sent. If none is sent, then the
multilingual Tensorflow Universal Sentence Encoder will be used.
gcs_client: The GCS client.
gcs_bucket_name: The GCS bucket name.
credentials (Optional): Created GCP credentials.
document_id_key (Optional): Key for storing document ID in document
metadata. If None, document ID will not be returned in document
metadata.
"""
super().__init__()
self._validate_google_libraries_installation()
self.project_id = project_id
self.index = index
self.endpoint = endpoint
self.embedding = embedding
self.gcs_client = gcs_client
self.credentials = credentials
self.gcs_bucket_name = gcs_bucket_name
self.document_id_key = document_id_key
|
Google Vertex AI Vector Search (previously Matching Engine)
implementation of the vector store.
While the embeddings are stored in the Matching Engine, the embedded
documents will be stored in GCS.
An existing Index and corresponding Endpoint are preconditions for
using this module.
See usage in
docs/integrations/vectorstores/google_vertex_ai_vector_search.ipynb.
Note that this implementation is mostly meant for reading if you are
planning to do a real time implementation. While reading is a real time
operation, updating the index takes close to one hour.
Attributes:
project_id: The GCS project id.
index: The created index class. See
~:func:`MatchingEngine.from_components`.
endpoint: The created endpoint class. See
~:func:`MatchingEngine.from_components`.
embedding: A :class:`Embeddings` that will be used for
embedding the text sent. If none is sent, then the
multilingual Tensorflow Universal Sentence Encoder will be used.
gcs_client: The GCS client.
gcs_bucket_name: The GCS bucket name.
credentials (Optional): Created GCP credentials.
document_id_key (Optional): Key for storing document ID in document
metadata. If None, document ID will not be returned in document
metadata.
|
test_faiss_mmr_with_metadatas
|
texts = ['foo', 'foo', 'fou', 'foy']
metadatas = [{'page': i} for i in range(len(texts))]
docsearch = FAISS.from_texts(texts, FakeEmbeddings(), metadatas=metadatas)
query_vec = FakeEmbeddings().embed_query(text='foo')
output = docsearch.max_marginal_relevance_search_with_score_by_vector(query_vec
, k=10, lambda_mult=0.1)
assert len(output) == len(texts)
assert output[0][0] == Document(page_content='foo', metadata={'page': 0})
assert output[0][1] == 0.0
assert output[1][0] != Document(page_content='foo', metadata={'page': 0})
|
@pytest.mark.requires('faiss')
def test_faiss_mmr_with_metadatas() ->None:
texts = ['foo', 'foo', 'fou', 'foy']
metadatas = [{'page': i} for i in range(len(texts))]
docsearch = FAISS.from_texts(texts, FakeEmbeddings(), metadatas=metadatas)
query_vec = FakeEmbeddings().embed_query(text='foo')
output = docsearch.max_marginal_relevance_search_with_score_by_vector(
query_vec, k=10, lambda_mult=0.1)
assert len(output) == len(texts)
assert output[0][0] == Document(page_content='foo', metadata={'page': 0})
assert output[0][1] == 0.0
assert output[1][0] != Document(page_content='foo', metadata={'page': 0})
| null |
__init__
|
"""Initialize Snowflake document loader.
Args:
query: The query to run in Snowflake.
user: Snowflake user.
password: Snowflake password.
account: Snowflake account.
warehouse: Snowflake warehouse.
role: Snowflake role.
database: Snowflake database
schema: Snowflake schema
parameters: Optional. Parameters to pass to the query.
page_content_columns: Optional. Columns written to Document `page_content`.
metadata_columns: Optional. Columns written to Document `metadata`.
"""
self.query = query
self.user = user
self.password = password
self.account = account
self.warehouse = warehouse
self.role = role
self.database = database
self.schema = schema
self.parameters = parameters
self.page_content_columns = (page_content_columns if page_content_columns
is not None else ['*'])
self.metadata_columns = metadata_columns if metadata_columns is not None else [
]
|
def __init__(self, query: str, user: str, password: str, account: str,
warehouse: str, role: str, database: str, schema: str, parameters:
Optional[Dict[str, Any]]=None, page_content_columns: Optional[List[str]
]=None, metadata_columns: Optional[List[str]]=None):
"""Initialize Snowflake document loader.
Args:
query: The query to run in Snowflake.
user: Snowflake user.
password: Snowflake password.
account: Snowflake account.
warehouse: Snowflake warehouse.
role: Snowflake role.
database: Snowflake database
schema: Snowflake schema
parameters: Optional. Parameters to pass to the query.
page_content_columns: Optional. Columns written to Document `page_content`.
metadata_columns: Optional. Columns written to Document `metadata`.
"""
self.query = query
self.user = user
self.password = password
self.account = account
self.warehouse = warehouse
self.role = role
self.database = database
self.schema = schema
self.parameters = parameters
self.page_content_columns = (page_content_columns if
page_content_columns is not None else ['*'])
self.metadata_columns = (metadata_columns if metadata_columns is not
None else [])
|
Initialize Snowflake document loader.
Args:
query: The query to run in Snowflake.
user: Snowflake user.
password: Snowflake password.
account: Snowflake account.
warehouse: Snowflake warehouse.
role: Snowflake role.
database: Snowflake database
schema: Snowflake schema
parameters: Optional. Parameters to pass to the query.
page_content_columns: Optional. Columns written to Document `page_content`.
metadata_columns: Optional. Columns written to Document `metadata`.
|
test_simplify_code
|
segmenter = JavaScriptSegmenter(self.example_code)
simplified_code = segmenter.simplify_code()
self.assertEqual(simplified_code, self.expected_simplified_code)
|
def test_simplify_code(self) ->None:
segmenter = JavaScriptSegmenter(self.example_code)
simplified_code = segmenter.simplify_code()
self.assertEqual(simplified_code, self.expected_simplified_code)
| null |
test_opensearch_with_metadatas
|
"""Test end to end indexing and search with metadata."""
metadatas = [{'page': i} for i in range(len(texts))]
docsearch = OpenSearchVectorSearch.from_texts(texts, FakeEmbeddings(),
metadatas=metadatas, opensearch_url=DEFAULT_OPENSEARCH_URL)
output = docsearch.similarity_search('foo', k=1)
assert output == [Document(page_content='foo', metadata={'page': 0})]
|
def test_opensearch_with_metadatas() ->None:
"""Test end to end indexing and search with metadata."""
metadatas = [{'page': i} for i in range(len(texts))]
docsearch = OpenSearchVectorSearch.from_texts(texts, FakeEmbeddings(),
metadatas=metadatas, opensearch_url=DEFAULT_OPENSEARCH_URL)
output = docsearch.similarity_search('foo', k=1)
assert output == [Document(page_content='foo', metadata={'page': 0})]
|
Test end to end indexing and search with metadata.
|
persist
|
if self._serializer is None:
raise SKLearnVectorStoreException(
'You must specify a persist_path on creation to persist the collection.'
)
data = {'ids': self._ids, 'texts': self._texts, 'metadatas': self.
_metadatas, 'embeddings': self._embeddings}
self._serializer.save(data)
|
def persist(self) ->None:
if self._serializer is None:
raise SKLearnVectorStoreException(
'You must specify a persist_path on creation to persist the collection.'
)
data = {'ids': self._ids, 'texts': self._texts, 'metadatas': self.
_metadatas, 'embeddings': self._embeddings}
self._serializer.save(data)
| null |
_import_google_jobs
|
from langchain_community.utilities.google_jobs import GoogleJobsAPIWrapper
return GoogleJobsAPIWrapper
|
def _import_google_jobs() ->Any:
from langchain_community.utilities.google_jobs import GoogleJobsAPIWrapper
return GoogleJobsAPIWrapper
| null |
llm_chain_with_fallbacks
|
error_llm = FakeListLLM(responses=['foo'], i=1)
pass_llm = FakeListLLM(responses=['bar'])
prompt = PromptTemplate.from_template('what did baz say to {buz}')
return RunnableParallel({'buz': lambda x: x}) | (prompt | error_llm
).with_fallbacks([prompt | pass_llm])
|
@pytest.fixture()
def llm_chain_with_fallbacks() ->Runnable:
error_llm = FakeListLLM(responses=['foo'], i=1)
pass_llm = FakeListLLM(responses=['bar'])
prompt = PromptTemplate.from_template('what did baz say to {buz}')
return RunnableParallel({'buz': lambda x: x}) | (prompt | error_llm
).with_fallbacks([prompt | pass_llm])
| null |
_format_index
|
return f"Name: {index['name']}, Unique: {index['unique']}, Columns: {str(index['column_names'])}"
|
def _format_index(index: sqlalchemy.engine.interfaces.ReflectedIndex) ->str:
return (
f"Name: {index['name']}, Unique: {index['unique']}, Columns: {str(index['column_names'])}"
)
| null |
__init__
|
"""Initializes the BraveLoader.
Args:
query: The query to search for.
api_key: The API key to use.
search_kwargs: The search kwargs to use.
"""
self.query = query
self.api_key = api_key
self.search_kwargs = search_kwargs or {}
|
def __init__(self, query: str, api_key: str, search_kwargs: Optional[dict]=None
):
"""Initializes the BraveLoader.
Args:
query: The query to search for.
api_key: The API key to use.
search_kwargs: The search kwargs to use.
"""
self.query = query
self.api_key = api_key
self.search_kwargs = search_kwargs or {}
|
Initializes the BraveLoader.
Args:
query: The query to search for.
api_key: The API key to use.
search_kwargs: The search kwargs to use.
|
test_baseten_call
|
"""Test valid call to Baseten."""
llm = Baseten(model=os.environ['BASETEN_MODEL_ID'])
output = llm('Test prompt, please respond.')
assert isinstance(output, str)
|
def test_baseten_call() ->None:
"""Test valid call to Baseten."""
llm = Baseten(model=os.environ['BASETEN_MODEL_ID'])
output = llm('Test prompt, please respond.')
assert isinstance(output, str)
|
Test valid call to Baseten.
|
__call__
|
"""Make tool callable."""
return self.run(tool_input, callbacks=callbacks)
|
def __call__(self, tool_input: str, callbacks: Callbacks=None) ->str:
"""Make tool callable."""
return self.run(tool_input, callbacks=callbacks)
|
Make tool callable.
|
test_chatglm_generate
|
"""Test valid call to chatglm."""
llm = ChatGLM()
output = llm.generate(['who are you'])
assert isinstance(output, LLMResult)
assert isinstance(output.generations, list)
|
def test_chatglm_generate() ->None:
"""Test valid call to chatglm."""
llm = ChatGLM()
output = llm.generate(['who are you'])
assert isinstance(output, LLMResult)
assert isinstance(output.generations, list)
|
Test valid call to chatglm.
|
activate_selection_scorer
|
"""
Activates the selection scorer, meaning that the chain will attempt to use the selection scorer to score responses.
"""
self.selection_scorer_activated = True
|
def activate_selection_scorer(self) ->None:
"""
Activates the selection scorer, meaning that the chain will attempt to use the selection scorer to score responses.
"""
self.selection_scorer_activated = True
|
Activates the selection scorer, meaning that the chain will attempt to use the selection scorer to score responses.
|
__init__
|
"""Initialize with connection string, container and blob name."""
self.conn_str = conn_str
"""Connection string for Azure Blob Storage."""
self.container = container
"""Container name."""
self.blob = blob_name
"""Blob name."""
|
def __init__(self, conn_str: str, container: str, blob_name: str):
"""Initialize with connection string, container and blob name."""
self.conn_str = conn_str
"""Connection string for Azure Blob Storage."""
self.container = container
"""Container name."""
self.blob = blob_name
"""Blob name."""
|
Initialize with connection string, container and blob name.
|
test_load_string
|
"""Loads page_content of type string"""
page_content_column = 'text'
name = 'v1'
loader = HuggingFaceDatasetLoader(HUGGING_FACE_EXAMPLE_DATASET,
page_content_column, name)
docs = loader.load()
assert len(docs) == 2
doc = docs[0]
assert doc.page_content == '"This is text in version 1"'
assert doc.metadata.keys() == {'split', 'list', 'dict'}
|
@pytest.mark.requires('datasets')
@pytest.fixture
def test_load_string() ->None:
"""Loads page_content of type string"""
page_content_column = 'text'
name = 'v1'
loader = HuggingFaceDatasetLoader(HUGGING_FACE_EXAMPLE_DATASET,
page_content_column, name)
docs = loader.load()
assert len(docs) == 2
doc = docs[0]
assert doc.page_content == '"This is text in version 1"'
assert doc.metadata.keys() == {'split', 'list', 'dict'}
|
Loads page_content of type string
|
_on_llm_start
|
"""Process the LLM Run upon start."""
|
def _on_llm_start(self, run: Run) ->None:
"""Process the LLM Run upon start."""
|
Process the LLM Run upon start.
|
test_placeholder
|
"""Used for compiling integration tests without running any real tests."""
pass
|
@pytest.mark.compile
def test_placeholder() ->None:
"""Used for compiling integration tests without running any real tests."""
pass
|
Used for compiling integration tests without running any real tests.
|
_import_duckduckgo_search
|
from langchain_community.utilities.duckduckgo_search import DuckDuckGoSearchAPIWrapper
return DuckDuckGoSearchAPIWrapper
|
def _import_duckduckgo_search() ->Any:
    """Lazily import and return the DuckDuckGoSearchAPIWrapper class."""
    from langchain_community.utilities import duckduckgo_search
    return duckduckgo_search.DuckDuckGoSearchAPIWrapper
| null |
get_connection_string
|
connection_string: str = get_from_dict_or_env(data=kwargs, key=
'connection_string', env_key='HOLOGRES_CONNECTION_STRING')
if not connection_string:
raise ValueError(
'Hologres connection string is requiredEither pass it as a parameteror set the HOLOGRES_CONNECTION_STRING environment variable.Create the connection string by callingHologresVector.connection_string_from_db_params'
)
return connection_string
|
@classmethod
def get_connection_string(cls, kwargs: Dict[str, Any]) ->str:
    """Return the Hologres connection string.

    Looks up 'connection_string' in kwargs first, then the
    HOLOGRES_CONNECTION_STRING environment variable.

    Raises:
        ValueError: if neither source provides a connection string.
    """
    connection_string: str = get_from_dict_or_env(data=kwargs, key=
        'connection_string', env_key='HOLOGRES_CONNECTION_STRING')
    if not connection_string:
        # FIX: the original message had sentences fused together with no
        # spaces ("requiredEither ... parameteror ...").
        raise ValueError(
            'Hologres connection string is required. Either pass it as a '
            'parameter or set the HOLOGRES_CONNECTION_STRING environment '
            'variable. Create the connection string by calling '
            'HologresVector.connection_string_from_db_params.')
    return connection_string
| null |
set_handler
|
"""Set handler as the only handler on the callback manager."""
self.set_handlers([handler], inherit=inherit)
|
def set_handler(self, handler: BaseCallbackHandler, inherit: bool=True) ->None:
    """Set handler as the only handler on the callback manager.

    Args:
        handler: The handler that replaces all currently registered handlers.
        inherit: Whether child callback managers inherit this handler.
    """
    # Delegate to set_handlers with a single-element list.
    self.set_handlers([handler], inherit=inherit)
|
Set handler as the only handler on the callback manager.
|
test_chat_valid_infer_variables
|
messages = [HumanMessagePromptTemplate.from_template(
'Do something with {question} using {context} giving it like {formatins}')]
prompt = ChatPromptTemplate(messages=messages, partial_variables={
'formatins': 'some structure'})
assert set(prompt.input_variables) == {'question', 'context'}
assert prompt.partial_variables == {'formatins': 'some structure'}
|
def test_chat_valid_infer_variables() ->None:
    """Input variables are inferred from the template minus the partials."""
    human_template = HumanMessagePromptTemplate.from_template(
        'Do something with {question} using {context} giving it like {formatins}'
        )
    prompt = ChatPromptTemplate(messages=[human_template],
        partial_variables={'formatins': 'some structure'})
    # 'formatins' is satisfied by the partial, so only the other two remain.
    assert set(prompt.input_variables) == {'question', 'context'}
    assert prompt.partial_variables == {'formatins': 'some structure'}
| null |
_kwargs_post_request
|
"""Build the kwargs for the Post request, used by sync
Args:
model (str): _description_
texts (List[str]): _description_
Returns:
Dict[str, Collection[str]]: _description_
"""
return dict(url=f'{self.host}/embeddings', headers={'content-type':
'application/json'}, json=dict(input=texts, model=model))
|
def _kwargs_post_request(self, model: str, texts: List[str]) ->Dict[str, Any]:
"""Build the kwargs for the Post request, used by sync
Args:
model (str): _description_
texts (List[str]): _description_
Returns:
Dict[str, Collection[str]]: _description_
"""
return dict(url=f'{self.host}/embeddings', headers={'content-type':
'application/json'}, json=dict(input=texts, model=model))
|
Build the kwargs for the Post request, used by sync
Args:
    model (str): name of the embedding model to request
    texts (List[str]): texts to embed
Returns:
    Dict[str, Collection[str]]: mapping with 'url', 'headers' and 'json'
    entries ready for the HTTP client's post call
test_add_texts
|
"""
Add some texts
"""
texts = ['foo', 'bar', 'baz']
metadatas = [{'author': 'Adam', 'category': 'Music'}, {'author': 'Eve',
'category': 'Music'}, {'author': 'John', 'category': 'History'}]
ids = self.vectorstore.add_texts(texts=texts, metadatas=metadatas)
assert len(ids) == len(texts)
|
def test_add_texts(self) ->None:
    """
    Add some texts
    """
    sample_texts = ['foo', 'bar', 'baz']
    sample_metadatas = [{'author': 'Adam', 'category': 'Music'}, {'author':
        'Eve', 'category': 'Music'}, {'author': 'John', 'category':
        'History'}]
    returned_ids = self.vectorstore.add_texts(texts=sample_texts,
        metadatas=sample_metadatas)
    # One id is expected per inserted text.
    assert len(returned_ids) == len(sample_texts)
|
Add some texts
|
_llm_type
|
"""Return type of llm."""
return 'javelin-ai-gateway'
|
@property
def _llm_type(self) ->str:
    """Return type of llm."""
    # Identifier used to tag this LLM wrapper (e.g. for serialization).
    return 'javelin-ai-gateway'
|
Return type of llm.
|
similarity_search_with_score
|
"""Return documents most similar to query, along with scores.
Args:
query: Text to look up documents similar to.
size: Number of Documents to return. Defaults to 4.
filter: Array of Elasticsearch filter clauses to apply to the query.
Returns:
List of Documents most similar to the query and score for each
"""
search_params = kwargs.get('search_params') or {}
if len(search_params) == 0 or search_params.get('size') is None:
search_params['size'] = k
return self._search(query=query, filter=filter, **kwargs)
|
def similarity_search_with_score(self, query: str, k: int, filter: Optional
    [dict]=None, **kwargs: Any) ->List[Tuple[Document, float]]:
    """Return documents most similar to query, along with scores.
    Args:
        query: Text to look up documents similar to.
        k: Number of Documents to return.
        filter: Array of Elasticsearch filter clauses to apply to the query.
    Returns:
        List of Documents most similar to the query and score for each
    """
    # BUG FIX: the original mutated a local dict that was discarded when
    # 'search_params' was absent from kwargs, so the requested 'size' (k)
    # never reached the backend in that case. Write the dict back into
    # kwargs so _search always receives it.
    search_params = kwargs.get('search_params') or {}
    if search_params.get('size') is None:
        search_params['size'] = k
    kwargs['search_params'] = search_params
    return self._search(query=query, filter=filter, **kwargs)
|
Return documents most similar to query, along with scores.
Args:
query: Text to look up documents similar to.
size: Number of Documents to return. Defaults to 4.
filter: Array of Elasticsearch filter clauses to apply to the query.
Returns:
List of Documents most similar to the query and score for each
|
test_promptlayer_chat_openai_invalid_streaming_params
|
"""Test that streaming correctly invokes on_llm_new_token callback."""
with pytest.raises(ValueError):
PromptLayerChatOpenAI(max_tokens=10, streaming=True, temperature=0, n=5)
|
def test_promptlayer_chat_openai_invalid_streaming_params() ->None:
    """Test that streaming correctly invokes on_llm_new_token callback."""
    # streaming=True combined with n>1 is invalid and must be rejected at
    # construction time.
    with pytest.raises(ValueError):
        PromptLayerChatOpenAI(max_tokens=10, streaming=True, temperature=0, n=5
            )
|
Test that streaming correctly invokes on_llm_new_token callback.
|
_chain_type
|
return 'tot'
|
@property
def _chain_type(self) ->str:
    """Identifier of this chain type (Tree of Thoughts), for serialization."""
    return 'tot'
| null |
_SetComp
|
self.write('{')
self.dispatch(t.elt)
for gen in t.generators:
self.dispatch(gen)
self.write('}')
|
def _SetComp(self, t):
self.write('{')
self.dispatch(t.elt)
for gen in t.generators:
self.dispatch(gen)
self.write('}')
| null |
add_texts
|
"""Run more texts through the embeddings and add to the vectorstore.
Args:
texts: Iterable of strings to add to the vectorstore.
metadatas: Optional list of metadatas associated with the texts.
kwargs: vectorstore specific parameters
Returns:
List of ids from adding the texts into the vectorstore.
"""
|
@abstractmethod
def add_texts(self, texts: Iterable[str], metadatas: Optional[List[dict]]=
    None, **kwargs: Any) ->List[str]:
    """Run more texts through the embeddings and add to the vectorstore.

    Abstract: concrete vectorstore implementations must override this.

    Args:
        texts: Iterable of strings to add to the vectorstore.
        metadatas: Optional list of metadatas associated with the texts.
        kwargs: vectorstore specific parameters
    Returns:
        List of ids from adding the texts into the vectorstore.
    """
|
Run more texts through the embeddings and add to the vectorstore.
Args:
texts: Iterable of strings to add to the vectorstore.
metadatas: Optional list of metadatas associated with the texts.
kwargs: vectorstore specific parameters
Returns:
List of ids from adding the texts into the vectorstore.
|
save
|
output = {'output': input_output.pop('output')}
memory.save_context(input_output, output)
return output['output']
|
def save(input_output):
    """Persist one chain turn into `memory` and return the raw output.

    NOTE(review): deliberately mutates the caller's dict — 'output' is
    popped so only the inputs remain when saving context. `memory` is
    captured from the enclosing scope.
    """
    output = {'output': input_output.pop('output')}
    memory.save_context(input_output, output)
    return output['output']
| null |
requires_input
|
"""Whether the chain requires an input string."""
return True
|
@property
def requires_input(self) ->bool:
    """Whether the chain requires an input string."""
    # Always True for this chain.
    return True
|
Whether the chain requires an input string.
|
create_prompt
|
"""Prompt does not depend on tools."""
return PROMPT
|
@classmethod
def create_prompt(cls, tools: Sequence[BaseTool]) ->BasePromptTemplate:
    """Prompt does not depend on tools.

    `tools` is accepted for interface compatibility but ignored; the
    module-level PROMPT is returned unchanged.
    """
    return PROMPT
|
Prompt does not depend on tools.
|
test_sequential_usage_single_inputs
|
"""Test sequential on single input chains."""
chain_1 = FakeChain(input_variables=['foo'], output_variables=['bar'])
chain_2 = FakeChain(input_variables=['bar'], output_variables=['baz'])
chain = SequentialChain(chains=[chain_1, chain_2], input_variables=['foo'])
output = chain({'foo': '123'})
expected_output = {'baz': '123foofoo', 'foo': '123'}
assert output == expected_output
|
def test_sequential_usage_single_inputs() ->None:
    """Test sequential on single input chains."""
    first = FakeChain(input_variables=['foo'], output_variables=['bar'])
    second = FakeChain(input_variables=['bar'], output_variables=['baz'])
    pipeline = SequentialChain(chains=[first, second], input_variables=['foo'])
    result = pipeline({'foo': '123'})
    # Each fake hop appends 'foo'; the original input is echoed back too.
    assert result == {'baz': '123foofoo', 'foo': '123'}
|
Test sequential on single input chains.
|
redis_client
|
"""Yield redis client."""
from upstash_redis import Redis
client = Redis(url=URL, token=TOKEN)
try:
client.ping()
except Exception:
pytest.skip('Ping request failed. Verify that credentials are correct.')
client.flushdb()
return client
|
@pytest.fixture
def redis_client() ->Redis:
    """Yield redis client."""
    from upstash_redis import Redis
    client = Redis(url=URL, token=TOKEN)
    try:
        client.ping()
    except Exception:
        # Broad catch is deliberate: any connectivity/auth failure should
        # skip the suite rather than fail it.
        pytest.skip('Ping request failed. Verify that credentials are correct.'
            )
    # Start each test from an empty database.
    client.flushdb()
    return client
|
Yield redis client.
|
validate_api_answer_prompt
|
"""Check that api answer prompt expects the right variables."""
input_vars = values['api_answer_chain'].prompt.input_variables
expected_vars = {'question', 'api_docs', 'api_url', 'api_response'}
if set(input_vars) != expected_vars:
raise ValueError(
f'Input variables should be {expected_vars}, got {input_vars}')
return values
|
@root_validator(pre=True)
def validate_api_answer_prompt(cls, values: Dict) ->Dict:
    """Check that api answer prompt expects the right variables."""
    required_vars = {'question', 'api_docs', 'api_url', 'api_response'}
    actual_vars = values['api_answer_chain'].prompt.input_variables
    if set(actual_vars) != required_vars:
        raise ValueError(
            f'Input variables should be {required_vars}, got {actual_vars}')
    return values
|
Check that api answer prompt expects the right variables.
|
_format_toots
|
"""Format toots into documents.
Adding user info, and selected toot fields into the metadata.
"""
for toot in toots:
metadata = {'created_at': toot['created_at'], 'user_info': user_info,
'is_reply': toot['in_reply_to_id'] is not None}
yield Document(page_content=toot['content'], metadata=metadata)
|
def _format_toots(self, toots: List[Dict[str, Any]], user_info: dict
    ) ->Iterable[Document]:
    """Format toots into documents.

    The account's user info plus selected toot fields go into the metadata.
    """
    for item in toots:
        yield Document(page_content=item['content'], metadata={
            'created_at': item['created_at'], 'user_info': user_info,
            'is_reply': item['in_reply_to_id'] is not None})
|
Format toots into documents.
Adding user info, and selected toot fields into the metadata.
|
_import_mongodb_atlas
|
from langchain_community.vectorstores.mongodb_atlas import MongoDBAtlasVectorSearch
return MongoDBAtlasVectorSearch
|
def _import_mongodb_atlas() ->Any:
    """Lazily import and return the MongoDBAtlasVectorSearch class."""
    from langchain_community.vectorstores import mongodb_atlas
    return mongodb_atlas.MongoDBAtlasVectorSearch
| null |
on_retriever_end
|
"""Run when retriever ends running."""
handle_event(self.handlers, 'on_retriever_end', 'ignore_retriever',
documents, run_id=self.run_id, parent_run_id=self.parent_run_id, tags=
self.tags, **kwargs)
|
def on_retriever_end(self, documents: Sequence[Document], **kwargs: Any
    ) ->None:
    """Run when retriever ends running."""
    # Fan the event out to all registered handlers; presumably handlers
    # with 'ignore_retriever' set are skipped inside handle_event — confirm.
    handle_event(self.handlers, 'on_retriever_end', 'ignore_retriever',
        documents, run_id=self.run_id, parent_run_id=self.parent_run_id,
        tags=self.tags, **kwargs)
|
Run when retriever ends running.
|
similarity_search_with_score
|
"""Return docs and it's scores most similar to query.
By default, supports Approximate Search.
Also supports Script Scoring and Painless Scripting.
Args:
query: Text to look up documents similar to.
k: Number of Documents to return. Defaults to 4.
Returns:
List of Documents along with its scores most similar to the query.
Optional Args:
same as `similarity_search`
"""
text_field = kwargs.get('text_field', 'text')
metadata_field = kwargs.get('metadata_field', 'metadata')
hits = self._raw_similarity_search_with_score(query=query, k=k, **kwargs)
documents_with_scores = [(Document(page_content=hit['_source'][text_field],
metadata=hit['_source'] if metadata_field == '*' or metadata_field not in
hit['_source'] else hit['_source'][metadata_field]), hit['_score']) for
hit in hits]
return documents_with_scores
|
def similarity_search_with_score(self, query: str, k: int=4, **kwargs: Any
    ) ->List[Tuple[Document, float]]:
    """Return docs and it's scores most similar to query.
    By default, supports Approximate Search.
    Also supports Script Scoring and Painless Scripting.
    Args:
        query: Text to look up documents similar to.
        k: Number of Documents to return. Defaults to 4.
    Returns:
        List of Documents along with its scores most similar to the query.
    Optional Args:
        same as `similarity_search`
    """
    text_field = kwargs.get('text_field', 'text')
    metadata_field = kwargs.get('metadata_field', 'metadata')
    hits = self._raw_similarity_search_with_score(query=query, k=k, **kwargs)
    scored_docs = []
    for hit in hits:
        source = hit['_source']
        # '*' means "use the whole source as metadata"; fall back to the
        # whole source as well when the metadata field is missing.
        if metadata_field == '*' or metadata_field not in source:
            metadata = source
        else:
            metadata = source[metadata_field]
        scored_docs.append((Document(page_content=source[text_field],
            metadata=metadata), hit['_score']))
    return scored_docs
|
Return docs and it's scores most similar to query.
By default, supports Approximate Search.
Also supports Script Scoring and Painless Scripting.
Args:
query: Text to look up documents similar to.
k: Number of Documents to return. Defaults to 4.
Returns:
List of Documents along with its scores most similar to the query.
Optional Args:
same as `similarity_search`
|
create_prompt
|
"""Create prompt in the style of the zero shot agent.
Args:
tools: List of tools the agent will have access to, used to format the
prompt.
prefix: String to put before the list of tools.
suffix: String to put after the list of tools.
input_variables: List of input variables the final prompt will expect.
Returns:
A PromptTemplate with the template assembled from the pieces here.
"""
tool_strings = '\n'.join([f'{tool.name}: {tool.description}' for tool in tools]
)
tool_names = ', '.join([tool.name for tool in tools])
format_instructions = format_instructions.format(tool_names=tool_names)
template = '\n\n'.join([prefix, tool_strings, format_instructions, suffix])
if input_variables is None:
input_variables = ['input', 'agent_scratchpad']
return PromptTemplate(template=template, input_variables=input_variables)
|
@classmethod
def create_prompt(cls, tools: Sequence[BaseTool], prefix: str=PREFIX,
    suffix: str=SUFFIX, format_instructions: str=FORMAT_INSTRUCTIONS,
    input_variables: Optional[List[str]]=None) ->PromptTemplate:
    """Create prompt in the style of the zero shot agent.
    Args:
        tools: List of tools the agent will have access to, used to format the
            prompt.
        prefix: String to put before the list of tools.
        suffix: String to put after the list of tools.
        input_variables: List of input variables the final prompt will expect.
    Returns:
        A PromptTemplate with the template assembled from the pieces here.
    """
    descriptions = '\n'.join(f'{tool.name}: {tool.description}' for tool in
        tools)
    names_csv = ', '.join(tool.name for tool in tools)
    # Substitute the tool names into the reusable instruction template.
    instructions = format_instructions.format(tool_names=names_csv)
    template = '\n\n'.join([prefix, descriptions, instructions, suffix])
    variables = input_variables if input_variables is not None else ['input',
        'agent_scratchpad']
    return PromptTemplate(template=template, input_variables=variables)
|
Create prompt in the style of the zero shot agent.
Args:
tools: List of tools the agent will have access to, used to format the
prompt.
prefix: String to put before the list of tools.
suffix: String to put after the list of tools.
input_variables: List of input variables the final prompt will expect.
Returns:
A PromptTemplate with the template assembled from the pieces here.
|
test_openai_batch
|
"""Test batch tokens from AzureChatOpenAI."""
result = llm.batch(["I'm Pickle Rick", "I'm not Pickle Rick"])
for token in result:
assert isinstance(token.content, str)
|
@pytest.mark.scheduled
def test_openai_batch(llm: AzureChatOpenAI) ->None:
    """Test batch tokens from AzureChatOpenAI."""
    result = llm.batch(["I'm Pickle Rick", "I'm not Pickle Rick"])
    # Each batch entry is a message whose content must be textual.
    for token in result:
        assert isinstance(token.content, str)
|
Test batch tokens from AzureChatOpenAI.
|
_type
|
return 'comma-separated-list'
|
@property
def _type(self) ->str:
    """Identifier of this output parser, used for serialization."""
    return 'comma-separated-list'
| null |
format_request_payload
|
"""Formats the request body according to the input schema of
the model. Returns bytes or seekable file like object in the
format specified in the content_type request header.
"""
|
@abstractmethod
def format_request_payload(self, prompt: str, model_kwargs: Dict) ->bytes:
    """Formats the request body according to the input schema of
    the model. Returns bytes or seekable file like object in the
    format specified in the content_type request header.

    Args:
        prompt: The text prompt to send to the endpoint.
        model_kwargs: Model-specific request parameters.
    """
|
Formats the request body according to the input schema of
the model. Returns bytes or seekable file like object in the
format specified in the content_type request header.
|
_create_retry_decorator
|
"""Returns a tenacity retry decorator, preconfigured to handle PaLM exceptions"""
import google.api_core.exceptions
multiplier = 2
min_seconds = 1
max_seconds = 60
max_retries = 10
return retry(reraise=True, stop=stop_after_attempt(max_retries), wait=
wait_exponential(multiplier=multiplier, min=min_seconds, max=
max_seconds), retry=retry_if_exception_type(google.api_core.exceptions.
ResourceExhausted) | retry_if_exception_type(google.api_core.exceptions
.ServiceUnavailable) | retry_if_exception_type(google.api_core.
exceptions.GoogleAPIError), before_sleep=before_sleep_log(logger,
logging.WARNING))
|
def _create_retry_decorator() ->Callable[[Any], Any]:
    """Returns a tenacity retry decorator, preconfigured to handle PaLM exceptions"""
    import google.api_core.exceptions as gexc
    multiplier = 2
    min_seconds = 1
    max_seconds = 60
    max_retries = 10
    # Retry only on the transient Google API error families.
    retryable = (retry_if_exception_type(gexc.ResourceExhausted) |
        retry_if_exception_type(gexc.ServiceUnavailable) |
        retry_if_exception_type(gexc.GoogleAPIError))
    return retry(reraise=True, stop=stop_after_attempt(max_retries), wait=
        wait_exponential(multiplier=multiplier, min=min_seconds, max=
        max_seconds), retry=retryable, before_sleep=before_sleep_log(
        logger, logging.WARNING))
|
Returns a tenacity retry decorator, preconfigured to handle PaLM exceptions
|
wait
|
time.sleep(1)
|
def wait() ->None:
    """Block the current thread for one second."""
    time.sleep(1)
| null |
test__convert_dict_to_message_system
|
message = SystemMessage(content='foo')
with pytest.raises(ValueError) as e:
_convert_message_to_dict(message)
assert 'Got unknown type' in str(e)
|
def test__convert_dict_to_message_system() ->None:
    """SystemMessage is unsupported and must raise ValueError."""
    message = SystemMessage(content='foo')
    with pytest.raises(ValueError) as e:
        _convert_message_to_dict(message)
    # FIX: assert against the raised exception (e.value), not the
    # ExceptionInfo wrapper — str(e) is a traceback summary, so the original
    # check could pass or fail for the wrong reasons.
    assert 'Got unknown type' in str(e.value)
| null |
setup_class
|
if not os.getenv('OPENAI_API_KEY'):
raise ValueError('OPENAI_API_KEY environment variable is not set')
collection = prepare_collection()
assert collection.count_documents({}) == 0
|
@classmethod
def setup_class(cls) ->None:
    """Verify prerequisites: an OpenAI key and an empty collection."""
    if not os.getenv('OPENAI_API_KEY'):
        raise ValueError('OPENAI_API_KEY environment variable is not set')
    collection = prepare_collection()
    # The suite assumes it starts from a clean collection.
    assert collection.count_documents({}) == 0
| null |
test_all_steps_with_chat_model
|
joke = 'Why did the chicken cross the Mobius strip?'
response = 'Resolution response'
ideation_llm = FakeListChatModel(responses=['Ideation response' for _ in
range(20)])
critique_llm = FakeListChatModel(responses=['Critique response' for _ in
range(20)])
resolver_llm = FakeListChatModel(responses=[response for _ in range(20)])
prompt = PromptTemplate(input_variables=['joke'], template=
'Explain this joke to me: {joke}?')
chain = SmartLLMChain(ideation_llm=ideation_llm, critique_llm=critique_llm,
resolver_llm=resolver_llm, prompt=prompt)
result = chain(joke)
assert result['joke'] == joke
assert result['resolution'] == response
|
def test_all_steps_with_chat_model() ->None:
    """Ideation, critique and resolution each use their own chat model."""
    joke = 'Why did the chicken cross the Mobius strip?'
    final_answer = 'Resolution response'
    # Canned responses; 20 copies cover every internal call the chain makes.
    ideation = FakeListChatModel(responses=['Ideation response'] * 20)
    critique = FakeListChatModel(responses=['Critique response'] * 20)
    resolver = FakeListChatModel(responses=[final_answer] * 20)
    prompt = PromptTemplate(input_variables=['joke'], template=
        'Explain this joke to me: {joke}?')
    chain = SmartLLMChain(ideation_llm=ideation, critique_llm=critique,
        resolver_llm=resolver, prompt=prompt)
    result = chain(joke)
    assert result['joke'] == joke
    assert result['resolution'] == final_answer
| null |
test_embed_documents
|
"""Test embedding a query."""
model = GoogleGenerativeAIEmbeddings(model=_MODEL)
result = model.embed_documents(['Hello world', 'Good day, world'])
assert len(result) == 2
assert len(result[0]) == 768
assert len(result[1]) == 768
|
def test_embed_documents() ->None:
    """Test embedding a query."""
    embedder = GoogleGenerativeAIEmbeddings(model=_MODEL)
    vectors = embedder.embed_documents(['Hello world', 'Good day, world'])
    assert len(vectors) == 2
    # Each document embedding is expected to be 768-dimensional.
    for vec in vectors:
        assert len(vec) == 768
|
Test embedding a query.
|
test_opaqueprompts_functions
|
prompt = PromptTemplate.from_template(prompt_template),
llm = OpenAI()
pg_chain = op.sanitize | RunnableParallel(secure_context=lambda x: x[
'secure_context'], response=(lambda x: x['sanitized_input']) | prompt |
llm | StrOutputParser()) | (lambda x: op.desanitize(x['response'], x[
'secure_context']))
pg_chain.invoke({'question':
'Write a text message to remind John to do password reset for his website through his email to stay secure.'
, 'history': ''})
|
def test_opaqueprompts_functions() ->None:
    """Sanitize, run the LLM, then desanitize via an LCEL pipeline."""
    # BUG FIX: the original line ended with a trailing comma, making
    # `prompt` a 1-tuple and breaking the `| prompt |` composition below.
    prompt = PromptTemplate.from_template(prompt_template)
    llm = OpenAI()
    pg_chain = op.sanitize | RunnableParallel(secure_context=lambda x: x[
        'secure_context'], response=(lambda x: x['sanitized_input']) |
        prompt | llm | StrOutputParser()) | (lambda x: op.desanitize(x[
        'response'], x['secure_context']))
    pg_chain.invoke({'question':
        'Write a text message to remind John to do password reset for his website through his email to stay secure.'
        , 'history': ''})
| null |
_cache_embedding
|
return self.embedding.embed_query(text=text)
|
@lru_cache(maxsize=ASTRA_DB_SEMANTIC_CACHE_EMBEDDING_CACHE_SIZE)
def _cache_embedding(text: str) ->List[float]:
    # Memoized embedding lookup; `self.embedding` is captured from the
    # enclosing scope, so the cache is scoped to that closure.
    # NOTE(review): lru_cache keeps the closure (and its instance) alive
    # for the cache's lifetime — confirm this is intended.
    return self.embedding.embed_query(text=text)
| null |
test_index_simple_delete_full
|
"""Indexing some content to confirm it gets added only once."""
loader = ToyLoader(documents=[Document(page_content=
'This is a test document.'), Document(page_content=
'This is another document.')])
with patch.object(record_manager, 'get_time', return_value=datetime(2021, 1,
1).timestamp()):
assert index(loader, record_manager, vector_store, cleanup='full') == {
'num_added': 2, 'num_deleted': 0, 'num_skipped': 0, 'num_updated': 0}
with patch.object(record_manager, 'get_time', return_value=datetime(2021, 1,
1).timestamp()):
assert index(loader, record_manager, vector_store, cleanup='full') == {
'num_added': 0, 'num_deleted': 0, 'num_skipped': 2, 'num_updated': 0}
loader = ToyLoader(documents=[Document(page_content='mutated document 1'),
Document(page_content='This is another document.')])
with patch.object(record_manager, 'get_time', return_value=datetime(2021, 1,
2).timestamp()):
assert index(loader, record_manager, vector_store, cleanup='full') == {
'num_added': 1, 'num_deleted': 1, 'num_skipped': 1, 'num_updated': 0}
doc_texts = set(vector_store.store.get(uid).page_content for uid in
vector_store.store)
assert doc_texts == {'mutated document 1', 'This is another document.'}
with patch.object(record_manager, 'get_time', return_value=datetime(2021, 1,
2).timestamp()):
assert index(loader, record_manager, vector_store, cleanup='full') == {
'num_added': 0, 'num_deleted': 0, 'num_skipped': 2, 'num_updated': 0}
|
def test_index_simple_delete_full(record_manager: SQLRecordManager,
    vector_store: InMemoryVectorStore) ->None:
    """Indexing some content to confirm it gets added only once."""

    def at_time(year: int, month: int, day: int):
        # Freeze the record manager's clock at the given date.
        return patch.object(record_manager, 'get_time', return_value=
            datetime(year, month, day).timestamp())
    docs = ToyLoader(documents=[Document(page_content=
        'This is a test document.'), Document(page_content=
        'This is another document.')])
    with at_time(2021, 1, 1):
        stats = index(docs, record_manager, vector_store, cleanup='full')
        assert stats == {'num_added': 2, 'num_deleted': 0, 'num_skipped':
            0, 'num_updated': 0}
    # Re-indexing identical content at the same time skips everything.
    with at_time(2021, 1, 1):
        stats = index(docs, record_manager, vector_store, cleanup='full')
        assert stats == {'num_added': 0, 'num_deleted': 0, 'num_skipped':
            2, 'num_updated': 0}
    docs = ToyLoader(documents=[Document(page_content=
        'mutated document 1'), Document(page_content=
        'This is another document.')])
    # One document changed: it is re-added and its stale version deleted.
    with at_time(2021, 1, 2):
        stats = index(docs, record_manager, vector_store, cleanup='full')
        assert stats == {'num_added': 1, 'num_deleted': 1, 'num_skipped':
            1, 'num_updated': 0}
    contents = {vector_store.store.get(uid).page_content for uid in
        vector_store.store}
    assert contents == {'mutated document 1', 'This is another document.'}
    with at_time(2021, 1, 2):
        stats = index(docs, record_manager, vector_store, cleanup='full')
        assert stats == {'num_added': 0, 'num_deleted': 0, 'num_skipped':
            2, 'num_updated': 0}
|
Indexing some content to confirm it gets added only once.
|
test_maps_call
|
search = DataForSeoAPIWrapper(params={'location_name': 'Spain',
'language_code': 'es', 'se_type': 'maps'})
output = search.results('coffee')
assert all(i['address_info']['country_code'] == 'ES' for i in output)
|
def test_maps_call() ->None:
    """A maps search scoped to Spain returns only Spanish results."""
    search = DataForSeoAPIWrapper(params={'location_name': 'Spain',
        'language_code': 'es', 'se_type': 'maps'})
    output = search.results('coffee')
    assert all(i['address_info']['country_code'] == 'ES' for i in output)
| null |
from_llm
|
"""
Create a ToTChain from a language model.
:param llm: The language model to use.
:param kwargs: Additional arguments to pass to the ToTChain constructor.
"""
return cls(llm=llm, **kwargs)
|
@classmethod
def from_llm(cls, llm: BaseLanguageModel, **kwargs: Any) ->ToTChain:
    """
    Create a ToTChain from a language model.

    :param llm: The language model to use.
    :param kwargs: Additional arguments to pass to the ToTChain constructor.
    :return: A new ToTChain instance.
    """
    return cls(llm=llm, **kwargs)
|
Create a ToTChain from a language model.
:param llm: The language model to use.
:param kwargs: Additional arguments to pass to the ToTChain constructor.
|
_stop
|
return [f"""
{self.observation_prefix.rstrip()}""",
f"""
{self.observation_prefix.rstrip()}"""]
|
@property
def _stop(self) ->List[str]:
return [f'\n{self.observation_prefix.rstrip()}',
f'\n\t{self.observation_prefix.rstrip()}']
| null |
validate_llm_chain_input_variables
|
"""Make sure the LLM chain has the correct input variables."""
for var in llm_chain.prompt.input_variables:
if var not in ['tool_input', 'tables', 'schemas', 'examples']:
raise ValueError(
"LLM chain for QueryPowerBITool must have input variables ['tool_input', 'tables', 'schemas', 'examples'], found %s"
, llm_chain.prompt.input_variables)
return llm_chain
|
@validator('llm_chain')
def validate_llm_chain_input_variables(cls, llm_chain: Any) ->Any:
    """Make sure the LLM chain has the correct input variables."""
    for var in llm_chain.prompt.input_variables:
        if var not in ['tool_input', 'tables', 'schemas', 'examples']:
            # BUG FIX: the original passed the variables as a second
            # ValueError argument (logging-style), so the %s placeholder
            # was never substituted into the message.
            raise ValueError(
                "LLM chain for QueryPowerBITool must have input variables ['tool_input', 'tables', 'schemas', 'examples'], found %s"
                 % llm_chain.prompt.input_variables)
    return llm_chain
|
Make sure the LLM chain has the correct input variables.
|
test_pickbest_textembedder_w_label_no_score_no_emb
|
feature_embedder = pick_best_chain.PickBestFeatureEmbedder(auto_embed=False,
model=MockEncoder())
named_actions = {'action1': ['0', '1', '2']}
expected = """shared |context context
|action1 0
|action1 1
|action1 2 """
selected = pick_best_chain.PickBestSelected(index=0, probability=1.0)
event = pick_best_chain.PickBestEvent(inputs={}, to_select_from=
named_actions, based_on={'context': 'context'}, selected=selected)
vw_ex_str = feature_embedder.format(event)
assert vw_ex_str == expected
|
@pytest.mark.requires('vowpal_wabbit_next')
def test_pickbest_textembedder_w_label_no_score_no_emb() ->None:
    """Format a selected (no score) event into VW example text."""
    feature_embedder = pick_best_chain.PickBestFeatureEmbedder(auto_embed=
        False, model=MockEncoder())
    named_actions = {'action1': ['0', '1', '2']}
    # No cost/label prefix appears because the selection carries no score.
    expected = (
        'shared |context context \n|action1 0 \n|action1 1 \n|action1 2 ')
    selected = pick_best_chain.PickBestSelected(index=0, probability=1.0)
    event = pick_best_chain.PickBestEvent(inputs={}, to_select_from=
        named_actions, based_on={'context': 'context'}, selected=selected)
    vw_ex_str = feature_embedder.format(event)
    assert vw_ex_str == expected
| null |
test_pickbest_textembedder_more_namespaces_no_label_no_emb
|
feature_embedder = pick_best_chain.PickBestFeatureEmbedder(auto_embed=False,
model=MockEncoder())
named_actions = {'action1': [{'a': '0', 'b': '0'}, '1', '2']}
context = {'context1': 'context1', 'context2': 'context2'}
expected = """shared |context1 context1 |context2 context2
|a 0 |b 0
|action1 1
|action1 2 """
event = pick_best_chain.PickBestEvent(inputs={}, to_select_from=
named_actions, based_on=context)
vw_ex_str = feature_embedder.format(event)
assert vw_ex_str == expected
|
@pytest.mark.requires('vowpal_wabbit_next')
def test_pickbest_textembedder_more_namespaces_no_label_no_emb() ->None:
    """Dict-valued actions become their own VW namespaces (|a, |b)."""
    feature_embedder = pick_best_chain.PickBestFeatureEmbedder(auto_embed=
        False, model=MockEncoder())
    named_actions = {'action1': [{'a': '0', 'b': '0'}, '1', '2']}
    context = {'context1': 'context1', 'context2': 'context2'}
    expected = """shared |context1 context1 |context2 context2
|a 0 |b 0
|action1 1
|action1 2 """
    event = pick_best_chain.PickBestEvent(inputs={}, to_select_from=
        named_actions, based_on=context)
    vw_ex_str = feature_embedder.format(event)
    assert vw_ex_str == expected
| null |
__post_init__
|
self.creds = self._load_credentials()
|
def __post_init__(self) ->None:
    # Load credentials eagerly as soon as the dataclass is constructed.
    self.creds = self._load_credentials()
| null |
with_listeners
|
"""Bind lifecycle listeners to a Runnable, returning a new Runnable.
Args:
on_start: Called before the runnable starts running, with the Run object.
on_end: Called after the runnable finishes running, with the Run object.
on_error: Called if the runnable throws an error, with the Run object.
Returns:
The Run object contains information about the run, including its id,
type, input, output, error, start_time, end_time, and any tags or metadata
added to the run.
"""
from langchain_core.tracers.root_listeners import RootListenersTracer
return self.__class__(bound=self.bound, kwargs=self.kwargs, config=self.
config, config_factories=[lambda config: {'callbacks': [
RootListenersTracer(config=config, on_start=on_start, on_end=on_end,
on_error=on_error)]}], custom_input_type=self.custom_input_type,
custom_output_type=self.custom_output_type)
|
def with_listeners(self, *, on_start: Optional[Listener]=None, on_end:
    Optional[Listener]=None, on_error: Optional[Listener]=None) ->Runnable[
    Input, Output]:
    """Bind lifecycle listeners to a Runnable, returning a new Runnable.

    Args:
        on_start: Called before the runnable starts running, with the Run object.
        on_end: Called after the runnable finishes running, with the Run object.
        on_error: Called if the runnable throws an error, with the Run object.

    Returns:
        A new instance of the same class with the listeners attached.
        The Run object contains information about the run, including its id,
        type, input, output, error, start_time, end_time, and any tags or metadata
        added to the run.
    """
    from langchain_core.tracers.root_listeners import RootListenersTracer
    # A config *factory* (not a static config) is used so a fresh
    # RootListenersTracer is built from the caller's config on every
    # invocation; all other binding state is carried over unchanged.
    return self.__class__(bound=self.bound, kwargs=self.kwargs, config=self
        .config, config_factories=[lambda config: {'callbacks': [
        RootListenersTracer(config=config, on_start=on_start, on_end=on_end,
        on_error=on_error)]}], custom_input_type=self.custom_input_type,
        custom_output_type=self.custom_output_type)
|
Bind lifecycle listeners to a Runnable, returning a new Runnable.
Args:
on_start: Called before the runnable starts running, with the Run object.
on_end: Called after the runnable finishes running, with the Run object.
on_error: Called if the runnable throws an error, with the Run object.
Returns:
The Run object contains information about the run, including its id,
type, input, output, error, start_time, end_time, and any tags or metadata
added to the run.
|
get_schema
|
from openapi_pydantic import Reference
if isinstance(schema, Reference):
return self.get_referenced_schema(schema)
return schema
|
def get_schema(self, schema: Union[Reference, Schema]) ->Schema:
    """Resolve a schema that may be given indirectly as a Reference.

    A concrete Schema is returned unchanged; a Reference is resolved via
    get_referenced_schema.
    """
    from openapi_pydantic import Reference
    if not isinstance(schema, Reference):
        return schema
    return self.get_referenced_schema(schema)
| null |
get_output_schema
|
return create_model('CombineDocumentsOutput', **{self.output_key: (str, None)})
|
def get_output_schema(self, config: Optional[RunnableConfig]=None) ->Type[
    BaseModel]:
    """Return a pydantic model with a single string field named after output_key."""
    output_field = {self.output_key: (str, None)}
    return create_model('CombineDocumentsOutput', **output_field)
| null |
_import_vectara
|
from langchain_community.vectorstores.vectara import Vectara
return Vectara
|
def _import_vectara() ->Any:
    """Lazily import and return the Vectara vector store class.

    Deferring the import keeps langchain_community off the critical import
    path until Vectara is actually requested.
    """
    from langchain_community.vectorstores.vectara import Vectara
    return Vectara
| null |
get_principles
|
if names is None:
return list(PRINCIPLES.values())
else:
return [PRINCIPLES[name] for name in names]
|
@classmethod
def get_principles(cls, names: Optional[List[str]]=None) ->List[
    ConstitutionalPrinciple]:
    """Return the requested constitutional principles.

    Args:
        names: Keys to look up in the PRINCIPLES registry; when None,
            every registered principle is returned.

    Returns:
        The matching ConstitutionalPrinciple objects.

    Raises:
        KeyError: If any requested name is not in PRINCIPLES.
    """
    if names is None:
        return list(PRINCIPLES.values())
    return [PRINCIPLES[principle_name] for principle_name in names]
| null |
test_dereference_refs_nested_refs_skip
|
schema = {'type': 'object', 'properties': {'info': {'$ref': '#/$defs/info'}
}, '$defs': {'name': {'type': 'string'}, 'info': {'type': 'object',
'properties': {'age': 'int', 'name': {'$ref': '#/$defs/name'}}}}}
expected = {'type': 'object', 'properties': {'info': {'type': 'object',
'properties': {'age': 'int', 'name': {'type': 'string'}}}}, '$defs': {
'name': {'type': 'string'}, 'info': {'type': 'object', 'properties': {
'age': 'int', 'name': {'$ref': '#/$defs/name'}}}}}
actual = dereference_refs(schema)
assert actual == expected
|
def test_dereference_refs_nested_refs_skip() ->None:
    """Nested $refs under 'properties' are inlined; the $defs section is left as-is."""
    # 'info' itself contains a $ref to 'name', so resolution must recurse.
    schema = {'type': 'object', 'properties': {'info': {'$ref':
        '#/$defs/info'}}, '$defs': {'name': {'type': 'string'}, 'info': {
        'type': 'object', 'properties': {'age': 'int', 'name': {'$ref':
        '#/$defs/name'}}}}}
    # Expected: both levels of $ref expanded in 'properties', while '$defs'
    # keeps its original (still-referencing) entries untouched.
    expected = {'type': 'object', 'properties': {'info': {'type': 'object',
        'properties': {'age': 'int', 'name': {'type': 'string'}}}}, '$defs':
        {'name': {'type': 'string'}, 'info': {'type': 'object',
        'properties': {'age': 'int', 'name': {'$ref': '#/$defs/name'}}}}}
    actual = dereference_refs(schema)
    assert actual == expected
| null |
read_file
|
"""
Reads a file from the gitlab repo
Parameters:
file_path(str): the file path
Returns:
str: The file decoded as a string
"""
file = self.gitlab_repo_instance.files.get(file_path, self.gitlab_branch)
return file.decode().decode('utf-8')
|
def read_file(self, file_path: str) ->str:
    """
    Reads a file from the gitlab repo
    Parameters:
        file_path(str): the file path
    Returns:
        str: The file decoded as a string
    """
    project_file = self.gitlab_repo_instance.files.get(file_path, self.
        gitlab_branch)
    # ProjectFile.decode() yields raw bytes; decode those as UTF-8 text.
    raw_bytes = project_file.decode()
    return raw_bytes.decode('utf-8')
|
Reads a file from the gitlab repo
Parameters:
file_path(str): the file path
Returns:
str: The file decoded as a string
|
load
|
if os.environ.get(API_KEY_ENV_VAR_NAME, None) is None:
raise Exception(f'Missing `{API_KEY_ENV_VAR_NAME}` environment variable.')
loader = WebBaseLoader('https://lilianweng.github.io/posts/2023-06-23-agent/')
data = loader.load()
text_splitter = RecursiveCharacterTextSplitter(chunk_size=500, chunk_overlap=0)
all_splits = text_splitter.split_documents(data)
MomentoVectorIndex.from_documents(all_splits, embedding=OpenAIEmbeddings(),
client=PreviewVectorIndexClient(configuration=VectorIndexConfigurations
.Default.latest(), credential_provider=CredentialProvider.
from_environment_variable(API_KEY_ENV_VAR_NAME)), index_name=index_name)
|
def load(API_KEY_ENV_VAR_NAME: str, index_name: str) ->None:
    """Populate a Momento Vector Index with chunks of a sample blog post.

    Args:
        API_KEY_ENV_VAR_NAME: Name of the environment variable holding the
            Momento API key.
        index_name: Name of the Momento vector index to write into.

    Raises:
        Exception: If the named environment variable is not set.
    """
    if os.environ.get(API_KEY_ENV_VAR_NAME, None) is None:
        raise Exception(
            f'Missing `{API_KEY_ENV_VAR_NAME}` environment variable.')
    # Fetch the source page over the network.
    loader = WebBaseLoader(
        'https://lilianweng.github.io/posts/2023-06-23-agent/')
    data = loader.load()
    # Split into non-overlapping 500-character chunks before embedding.
    text_splitter = RecursiveCharacterTextSplitter(chunk_size=500,
        chunk_overlap=0)
    all_splits = text_splitter.split_documents(data)
    # Embed every chunk with OpenAI and upsert into the Momento index.
    MomentoVectorIndex.from_documents(all_splits, embedding=
        OpenAIEmbeddings(), client=PreviewVectorIndexClient(configuration=
        VectorIndexConfigurations.Default.latest(), credential_provider=
        CredentialProvider.from_environment_variable(API_KEY_ENV_VAR_NAME)),
        index_name=index_name)
| null |
__from
|
if metric not in INDEX_METRICS:
raise ValueError(
f'Unsupported distance metric: {metric}. Expected one of {list(INDEX_METRICS)}'
)
annoy = dependable_annoy_import()
if not embeddings:
raise ValueError('embeddings must be provided to build AnnoyIndex')
f = len(embeddings[0])
index = annoy.AnnoyIndex(f, metric=metric)
for i, emb in enumerate(embeddings):
index.add_item(i, emb)
index.build(trees, n_jobs=n_jobs)
documents = []
for i, text in enumerate(texts):
metadata = metadatas[i] if metadatas else {}
documents.append(Document(page_content=text, metadata=metadata))
index_to_id = {i: str(uuid.uuid4()) for i in range(len(documents))}
docstore = InMemoryDocstore({index_to_id[i]: doc for i, doc in enumerate(
documents)})
return cls(embedding.embed_query, index, metric, docstore, index_to_id)
|
@classmethod
def __from(cls, texts: List[str], embeddings: List[List[float]], embedding:
    Embeddings, metadatas: Optional[List[dict]]=None, metric: str=
    DEFAULT_METRIC, trees: int=100, n_jobs: int=-1, **kwargs: Any) ->Annoy:
    """Build an Annoy vector store from texts with precomputed embeddings.

    Args:
        texts: Raw document texts, parallel to ``embeddings``.
        embeddings: One embedding vector per text; must be non-empty.
        embedding: Embedding object whose ``embed_query`` is kept for queries.
        metadatas: Optional per-text metadata dicts.
        metric: Annoy distance metric; must be in INDEX_METRICS.
        trees: Number of trees for the Annoy index build.
        n_jobs: Parallelism for the index build (-1 = all cores).

    Raises:
        ValueError: On an unsupported metric or empty embeddings.
    """
    if metric not in INDEX_METRICS:
        raise ValueError(
            f'Unsupported distance metric: {metric}. Expected one of {list(INDEX_METRICS)}'
            )
    annoy = dependable_annoy_import()
    if not embeddings:
        raise ValueError('embeddings must be provided to build AnnoyIndex')
    # Vector dimensionality is taken from the first embedding.
    dimension = len(embeddings[0])
    index = annoy.AnnoyIndex(dimension, metric=metric)
    for item_id, vector in enumerate(embeddings):
        index.add_item(item_id, vector)
    index.build(trees, n_jobs=n_jobs)
    documents = [Document(page_content=text, metadata=metadatas[pos] if
        metadatas else {}) for pos, text in enumerate(texts)]
    # Map positional Annoy ids to fresh UUID docstore keys.
    index_to_id = {pos: str(uuid.uuid4()) for pos in range(len(documents))}
    docstore = InMemoryDocstore({doc_id: doc for doc_id, doc in zip(
        index_to_id.values(), documents)})
    return cls(embedding.embed_query, index, metric, docstore, index_to_id)
| null |
extension
|
return 'json'
|
@classmethod
def extension(cls) ->str:
    """Return the file extension this class handles ('json')."""
    return 'json'
| null |
load
|
return list(self.lazy_load())
|
def load(self) ->List[Document]:
    """Eagerly load all documents by materializing lazy_load into a list."""
    return list(self.lazy_load())
| null |
_validate_tools
|
validate_tools_single_input(cls.__name__, tools)
if len(tools) == 0:
raise ValueError(
f'Got no tools for {cls.__name__}. At least one tool must be provided.'
)
for tool in tools:
if tool.description is None:
raise ValueError(
f'Got a tool {tool.name} without a description. For this agent, a description must always be provided.'
)
super()._validate_tools(tools)
|
@classmethod
def _validate_tools(cls, tools: Sequence[BaseTool]) ->None:
    """Validate the agent's tool set.

    Checks that every tool takes a single input, that at least one tool is
    supplied, and that each tool carries a description; then defers to the
    base class's validation.

    Raises:
        ValueError: If no tools are given or a tool lacks a description.
    """
    validate_tools_single_input(cls.__name__, tools)
    if not tools:
        raise ValueError(
            f'Got no tools for {cls.__name__}. At least one tool must be provided.'
            )
    for tool in tools:
        if tool.description is not None:
            continue
        raise ValueError(
            f'Got a tool {tool.name} without a description. For this agent, a description must always be provided.'
            )
    super()._validate_tools(tools)
| null |
always_verbose
|
return True
|
@property
def always_verbose(self) ->bool:
    """Whether this handler runs even when verbose mode is off (always True)."""
    return True
| null |
on_tool_error
|
"""Handle an error for a tool run."""
tool_run = self._get_run(run_id, run_type='tool')
tool_run.error = self._get_stacktrace(error)
tool_run.end_time = datetime.now(timezone.utc)
tool_run.events.append({'name': 'error', 'time': tool_run.end_time})
self._end_trace(tool_run)
self._on_tool_error(tool_run)
return tool_run
|
def on_tool_error(self, error: BaseException, *, run_id: UUID, **kwargs: Any
    ) ->Run:
    """Handle an error for a tool run.

    Records the stack trace and an error event with a UTC end time on the
    run identified by run_id, finalizes the trace, and returns the run.
    """
    tool_run = self._get_run(run_id, run_type='tool')
    tool_run.error = self._get_stacktrace(error)
    # Stamp the same UTC instant as both the end time and the event time.
    ended_at = datetime.now(timezone.utc)
    tool_run.end_time = ended_at
    tool_run.events.append({'name': 'error', 'time': ended_at})
    self._end_trace(tool_run)
    self._on_tool_error(tool_run)
    return tool_run
|
Handle an error for a tool run.
|
on_llm_start_common
|
self.llm_starts += 1
self.starts += 1
|
def on_llm_start_common(self) ->None:
    """Bump the LLM-start and overall start counters."""
    self.llm_starts += 1
    self.starts += 1
| null |
test_without_fallback_parser
|
class FirstCharParser(BaseBlobParser):
def lazy_parse(self, blob: Blob) ->Iterator[Document]:
"""Extract the first character of a blob."""
yield Document(page_content=blob.as_string()[0])
class SecondCharParser(BaseBlobParser):
def lazy_parse(self, blob: Blob) ->Iterator[Document]:
"""Extract the second character of a blob."""
yield Document(page_content=blob.as_string()[1])
parser = MimeTypeBasedParser(handlers={'text/plain': FirstCharParser(),
'text/html': SecondCharParser()})
blob = Blob(data=b'Hello World', mimetype='text/plain')
docs = parser.parse_folder(blob)
assert len(docs) == 1
doc = docs[0]
assert doc.page_content == 'H'
blob = Blob(data=b'Hello World', mimetype='text/html')
docs = parser.parse_folder(blob)
assert len(docs) == 1
doc = docs[0]
assert doc.page_content == 'e'
blob = Blob(data=b'Hello World', mimetype='text/csv')
with pytest.raises(ValueError, match='Unsupported mime type'):
parser.parse_folder(blob)
|
def test_without_fallback_parser(self) ->None:
    """Mime-type dispatch without a fallback: unknown mime types must raise."""
    class FirstCharParser(BaseBlobParser):
        def lazy_parse(self, blob: Blob) ->Iterator[Document]:
            """Extract the first character of a blob."""
            yield Document(page_content=blob.as_string()[0])
    class SecondCharParser(BaseBlobParser):
        def lazy_parse(self, blob: Blob) ->Iterator[Document]:
            """Extract the second character of a blob."""
            yield Document(page_content=blob.as_string()[1])
    # No fallback_parser is configured, so only these two mime types resolve.
    parser = MimeTypeBasedParser(handlers={'text/plain': FirstCharParser(),
        'text/html': SecondCharParser()})
    # NOTE(review): the entry point used here is parse_folder — confirm this
    # matches the MimeTypeBasedParser API (the usual blob-parser method is
    # `parse`).
    blob = Blob(data=b'Hello World', mimetype='text/plain')
    docs = parser.parse_folder(blob)
    assert len(docs) == 1
    doc = docs[0]
    assert doc.page_content == 'H'
    blob = Blob(data=b'Hello World', mimetype='text/html')
    docs = parser.parse_folder(blob)
    assert len(docs) == 1
    doc = docs[0]
    assert doc.page_content == 'e'
    # An unhandled mime type with no fallback must raise.
    blob = Blob(data=b'Hello World', mimetype='text/csv')
    with pytest.raises(ValueError, match='Unsupported mime type'):
        parser.parse_folder(blob)
| null |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.