method_name
stringlengths 1
78
| method_body
stringlengths 3
9.66k
| full_code
stringlengths 31
10.7k
| docstring
stringlengths 4
4.74k
⌀ |
|---|---|---|---|
_format_definitions
|
formatted_definitions: List[str] = []
for definition in definitions:
formatted_definitions.extend(self._format_definition(definition))
if len(formatted_definitions) == 1:
return f"Definition of '{query}':\n{formatted_definitions[0]}"
result = f"Definitions of '{query}':\n\n"
for i, formatted_definition in enumerate(formatted_definitions, 1):
result += f'{i}. {formatted_definition}\n'
return result
|
def _format_definitions(self, query: str, definitions: List[Dict]) ->str:
formatted_definitions: List[str] = []
for definition in definitions:
formatted_definitions.extend(self._format_definition(definition))
if len(formatted_definitions) == 1:
return f"Definition of '{query}':\n{formatted_definitions[0]}"
result = f"Definitions of '{query}':\n\n"
for i, formatted_definition in enumerate(formatted_definitions, 1):
result += f'{i}. {formatted_definition}\n'
return result
| null |
test_stream
|
"""Test streaming tokens from OpenAI."""
llm = Chat__ModuleName__()
for token in llm.stream("I'm Pickle Rick"):
assert isinstance(token.content, str)
|
def test_stream() ->None:
"""Test streaming tokens from OpenAI."""
llm = Chat__ModuleName__()
for token in llm.stream("I'm Pickle Rick"):
assert isinstance(token.content, str)
|
Test streaming tokens from OpenAI.
|
test_with_metadatas_with_scores_using_vector
|
"""Test end to end construction and scored search, using embedding vector."""
texts = ['hello bagel', 'hello langchain']
metadatas = [{'page': str(i)} for i in range(len(texts))]
embeddings = [[1.1, 2.3, 3.2], [0.3, 0.3, 0.1]]
vector_search = Bagel.from_texts(cluster_name='testing_vector', texts=texts,
metadatas=metadatas, text_embeddings=embeddings)
embedded_query = [1.1, 2.3, 3.2]
output = vector_search.similarity_search_by_vector_with_relevance_scores(
query_embeddings=embedded_query, k=1)
assert output == [(Document(page_content='hello bagel', metadata={'page':
'0'}), 0.0)]
vector_search.delete_cluster()
|
def test_with_metadatas_with_scores_using_vector() ->None:
"""Test end to end construction and scored search, using embedding vector."""
texts = ['hello bagel', 'hello langchain']
metadatas = [{'page': str(i)} for i in range(len(texts))]
embeddings = [[1.1, 2.3, 3.2], [0.3, 0.3, 0.1]]
vector_search = Bagel.from_texts(cluster_name='testing_vector', texts=
texts, metadatas=metadatas, text_embeddings=embeddings)
embedded_query = [1.1, 2.3, 3.2]
output = vector_search.similarity_search_by_vector_with_relevance_scores(
query_embeddings=embedded_query, k=1)
assert output == [(Document(page_content='hello bagel', metadata={
'page': '0'}), 0.0)]
vector_search.delete_cluster()
|
Test end to end construction and scored search, using embedding vector.
|
_call
|
"""Generate text from a prompt.
Args:
prompt: The prompt to generate text from.
stop: A list of sequences to stop generation when encountered.
Returns:
The generated text.
Example:
.. code-block:: python
response = llm("Tell me a joke.")
"""
text = []
_run_manager = run_manager or CallbackManagerForLLMRun.get_noop_manager()
for chunk in self.client(prompt, stop=stop, stream=True):
text.append(chunk)
_run_manager.on_llm_new_token(chunk, verbose=self.verbose)
return ''.join(text)
|
def _call(self, prompt: str, stop: Optional[Sequence[str]]=None,
run_manager: Optional[CallbackManagerForLLMRun]=None, **kwargs: Any) ->str:
"""Generate text from a prompt.
Args:
prompt: The prompt to generate text from.
stop: A list of sequences to stop generation when encountered.
Returns:
The generated text.
Example:
.. code-block:: python
response = llm("Tell me a joke.")
"""
text = []
_run_manager = run_manager or CallbackManagerForLLMRun.get_noop_manager()
for chunk in self.client(prompt, stop=stop, stream=True):
text.append(chunk)
_run_manager.on_llm_new_token(chunk, verbose=self.verbose)
return ''.join(text)
|
Generate text from a prompt.
Args:
prompt: The prompt to generate text from.
stop: A list of sequences to stop generation when encountered.
Returns:
The generated text.
Example:
.. code-block:: python
response = llm("Tell me a joke.")
|
FakeFind
|
def fn(self: Any, **kwargs: Any) ->Any:
return attrdict({'resources': {'123': attrdict({'fields': {'456':
attrdict({'paragraphs': {'123/t/text/0-14': attrdict({'text':
'This is a test', 'order': 0})}})}, 'data': {'texts': {'text': {
'body': 'This is a test'}}}, 'extra': attrdict({'metadata': {'some':
'metadata'}})})}})
return fn
|
def FakeFind(**args: Any) ->Any:
def fn(self: Any, **kwargs: Any) ->Any:
return attrdict({'resources': {'123': attrdict({'fields': {'456':
attrdict({'paragraphs': {'123/t/text/0-14': attrdict({'text':
'This is a test', 'order': 0})}})}, 'data': {'texts': {'text':
{'body': 'This is a test'}}}, 'extra': attrdict({'metadata': {
'some': 'metadata'}})})}})
return fn
| null |
_get_attachments
|
"""Get all attachments from a page.
Args:
soup: BeautifulSoup4 soup object.
Returns:
List of attachments.
"""
from bs4 import BeautifulSoup, Tag
content_list = soup.find('ul', {'class': 'contentList'})
if content_list is None:
raise ValueError('No content list found.')
content_list: BeautifulSoup
attachments = []
for attachment in content_list.find_all('ul', {'class': 'attachments'}):
attachment: Tag
for link in attachment.find_all('a'):
link: Tag
href = link.get('href')
if href is not None and not href.startswith('#'):
attachments.append(href)
return attachments
|
def _get_attachments(self, soup: Any) ->List[str]:
"""Get all attachments from a page.
Args:
soup: BeautifulSoup4 soup object.
Returns:
List of attachments.
"""
from bs4 import BeautifulSoup, Tag
content_list = soup.find('ul', {'class': 'contentList'})
if content_list is None:
raise ValueError('No content list found.')
content_list: BeautifulSoup
attachments = []
for attachment in content_list.find_all('ul', {'class': 'attachments'}):
attachment: Tag
for link in attachment.find_all('a'):
link: Tag
href = link.get('href')
if href is not None and not href.startswith('#'):
attachments.append(href)
return attachments
|
Get all attachments from a page.
Args:
soup: BeautifulSoup4 soup object.
Returns:
List of attachments.
|
on_tool_start
|
"""Do nothing when tool starts."""
pass
|
def on_tool_start(self, serialized: Dict[str, Any], input_str: str, **
kwargs: Any) ->None:
"""Do nothing when tool starts."""
pass
|
Do nothing when tool starts.
|
requires_input
|
"""
This evaluator does not require input.
"""
return False
|
@property
def requires_input(self) ->bool:
"""
This evaluator does not require input.
"""
return False
|
This evaluator does not require input.
|
delete_by_document_id
|
"""
Given this is a "similarity search" cache, an invalidation pattern
that makes sense is first a lookup to get an ID, and then deleting
with that ID. This is for the second step.
"""
self.collection.delete_one(document_id)
|
def delete_by_document_id(self, document_id: str) ->None:
"""
Given this is a "similarity search" cache, an invalidation pattern
that makes sense is first a lookup to get an ID, and then deleting
with that ID. This is for the second step.
"""
self.collection.delete_one(document_id)
|
Given this is a "similarity search" cache, an invalidation pattern
that makes sense is first a lookup to get an ID, and then deleting
with that ID. This is for the second step.
|
test_with_metadatas
|
"""Test end to end construction and search."""
texts = ['hello bagel', 'hello langchain']
metadatas = [{'metadata': str(i)} for i in range(len(texts))]
txt_search = Bagel.from_texts(cluster_name='testing', texts=texts,
metadatas=metadatas)
output = txt_search.similarity_search('hello bagel', k=1)
assert output == [Document(page_content='hello bagel', metadata={'metadata':
'0'})]
txt_search.delete_cluster()
|
def test_with_metadatas() ->None:
"""Test end to end construction and search."""
texts = ['hello bagel', 'hello langchain']
metadatas = [{'metadata': str(i)} for i in range(len(texts))]
txt_search = Bagel.from_texts(cluster_name='testing', texts=texts,
metadatas=metadatas)
output = txt_search.similarity_search('hello bagel', k=1)
assert output == [Document(page_content='hello bagel', metadata={
'metadata': '0'})]
txt_search.delete_cluster()
|
Test end to end construction and search.
|
test_stream
|
"""Test that stream works."""
chat = PaiEasChatEndpoint(eas_service_url=os.getenv('EAS_SERVICE_URL'),
eas_service_token=os.getenv('EAS_SERVICE_TOKEN'), streaming=True)
callback_handler = FakeCallbackHandler()
callback_manager = CallbackManager([callback_handler])
response = chat(messages=[HumanMessage(content='Hello.'), AIMessage(content
='Hello!'), HumanMessage(content='Who are you?')], stream=True,
callbacks=callback_manager)
assert callback_handler.llm_streams > 0
assert isinstance(response.content, str)
|
def test_stream() ->None:
"""Test that stream works."""
chat = PaiEasChatEndpoint(eas_service_url=os.getenv('EAS_SERVICE_URL'),
eas_service_token=os.getenv('EAS_SERVICE_TOKEN'), streaming=True)
callback_handler = FakeCallbackHandler()
callback_manager = CallbackManager([callback_handler])
response = chat(messages=[HumanMessage(content='Hello.'), AIMessage(
content='Hello!'), HumanMessage(content='Who are you?')], stream=
True, callbacks=callback_manager)
assert callback_handler.llm_streams > 0
assert isinstance(response.content, str)
|
Test that stream works.
|
on_llm_new_token
|
"""Do nothing when a new token is generated."""
pass
|
def on_llm_new_token(self, token: str, **kwargs: Any) ->None:
"""Do nothing when a new token is generated."""
pass
|
Do nothing when a new token is generated.
|
test_load_valid_numeric_content
|
file_path = '/workspaces/langchain/test.json'
expected_docs = [Document(page_content='99', metadata={'source': file_path,
'seq_num': 1}), Document(page_content='99.5', metadata={'source':
file_path, 'seq_num': 2})]
mocker.patch('builtins.open', mocker.mock_open())
mocker.patch('pathlib.Path.read_text', return_value=
"""
[
{"num": 99}, {"num": 99.5}
]
"""
)
loader = JSONLoader(file_path=file_path, jq_schema='.[].num', text_content=
False)
result = loader.load()
assert result == expected_docs
|
def test_load_valid_numeric_content(mocker: MockerFixture) ->None:
file_path = '/workspaces/langchain/test.json'
expected_docs = [Document(page_content='99', metadata={'source':
file_path, 'seq_num': 1}), Document(page_content='99.5', metadata={
'source': file_path, 'seq_num': 2})]
mocker.patch('builtins.open', mocker.mock_open())
mocker.patch('pathlib.Path.read_text', return_value=
"""
[
{"num": 99}, {"num": 99.5}
]
"""
)
loader = JSONLoader(file_path=file_path, jq_schema='.[].num',
text_content=False)
result = loader.load()
assert result == expected_docs
| null |
concatenate_rows
|
"""Combine message information in a readable format ready to be used.
Args:
row: dictionary containing message information.
"""
sender = row['sender_name']
text = row['content']
date = datetime.datetime.fromtimestamp(row['timestamp_ms'] / 1000).strftime(
'%Y-%m-%d %H:%M:%S')
return f'{sender} on {date}: {text}\n\n'
|
def concatenate_rows(row: dict) ->str:
"""Combine message information in a readable format ready to be used.
Args:
row: dictionary containing message information.
"""
sender = row['sender_name']
text = row['content']
date = datetime.datetime.fromtimestamp(row['timestamp_ms'] / 1000
).strftime('%Y-%m-%d %H:%M:%S')
return f'{sender} on {date}: {text}\n\n'
|
Combine message information in a readable format ready to be used.
Args:
row: dictionary containing message information.
|
get_count_value
|
return result.get(key, 0) or 0
|
def get_count_value(key: str, result: Dict[str, Any]) ->int:
return result.get(key, 0) or 0
| null |
combine_docs
|
"""Combine multiple documents recursively.
Args:
docs: List of documents to combine, assumed that each one is less than
`token_max`.
token_max: Recursively creates groups of documents less than this number
of tokens.
callbacks: Callbacks to be passed through
**kwargs: additional parameters to be passed to LLM calls (like other
input variables besides the documents)
Returns:
The first element returned is the single string output. The second
element returned is a dictionary of other keys to return.
"""
result_docs, extra_return_dict = self._collapse(docs, token_max=token_max,
callbacks=callbacks, **kwargs)
return self.combine_documents_chain.combine_docs(docs=result_docs,
callbacks=callbacks, **kwargs)
|
def combine_docs(self, docs: List[Document], token_max: Optional[int]=None,
callbacks: Callbacks=None, **kwargs: Any) ->Tuple[str, dict]:
"""Combine multiple documents recursively.
Args:
docs: List of documents to combine, assumed that each one is less than
`token_max`.
token_max: Recursively creates groups of documents less than this number
of tokens.
callbacks: Callbacks to be passed through
**kwargs: additional parameters to be passed to LLM calls (like other
input variables besides the documents)
Returns:
The first element returned is the single string output. The second
element returned is a dictionary of other keys to return.
"""
result_docs, extra_return_dict = self._collapse(docs, token_max=
token_max, callbacks=callbacks, **kwargs)
return self.combine_documents_chain.combine_docs(docs=result_docs,
callbacks=callbacks, **kwargs)
|
Combine multiple documents recursively.
Args:
docs: List of documents to combine, assumed that each one is less than
`token_max`.
token_max: Recursively creates groups of documents less than this number
of tokens.
callbacks: Callbacks to be passed through
**kwargs: additional parameters to be passed to LLM calls (like other
input variables besides the documents)
Returns:
The first element returned is the single string output. The second
element returned is a dictionary of other keys to return.
|
from_embeddings
|
return cls(model=model, distance_func_name=distance_func_name, **kwargs)
|
@classmethod
def from_embeddings(cls, model: Embeddings, distance_func_name: str=
'distance', **kwargs: Any) ->BaseOutputParser:
return cls(model=model, distance_func_name=distance_func_name, **kwargs)
| null |
lazy_load
|
"""Lazy load text from the url(s) in web_path."""
for doc in self.load():
yield doc
|
def lazy_load(self) ->Iterator[Document]:
"""Lazy load text from the url(s) in web_path."""
for doc in self.load():
yield doc
|
Lazy load text from the url(s) in web_path.
|
test_json_equality_evaluator_requires_reference
|
assert json_equality_evaluator.requires_reference is True
|
def test_json_equality_evaluator_requires_reference(json_equality_evaluator:
JsonEqualityEvaluator) ->None:
assert json_equality_evaluator.requires_reference is True
| null |
get_title
|
"""Document title."""
|
@abstractmethod
def get_title(self) ->str:
"""Document title."""
|
Document title.
|
test_scann_vector_sim
|
"""Test vector similarity."""
texts = ['foo', 'bar', 'baz']
docsearch = ScaNN.from_texts(texts, FakeEmbeddings())
index_to_id = docsearch.index_to_docstore_id
expected_docstore = InMemoryDocstore({index_to_id[0]: Document(page_content
='foo'), index_to_id[1]: Document(page_content='bar'), index_to_id[2]:
Document(page_content='baz')})
assert docsearch.docstore.__dict__ == expected_docstore.__dict__
query_vec = FakeEmbeddings().embed_query(text='foo')
output = docsearch.similarity_search_by_vector(query_vec, k=1)
assert output == [Document(page_content='foo')]
|
def test_scann_vector_sim() ->None:
"""Test vector similarity."""
texts = ['foo', 'bar', 'baz']
docsearch = ScaNN.from_texts(texts, FakeEmbeddings())
index_to_id = docsearch.index_to_docstore_id
expected_docstore = InMemoryDocstore({index_to_id[0]: Document(
page_content='foo'), index_to_id[1]: Document(page_content='bar'),
index_to_id[2]: Document(page_content='baz')})
assert docsearch.docstore.__dict__ == expected_docstore.__dict__
query_vec = FakeEmbeddings().embed_query(text='foo')
output = docsearch.similarity_search_by_vector(query_vec, k=1)
assert output == [Document(page_content='foo')]
|
Test vector similarity.
|
chain
|
"""
Process the given text to extract graph data and constructs a graph document from the extracted information.
The constructed graph document is then added to the graph.
Parameters:
- text (str): The input text from which the information will be extracted to construct the graph.
- allowed_nodes (Optional[List[str]]): A list of node labels to guide the extraction process.
If not provided, extraction won't have specific restriction on node labels.
- allowed_relationships (Optional[List[str]]): A list of relationship types to guide the extraction process.
If not provided, extraction won't have specific restriction on relationship types.
Returns:
str: A confirmation message indicating the completion of the graph construction.
"""
extract_chain = get_extraction_chain(allowed_nodes, allowed_relationships)
data = extract_chain.run(text)
graph_document = GraphDocument(nodes=[map_to_base_node(node) for node in
data.nodes], relationships=[map_to_base_relationship(rel) for rel in
data.rels], source=Document(page_content=text))
graph.add_graph_documents([graph_document])
return 'Graph construction finished'
|
def chain(text: str, allowed_nodes: Optional[List[str]]=None,
allowed_relationships: Optional[List[str]]=None) ->str:
"""
Process the given text to extract graph data and constructs a graph document from the extracted information.
The constructed graph document is then added to the graph.
Parameters:
- text (str): The input text from which the information will be extracted to construct the graph.
- allowed_nodes (Optional[List[str]]): A list of node labels to guide the extraction process.
If not provided, extraction won't have specific restriction on node labels.
- allowed_relationships (Optional[List[str]]): A list of relationship types to guide the extraction process.
If not provided, extraction won't have specific restriction on relationship types.
Returns:
str: A confirmation message indicating the completion of the graph construction.
"""
extract_chain = get_extraction_chain(allowed_nodes, allowed_relationships)
data = extract_chain.run(text)
graph_document = GraphDocument(nodes=[map_to_base_node(node) for node in
data.nodes], relationships=[map_to_base_relationship(rel) for rel in
data.rels], source=Document(page_content=text))
graph.add_graph_documents([graph_document])
return 'Graph construction finished'
|
Process the given text to extract graph data and constructs a graph document from the extracted information.
The constructed graph document is then added to the graph.
Parameters:
- text (str): The input text from which the information will be extracted to construct the graph.
- allowed_nodes (Optional[List[str]]): A list of node labels to guide the extraction process.
If not provided, extraction won't have specific restriction on node labels.
- allowed_relationships (Optional[List[str]]): A list of relationship types to guide the extraction process.
If not provided, extraction won't have specific restriction on relationship types.
Returns:
str: A confirmation message indicating the completion of the graph construction.
|
test_get_action_and_input
|
"""Test getting an action from text."""
llm_output = """Thought: I need to search for NBA
Action: Search
Action Input: NBA"""
action, action_input = get_action_and_input(llm_output)
assert action == 'Search'
assert action_input == 'NBA'
|
def test_get_action_and_input() ->None:
"""Test getting an action from text."""
llm_output = (
'Thought: I need to search for NBA\nAction: Search\nAction Input: NBA')
action, action_input = get_action_and_input(llm_output)
assert action == 'Search'
assert action_input == 'NBA'
|
Test getting an action from text.
|
_search
|
"""Return Elasticsearch documents most similar to query, along with scores.
Args:
query: Text to look up documents similar to.
k: Number of Documents to return. Defaults to 4.
query_vector: Embedding to look up documents similar to.
fetch_k: Number of candidates to fetch from each shard.
Defaults to 50.
fields: List of fields to return from Elasticsearch.
Defaults to only returning the text field.
filter: Array of Elasticsearch filter clauses to apply to the query.
custom_query: Function to modify the Elasticsearch
query body before it is sent to Elasticsearch.
Returns:
List of Documents most similar to the query and score for each
"""
if fields is None:
fields = []
if 'metadata' not in fields:
fields.append('metadata')
if self.query_field not in fields:
fields.append(self.query_field)
if self.embedding and query is not None:
query_vector = self.embedding.embed_query(query)
query_body = self.strategy.query(query_vector=query_vector, query=query, k=
k, fetch_k=fetch_k, vector_query_field=self.vector_query_field,
text_field=self.query_field, filter=filter or [], similarity=self.
distance_strategy)
logger.debug(f'Query body: {query_body}')
if custom_query is not None:
query_body = custom_query(query_body, query)
logger.debug(f'Calling custom_query, Query body now: {query_body}')
response = self.client.search(index=self.index_name, **query_body, size=k,
source=fields)
def default_doc_builder(hit: Dict) ->Document:
return Document(page_content=hit['_source'].get(self.query_field, ''),
metadata=hit['_source']['metadata'])
doc_builder = doc_builder or default_doc_builder
docs_and_scores = []
for hit in response['hits']['hits']:
for field in fields:
if field in hit['_source'] and field not in ['metadata', self.
query_field]:
if 'metadata' not in hit['_source']:
hit['_source']['metadata'] = {}
hit['_source']['metadata'][field] = hit['_source'][field]
docs_and_scores.append((doc_builder(hit), hit['_score']))
return docs_and_scores
|
def _search(self, query: Optional[str]=None, k: int=4, query_vector: Union[
List[float], None]=None, fetch_k: int=50, fields: Optional[List[str]]=
None, filter: Optional[List[dict]]=None, custom_query: Optional[
Callable[[Dict, Union[str, None]], Dict]]=None, doc_builder: Optional[
Callable[[Dict], Document]]=None, **kwargs: Any) ->List[Tuple[Document,
float]]:
"""Return Elasticsearch documents most similar to query, along with scores.
Args:
query: Text to look up documents similar to.
k: Number of Documents to return. Defaults to 4.
query_vector: Embedding to look up documents similar to.
fetch_k: Number of candidates to fetch from each shard.
Defaults to 50.
fields: List of fields to return from Elasticsearch.
Defaults to only returning the text field.
filter: Array of Elasticsearch filter clauses to apply to the query.
custom_query: Function to modify the Elasticsearch
query body before it is sent to Elasticsearch.
Returns:
List of Documents most similar to the query and score for each
"""
if fields is None:
fields = []
if 'metadata' not in fields:
fields.append('metadata')
if self.query_field not in fields:
fields.append(self.query_field)
if self.embedding and query is not None:
query_vector = self.embedding.embed_query(query)
query_body = self.strategy.query(query_vector=query_vector, query=query,
k=k, fetch_k=fetch_k, vector_query_field=self.vector_query_field,
text_field=self.query_field, filter=filter or [], similarity=self.
distance_strategy)
logger.debug(f'Query body: {query_body}')
if custom_query is not None:
query_body = custom_query(query_body, query)
logger.debug(f'Calling custom_query, Query body now: {query_body}')
response = self.client.search(index=self.index_name, **query_body, size
=k, source=fields)
def default_doc_builder(hit: Dict) ->Document:
return Document(page_content=hit['_source'].get(self.query_field,
''), metadata=hit['_source']['metadata'])
doc_builder = doc_builder or default_doc_builder
docs_and_scores = []
for hit in response['hits']['hits']:
for field in fields:
if field in hit['_source'] and field not in ['metadata', self.
query_field]:
if 'metadata' not in hit['_source']:
hit['_source']['metadata'] = {}
hit['_source']['metadata'][field] = hit['_source'][field]
docs_and_scores.append((doc_builder(hit), hit['_score']))
return docs_and_scores
|
Return Elasticsearch documents most similar to query, along with scores.
Args:
query: Text to look up documents similar to.
k: Number of Documents to return. Defaults to 4.
query_vector: Embedding to look up documents similar to.
fetch_k: Number of candidates to fetch from each shard.
Defaults to 50.
fields: List of fields to return from Elasticsearch.
Defaults to only returning the text field.
filter: Array of Elasticsearch filter clauses to apply to the query.
custom_query: Function to modify the Elasticsearch
query body before it is sent to Elasticsearch.
Returns:
List of Documents most similar to the query and score for each
|
__new__
|
"""Initialize the OpenAI object."""
model_name = data.get('model_name', '')
if (model_name.startswith('gpt-3.5-turbo') or model_name.startswith('gpt-4')
) and '-instruct' not in model_name:
warnings.warn(
'You are trying to use a chat model. This way of initializing it is no longer supported. Instead, please use: `from langchain_community.chat_models import ChatOpenAI`'
)
return OpenAIChat(**data)
return super().__new__(cls)
|
def __new__(cls, **data: Any) ->Union[OpenAIChat, BaseOpenAI]:
"""Initialize the OpenAI object."""
model_name = data.get('model_name', '')
if (model_name.startswith('gpt-3.5-turbo') or model_name.startswith(
'gpt-4')) and '-instruct' not in model_name:
warnings.warn(
'You are trying to use a chat model. This way of initializing it is no longer supported. Instead, please use: `from langchain_community.chat_models import ChatOpenAI`'
)
return OpenAIChat(**data)
return super().__new__(cls)
|
Initialize the OpenAI object.
|
__init__
|
"""Initialize with Vectara API."""
self._vectara_customer_id = vectara_customer_id or os.environ.get(
'VECTARA_CUSTOMER_ID')
self._vectara_corpus_id = vectara_corpus_id or os.environ.get(
'VECTARA_CORPUS_ID')
self._vectara_api_key = vectara_api_key or os.environ.get('VECTARA_API_KEY')
if self._vectara_customer_id is None or self._vectara_corpus_id is None or self._vectara_api_key is None:
logger.warning(
"Can't find Vectara credentials, customer_id or corpus_id in environment."
)
else:
logger.debug(f'Using corpus id {self._vectara_corpus_id}')
self._source = source
self._session = requests.Session()
adapter = requests.adapters.HTTPAdapter(max_retries=3)
self._session.mount('http://', adapter)
self.vectara_api_timeout = vectara_api_timeout
|
def __init__(self, vectara_customer_id: Optional[str]=None,
vectara_corpus_id: Optional[str]=None, vectara_api_key: Optional[str]=
None, vectara_api_timeout: int=120, source: str='langchain'):
"""Initialize with Vectara API."""
self._vectara_customer_id = vectara_customer_id or os.environ.get(
'VECTARA_CUSTOMER_ID')
self._vectara_corpus_id = vectara_corpus_id or os.environ.get(
'VECTARA_CORPUS_ID')
self._vectara_api_key = vectara_api_key or os.environ.get('VECTARA_API_KEY'
)
if (self._vectara_customer_id is None or self._vectara_corpus_id is
None or self._vectara_api_key is None):
logger.warning(
"Can't find Vectara credentials, customer_id or corpus_id in environment."
)
else:
logger.debug(f'Using corpus id {self._vectara_corpus_id}')
self._source = source
self._session = requests.Session()
adapter = requests.adapters.HTTPAdapter(max_retries=3)
self._session.mount('http://', adapter)
self.vectara_api_timeout = vectara_api_timeout
|
Initialize with Vectara API.
|
_stop
|
return ['\nObservation:']
|
@property
def _stop(self) ->List[str]:
return ['\nObservation:']
| null |
load
|
"""Load a TileDB index from a URI.
Args:
index_uri: The URI of the TileDB vector index.
embedding: Embeddings to use when generating queries.
metric: Optional, Metric to use for indexing. Defaults to "euclidean".
config: Optional, TileDB config
timestamp: Optional, timestamp to use for opening the arrays.
"""
return cls(embedding=embedding, index_uri=index_uri, metric=metric, config=
config, timestamp=timestamp, **kwargs)
|
@classmethod
def load(cls, index_uri: str, embedding: Embeddings, *, metric: str=
DEFAULT_METRIC, config: Optional[Mapping[str, Any]]=None, timestamp:
Any=None, **kwargs: Any) ->TileDB:
"""Load a TileDB index from a URI.
Args:
index_uri: The URI of the TileDB vector index.
embedding: Embeddings to use when generating queries.
metric: Optional, Metric to use for indexing. Defaults to "euclidean".
config: Optional, TileDB config
timestamp: Optional, timestamp to use for opening the arrays.
"""
return cls(embedding=embedding, index_uri=index_uri, metric=metric,
config=config, timestamp=timestamp, **kwargs)
|
Load a TileDB index from a URI.
Args:
index_uri: The URI of the TileDB vector index.
embedding: Embeddings to use when generating queries.
metric: Optional, Metric to use for indexing. Defaults to "euclidean".
config: Optional, TileDB config
timestamp: Optional, timestamp to use for opening the arrays.
|
_create_message_dicts
|
message_dicts = [convert_message_to_dict(m) for m in messages]
return message_dicts
|
def _create_message_dicts(self, messages: List[BaseMessage]) ->List[Dict[
str, Any]]:
message_dicts = [convert_message_to_dict(m) for m in messages]
return message_dicts
| null |
input_keys
|
"""Return the input keys.
:meta private:
"""
return [self.input_key]
|
@property
def input_keys(self) ->List[str]:
"""Return the input keys.
:meta private:
"""
return [self.input_key]
|
Return the input keys.
:meta private:
|
_load_documents_from_folder
|
"""Load documents from a Dropbox folder."""
dbx = self._create_dropbox_client()
try:
from dropbox import exceptions
from dropbox.files import FileMetadata
except ImportError:
raise ImportError('You must run `pip install dropbox')
try:
results = dbx.files_list_folder(folder_path, recursive=self.recursive)
except exceptions.ApiError as ex:
raise ValueError(
f'Could not list files in the folder: {folder_path}. Please verify the folder path and try again.'
) from ex
files = [entry for entry in results.entries if isinstance(entry, FileMetadata)]
documents = [doc for doc in (self._load_file_from_path(file.path_display) for
file in files) if doc is not None]
return documents
|
def _load_documents_from_folder(self, folder_path: str) ->List[Document]:
"""Load documents from a Dropbox folder."""
dbx = self._create_dropbox_client()
try:
from dropbox import exceptions
from dropbox.files import FileMetadata
except ImportError:
raise ImportError('You must run `pip install dropbox')
try:
results = dbx.files_list_folder(folder_path, recursive=self.recursive)
except exceptions.ApiError as ex:
raise ValueError(
f'Could not list files in the folder: {folder_path}. Please verify the folder path and try again.'
) from ex
files = [entry for entry in results.entries if isinstance(entry,
FileMetadata)]
documents = [doc for doc in (self._load_file_from_path(file.
path_display) for file in files) if doc is not None]
return documents
|
Load documents from a Dropbox folder.
|
_create_retry_decorator
|
"""Define retry mechanism."""
import fireworks.client
errors = [fireworks.client.error.RateLimitError, fireworks.client.error.
InternalServerError, fireworks.client.error.BadGatewayError, fireworks.
client.error.ServiceUnavailableError]
return create_base_retry_decorator(error_types=errors, max_retries=llm.
max_retries, run_manager=run_manager)
|
def _create_retry_decorator(llm: ChatFireworks, run_manager: Optional[Union
[AsyncCallbackManagerForLLMRun, CallbackManagerForLLMRun]]=None
) ->Callable[[Any], Any]:
"""Define retry mechanism."""
import fireworks.client
errors = [fireworks.client.error.RateLimitError, fireworks.client.error
.InternalServerError, fireworks.client.error.BadGatewayError,
fireworks.client.error.ServiceUnavailableError]
return create_base_retry_decorator(error_types=errors, max_retries=llm.
max_retries, run_manager=run_manager)
|
Define retry mechanism.
|
_continuous_recognize
|
done = False
text = ''
def stop_cb(evt: Any) ->None:
"""callback that stop continuous recognition"""
speech_recognizer.stop_continuous_recognition_async()
nonlocal done
done = True
def retrieve_cb(evt: Any) ->None:
"""callback that retrieves the intermediate recognition results"""
nonlocal text
text += evt.result.text
speech_recognizer.recognized.connect(retrieve_cb)
speech_recognizer.session_stopped.connect(stop_cb)
speech_recognizer.canceled.connect(stop_cb)
speech_recognizer.start_continuous_recognition_async()
while not done:
time.sleep(0.5)
return text
|
def _continuous_recognize(self, speech_recognizer: Any) ->str:
done = False
text = ''
def stop_cb(evt: Any) ->None:
"""callback that stop continuous recognition"""
speech_recognizer.stop_continuous_recognition_async()
nonlocal done
done = True
def retrieve_cb(evt: Any) ->None:
"""callback that retrieves the intermediate recognition results"""
nonlocal text
text += evt.result.text
speech_recognizer.recognized.connect(retrieve_cb)
speech_recognizer.session_stopped.connect(stop_cb)
speech_recognizer.canceled.connect(stop_cb)
speech_recognizer.start_continuous_recognition_async()
while not done:
time.sleep(0.5)
return text
| null |
convert_to_openai_function
|
"""Convert a raw function/class to an OpenAI function.
Args:
function: Either a dictionary, a pydantic.BaseModel class, or a Python function.
If a dictionary is passed in, it is assumed to already be a valid OpenAI
function.
Returns:
A dict version of the passed in function which is compatible with the
OpenAI function-calling API.
"""
if isinstance(function, dict):
return function
elif isinstance(function, type) and issubclass(function, BaseModel):
return cast(Dict, convert_pydantic_to_openai_function(function))
elif callable(function):
return convert_python_function_to_openai_function(function)
else:
raise ValueError(
f'Unsupported function type {type(function)}. Functions must be passed in as Dict, pydantic.BaseModel, or Callable.'
)
|
def convert_to_openai_function(function: Union[Dict[str, Any], Type[
BaseModel], Callable]) ->Dict[str, Any]:
"""Convert a raw function/class to an OpenAI function.
Args:
function: Either a dictionary, a pydantic.BaseModel class, or a Python function.
If a dictionary is passed in, it is assumed to already be a valid OpenAI
function.
Returns:
A dict version of the passed in function which is compatible with the
OpenAI function-calling API.
"""
if isinstance(function, dict):
return function
elif isinstance(function, type) and issubclass(function, BaseModel):
return cast(Dict, convert_pydantic_to_openai_function(function))
elif callable(function):
return convert_python_function_to_openai_function(function)
else:
raise ValueError(
f'Unsupported function type {type(function)}. Functions must be passed in as Dict, pydantic.BaseModel, or Callable.'
)
|
Convert a raw function/class to an OpenAI function.
Args:
function: Either a dictionary, a pydantic.BaseModel class, or a Python function.
If a dictionary is passed in, it is assumed to already be a valid OpenAI
function.
Returns:
A dict version of the passed in function which is compatible with the
OpenAI function-calling API.
|
test__collapse_docs_metadata
|
"""Test collapse documents functionality when metadata exists."""
metadata1 = {'source': 'a', 'foo': 2, 'bar': '1', 'extra1': 'foo'}
metadata2 = {'source': 'b', 'foo': '3', 'bar': 2, 'extra2': 'bar'}
docs = [Document(page_content='foo', metadata=metadata1), Document(
page_content='bar', metadata=metadata2)]
output = collapse_docs(docs, _fake_combine_docs_func)
expected_metadata = {'source': 'a, b', 'foo': '2, 3', 'bar': '1, 2',
'extra1': 'foo', 'extra2': 'bar'}
expected_output = Document(page_content='foobar', metadata=expected_metadata)
assert output == expected_output
|
def test__collapse_docs_metadata() ->None:
"""Test collapse documents functionality when metadata exists."""
metadata1 = {'source': 'a', 'foo': 2, 'bar': '1', 'extra1': 'foo'}
metadata2 = {'source': 'b', 'foo': '3', 'bar': 2, 'extra2': 'bar'}
docs = [Document(page_content='foo', metadata=metadata1), Document(
page_content='bar', metadata=metadata2)]
output = collapse_docs(docs, _fake_combine_docs_func)
expected_metadata = {'source': 'a, b', 'foo': '2, 3', 'bar': '1, 2',
'extra1': 'foo', 'extra2': 'bar'}
expected_output = Document(page_content='foobar', metadata=
expected_metadata)
assert output == expected_output
|
Test collapse documents functionality when metadata exists.
|
test_timescalevector_with_filter_distant_match
|
"""Test end to end construction and search."""
texts = ['foo', 'bar', 'baz']
metadatas = [{'page': str(i)} for i in range(len(texts))]
docsearch = TimescaleVector.from_texts(texts=texts, collection_name=
'test_collection_filter', embedding=FakeEmbeddingsWithAdaDimension(),
metadatas=metadatas, service_url=SERVICE_URL, pre_delete_collection=True)
output = docsearch.similarity_search_with_score('foo', k=1, filter={'page':
'2'})
assert output == [(Document(page_content='baz', metadata={'page': '2'}),
0.0013003906671379406)]
|
def test_timescalevector_with_filter_distant_match() ->None:
"""Test end to end construction and search."""
texts = ['foo', 'bar', 'baz']
metadatas = [{'page': str(i)} for i in range(len(texts))]
docsearch = TimescaleVector.from_texts(texts=texts, collection_name=
'test_collection_filter', embedding=FakeEmbeddingsWithAdaDimension(
), metadatas=metadatas, service_url=SERVICE_URL,
pre_delete_collection=True)
output = docsearch.similarity_search_with_score('foo', k=1, filter={
'page': '2'})
assert output == [(Document(page_content='baz', metadata={'page': '2'}),
0.0013003906671379406)]
|
Test end to end construction and search.
|
process_thread_images
|
text = ''
try:
from PIL import Image
from pytesseract import pytesseract
except ImportError:
raise ImportError(
'`Pillow or pytesseract` package not found, please run `pip install Pillow` or `pip install pytesseract`'
)
for img in tree.iter('img'):
src = img.get('src')
if not src or not src.startswith('/blob'):
continue
_, _, thread_id, blob_id = src.split('/')
blob_response = self.quip_client.get_blob(thread_id, blob_id)
try:
image = Image.open(BytesIO(blob_response.read()))
text = text + '\n' + pytesseract.image_to_string(image)
except OSError as e:
logger.error(f'failed to convert image to text, {e}')
raise e
return text
|
def process_thread_images(self, tree: ElementTree) ->str:
text = ''
try:
from PIL import Image
from pytesseract import pytesseract
except ImportError:
raise ImportError(
'`Pillow or pytesseract` package not found, please run `pip install Pillow` or `pip install pytesseract`'
)
for img in tree.iter('img'):
src = img.get('src')
if not src or not src.startswith('/blob'):
continue
_, _, thread_id, blob_id = src.split('/')
blob_response = self.quip_client.get_blob(thread_id, blob_id)
try:
image = Image.open(BytesIO(blob_response.read()))
text = text + '\n' + pytesseract.image_to_string(image)
except OSError as e:
logger.error(f'failed to convert image to text, {e}')
raise e
return text
| null |
run
|
"""Run query through Google Trends with Serpapi"""
serpapi_api_key = cast(SecretStr, self.serp_api_key)
params = {'engine': 'google_lens', 'api_key': serpapi_api_key.
get_secret_value(), 'url': query}
queryURL = (
f"https://serpapi.com/search?engine={params['engine']}&api_key={params['api_key']}&url={params['url']}"
)
response = requests.get(queryURL)
if response.status_code != 200:
return 'Google Lens search failed'
responseValue = response.json()
if responseValue['search_metadata']['status'] != 'Success':
return 'Google Lens search failed'
xs = ''
if len(responseValue['knowledge_graph']) > 0:
subject = responseValue['knowledge_graph'][0]
xs += f"Subject:{subject['title']}({subject['subtitle']})\n"
xs += f"Link to subject:{subject['link']}\n\n"
xs += 'Related Images:\n\n'
for image in responseValue['visual_matches']:
xs += f"Title: {image['title']}\n"
xs += f"Source({image['source']}): {image['link']}\n"
xs += f"Image: {image['thumbnail']}\n\n"
xs += ('Reverse Image Search' +
f"Link: {responseValue['reverse_image_search']['link']}\n")
print(xs)
docs = [xs]
return '\n\n'.join(docs)
|
def run(self, query: str) ->str:
"""Run query through Google Trends with Serpapi"""
serpapi_api_key = cast(SecretStr, self.serp_api_key)
params = {'engine': 'google_lens', 'api_key': serpapi_api_key.
get_secret_value(), 'url': query}
queryURL = (
f"https://serpapi.com/search?engine={params['engine']}&api_key={params['api_key']}&url={params['url']}"
)
response = requests.get(queryURL)
if response.status_code != 200:
return 'Google Lens search failed'
responseValue = response.json()
if responseValue['search_metadata']['status'] != 'Success':
return 'Google Lens search failed'
xs = ''
if len(responseValue['knowledge_graph']) > 0:
subject = responseValue['knowledge_graph'][0]
xs += f"Subject:{subject['title']}({subject['subtitle']})\n"
xs += f"Link to subject:{subject['link']}\n\n"
xs += 'Related Images:\n\n'
for image in responseValue['visual_matches']:
xs += f"Title: {image['title']}\n"
xs += f"Source({image['source']}): {image['link']}\n"
xs += f"Image: {image['thumbnail']}\n\n"
xs += ('Reverse Image Search' +
f"Link: {responseValue['reverse_image_search']['link']}\n")
print(xs)
docs = [xs]
return '\n\n'.join(docs)
|
Run query through Google Trends with Serpapi
|
_get_channel_id_map
|
"""Get a dictionary mapping channel names to their respective IDs."""
with zipfile.ZipFile(zip_path, 'r') as zip_file:
try:
with zip_file.open('channels.json', 'r') as f:
channels = json.load(f)
return {channel['name']: channel['id'] for channel in channels}
except KeyError:
return {}
|
@staticmethod
def _get_channel_id_map(zip_path: Path) ->Dict[str, str]:
"""Get a dictionary mapping channel names to their respective IDs."""
with zipfile.ZipFile(zip_path, 'r') as zip_file:
try:
with zip_file.open('channels.json', 'r') as f:
channels = json.load(f)
return {channel['name']: channel['id'] for channel in channels}
except KeyError:
return {}
|
Get a dictionary mapping channel names to their respective IDs.
|
parse_llm_output
|
"""
Based on the prompt we expect the result to be a string that looks like:
'[{"row_start": 12, "row_end": 19, "col_start": 1, "col_end": 12, "contents": "Entity ID"}]'
We'll load that JSON and turn it into a Pydantic model
"""
return [LLMPlateResponse(**plate_r) for plate_r in json.loads(result)]
|
def parse_llm_output(result: str):
"""
Based on the prompt we expect the result to be a string that looks like:
'[{"row_start": 12, "row_end": 19, "col_start": 1, "col_end": 12, "contents": "Entity ID"}]'
We'll load that JSON and turn it into a Pydantic model
"""
return [LLMPlateResponse(**plate_r) for plate_r in json.loads(result)]
|
Based on the prompt we expect the result to be a string that looks like:
'[{"row_start": 12, "row_end": 19, "col_start": 1, "col_end": 12, "contents": "Entity ID"}]'
We'll load that JSON and turn it into a Pydantic model
|
_call
|
known_values = inputs.copy()
_run_manager = run_manager or CallbackManagerForChainRun.get_noop_manager()
for i, chain in enumerate(self.chains):
callbacks = _run_manager.get_child()
outputs = chain(known_values, return_only_outputs=True, callbacks=callbacks
)
known_values.update(outputs)
return {k: known_values[k] for k in self.output_variables}
|
def _call(self, inputs: Dict[str, str], run_manager: Optional[
CallbackManagerForChainRun]=None) ->Dict[str, str]:
known_values = inputs.copy()
_run_manager = run_manager or CallbackManagerForChainRun.get_noop_manager()
for i, chain in enumerate(self.chains):
callbacks = _run_manager.get_child()
outputs = chain(known_values, return_only_outputs=True, callbacks=
callbacks)
known_values.update(outputs)
return {k: known_values[k] for k in self.output_variables}
| null |
test_serialization_of_wellknown_objects
|
"""Test that pydantic is able to serialize and deserialize well known objects."""
class WellKnownLCObject(BaseModel):
"""A well known LangChain object."""
__root__: Union[Document, HumanMessage, SystemMessage, ChatMessage,
FunctionMessage, AIMessage, HumanMessageChunk, SystemMessageChunk,
ChatMessageChunk, FunctionMessageChunk, AIMessageChunk,
StringPromptValue, ChatPromptValueConcrete, AgentFinish,
AgentAction, AgentActionMessageLog, ChatGeneration, Generation,
ChatGenerationChunk]
lc_objects = [HumanMessage(content='human'), HumanMessageChunk(content=
'human'), AIMessage(content='ai'), AIMessageChunk(content='ai'),
SystemMessage(content='sys'), SystemMessageChunk(content='sys'),
FunctionMessage(name='func', content='func'), FunctionMessageChunk(name
='func', content='func'), ChatMessage(role='human', content='human'),
ChatMessageChunk(role='human', content='human'), StringPromptValue(text
='hello'), ChatPromptValueConcrete(messages=[HumanMessage(content=
'human')]), Document(page_content='hello'), AgentFinish(return_values={
}, log=''), AgentAction(tool='tool', tool_input='input', log=''),
AgentActionMessageLog(tool='tool', tool_input='input', log='',
message_log=[HumanMessage(content='human')]), Generation(text='hello',
generation_info={'info': 'info'}), ChatGeneration(message=HumanMessage(
content='human')), ChatGenerationChunk(message=HumanMessageChunk(
content='cat'))]
for lc_object in lc_objects:
d = lc_object.dict()
assert 'type' in d, f'Missing key `type` for {type(lc_object)}'
obj1 = WellKnownLCObject.parse_obj(d)
assert type(obj1.__root__) == type(lc_object
), f'failed for {type(lc_object)}'
with pytest.raises(ValidationError):
WellKnownLCObject.parse_obj({})
|
def test_serialization_of_wellknown_objects() ->None:
"""Test that pydantic is able to serialize and deserialize well known objects."""
class WellKnownLCObject(BaseModel):
"""A well known LangChain object."""
__root__: Union[Document, HumanMessage, SystemMessage, ChatMessage,
FunctionMessage, AIMessage, HumanMessageChunk,
SystemMessageChunk, ChatMessageChunk, FunctionMessageChunk,
AIMessageChunk, StringPromptValue, ChatPromptValueConcrete,
AgentFinish, AgentAction, AgentActionMessageLog, ChatGeneration,
Generation, ChatGenerationChunk]
lc_objects = [HumanMessage(content='human'), HumanMessageChunk(content=
'human'), AIMessage(content='ai'), AIMessageChunk(content='ai'),
SystemMessage(content='sys'), SystemMessageChunk(content='sys'),
FunctionMessage(name='func', content='func'), FunctionMessageChunk(
name='func', content='func'), ChatMessage(role='human', content=
'human'), ChatMessageChunk(role='human', content='human'),
StringPromptValue(text='hello'), ChatPromptValueConcrete(messages=[
HumanMessage(content='human')]), Document(page_content='hello'),
AgentFinish(return_values={}, log=''), AgentAction(tool='tool',
tool_input='input', log=''), AgentActionMessageLog(tool='tool',
tool_input='input', log='', message_log=[HumanMessage(content=
'human')]), Generation(text='hello', generation_info={'info':
'info'}), ChatGeneration(message=HumanMessage(content='human')),
ChatGenerationChunk(message=HumanMessageChunk(content='cat'))]
for lc_object in lc_objects:
d = lc_object.dict()
assert 'type' in d, f'Missing key `type` for {type(lc_object)}'
obj1 = WellKnownLCObject.parse_obj(d)
assert type(obj1.__root__) == type(lc_object
), f'failed for {type(lc_object)}'
with pytest.raises(ValidationError):
WellKnownLCObject.parse_obj({})
|
Test that pydantic is able to serialize and deserialize well known objects.
|
get_prompt_input_key
|
"""
Get the prompt input key.
Args:
inputs: Dict[str, Any]
memory_variables: List[str]
Returns:
A prompt input key.
"""
prompt_input_keys = list(set(inputs).difference(memory_variables + ['stop']))
if len(prompt_input_keys) != 1:
raise ValueError(f'One input key expected got {prompt_input_keys}')
return prompt_input_keys[0]
|
def get_prompt_input_key(inputs: Dict[str, Any], memory_variables: List[str]
) ->str:
"""
Get the prompt input key.
Args:
inputs: Dict[str, Any]
memory_variables: List[str]
Returns:
A prompt input key.
"""
prompt_input_keys = list(set(inputs).difference(memory_variables + [
'stop']))
if len(prompt_input_keys) != 1:
raise ValueError(f'One input key expected got {prompt_input_keys}')
return prompt_input_keys[0]
|
Get the prompt input key.
Args:
inputs: Dict[str, Any]
memory_variables: List[str]
Returns:
A prompt input key.
|
test_prompt_jinja2_missing_input_variables
|
"""Test error is raised when input variables are not provided."""
prefix = 'Starting with {{ foo }}'
suffix = 'Ending with {{ bar }}'
with pytest.warns(UserWarning):
FewShotPromptTemplate(input_variables=[], suffix=suffix, examples=
example_jinja2_prompt[1], example_prompt=example_jinja2_prompt[0],
template_format='jinja2', validate_template=True)
assert FewShotPromptTemplate(input_variables=[], suffix=suffix, examples=
example_jinja2_prompt[1], example_prompt=example_jinja2_prompt[0],
template_format='jinja2').input_variables == ['bar']
with pytest.warns(UserWarning):
FewShotPromptTemplate(input_variables=['bar'], suffix=suffix, prefix=
prefix, examples=example_jinja2_prompt[1], example_prompt=
example_jinja2_prompt[0], template_format='jinja2',
validate_template=True)
assert FewShotPromptTemplate(input_variables=['bar'], suffix=suffix, prefix
=prefix, examples=example_jinja2_prompt[1], example_prompt=
example_jinja2_prompt[0], template_format='jinja2').input_variables == [
'bar', 'foo']
|
@pytest.mark.requires('jinja2')
def test_prompt_jinja2_missing_input_variables(example_jinja2_prompt: Tuple
[PromptTemplate, List[Dict[str, str]]]) ->None:
"""Test error is raised when input variables are not provided."""
prefix = 'Starting with {{ foo }}'
suffix = 'Ending with {{ bar }}'
with pytest.warns(UserWarning):
FewShotPromptTemplate(input_variables=[], suffix=suffix, examples=
example_jinja2_prompt[1], example_prompt=example_jinja2_prompt[
0], template_format='jinja2', validate_template=True)
assert FewShotPromptTemplate(input_variables=[], suffix=suffix,
examples=example_jinja2_prompt[1], example_prompt=
example_jinja2_prompt[0], template_format='jinja2'
).input_variables == ['bar']
with pytest.warns(UserWarning):
FewShotPromptTemplate(input_variables=['bar'], suffix=suffix,
prefix=prefix, examples=example_jinja2_prompt[1],
example_prompt=example_jinja2_prompt[0], template_format=
'jinja2', validate_template=True)
assert FewShotPromptTemplate(input_variables=['bar'], suffix=suffix,
prefix=prefix, examples=example_jinja2_prompt[1], example_prompt=
example_jinja2_prompt[0], template_format='jinja2'
).input_variables == ['bar', 'foo']
|
Test error is raised when input variables are not provided.
|
_stream
|
params: Dict[str, Any] = self._invocation_params(stop=stop, stream=True, **
kwargs)
for stream_resp in stream_generate_with_retry(self, prompt=prompt, **params):
chunk = GenerationChunk(**self._generation_from_qwen_resp(stream_resp))
yield chunk
if run_manager:
run_manager.on_llm_new_token(chunk.text, chunk=chunk, verbose=self.
verbose)
|
def _stream(self, prompt: str, stop: Optional[List[str]]=None, run_manager:
Optional[CallbackManagerForLLMRun]=None, **kwargs: Any) ->Iterator[
GenerationChunk]:
params: Dict[str, Any] = self._invocation_params(stop=stop, stream=True,
**kwargs)
for stream_resp in stream_generate_with_retry(self, prompt=prompt, **params
):
chunk = GenerationChunk(**self._generation_from_qwen_resp(stream_resp))
yield chunk
if run_manager:
run_manager.on_llm_new_token(chunk.text, chunk=chunk, verbose=
self.verbose)
| null |
_test_parse_value
|
parsed = cast(Comparison, DEFAULT_PARSER.parse_folder(f'eq("x", {x})'))
actual = parsed.value
assert actual == x
|
def _test_parse_value(x: Any) ->None:
parsed = cast(Comparison, DEFAULT_PARSER.parse_folder(f'eq("x", {x})'))
actual = parsed.value
assert actual == x
| null |
test_load_tools_with_callback_manager_raises_deprecation_warning
|
"""Test load_tools raises a deprecation for old callback manager kwarg."""
callback_manager = MagicMock()
with pytest.warns(DeprecationWarning, match='callback_manager is deprecated'):
tools = load_tools(['requests_get'], callback_manager=callback_manager)
assert len(tools) == 1
assert tools[0].callbacks == callback_manager
|
def test_load_tools_with_callback_manager_raises_deprecation_warning() ->None:
"""Test load_tools raises a deprecation for old callback manager kwarg."""
callback_manager = MagicMock()
with pytest.warns(DeprecationWarning, match=
'callback_manager is deprecated'):
tools = load_tools(['requests_get'], callback_manager=callback_manager)
assert len(tools) == 1
assert tools[0].callbacks == callback_manager
|
Test load_tools raises a deprecation for old callback manager kwarg.
|
test_undefined_deprecation_schedule
|
"""This test is expected to fail until we defined a deprecation schedule."""
with pytest.raises(NotImplementedError):
warn_deprecated('1.0.0', pending=False)
|
def test_undefined_deprecation_schedule() ->None:
"""This test is expected to fail until we defined a deprecation schedule."""
with pytest.raises(NotImplementedError):
warn_deprecated('1.0.0', pending=False)
|
This test is expected to fail until we defined a deprecation schedule.
|
_parse_kv_pairs
|
result = []
for kv_pair in kv_pairs:
key = kv_pair.key.content if kv_pair.key else ''
value = kv_pair.value.content if kv_pair.value else ''
result.append((key, value))
return result
|
def _parse_kv_pairs(self, kv_pairs: List[Any]) ->List[Any]:
result = []
for kv_pair in kv_pairs:
key = kv_pair.key.content if kv_pair.key else ''
value = kv_pair.value.content if kv_pair.value else ''
result.append((key, value))
return result
| null |
from_llm
|
"""Initialize from LLM."""
qa_chain = LLMChain(llm=llm, prompt=qa_prompt)
ngql_generation_chain = LLMChain(llm=llm, prompt=ngql_prompt)
return cls(qa_chain=qa_chain, ngql_generation_chain=ngql_generation_chain,
**kwargs)
|
@classmethod
def from_llm(cls, llm: BaseLanguageModel, *, qa_prompt: BasePromptTemplate=
CYPHER_QA_PROMPT, ngql_prompt: BasePromptTemplate=
NGQL_GENERATION_PROMPT, **kwargs: Any) ->NebulaGraphQAChain:
"""Initialize from LLM."""
qa_chain = LLMChain(llm=llm, prompt=qa_prompt)
ngql_generation_chain = LLMChain(llm=llm, prompt=ngql_prompt)
return cls(qa_chain=qa_chain, ngql_generation_chain=
ngql_generation_chain, **kwargs)
|
Initialize from LLM.
|
seq_naive_rag_alt
|
context = ['Hi there!', 'How are you?', "What's your name?"]
retriever = RunnableLambda(lambda x: context)
prompt = PromptTemplate.from_template('{context} {question}')
llm = FakeListLLM(responses=['hello'])
return Context.setter('input') | {'context': retriever | Context.setter(
'context'), 'question': RunnablePassthrough()
} | prompt | llm | StrOutputParser() | Context.setter('result'
) | Context.getter(['context', 'input', 'result'])
|
def seq_naive_rag_alt() ->Runnable:
context = ['Hi there!', 'How are you?', "What's your name?"]
retriever = RunnableLambda(lambda x: context)
prompt = PromptTemplate.from_template('{context} {question}')
llm = FakeListLLM(responses=['hello'])
return Context.setter('input') | {'context': retriever | Context.setter
('context'), 'question': RunnablePassthrough()
} | prompt | llm | StrOutputParser() | Context.setter('result'
) | Context.getter(['context', 'input', 'result'])
| null |
_run
|
"""Run the tool."""
return self.requests_wrapper.get(_clean_url(url))
|
def _run(self, url: str, run_manager: Optional[CallbackManagerForToolRun]=None
) ->str:
"""Run the tool."""
return self.requests_wrapper.get(_clean_url(url))
|
Run the tool.
|
similarity_search
|
"""Return docs most similar to query.
Args:
query: Text to look up documents similar to.
k: Number of Documents to return. Defaults to 4.
filter: (Optional[Dict[str, str]]): Filter by metadata. Defaults to None.
fetch_k: (Optional[int]) Number of Documents to fetch before filtering.
Defaults to 20.
Returns:
List of Documents most similar to the query.
"""
docs_and_scores = self.similarity_search_with_score(query, k=k, filter=
filter, fetch_k=fetch_k, **kwargs)
return [doc for doc, _ in docs_and_scores]
|
def similarity_search(self, query: str, k: int=4, filter: Optional[Dict[str,
Any]]=None, fetch_k: int=20, **kwargs: Any) ->List[Document]:
"""Return docs most similar to query.
Args:
query: Text to look up documents similar to.
k: Number of Documents to return. Defaults to 4.
filter: (Optional[Dict[str, str]]): Filter by metadata. Defaults to None.
fetch_k: (Optional[int]) Number of Documents to fetch before filtering.
Defaults to 20.
Returns:
List of Documents most similar to the query.
"""
docs_and_scores = self.similarity_search_with_score(query, k=k, filter=
filter, fetch_k=fetch_k, **kwargs)
return [doc for doc, _ in docs_and_scores]
|
Return docs most similar to query.
Args:
query: Text to look up documents similar to.
k: Number of Documents to return. Defaults to 4.
filter: (Optional[Dict[str, str]]): Filter by metadata. Defaults to None.
fetch_k: (Optional[int]) Number of Documents to fetch before filtering.
Defaults to 20.
Returns:
List of Documents most similar to the query.
|
_create_chat_result
|
generations = []
for choice in response['choices']:
message = ChatMlflow._convert_dict_to_message(choice['message'])
usage = choice.get('usage', {})
gen = ChatGeneration(message=message, generation_info=usage)
generations.append(gen)
usage = response.get('usage', {})
return ChatResult(generations=generations, llm_output=usage)
|
@staticmethod
def _create_chat_result(response: Mapping[str, Any]) ->ChatResult:
generations = []
for choice in response['choices']:
message = ChatMlflow._convert_dict_to_message(choice['message'])
usage = choice.get('usage', {})
gen = ChatGeneration(message=message, generation_info=usage)
generations.append(gen)
usage = response.get('usage', {})
return ChatResult(generations=generations, llm_output=usage)
| null |
similarity_search_by_vector_with_score
|
"""Return pinecone documents most similar to embedding, along with scores."""
if namespace is None:
namespace = self._namespace
docs = []
results = self._index.query([embedding], top_k=k, include_metadata=True,
namespace=namespace, filter=filter)
for res in results['matches']:
metadata = res['metadata']
if self._text_key in metadata:
text = metadata.pop(self._text_key)
score = res['score']
docs.append((Document(page_content=text, metadata=metadata), score))
else:
logger.warning(
f'Found document with no `{self._text_key}` key. Skipping.')
return docs
|
def similarity_search_by_vector_with_score(self, embedding: List[float], *,
k: int=4, filter: Optional[dict]=None, namespace: Optional[str]=None
) ->List[Tuple[Document, float]]:
"""Return pinecone documents most similar to embedding, along with scores."""
if namespace is None:
namespace = self._namespace
docs = []
results = self._index.query([embedding], top_k=k, include_metadata=True,
namespace=namespace, filter=filter)
for res in results['matches']:
metadata = res['metadata']
if self._text_key in metadata:
text = metadata.pop(self._text_key)
score = res['score']
docs.append((Document(page_content=text, metadata=metadata), score)
)
else:
logger.warning(
f'Found document with no `{self._text_key}` key. Skipping.')
return docs
|
Return pinecone documents most similar to embedding, along with scores.
|
_identifying_params
|
"""Get the identifying parameters."""
return {**{'model_key': self.model_key}, **{'model_url_slug': self.
model_url_slug}, **{'model_kwargs': self.model_kwargs}}
|
@property
def _identifying_params(self) ->Mapping[str, Any]:
"""Get the identifying parameters."""
return {**{'model_key': self.model_key}, **{'model_url_slug': self.
model_url_slug}, **{'model_kwargs': self.model_kwargs}}
|
Get the identifying parameters.
|
mock_collection_config
|
return CollectionConfig(name='test_collection', description=
'Test Collection', metadata={'key': 'value'}, embedding_dimensions=
VECTOR_DIMS, is_auto_embedded=True)
|
@pytest.fixture
def mock_collection_config() ->CollectionConfig:
return CollectionConfig(name='test_collection', description=
'Test Collection', metadata={'key': 'value'}, embedding_dimensions=
VECTOR_DIMS, is_auto_embedded=True)
| null |
validate_environment
|
"""Validate that api key and python package exists in environment."""
gooseai_api_key = convert_to_secret_str(get_from_dict_or_env(values,
'gooseai_api_key', 'GOOSEAI_API_KEY'))
values['gooseai_api_key'] = gooseai_api_key
try:
import openai
openai.api_key = gooseai_api_key.get_secret_value()
openai.api_base = 'https://api.goose.ai/v1'
values['client'] = openai.Completion
except ImportError:
raise ImportError(
'Could not import openai python package. Please install it with `pip install openai`.'
)
return values
|
@root_validator()
def validate_environment(cls, values: Dict) ->Dict:
"""Validate that api key and python package exists in environment."""
gooseai_api_key = convert_to_secret_str(get_from_dict_or_env(values,
'gooseai_api_key', 'GOOSEAI_API_KEY'))
values['gooseai_api_key'] = gooseai_api_key
try:
import openai
openai.api_key = gooseai_api_key.get_secret_value()
openai.api_base = 'https://api.goose.ai/v1'
values['client'] = openai.Completion
except ImportError:
raise ImportError(
'Could not import openai python package. Please install it with `pip install openai`.'
)
return values
|
Validate that api key and python package exists in environment.
|
from_univariate_prompt
|
"""instantiation depends on component chains
*Security note*: The building blocks of this class include the implementation
of an AI technique that generates SQL code. If those SQL commands
are executed, it's critical to ensure they use credentials that
are narrowly-scoped to only include the permissions this chain needs.
Failure to do so may result in data corruption or loss, since this chain may
attempt commands like `DROP TABLE` or `INSERT` if appropriately prompted.
The best way to guard against such negative outcomes is to (as appropriate)
limit the permissions granted to the credentials used with this chain.
"""
return cls(llm=llm, chain=LLMChain(llm=llm, prompt=PromptTemplate(
input_variables=['question', 'query_result'], template=
"Summarize this answer '{query_result}' to this question '{question}'? "
)), narrative_chain=NarrativeChain.from_univariate_prompt(llm=llm),
causal_chain=CausalChain.from_univariate_prompt(llm=llm),
intervention_chain=InterventionChain.from_univariate_prompt(llm=llm),
query_chain=QueryChain.from_univariate_prompt(llm=llm), **kwargs)
|
@classmethod
def from_univariate_prompt(cls, llm: BaseLanguageModel, **kwargs: Any
) ->CPALChain:
"""instantiation depends on component chains
*Security note*: The building blocks of this class include the implementation
of an AI technique that generates SQL code. If those SQL commands
are executed, it's critical to ensure they use credentials that
are narrowly-scoped to only include the permissions this chain needs.
Failure to do so may result in data corruption or loss, since this chain may
attempt commands like `DROP TABLE` or `INSERT` if appropriately prompted.
The best way to guard against such negative outcomes is to (as appropriate)
limit the permissions granted to the credentials used with this chain.
"""
return cls(llm=llm, chain=LLMChain(llm=llm, prompt=PromptTemplate(
input_variables=['question', 'query_result'], template=
"Summarize this answer '{query_result}' to this question '{question}'? "
)), narrative_chain=NarrativeChain.from_univariate_prompt(llm=llm),
causal_chain=CausalChain.from_univariate_prompt(llm=llm),
intervention_chain=InterventionChain.from_univariate_prompt(llm=llm
), query_chain=QueryChain.from_univariate_prompt(llm=llm), **kwargs)
|
instantiation depends on component chains
*Security note*: The building blocks of this class include the implementation
of an AI technique that generates SQL code. If those SQL commands
are executed, it's critical to ensure they use credentials that
are narrowly-scoped to only include the permissions this chain needs.
Failure to do so may result in data corruption or loss, since this chain may
attempt commands like `DROP TABLE` or `INSERT` if appropriately prompted.
The best way to guard against such negative outcomes is to (as appropriate)
limit the permissions granted to the credentials used with this chain.
|
input_keys
|
"""Input keys."""
return self.the_input_keys
|
@property
def input_keys(self) ->List[str]:
"""Input keys."""
return self.the_input_keys
|
Input keys.
|
get_output_schema
|
return create_model('ChainOutput', **{k: (Any, None) for k in self.output_keys}
)
|
def get_output_schema(self, config: Optional[RunnableConfig]=None) ->Type[
BaseModel]:
return create_model('ChainOutput', **{k: (Any, None) for k in self.
output_keys})
| null |
get_parser
|
"""Get a parser by parser name."""
if parser_name not in _REGISTRY:
raise ValueError(f'Unknown parser combination: {parser_name}')
return _REGISTRY[parser_name]()
|
def get_parser(parser_name: str) ->BaseBlobParser:
"""Get a parser by parser name."""
if parser_name not in _REGISTRY:
raise ValueError(f'Unknown parser combination: {parser_name}')
return _REGISTRY[parser_name]()
|
Get a parser by parser name.
|
_generate_helper
|
try:
output = self._generate(prompts, stop=stop, run_manager=run_managers[0] if
run_managers else None, **kwargs
) if new_arg_supported else self._generate(prompts, stop=stop)
except BaseException as e:
for run_manager in run_managers:
run_manager.on_llm_error(e, response=LLMResult(generations=[]))
raise e
flattened_outputs = output.flatten()
for manager, flattened_output in zip(run_managers, flattened_outputs):
manager.on_llm_end(flattened_output)
if run_managers:
output.run = [RunInfo(run_id=run_manager.run_id) for run_manager in
run_managers]
return output
|
def _generate_helper(self, prompts: List[str], stop: Optional[List[str]],
run_managers: List[CallbackManagerForLLMRun], new_arg_supported: bool,
**kwargs: Any) ->LLMResult:
try:
output = self._generate(prompts, stop=stop, run_manager=
run_managers[0] if run_managers else None, **kwargs
) if new_arg_supported else self._generate(prompts, stop=stop)
except BaseException as e:
for run_manager in run_managers:
run_manager.on_llm_error(e, response=LLMResult(generations=[]))
raise e
flattened_outputs = output.flatten()
for manager, flattened_output in zip(run_managers, flattened_outputs):
manager.on_llm_end(flattened_output)
if run_managers:
output.run = [RunInfo(run_id=run_manager.run_id) for run_manager in
run_managers]
return output
| null |
lc_secrets
|
return {'konko_api_key': 'KONKO_API_KEY', 'openai_api_key': 'OPENAI_API_KEY'}
|
@property
def lc_secrets(self) ->Dict[str, str]:
return {'konko_api_key': 'KONKO_API_KEY', 'openai_api_key':
'OPENAI_API_KEY'}
| null |
color_mapping
|
return get_color_mapping([tool.name for tool in self.agent_executor.tools],
excluded_colors=['green', 'red'])
|
@property
def color_mapping(self) ->Dict[str, str]:
return get_color_mapping([tool.name for tool in self.agent_executor.
tools], excluded_colors=['green', 'red'])
| null |
test_pairwise_embedding_distance_eval_chain_cosine_similarity
|
"""Test the cosine similarity."""
pairwise_embedding_distance_eval_chain.distance_metric = (EmbeddingDistance
.COSINE)
result = pairwise_embedding_distance_eval_chain._compute_score(np.array(
vectors))
expected = 1.0 - np.dot(vectors[0], vectors[1]) / (np.linalg.norm(vectors[0
]) * np.linalg.norm(vectors[1]))
assert np.isclose(result, expected)
|
@pytest.mark.requires('scipy')
def test_pairwise_embedding_distance_eval_chain_cosine_similarity(
pairwise_embedding_distance_eval_chain:
PairwiseEmbeddingDistanceEvalChain, vectors: Tuple[np.ndarray, np.ndarray]
) ->None:
"""Test the cosine similarity."""
pairwise_embedding_distance_eval_chain.distance_metric = (EmbeddingDistance
.COSINE)
result = pairwise_embedding_distance_eval_chain._compute_score(np.array
(vectors))
expected = 1.0 - np.dot(vectors[0], vectors[1]) / (np.linalg.norm(
vectors[0]) * np.linalg.norm(vectors[1]))
assert np.isclose(result, expected)
|
Test the cosine similarity.
|
_warn_on_import
|
"""Warn on import of deprecated module."""
if _is_interactive_env():
return
if replacement:
warnings.warn(
f'Importing {name} from langchain root module is no longer supported. Please use {replacement} instead.'
)
else:
warnings.warn(
f'Importing {name} from langchain root module is no longer supported.')
|
def _warn_on_import(name: str, replacement: Optional[str]=None) ->None:
"""Warn on import of deprecated module."""
if _is_interactive_env():
return
if replacement:
warnings.warn(
f'Importing {name} from langchain root module is no longer supported. Please use {replacement} instead.'
)
else:
warnings.warn(
f'Importing {name} from langchain root module is no longer supported.'
)
|
Warn on import of deprecated module.
|
__or__
|
if isinstance(other, RunnableSequence):
return RunnableSequence(self.first, *self.middle, self.last, other.
first, *other.middle, other.last, name=self.name or other.name)
else:
return RunnableSequence(self.first, *self.middle, self.last,
coerce_to_runnable(other), name=self.name)
|
def __or__(self, other: Union[Runnable[Any, Other], Callable[[Any], Other],
Callable[[Iterator[Any]], Iterator[Other]], Mapping[str, Union[Runnable
[Any, Other], Callable[[Any], Other], Any]]]) ->RunnableSerializable[
Input, Other]:
if isinstance(other, RunnableSequence):
return RunnableSequence(self.first, *self.middle, self.last, other.
first, *other.middle, other.last, name=self.name or other.name)
else:
return RunnableSequence(self.first, *self.middle, self.last,
coerce_to_runnable(other), name=self.name)
| null |
transform
|
yield from self._transform_stream_with_config(input, self._transform,
config, **kwargs)
|
def transform(self, input: Iterator[Input], config: Optional[RunnableConfig
]=None, **kwargs: Any) ->Iterator[Dict[str, Any]]:
yield from self._transform_stream_with_config(input, self._transform,
config, **kwargs)
| null |
test_prompt_jinja2_functionality
|
prefix = 'Starting with {{ foo }}'
suffix = 'Ending with {{ bar }}'
prompt = FewShotPromptTemplate(input_variables=['foo', 'bar'], suffix=
suffix, prefix=prefix, examples=example_jinja2_prompt[1],
example_prompt=example_jinja2_prompt[0], template_format='jinja2')
output = prompt.format(foo='hello', bar='bye')
expected_output = """Starting with hello
happy: sad
tall: short
Ending with bye"""
assert output == expected_output
|
@pytest.mark.requires('jinja2')
def test_prompt_jinja2_functionality(example_jinja2_prompt: Tuple[
PromptTemplate, List[Dict[str, str]]]) ->None:
prefix = 'Starting with {{ foo }}'
suffix = 'Ending with {{ bar }}'
prompt = FewShotPromptTemplate(input_variables=['foo', 'bar'], suffix=
suffix, prefix=prefix, examples=example_jinja2_prompt[1],
example_prompt=example_jinja2_prompt[0], template_format='jinja2')
output = prompt.format(foo='hello', bar='bye')
expected_output = (
'Starting with hello\n\nhappy: sad\n\ntall: short\n\nEnding with bye')
assert output == expected_output
| null |
load_local
|
"""Load the local specified table of standalone vearch.
Returns:
Success or failure of loading the local specified table
"""
if not path_or_url:
raise ValueError('No metadata path!!!')
if not table_name:
raise ValueError('No table name!!!')
table_path = os.path.join(path_or_url, table_name + '.schema')
if not os.path.exists(table_path):
raise ValueError('vearch vectorbase table not exist!!!')
vearch_db = cls(embedding_function=embedding, path_or_url=path_or_url,
table_name=table_name, db_name=db_name, flag=flag)
vearch_db._load()
return vearch_db
|
@classmethod
def load_local(cls, embedding: Embeddings, path_or_url: Optional[str]=None,
table_name: str=_DEFAULT_TABLE_NAME, db_name: str=
_DEFAULT_CLUSTER_DB_NAME, flag: int=_DEFAULT_VERSION, **kwargs: Any
) ->Vearch:
"""Load the local specified table of standalone vearch.
Returns:
Success or failure of loading the local specified table
"""
if not path_or_url:
raise ValueError('No metadata path!!!')
if not table_name:
raise ValueError('No table name!!!')
table_path = os.path.join(path_or_url, table_name + '.schema')
if not os.path.exists(table_path):
raise ValueError('vearch vectorbase table not exist!!!')
vearch_db = cls(embedding_function=embedding, path_or_url=path_or_url,
table_name=table_name, db_name=db_name, flag=flag)
vearch_db._load()
return vearch_db
|
Load the local specified table of standalone vearch.
Returns:
Success or failure of loading the local specified table
|
_default_params
|
params: Dict[str, Any] = {'gateway_uri': self.gateway_uri, 'route': self.
route, **self.params.dict() if self.params else {}}
return params
|
@property
def _default_params(self) ->Dict[str, Any]:
params: Dict[str, Any] = {'gateway_uri': self.gateway_uri, 'route':
self.route, **self.params.dict() if self.params else {}}
return params
| null |
__init__
|
"""
Initialize with a PyVespa client.
"""
try:
from vespa.application import Vespa
except ImportError:
raise ImportError(
'Could not import Vespa python package. Please install it with `pip install pyvespa`.'
)
if not isinstance(app, Vespa):
raise ValueError(
f'app should be an instance of vespa.application.Vespa, got {type(app)}'
)
self._vespa_app = app
self._embedding_function = embedding_function
self._page_content_field = page_content_field
self._embedding_field = embedding_field
self._input_field = input_field
self._metadata_fields = metadata_fields
|
def __init__(self, app: Any, embedding_function: Optional[Embeddings]=None,
page_content_field: Optional[str]=None, embedding_field: Optional[str]=
None, input_field: Optional[str]=None, metadata_fields: Optional[List[
str]]=None) ->None:
"""
Initialize with a PyVespa client.
"""
try:
from vespa.application import Vespa
except ImportError:
raise ImportError(
'Could not import Vespa python package. Please install it with `pip install pyvespa`.'
)
if not isinstance(app, Vespa):
raise ValueError(
f'app should be an instance of vespa.application.Vespa, got {type(app)}'
)
self._vespa_app = app
self._embedding_function = embedding_function
self._page_content_field = page_content_field
self._embedding_field = embedding_field
self._input_field = input_field
self._metadata_fields = metadata_fields
|
Initialize with a PyVespa client.
|
test_astradb_vectorstore_create_delete
|
"""Create and delete."""
emb = SomeEmbeddings(dimension=2)
v_store = AstraDB(embedding=emb, collection_name='lc_test_1', token=os.
environ['ASTRA_DB_APPLICATION_TOKEN'], api_endpoint=os.environ[
'ASTRA_DB_API_ENDPOINT'], namespace=os.environ.get('ASTRA_DB_KEYSPACE'))
v_store.delete_collection()
from astrapy.db import AstraDB as LibAstraDB
astra_db_client = LibAstraDB(token=os.environ['ASTRA_DB_APPLICATION_TOKEN'],
api_endpoint=os.environ['ASTRA_DB_API_ENDPOINT'], namespace=os.environ.
get('ASTRA_DB_KEYSPACE'))
v_store_2 = AstraDB(embedding=emb, collection_name='lc_test_2',
astra_db_client=astra_db_client)
v_store_2.delete_collection()
|
def test_astradb_vectorstore_create_delete(self) ->None:
"""Create and delete."""
emb = SomeEmbeddings(dimension=2)
v_store = AstraDB(embedding=emb, collection_name='lc_test_1', token=os.
environ['ASTRA_DB_APPLICATION_TOKEN'], api_endpoint=os.environ[
'ASTRA_DB_API_ENDPOINT'], namespace=os.environ.get('ASTRA_DB_KEYSPACE')
)
v_store.delete_collection()
from astrapy.db import AstraDB as LibAstraDB
astra_db_client = LibAstraDB(token=os.environ[
'ASTRA_DB_APPLICATION_TOKEN'], api_endpoint=os.environ[
'ASTRA_DB_API_ENDPOINT'], namespace=os.environ.get('ASTRA_DB_KEYSPACE')
)
v_store_2 = AstraDB(embedding=emb, collection_name='lc_test_2',
astra_db_client=astra_db_client)
v_store_2.delete_collection()
|
Create and delete.
|
add_texts
|
create_index(texts, self.index, self.embeddings, self.sparse_encoder, ids=
ids, metadatas=metadatas, namespace=namespace)
|
def add_texts(self, texts: List[str], ids: Optional[List[str]]=None,
metadatas: Optional[List[dict]]=None, namespace: Optional[str]=None
) ->None:
create_index(texts, self.index, self.embeddings, self.sparse_encoder,
ids=ids, metadatas=metadatas, namespace=namespace)
| null |
load_memory_variables
|
"""Return history buffer."""
if self.return_messages:
final_buffer: Any = self.buffer
else:
final_buffer = get_buffer_string(self.buffer, human_prefix=self.
human_prefix, ai_prefix=self.ai_prefix)
return {self.memory_key: final_buffer}
|
def load_memory_variables(self, inputs: Dict[str, Any]) ->Dict[str, Any]:
"""Return history buffer."""
if self.return_messages:
final_buffer: Any = self.buffer
else:
final_buffer = get_buffer_string(self.buffer, human_prefix=self.
human_prefix, ai_prefix=self.ai_prefix)
return {self.memory_key: final_buffer}
|
Return history buffer.
|
_call
|
"""Call out to Writer's completions endpoint.
Args:
prompt: The prompt to pass into the model.
stop: Optional list of stop words to use when generating.
Returns:
The string generated by the model.
Example:
.. code-block:: python
response = Writer("Tell me a joke.")
"""
if self.base_url is not None:
base_url = self.base_url
else:
base_url = (
f'https://enterprise-api.writer.com/llm/organization/{self.writer_org_id}/model/{self.model_id}/completions'
)
params = {**self._default_params, **kwargs}
response = requests.post(url=base_url, headers={'Authorization':
f'{self.writer_api_key}', 'Content-Type': 'application/json', 'Accept':
'application/json'}, json={'prompt': prompt, **params})
text = response.text
if stop is not None:
text = enforce_stop_tokens(text, stop)
return text
|
def _call(self, prompt: str, stop: Optional[List[str]]=None, run_manager:
Optional[CallbackManagerForLLMRun]=None, **kwargs: Any) ->str:
"""Call out to Writer's completions endpoint.
Args:
prompt: The prompt to pass into the model.
stop: Optional list of stop words to use when generating.
Returns:
The string generated by the model.
Example:
.. code-block:: python
response = Writer("Tell me a joke.")
"""
if self.base_url is not None:
base_url = self.base_url
else:
base_url = (
f'https://enterprise-api.writer.com/llm/organization/{self.writer_org_id}/model/{self.model_id}/completions'
)
params = {**self._default_params, **kwargs}
response = requests.post(url=base_url, headers={'Authorization':
f'{self.writer_api_key}', 'Content-Type': 'application/json',
'Accept': 'application/json'}, json={'prompt': prompt, **params})
text = response.text
if stop is not None:
text = enforce_stop_tokens(text, stop)
return text
|
Call out to Writer's completions endpoint.
Args:
prompt: The prompt to pass into the model.
stop: Optional list of stop words to use when generating.
Returns:
The string generated by the model.
Example:
.. code-block:: python
response = Writer("Tell me a joke.")
|
test_document_found
|
"""Test document found."""
_dict = {'foo': Document(page_content='bar')}
docstore = InMemoryDocstore(_dict)
output = docstore.search('foo')
assert isinstance(output, Document)
assert output.page_content == 'bar'
|
def test_document_found() ->None:
"""Test document found."""
_dict = {'foo': Document(page_content='bar')}
docstore = InMemoryDocstore(_dict)
output = docstore.search('foo')
assert isinstance(output, Document)
assert output.page_content == 'bar'
|
Test document found.
|
__init__
|
self.openai_api_key = openai_api_key or get_from_env('openai_api_key',
'OPENAI_API_KEY')
self.openai_api_model = openai_api_model or get_from_env('openai_api_model',
'OPENAI_API_MODEL')
|
def __init__(self, openai_api_key: Optional[str]=None, openai_api_model:
Optional[str]=None) ->None:
self.openai_api_key = openai_api_key or get_from_env('openai_api_key',
'OPENAI_API_KEY')
self.openai_api_model = openai_api_model or get_from_env('openai_api_model'
, 'OPENAI_API_MODEL')
| null |
embed_documents
|
"""Return simple embeddings.
Embeddings encode each text as its index."""
return [([float(1.0)] * 9 + [float(i)]) for i in range(len(texts))]
|
def embed_documents(self, texts: List[str]) ->List[List[float]]:
"""Return simple embeddings.
Embeddings encode each text as its index."""
return [([float(1.0)] * 9 + [float(i)]) for i in range(len(texts))]
|
Return simple embeddings.
Embeddings encode each text as its index.
|
_get_figma_file
|
"""Get Figma file from Figma REST API."""
headers = {'X-Figma-Token': self.access_token}
request = urllib.request.Request(self._construct_figma_api_url(), headers=
headers)
with urllib.request.urlopen(request) as response:
json_data = json.loads(response.read().decode())
return json_data
|
def _get_figma_file(self) ->Any:
"""Get Figma file from Figma REST API."""
headers = {'X-Figma-Token': self.access_token}
request = urllib.request.Request(self._construct_figma_api_url(),
headers=headers)
with urllib.request.urlopen(request) as response:
json_data = json.loads(response.read().decode())
return json_data
|
Get Figma file from Figma REST API.
|
_get_google_cloud_texttospeech
|
return GoogleCloudTextToSpeechTool(**kwargs)
|
def _get_google_cloud_texttospeech(**kwargs: Any) ->BaseTool:
return GoogleCloudTextToSpeechTool(**kwargs)
| null |
on_chain_start
|
"""Run when chain starts running."""
aim = import_aim()
self.step += 1
self.chain_starts += 1
self.starts += 1
resp = {'action': 'on_chain_start'}
resp.update(self.get_custom_callback_meta())
inputs_res = deepcopy(inputs)
self._run.track(aim.Text(inputs_res['input']), name='on_chain_start',
context=resp)
|
def on_chain_start(self, serialized: Dict[str, Any], inputs: Dict[str, Any],
**kwargs: Any) ->None:
"""Run when chain starts running."""
aim = import_aim()
self.step += 1
self.chain_starts += 1
self.starts += 1
resp = {'action': 'on_chain_start'}
resp.update(self.get_custom_callback_meta())
inputs_res = deepcopy(inputs)
self._run.track(aim.Text(inputs_res['input']), name='on_chain_start',
context=resp)
|
Run when chain starts running.
|
elasticsearch_url
|
"""Return the elasticsearch url."""
from elasticsearch import Elasticsearch
url = 'http://localhost:9200'
yield url
es = Elasticsearch(hosts=url)
index_names = es.indices.get(index='_all').keys()
for index_name in index_names:
es.indices.delete(index=index_name)
|
@pytest.fixture(scope='class', autouse=True)
def elasticsearch_url(self) ->Union[str, Generator[str, None, None]]:
"""Return the elasticsearch url."""
from elasticsearch import Elasticsearch
url = 'http://localhost:9200'
yield url
es = Elasticsearch(hosts=url)
index_names = es.indices.get(index='_all').keys()
for index_name in index_names:
es.indices.delete(index=index_name)
|
Return the elasticsearch url.
|
tool_run_logging_kwargs
|
return {'llm_prefix': '', 'observation_prefix': '' if len(self.stop) == 0 else
self.stop[0]}
|
def tool_run_logging_kwargs(self) ->Dict:
return {'llm_prefix': '', 'observation_prefix': '' if len(self.stop) ==
0 else self.stop[0]}
| null |
_llm_type
|
"""Return type of llm."""
return 'fake-list'
|
@property
def _llm_type(self) ->str:
"""Return type of llm."""
return 'fake-list'
|
Return type of llm.
|
on_llm_end
|
if parent_run_id is None:
self.increment()
|
def on_llm_end(self, response: LLMResult, *, run_id: UUID, parent_run_id:
Optional[UUID]=None, **kwargs: Any) ->Any:
if parent_run_id is None:
self.increment()
| null |
test_valid_call
|
"""Test valid call of LLM chain."""
output = fake_llm_chain({'bar': 'baz'})
assert output == {'bar': 'baz', 'text1': 'foo'}
output = fake_llm_chain({'bar': 'baz', 'stop': ['foo']})
assert output == {'bar': 'baz', 'stop': ['foo'], 'text1': 'bar'}
|
def test_valid_call(fake_llm_chain: LLMChain) ->None:
"""Test valid call of LLM chain."""
output = fake_llm_chain({'bar': 'baz'})
assert output == {'bar': 'baz', 'text1': 'foo'}
output = fake_llm_chain({'bar': 'baz', 'stop': ['foo']})
assert output == {'bar': 'baz', 'stop': ['foo'], 'text1': 'bar'}
|
Test valid call of LLM chain.
|
_stream
|
params = self._prepare_params(stop=stop, stream=True, **kwargs)
for stream_resp in completion_with_retry(self, [prompt], stream=True,
is_gemini=self._is_gemini_model, run_manager=run_manager, **params):
chunk = self._response_to_generation(stream_resp)
yield chunk
if run_manager:
run_manager.on_llm_new_token(chunk.text, chunk=chunk, verbose=self.
verbose)
|
def _stream(self, prompt: str, stop: Optional[List[str]]=None, run_manager:
Optional[CallbackManagerForLLMRun]=None, **kwargs: Any) ->Iterator[
GenerationChunk]:
params = self._prepare_params(stop=stop, stream=True, **kwargs)
for stream_resp in completion_with_retry(self, [prompt], stream=True,
is_gemini=self._is_gemini_model, run_manager=run_manager, **params):
chunk = self._response_to_generation(stream_resp)
yield chunk
if run_manager:
run_manager.on_llm_new_token(chunk.text, chunk=chunk, verbose=
self.verbose)
| null |
_type
|
return 'agent_trajectory'
|
@property
def _type(self) ->str:
return 'agent_trajectory'
| null |
test_get_all_10sec_timeout
|
start_time = time.time()
contract_address = '0x1a92f7381b9f03921564a437210bb9396471050c'
with pytest.raises(RuntimeError):
BlockchainDocumentLoader(contract_address=contract_address,
blockchainType=BlockchainType.ETH_MAINNET, api_key=os.environ[
'ALCHEMY_API_KEY'], get_all_tokens=True, max_execution_time=10).load()
end_time = time.time()
print('Execution took ', end_time - start_time, ' seconds')
|
@pytest.mark.skipif(not alchemyKeySet, reason='Alchemy API key not provided.')
def test_get_all_10sec_timeout() ->None:
start_time = time.time()
contract_address = '0x1a92f7381b9f03921564a437210bb9396471050c'
with pytest.raises(RuntimeError):
BlockchainDocumentLoader(contract_address=contract_address,
blockchainType=BlockchainType.ETH_MAINNET, api_key=os.environ[
'ALCHEMY_API_KEY'], get_all_tokens=True, max_execution_time=10
).load()
end_time = time.time()
print('Execution took ', end_time - start_time, ' seconds')
| null |
test_math_question_infinite_loop
|
"""Test simple question."""
question = """Michael had 58 golf balls. On tuesday, he lost 23 golf balls.
On wednesday, he lost 2 more. How many golf balls did he have
at the end of wednesday?"""
prompt = MATH_PROMPT.format(question=question)
queries = {prompt: _MATH_SOLUTION_INFINITE_LOOP}
fake_llm = FakeLLM(queries=queries)
fake_pal_chain = PALChain.from_math_prompt(fake_llm, timeout=1)
output = fake_pal_chain.run(question)
assert output == 'Execution timed out'
|
def test_math_question_infinite_loop() ->None:
"""Test simple question."""
question = """Michael had 58 golf balls. On tuesday, he lost 23 golf balls.
On wednesday, he lost 2 more. How many golf balls did he have
at the end of wednesday?"""
prompt = MATH_PROMPT.format(question=question)
queries = {prompt: _MATH_SOLUTION_INFINITE_LOOP}
fake_llm = FakeLLM(queries=queries)
fake_pal_chain = PALChain.from_math_prompt(fake_llm, timeout=1)
output = fake_pal_chain.run(question)
assert output == 'Execution timed out'
|
Test simple question.
|
test_pgvector_retriever_search_threshold_custom_normalization_fn
|
"""Test searching with threshold and custom normalization function"""
texts = ['foo', 'bar', 'baz']
metadatas = [{'page': str(i)} for i in range(len(texts))]
docsearch = PGVector.from_texts(texts=texts, collection_name=
'test_collection', embedding=FakeEmbeddingsWithAdaDimension(),
metadatas=metadatas, connection_string=CONNECTION_STRING,
pre_delete_collection=True, relevance_score_fn=lambda d: d * 0)
retriever = docsearch.as_retriever(search_type='similarity_score_threshold',
search_kwargs={'k': 3, 'score_threshold': 0.5})
output = retriever.get_relevant_documents('foo')
assert output == []
|
def test_pgvector_retriever_search_threshold_custom_normalization_fn() ->None:
"""Test searching with threshold and custom normalization function"""
texts = ['foo', 'bar', 'baz']
metadatas = [{'page': str(i)} for i in range(len(texts))]
docsearch = PGVector.from_texts(texts=texts, collection_name=
'test_collection', embedding=FakeEmbeddingsWithAdaDimension(),
metadatas=metadatas, connection_string=CONNECTION_STRING,
pre_delete_collection=True, relevance_score_fn=lambda d: d * 0)
retriever = docsearch.as_retriever(search_type=
'similarity_score_threshold', search_kwargs={'k': 3,
'score_threshold': 0.5})
output = retriever.get_relevant_documents('foo')
assert output == []
|
Test searching with threshold and custom normalization function
|
mget
|
"""Get the values associated with the given keys."""
return [(value.encode('utf-8') if value is not None else None) for value in
self.underlying_store.mget(keys)]
|
def mget(self, keys: Sequence[str]) ->List[Optional[bytes]]:
"""Get the values associated with the given keys."""
return [(value.encode('utf-8') if value is not None else None) for
value in self.underlying_store.mget(keys)]
|
Get the values associated with the given keys.
|
load
|
"""
:param space_key: Space key retrieved from a confluence URL, defaults to None
:type space_key: Optional[str], optional
:param page_ids: List of specific page IDs to load, defaults to None
:type page_ids: Optional[List[str]], optional
:param label: Get all pages with this label, defaults to None
:type label: Optional[str], optional
:param cql: CQL Expression, defaults to None
:type cql: Optional[str], optional
:param include_restricted_content: defaults to False
:type include_restricted_content: bool, optional
:param include_archived_content: Whether to include archived content,
defaults to False
:type include_archived_content: bool, optional
:param include_attachments: defaults to False
:type include_attachments: bool, optional
:param include_comments: defaults to False
:type include_comments: bool, optional
:param content_format: Specify content format, defaults to
ContentFormat.STORAGE, the supported values are:
`ContentFormat.EDITOR`, `ContentFormat.EXPORT_VIEW`,
`ContentFormat.ANONYMOUS_EXPORT_VIEW`,
`ContentFormat.STORAGE`, and `ContentFormat.VIEW`.
:type content_format: ContentFormat
:param limit: Maximum number of pages to retrieve per request, defaults to 50
:type limit: int, optional
:param max_pages: Maximum number of pages to retrieve in total, defaults 1000
:type max_pages: int, optional
:param ocr_languages: The languages to use for the Tesseract agent. To use a
language, you'll first need to install the appropriate
Tesseract language pack.
:type ocr_languages: str, optional
:param keep_markdown_format: Whether to keep the markdown format, defaults to
False
:type keep_markdown_format: bool
:param keep_newlines: Whether to keep the newlines format, defaults to
False
:type keep_newlines: bool
:raises ValueError: _description_
:raises ImportError: _description_
:return: _description_
:rtype: List[Document]
"""
if not space_key and not page_ids and not label and not cql:
raise ValueError(
'Must specify at least one among `space_key`, `page_ids`, `label`, `cql` parameters.'
)
docs = []
if space_key:
pages = self.paginate_request(self.confluence.get_all_pages_from_space,
space=space_key, limit=limit, max_pages=max_pages, status='any' if
include_archived_content else 'current', expand=content_format.value)
docs += self.process_pages(pages, include_restricted_content,
include_attachments, include_comments, content_format,
ocr_languages=ocr_languages, keep_markdown_format=
keep_markdown_format, keep_newlines=keep_newlines)
if label:
pages = self.paginate_request(self.confluence.get_all_pages_by_label,
label=label, limit=limit, max_pages=max_pages)
ids_by_label = [page['id'] for page in pages]
if page_ids:
page_ids = list(set(page_ids + ids_by_label))
else:
page_ids = list(set(ids_by_label))
if cql:
pages = self.paginate_request(self._search_content_by_cql, cql=cql,
limit=limit, max_pages=max_pages, include_archived_spaces=
include_archived_content, expand=content_format.value)
docs += self.process_pages(pages, include_restricted_content,
include_attachments, include_comments, content_format,
ocr_languages, keep_markdown_format)
if page_ids:
for page_id in page_ids:
get_page = retry(reraise=True, stop=stop_after_attempt(self.
number_of_retries), wait=wait_exponential(multiplier=1, min=
self.min_retry_seconds, max=self.max_retry_seconds),
before_sleep=before_sleep_log(logger, logging.WARNING))(self.
confluence.get_page_by_id)
page = get_page(page_id=page_id, expand=
f'{content_format.value},version')
if not include_restricted_content and not self.is_public_page(page):
continue
doc = self.process_page(page, include_attachments, include_comments,
content_format, ocr_languages, keep_markdown_format)
docs.append(doc)
return docs
|
def load(self, space_key: Optional[str]=None, page_ids: Optional[List[str]]
=None, label: Optional[str]=None, cql: Optional[str]=None,
include_restricted_content: bool=False, include_archived_content: bool=
False, include_attachments: bool=False, include_comments: bool=False,
content_format: ContentFormat=ContentFormat.STORAGE, limit: Optional[
int]=50, max_pages: Optional[int]=1000, ocr_languages: Optional[str]=
None, keep_markdown_format: bool=False, keep_newlines: bool=False) ->List[
Document]:
"""
:param space_key: Space key retrieved from a confluence URL, defaults to None
:type space_key: Optional[str], optional
:param page_ids: List of specific page IDs to load, defaults to None
:type page_ids: Optional[List[str]], optional
:param label: Get all pages with this label, defaults to None
:type label: Optional[str], optional
:param cql: CQL Expression, defaults to None
:type cql: Optional[str], optional
:param include_restricted_content: defaults to False
:type include_restricted_content: bool, optional
:param include_archived_content: Whether to include archived content,
defaults to False
:type include_archived_content: bool, optional
:param include_attachments: defaults to False
:type include_attachments: bool, optional
:param include_comments: defaults to False
:type include_comments: bool, optional
:param content_format: Specify content format, defaults to
ContentFormat.STORAGE, the supported values are:
`ContentFormat.EDITOR`, `ContentFormat.EXPORT_VIEW`,
`ContentFormat.ANONYMOUS_EXPORT_VIEW`,
`ContentFormat.STORAGE`, and `ContentFormat.VIEW`.
:type content_format: ContentFormat
:param limit: Maximum number of pages to retrieve per request, defaults to 50
:type limit: int, optional
:param max_pages: Maximum number of pages to retrieve in total, defaults 1000
:type max_pages: int, optional
:param ocr_languages: The languages to use for the Tesseract agent. To use a
language, you'll first need to install the appropriate
Tesseract language pack.
:type ocr_languages: str, optional
:param keep_markdown_format: Whether to keep the markdown format, defaults to
False
:type keep_markdown_format: bool
:param keep_newlines: Whether to keep the newlines format, defaults to
False
:type keep_newlines: bool
:raises ValueError: _description_
:raises ImportError: _description_
:return: _description_
:rtype: List[Document]
"""
if not space_key and not page_ids and not label and not cql:
raise ValueError(
'Must specify at least one among `space_key`, `page_ids`, `label`, `cql` parameters.'
)
docs = []
if space_key:
pages = self.paginate_request(self.confluence.
get_all_pages_from_space, space=space_key, limit=limit,
max_pages=max_pages, status='any' if include_archived_content else
'current', expand=content_format.value)
docs += self.process_pages(pages, include_restricted_content,
include_attachments, include_comments, content_format,
ocr_languages=ocr_languages, keep_markdown_format=
keep_markdown_format, keep_newlines=keep_newlines)
if label:
pages = self.paginate_request(self.confluence.
get_all_pages_by_label, label=label, limit=limit, max_pages=
max_pages)
ids_by_label = [page['id'] for page in pages]
if page_ids:
page_ids = list(set(page_ids + ids_by_label))
else:
page_ids = list(set(ids_by_label))
if cql:
pages = self.paginate_request(self._search_content_by_cql, cql=cql,
limit=limit, max_pages=max_pages, include_archived_spaces=
include_archived_content, expand=content_format.value)
docs += self.process_pages(pages, include_restricted_content,
include_attachments, include_comments, content_format,
ocr_languages, keep_markdown_format)
if page_ids:
for page_id in page_ids:
get_page = retry(reraise=True, stop=stop_after_attempt(self.
number_of_retries), wait=wait_exponential(multiplier=1, min
=self.min_retry_seconds, max=self.max_retry_seconds),
before_sleep=before_sleep_log(logger, logging.WARNING))(self
.confluence.get_page_by_id)
page = get_page(page_id=page_id, expand=
f'{content_format.value},version')
if not include_restricted_content and not self.is_public_page(page
):
continue
doc = self.process_page(page, include_attachments,
include_comments, content_format, ocr_languages,
keep_markdown_format)
docs.append(doc)
return docs
|
:param space_key: Space key retrieved from a confluence URL, defaults to None
:type space_key: Optional[str], optional
:param page_ids: List of specific page IDs to load, defaults to None
:type page_ids: Optional[List[str]], optional
:param label: Get all pages with this label, defaults to None
:type label: Optional[str], optional
:param cql: CQL Expression, defaults to None
:type cql: Optional[str], optional
:param include_restricted_content: defaults to False
:type include_restricted_content: bool, optional
:param include_archived_content: Whether to include archived content,
defaults to False
:type include_archived_content: bool, optional
:param include_attachments: defaults to False
:type include_attachments: bool, optional
:param include_comments: defaults to False
:type include_comments: bool, optional
:param content_format: Specify content format, defaults to
ContentFormat.STORAGE, the supported values are:
`ContentFormat.EDITOR`, `ContentFormat.EXPORT_VIEW`,
`ContentFormat.ANONYMOUS_EXPORT_VIEW`,
`ContentFormat.STORAGE`, and `ContentFormat.VIEW`.
:type content_format: ContentFormat
:param limit: Maximum number of pages to retrieve per request, defaults to 50
:type limit: int, optional
:param max_pages: Maximum number of pages to retrieve in total, defaults 1000
:type max_pages: int, optional
:param ocr_languages: The languages to use for the Tesseract agent. To use a
language, you'll first need to install the appropriate
Tesseract language pack.
:type ocr_languages: str, optional
:param keep_markdown_format: Whether to keep the markdown format, defaults to
False
:type keep_markdown_format: bool
:param keep_newlines: Whether to keep the newlines format, defaults to
False
:type keep_newlines: bool
:raises ValueError: _description_
:raises ImportError: _description_
:return: _description_
:rtype: List[Document]
|
load
|
"""Download a selected dataset.
Returns: a list of Documents.
"""
return list(self.lazy_load())
|
def load(self) ->List[Document]:
"""Download a selected dataset.
Returns: a list of Documents.
"""
return list(self.lazy_load())
|
Download a selected dataset.
Returns: a list of Documents.
|
_clean_response
|
return re.sub(f'^{self.name} ', '', text.strip()).strip()
|
def _clean_response(self, text: str) ->str:
return re.sub(f'^{self.name} ', '', text.strip()).strip()
| null |
test_all_imports
|
assert set(__all__) == set(EXPECTED_ALL)
|
def test_all_imports() ->None:
assert set(__all__) == set(EXPECTED_ALL)
| null |
test_mosaicml_embedding_documents
|
"""Test MosaicML embeddings."""
documents = ['foo bar']
embedding = MosaicMLInstructorEmbeddings()
output = embedding.embed_documents(documents)
assert len(output) == 1
assert len(output[0]) == 768
|
def test_mosaicml_embedding_documents() ->None:
"""Test MosaicML embeddings."""
documents = ['foo bar']
embedding = MosaicMLInstructorEmbeddings()
output = embedding.embed_documents(documents)
assert len(output) == 1
assert len(output[0]) == 768
|
Test MosaicML embeddings.
|
test_from_documents_inner_product
|
"""Test end to end construction and search."""
documents = [Document(page_content='Dogs are tough.', metadata={'a': 1}),
Document(page_content='Cats have fluff.', metadata={'b': 1}), Document(
page_content='What is a sandwich?', metadata={'c': 1}), Document(
page_content='That fence is purple.', metadata={'d': 1, 'e': 2})]
vectorstore = AzureCosmosDBVectorSearch.from_documents(documents,
azure_openai_embeddings, collection=collection, index_name=INDEX_NAME)
sleep(1)
vectorstore.create_index(num_lists, dimensions, CosmosDBSimilarityType.IP)
sleep(2)
output = vectorstore.similarity_search('Sandwich', k=1)
assert output
assert output[0].page_content == 'What is a sandwich?'
assert output[0].metadata['c'] == 1
vectorstore.delete_index()
|
def test_from_documents_inner_product(self, azure_openai_embeddings:
OpenAIEmbeddings, collection: Any) ->None:
"""Test end to end construction and search."""
documents = [Document(page_content='Dogs are tough.', metadata={'a': 1}
), Document(page_content='Cats have fluff.', metadata={'b': 1}),
Document(page_content='What is a sandwich?', metadata={'c': 1}),
Document(page_content='That fence is purple.', metadata={'d': 1,
'e': 2})]
vectorstore = AzureCosmosDBVectorSearch.from_documents(documents,
azure_openai_embeddings, collection=collection, index_name=INDEX_NAME)
sleep(1)
vectorstore.create_index(num_lists, dimensions, CosmosDBSimilarityType.IP)
sleep(2)
output = vectorstore.similarity_search('Sandwich', k=1)
assert output
assert output[0].page_content == 'What is a sandwich?'
assert output[0].metadata['c'] == 1
vectorstore.delete_index()
|
Test end to end construction and search.
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.