method_name
stringlengths 1
78
| method_body
stringlengths 3
9.66k
| full_code
stringlengths 31
10.7k
| docstring
stringlengths 4
4.74k
⌀ |
|---|---|---|---|
input_keys
|
return ['input']
|
@property
def input_keys(self) ->List[str]:
return ['input']
| null |
get_propose_prompt
|
return PromptTemplate(template_format='jinja2', input_variables=[
'problem_description', 'thoughts', 'n'], output_parser=
JSONListOutputParser(), template=dedent(
"""
You are an intelligent agent that is generating thoughts in a tree of
thoughts setting.
The output should be a markdown code snippet formatted as a JSON list of
strings, including the leading and trailing "```json" and "```":
```json
[
"<thought-1>",
"<thought-2>",
"<thought-3>"
]
```
PROBLEM
{{ problem_description }}
{% if thoughts %}
VALID THOUGHTS
{% for thought in thoughts %}
{{ thought }}
{% endfor %}
Possible next {{ n }} valid thoughts based on the last valid thought:
{% else %}
Possible next {{ n }} valid thoughts based on the PROBLEM:
{%- endif -%}
"""
).strip())
|
def get_propose_prompt() ->PromptTemplate:
return PromptTemplate(template_format='jinja2', input_variables=[
'problem_description', 'thoughts', 'n'], output_parser=
JSONListOutputParser(), template=dedent(
"""
You are an intelligent agent that is generating thoughts in a tree of
thoughts setting.
The output should be a markdown code snippet formatted as a JSON list of
strings, including the leading and trailing "```json" and "```":
```json
[
"<thought-1>",
"<thought-2>",
"<thought-3>"
]
```
PROBLEM
{{ problem_description }}
{% if thoughts %}
VALID THOUGHTS
{% for thought in thoughts %}
{{ thought }}
{% endfor %}
Possible next {{ n }} valid thoughts based on the last valid thought:
{% else %}
Possible next {{ n }} valid thoughts based on the PROBLEM:
{%- endif -%}
"""
).strip())
| null |
test_messages_to_prompt_dict_with_valid_messages
|
pytest.importorskip('google.generativeai')
result = _messages_to_prompt_dict([SystemMessage(content='Prompt'),
HumanMessage(example=True, content='Human example #1'), AIMessage(
example=True, content='AI example #1'), HumanMessage(example=True,
content='Human example #2'), AIMessage(example=True, content=
'AI example #2'), HumanMessage(content='Real human message'), AIMessage
(content='Real AI message')])
expected = {'context': 'Prompt', 'examples': [{'author': 'human', 'content':
'Human example #1'}, {'author': 'ai', 'content': 'AI example #1'}, {
'author': 'human', 'content': 'Human example #2'}, {'author': 'ai',
'content': 'AI example #2'}], 'messages': [{'author': 'human',
'content': 'Real human message'}, {'author': 'ai', 'content':
'Real AI message'}]}
assert result == expected
|
def test_messages_to_prompt_dict_with_valid_messages() ->None:
pytest.importorskip('google.generativeai')
result = _messages_to_prompt_dict([SystemMessage(content='Prompt'),
HumanMessage(example=True, content='Human example #1'), AIMessage(
example=True, content='AI example #1'), HumanMessage(example=True,
content='Human example #2'), AIMessage(example=True, content=
'AI example #2'), HumanMessage(content='Real human message'),
AIMessage(content='Real AI message')])
expected = {'context': 'Prompt', 'examples': [{'author': 'human',
'content': 'Human example #1'}, {'author': 'ai', 'content':
'AI example #1'}, {'author': 'human', 'content': 'Human example #2'
}, {'author': 'ai', 'content': 'AI example #2'}], 'messages': [{
'author': 'human', 'content': 'Real human message'}, {'author':
'ai', 'content': 'Real AI message'}]}
assert result == expected
| null |
test_create_ticket
|
"""Test the Create Ticket Call that Creates a Issue/Ticket on JIRA."""
issue_string = (
'{"summary": "Test Summary", "description": "Test Description", "issuetype": {"name": "Bug"}, "project": {"key": "TP"}}'
)
jira = JiraAPIWrapper()
output = jira.run('create_issue', issue_string)
assert 'id' in output
assert 'key' in output
|
def test_create_ticket() ->None:
"""Test the Create Ticket Call that Creates a Issue/Ticket on JIRA."""
issue_string = (
'{"summary": "Test Summary", "description": "Test Description", "issuetype": {"name": "Bug"}, "project": {"key": "TP"}}'
)
jira = JiraAPIWrapper()
output = jira.run('create_issue', issue_string)
assert 'id' in output
assert 'key' in output
|
Test the Create Ticket Call that Creates a Issue/Ticket on JIRA.
|
test_partial_init_string
|
"""Test prompt can be initialized with partial variables."""
prefix = 'This is a test about {content}.'
suffix = 'Now you try to talk about {new_content}.'
examples = [{'question': 'foo', 'answer': 'bar'}, {'question': 'baz',
'answer': 'foo'}]
prompt = FewShotPromptTemplate(suffix=suffix, prefix=prefix,
input_variables=['new_content'], partial_variables={'content':
'animals'}, examples=examples, example_prompt=EXAMPLE_PROMPT,
example_separator='\n')
output = prompt.format(new_content='party')
expected_output = """This is a test about animals.
foo: bar
baz: foo
Now you try to talk about party."""
assert output == expected_output
|
def test_partial_init_string() ->None:
"""Test prompt can be initialized with partial variables."""
prefix = 'This is a test about {content}.'
suffix = 'Now you try to talk about {new_content}.'
examples = [{'question': 'foo', 'answer': 'bar'}, {'question': 'baz',
'answer': 'foo'}]
prompt = FewShotPromptTemplate(suffix=suffix, prefix=prefix,
input_variables=['new_content'], partial_variables={'content':
'animals'}, examples=examples, example_prompt=EXAMPLE_PROMPT,
example_separator='\n')
output = prompt.format(new_content='party')
expected_output = """This is a test about animals.
foo: bar
baz: foo
Now you try to talk about party."""
assert output == expected_output
|
Test prompt can be initialized with partial variables.
|
test_get_relevant_documents_with_filter
|
"""Test end to end construction and MRR search."""
from weaviate import Client
texts = ['foo', 'bar', 'baz']
metadatas = [{'page': i} for i in range(len(texts))]
client = Client(weaviate_url)
retriever = WeaviateHybridSearchRetriever(client=client, index_name=
f'LangChain_{uuid4().hex}', text_key='text', attributes=['page'])
for i, text in enumerate(texts):
retriever.add_documents([Document(page_content=text, metadata=metadatas
[i])])
where_filter = {'path': ['page'], 'operator': 'Equal', 'valueNumber': 0}
output = retriever.get_relevant_documents('foo', where_filter=where_filter)
assert output == [Document(page_content='foo', metadata={'page': 0})]
|
@pytest.mark.vcr(ignore_localhost=True)
def test_get_relevant_documents_with_filter(self, weaviate_url: str) ->None:
"""Test end to end construction and MRR search."""
from weaviate import Client
texts = ['foo', 'bar', 'baz']
metadatas = [{'page': i} for i in range(len(texts))]
client = Client(weaviate_url)
retriever = WeaviateHybridSearchRetriever(client=client, index_name=
f'LangChain_{uuid4().hex}', text_key='text', attributes=['page'])
for i, text in enumerate(texts):
retriever.add_documents([Document(page_content=text, metadata=
metadatas[i])])
where_filter = {'path': ['page'], 'operator': 'Equal', 'valueNumber': 0}
output = retriever.get_relevant_documents('foo', where_filter=where_filter)
assert output == [Document(page_content='foo', metadata={'page': 0})]
|
Test end to end construction and MRR search.
|
_import_azure_cognitive_services_AzureCogsSpeech2TextTool
|
from langchain_community.tools.azure_cognitive_services import AzureCogsSpeech2TextTool
return AzureCogsSpeech2TextTool
|
def _import_azure_cognitive_services_AzureCogsSpeech2TextTool() ->Any:
from langchain_community.tools.azure_cognitive_services import AzureCogsSpeech2TextTool
return AzureCogsSpeech2TextTool
| null |
test_qdrant_from_texts_stores_ids
|
"""Test end to end Qdrant.from_texts stores provided ids."""
from qdrant_client import QdrantClient
collection_name = uuid.uuid4().hex
with tempfile.TemporaryDirectory() as tmpdir:
ids = ['fa38d572-4c31-4579-aedc-1960d79df6df',
'cdc1aa36-d6ab-4fb2-8a94-56674fd27484']
vec_store = Qdrant.from_texts(['abc', 'def'], ConsistentFakeEmbeddings(
), ids=ids, collection_name=collection_name, path=str(tmpdir),
batch_size=batch_size, vector_name=vector_name)
del vec_store
client = QdrantClient(path=str(tmpdir))
assert 2 == client.count(collection_name).count
stored_ids = [point.id for point in client.scroll(collection_name)[0]]
assert set(ids) == set(stored_ids)
|
@pytest.mark.parametrize('batch_size', [1, 64])
@pytest.mark.parametrize('vector_name', [None, 'my-vector'])
def test_qdrant_from_texts_stores_ids(batch_size: int, vector_name:
Optional[str]) ->None:
"""Test end to end Qdrant.from_texts stores provided ids."""
from qdrant_client import QdrantClient
collection_name = uuid.uuid4().hex
with tempfile.TemporaryDirectory() as tmpdir:
ids = ['fa38d572-4c31-4579-aedc-1960d79df6df',
'cdc1aa36-d6ab-4fb2-8a94-56674fd27484']
vec_store = Qdrant.from_texts(['abc', 'def'],
ConsistentFakeEmbeddings(), ids=ids, collection_name=
collection_name, path=str(tmpdir), batch_size=batch_size,
vector_name=vector_name)
del vec_store
client = QdrantClient(path=str(tmpdir))
assert 2 == client.count(collection_name).count
stored_ids = [point.id for point in client.scroll(collection_name)[0]]
assert set(ids) == set(stored_ids)
|
Test end to end Qdrant.from_texts stores provided ids.
|
_create_retry_decorator
|
from grpc import RpcError
min_seconds = 1
max_seconds = 60
return retry(reraise=True, stop=stop_after_attempt(llm.max_retries), wait=
wait_exponential(multiplier=1, min=min_seconds, max=max_seconds), retry
=retry_if_exception_type(RpcError), before_sleep=before_sleep_log(
logger, logging.WARNING))
|
def _create_retry_decorator(llm: YandexGPT) ->Callable[[Any], Any]:
from grpc import RpcError
min_seconds = 1
max_seconds = 60
return retry(reraise=True, stop=stop_after_attempt(llm.max_retries),
wait=wait_exponential(multiplier=1, min=min_seconds, max=
max_seconds), retry=retry_if_exception_type(RpcError), before_sleep
=before_sleep_log(logger, logging.WARNING))
| null |
make_request
|
"""Generate text from the model."""
params = params or {}
api_key = None
if self.nebula_api_key is not None:
api_key = self.nebula_api_key.get_secret_value()
headers = {'Content-Type': 'application/json', 'ApiKey': f'{api_key}'}
body = {'prompt': prompt}
for key, value in params.items():
body[key] = value
response = requests.post(url, headers=headers, json=body)
if response.status_code != 200:
raise Exception(
f'Request failed with status code {response.status_code} and message {response.text}'
)
return json.loads(response.text)
|
def make_request(self: Nebula, prompt: str, url: str=
f'{DEFAULT_NEBULA_SERVICE_URL}{DEFAULT_NEBULA_SERVICE_PATH}', params:
Optional[Dict]=None) ->Any:
"""Generate text from the model."""
params = params or {}
api_key = None
if self.nebula_api_key is not None:
api_key = self.nebula_api_key.get_secret_value()
headers = {'Content-Type': 'application/json', 'ApiKey': f'{api_key}'}
body = {'prompt': prompt}
for key, value in params.items():
body[key] = value
response = requests.post(url, headers=headers, json=body)
if response.status_code != 200:
raise Exception(
f'Request failed with status code {response.status_code} and message {response.text}'
)
return json.loads(response.text)
|
Generate text from the model.
|
_astream
|
raise NotImplementedError()
|
def _astream(self, messages: List[BaseMessage], stop: Optional[List[str]]=
None, run_manager: Optional[AsyncCallbackManagerForLLMRun]=None, **
kwargs: Any) ->AsyncIterator[ChatGenerationChunk]:
raise NotImplementedError()
| null |
_validate_tools
|
super()._validate_tools(tools)
validate_tools_single_input(class_name=cls.__name__, tools=tools)
|
@classmethod
def _validate_tools(cls, tools: Sequence[BaseTool]) ->None:
super()._validate_tools(tools)
validate_tools_single_input(class_name=cls.__name__, tools=tools)
| null |
__init__
|
"""
Create an AstraDB cache using a collection for storage.
Args (only keyword-arguments accepted):
collection_name (str): name of the Astra DB collection to create/use.
token (Optional[str]): API token for Astra DB usage.
api_endpoint (Optional[str]): full URL to the API endpoint,
such as "https://<DB-ID>-us-east1.apps.astra.datastax.com".
astra_db_client (Optional[Any]): *alternative to token+api_endpoint*,
you can pass an already-created 'astrapy.db.AstraDB' instance.
namespace (Optional[str]): namespace (aka keyspace) where the
collection is created. Defaults to the database's "default namespace".
"""
try:
from astrapy.db import AstraDB as LibAstraDB
except (ImportError, ModuleNotFoundError):
raise ImportError(
'Could not import a recent astrapy python package. Please install it with `pip install --upgrade astrapy`.'
)
if astra_db_client is not None:
if token is not None or api_endpoint is not None:
raise ValueError(
"You cannot pass 'astra_db_client' to AstraDB if passing 'token' and 'api_endpoint'."
)
self.collection_name = collection_name
self.token = token
self.api_endpoint = api_endpoint
self.namespace = namespace
if astra_db_client is not None:
self.astra_db = astra_db_client
else:
self.astra_db = LibAstraDB(token=self.token, api_endpoint=self.
api_endpoint, namespace=self.namespace)
self.collection = self.astra_db.create_collection(collection_name=self.
collection_name)
|
def __init__(self, *, collection_name: str=
ASTRA_DB_CACHE_DEFAULT_COLLECTION_NAME, token: Optional[str]=None,
api_endpoint: Optional[str]=None, astra_db_client: Optional[Any]=None,
namespace: Optional[str]=None):
"""
Create an AstraDB cache using a collection for storage.
Args (only keyword-arguments accepted):
collection_name (str): name of the Astra DB collection to create/use.
token (Optional[str]): API token for Astra DB usage.
api_endpoint (Optional[str]): full URL to the API endpoint,
such as "https://<DB-ID>-us-east1.apps.astra.datastax.com".
astra_db_client (Optional[Any]): *alternative to token+api_endpoint*,
you can pass an already-created 'astrapy.db.AstraDB' instance.
namespace (Optional[str]): namespace (aka keyspace) where the
collection is created. Defaults to the database's "default namespace".
"""
try:
from astrapy.db import AstraDB as LibAstraDB
except (ImportError, ModuleNotFoundError):
raise ImportError(
'Could not import a recent astrapy python package. Please install it with `pip install --upgrade astrapy`.'
)
if astra_db_client is not None:
if token is not None or api_endpoint is not None:
raise ValueError(
"You cannot pass 'astra_db_client' to AstraDB if passing 'token' and 'api_endpoint'."
)
self.collection_name = collection_name
self.token = token
self.api_endpoint = api_endpoint
self.namespace = namespace
if astra_db_client is not None:
self.astra_db = astra_db_client
else:
self.astra_db = LibAstraDB(token=self.token, api_endpoint=self.
api_endpoint, namespace=self.namespace)
self.collection = self.astra_db.create_collection(collection_name=self.
collection_name)
|
Create an AstraDB cache using a collection for storage.
Args (only keyword-arguments accepted):
collection_name (str): name of the Astra DB collection to create/use.
token (Optional[str]): API token for Astra DB usage.
api_endpoint (Optional[str]): full URL to the API endpoint,
such as "https://<DB-ID>-us-east1.apps.astra.datastax.com".
astra_db_client (Optional[Any]): *alternative to token+api_endpoint*,
you can pass an already-created 'astrapy.db.AstraDB' instance.
namespace (Optional[str]): namespace (aka keyspace) where the
collection is created. Defaults to the database's "default namespace".
|
test_chat_openai
|
"""Test ChatOpenAI wrapper."""
chat = ChatOpenAI(temperature=0.7, base_url=None, organization=None,
openai_proxy=None, timeout=10.0, max_retries=3, http_client=None, n=1,
max_tokens=10, default_headers=None, default_query=None)
message = HumanMessage(content='Hello')
response = chat([message])
assert isinstance(response, BaseMessage)
assert isinstance(response.content, str)
|
@pytest.mark.scheduled
def test_chat_openai() ->None:
"""Test ChatOpenAI wrapper."""
chat = ChatOpenAI(temperature=0.7, base_url=None, organization=None,
openai_proxy=None, timeout=10.0, max_retries=3, http_client=None, n
=1, max_tokens=10, default_headers=None, default_query=None)
message = HumanMessage(content='Hello')
response = chat([message])
assert isinstance(response, BaseMessage)
assert isinstance(response.content, str)
|
Test ChatOpenAI wrapper.
|
test_ddg_search_news_tool
|
keywords = 'Tesla'
tool = DuckDuckGoSearchResults(source='news')
result = tool(keywords)
print(result)
assert len(result.split()) > 20
|
@pytest.mark.skipif(not ddg_installed(), reason=
'requires duckduckgo-search package')
def test_ddg_search_news_tool() ->None:
keywords = 'Tesla'
tool = DuckDuckGoSearchResults(source='news')
result = tool(keywords)
print(result)
assert len(result.split()) > 20
| null |
test_run_success_arxiv_identifier
|
"""Test a query of an arxiv identifier returns the correct answer"""
output = api_client.run('1605.08386v1')
assert 'Heat-bath random walks with Markov bases' in output
|
def test_run_success_arxiv_identifier(api_client: ArxivAPIWrapper) ->None:
"""Test a query of an arxiv identifier returns the correct answer"""
output = api_client.run('1605.08386v1')
assert 'Heat-bath random walks with Markov bases' in output
|
Test a query of an arxiv identifier returns the correct answer
|
embed_documents
|
"""
Make a list of texts into a list of embedding vectors.
"""
return [self.embed_query(text) for text in texts]
|
def embed_documents(self, texts: List[str]) ->List[List[float]]:
"""
Make a list of texts into a list of embedding vectors.
"""
return [self.embed_query(text) for text in texts]
|
Make a list of texts into a list of embedding vectors.
|
similarity_search_with_score
|
"""Return docs most similar to query.
Args:
query: Text to look up documents similar to.
k: Number of Documents to return. Defaults to 4.
Returns:
List of Documents most similar to the query.
"""
embedding = self.embedding.embed_query(query)
script_query = _default_script_query(embedding, filter)
response = self.client_search(self.client, self.index_name, script_query,
size=k)
hits = [hit for hit in response['hits']['hits']]
docs_and_scores = [(Document(page_content=hit['_source']['text'], metadata=
hit['_source']['metadata']), hit['_score']) for hit in hits]
return docs_and_scores
|
def similarity_search_with_score(self, query: str, k: int=4, filter:
Optional[dict]=None, **kwargs: Any) ->List[Tuple[Document, float]]:
"""Return docs most similar to query.
Args:
query: Text to look up documents similar to.
k: Number of Documents to return. Defaults to 4.
Returns:
List of Documents most similar to the query.
"""
embedding = self.embedding.embed_query(query)
script_query = _default_script_query(embedding, filter)
response = self.client_search(self.client, self.index_name,
script_query, size=k)
hits = [hit for hit in response['hits']['hits']]
docs_and_scores = [(Document(page_content=hit['_source']['text'],
metadata=hit['_source']['metadata']), hit['_score']) for hit in hits]
return docs_and_scores
|
Return docs most similar to query.
Args:
query: Text to look up documents similar to.
k: Number of Documents to return. Defaults to 4.
Returns:
List of Documents most similar to the query.
|
warn_once
|
"""Warn once about the dangers of PythonREPL."""
logger.warning('Python REPL can execute arbitrary code. Use with caution.')
|
@functools.lru_cache(maxsize=None)
def warn_once() ->None:
"""Warn once about the dangers of PythonREPL."""
logger.warning('Python REPL can execute arbitrary code. Use with caution.')
|
Warn once about the dangers of PythonREPL.
|
_llm_type
|
return 'NIBittensorLLM'
|
@property
def _llm_type(self) ->str:
return 'NIBittensorLLM'
| null |
from_documents
|
"""Return VectorStore initialized from documents and embeddings."""
texts = [d.page_content for d in documents]
metadatas = [d.metadata for d in documents]
return cls.from_texts(texts, embedding, metadatas=metadatas, **kwargs)
|
@classmethod
def from_documents(cls: Type['MockVectorStore'], documents: List[Document],
embedding: Embeddings, **kwargs: Any) ->'MockVectorStore':
"""Return VectorStore initialized from documents and embeddings."""
texts = [d.page_content for d in documents]
metadatas = [d.metadata for d in documents]
return cls.from_texts(texts, embedding, metadatas=metadatas, **kwargs)
|
Return VectorStore initialized from documents and embeddings.
|
test_chat_prompt_template_with_messages
|
messages: List[Union[BaseMessagePromptTemplate, BaseMessage]
] = create_messages() + [HumanMessage(content='foo')]
chat_prompt_template = ChatPromptTemplate.from_messages(messages)
assert sorted(chat_prompt_template.input_variables) == sorted(['context',
'foo', 'bar'])
assert len(chat_prompt_template.messages) == 5
prompt_value = chat_prompt_template.format_prompt(context='see', foo='this',
bar='magic')
prompt_value_messages = prompt_value.to_messages()
assert prompt_value_messages[-1] == HumanMessage(content='foo')
|
def test_chat_prompt_template_with_messages() ->None:
messages: List[Union[BaseMessagePromptTemplate, BaseMessage]
] = create_messages() + [HumanMessage(content='foo')]
chat_prompt_template = ChatPromptTemplate.from_messages(messages)
assert sorted(chat_prompt_template.input_variables) == sorted([
'context', 'foo', 'bar'])
assert len(chat_prompt_template.messages) == 5
prompt_value = chat_prompt_template.format_prompt(context='see', foo=
'this', bar='magic')
prompt_value_messages = prompt_value.to_messages()
assert prompt_value_messages[-1] == HumanMessage(content='foo')
| null |
test_deprecated_function
|
"""Test deprecated function."""
with warnings.catch_warnings(record=True) as warning_list:
warnings.simplefilter('always')
assert deprecated_function() == 'This is a deprecated function.'
assert len(warning_list) == 1
warning = warning_list[0].message
assert str(warning
) == 'The function `deprecated_function` was deprecated in LangChain 2.0.0 and will be removed in 3.0.0'
doc = deprecated_function.__doc__
assert isinstance(doc, str)
assert doc.startswith('[*Deprecated*] original doc')
|
def test_deprecated_function() ->None:
"""Test deprecated function."""
with warnings.catch_warnings(record=True) as warning_list:
warnings.simplefilter('always')
assert deprecated_function() == 'This is a deprecated function.'
assert len(warning_list) == 1
warning = warning_list[0].message
assert str(warning
) == 'The function `deprecated_function` was deprecated in LangChain 2.0.0 and will be removed in 3.0.0'
doc = deprecated_function.__doc__
assert isinstance(doc, str)
assert doc.startswith('[*Deprecated*] original doc')
|
Test deprecated function.
|
from_texts
|
"""Create an AtlasDB vectorstore from a raw documents.
Args:
texts (List[str]): The list of texts to ingest.
name (str): Name of the project to create.
api_key (str): Your nomic API key,
embedding (Optional[Embeddings]): Embedding function. Defaults to None.
metadatas (Optional[List[dict]]): List of metadatas. Defaults to None.
ids (Optional[List[str]]): Optional list of document IDs. If None,
ids will be auto created
description (str): A description for your project.
is_public (bool): Whether your project is publicly accessible.
True by default.
reset_project_if_exists (bool): Whether to reset this project if it
already exists. Default False.
Generally useful during development and testing.
index_kwargs (Optional[dict]): Dict of kwargs for index creation.
See https://docs.nomic.ai/atlas_api.html
Returns:
AtlasDB: Nomic's neural database and finest rhizomatic instrument
"""
if name is None or api_key is None:
raise ValueError('`name` and `api_key` cannot be None.')
all_index_kwargs = {'name': name + '_index', 'indexed_field': 'text'}
if index_kwargs is not None:
for k, v in index_kwargs.items():
all_index_kwargs[k] = v
atlasDB = cls(name, embedding_function=embedding, api_key=api_key,
description='A description for your project', is_public=is_public,
reset_project_if_exists=reset_project_if_exists)
with atlasDB.project.wait_for_project_lock():
atlasDB.add_texts(texts=texts, metadatas=metadatas, ids=ids)
atlasDB.create_index(**all_index_kwargs)
return atlasDB
|
@classmethod
def from_texts(cls: Type[AtlasDB], texts: List[str], embedding: Optional[
Embeddings]=None, metadatas: Optional[List[dict]]=None, ids: Optional[
List[str]]=None, name: Optional[str]=None, api_key: Optional[str]=None,
description: str='A description for your project', is_public: bool=True,
reset_project_if_exists: bool=False, index_kwargs: Optional[dict]=None,
**kwargs: Any) ->AtlasDB:
"""Create an AtlasDB vectorstore from a raw documents.
Args:
texts (List[str]): The list of texts to ingest.
name (str): Name of the project to create.
api_key (str): Your nomic API key,
embedding (Optional[Embeddings]): Embedding function. Defaults to None.
metadatas (Optional[List[dict]]): List of metadatas. Defaults to None.
ids (Optional[List[str]]): Optional list of document IDs. If None,
ids will be auto created
description (str): A description for your project.
is_public (bool): Whether your project is publicly accessible.
True by default.
reset_project_if_exists (bool): Whether to reset this project if it
already exists. Default False.
Generally useful during development and testing.
index_kwargs (Optional[dict]): Dict of kwargs for index creation.
See https://docs.nomic.ai/atlas_api.html
Returns:
AtlasDB: Nomic's neural database and finest rhizomatic instrument
"""
if name is None or api_key is None:
raise ValueError('`name` and `api_key` cannot be None.')
all_index_kwargs = {'name': name + '_index', 'indexed_field': 'text'}
if index_kwargs is not None:
for k, v in index_kwargs.items():
all_index_kwargs[k] = v
atlasDB = cls(name, embedding_function=embedding, api_key=api_key,
description='A description for your project', is_public=is_public,
reset_project_if_exists=reset_project_if_exists)
with atlasDB.project.wait_for_project_lock():
atlasDB.add_texts(texts=texts, metadatas=metadatas, ids=ids)
atlasDB.create_index(**all_index_kwargs)
return atlasDB
|
Create an AtlasDB vectorstore from a raw documents.
Args:
texts (List[str]): The list of texts to ingest.
name (str): Name of the project to create.
api_key (str): Your nomic API key,
embedding (Optional[Embeddings]): Embedding function. Defaults to None.
metadatas (Optional[List[dict]]): List of metadatas. Defaults to None.
ids (Optional[List[str]]): Optional list of document IDs. If None,
ids will be auto created
description (str): A description for your project.
is_public (bool): Whether your project is publicly accessible.
True by default.
reset_project_if_exists (bool): Whether to reset this project if it
already exists. Default False.
Generally useful during development and testing.
index_kwargs (Optional[dict]): Dict of kwargs for index creation.
See https://docs.nomic.ai/atlas_api.html
Returns:
AtlasDB: Nomic's neural database and finest rhizomatic instrument
|
_llm_type
|
"""Return type of llm."""
return f"aviary-{self.model.replace('/', '-')}"
|
@property
def _llm_type(self) ->str:
"""Return type of llm."""
return f"aviary-{self.model.replace('/', '-')}"
|
Return type of llm.
|
test_pandas_output_parser_col_first_elem
|
expected_output = {'chicken': 1}
actual_output = parser.parse_folder('column:chicken[0]')
assert actual_output == expected_output
|
def test_pandas_output_parser_col_first_elem() ->None:
expected_output = {'chicken': 1}
actual_output = parser.parse_folder('column:chicken[0]')
assert actual_output == expected_output
| null |
texts_metadatas
|
return {'texts': ['Test Document' for _ in range(2)], 'metadatas': [{'key':
'value'} for _ in range(2)]}
|
@pytest.fixture
def texts_metadatas() ->Dict[str, Any]:
return {'texts': ['Test Document' for _ in range(2)], 'metadatas': [{
'key': 'value'} for _ in range(2)]}
| null |
_import_google_scholar
|
from langchain_community.utilities.google_scholar import GoogleScholarAPIWrapper
return GoogleScholarAPIWrapper
|
def _import_google_scholar() ->Any:
from langchain_community.utilities.google_scholar import GoogleScholarAPIWrapper
return GoogleScholarAPIWrapper
| null |
_dependable_mastodon_import
|
try:
import mastodon
except ImportError:
raise ImportError(
'Mastodon.py package not found, please install it with `pip install Mastodon.py`'
)
return mastodon
|
def _dependable_mastodon_import() ->mastodon:
try:
import mastodon
except ImportError:
raise ImportError(
'Mastodon.py package not found, please install it with `pip install Mastodon.py`'
)
return mastodon
| null |
_import_requests_tool_BaseRequestsTool
|
from langchain_community.tools.requests.tool import BaseRequestsTool
return BaseRequestsTool
|
def _import_requests_tool_BaseRequestsTool() ->Any:
from langchain_community.tools.requests.tool import BaseRequestsTool
return BaseRequestsTool
| null |
on_agent_action
|
"""Run on agent action."""
|
def on_agent_action(self, action: AgentAction, *, run_id: UUID,
parent_run_id: Optional[UUID]=None, **kwargs: Any) ->Any:
"""Run on agent action."""
|
Run on agent action.
|
validate_environment
|
"""Validate that api key and python package exists in environment."""
if values['n'] < 1:
raise ValueError('n must be at least 1.')
if values['n'] > 1 and values['streaming']:
raise ValueError('n must be 1 when streaming.')
values['openai_api_key'] = get_from_dict_or_env(values, 'openai_api_key',
'OPENAI_API_KEY')
values['openai_organization'] = values['openai_organization'] or os.getenv(
'OPENAI_ORG_ID') or os.getenv('OPENAI_ORGANIZATION')
values['openai_api_base'] = values['openai_api_base'] or os.getenv(
'OPENAI_API_BASE')
values['openai_proxy'] = get_from_dict_or_env(values, 'openai_proxy',
'OPENAI_PROXY', default='')
try:
import openai
except ImportError:
raise ImportError(
'Could not import openai python package. Please install it with `pip install openai`.'
)
if is_openai_v1():
client_params = {'api_key': values['openai_api_key'], 'organization':
values['openai_organization'], 'base_url': values['openai_api_base'
], 'timeout': values['request_timeout'], 'max_retries': values[
'max_retries'], 'default_headers': values['default_headers'],
'default_query': values['default_query'], 'http_client': values[
'http_client']}
if not values.get('client'):
values['client'] = openai.OpenAI(**client_params).chat.completions
if not values.get('async_client'):
values['async_client'] = openai.AsyncOpenAI(**client_params
).chat.completions
elif not values.get('client'):
values['client'] = openai.ChatCompletion
else:
pass
return values
|
@root_validator()
def validate_environment(cls, values: Dict) ->Dict:
"""Validate that api key and python package exists in environment."""
if values['n'] < 1:
raise ValueError('n must be at least 1.')
if values['n'] > 1 and values['streaming']:
raise ValueError('n must be 1 when streaming.')
values['openai_api_key'] = get_from_dict_or_env(values,
'openai_api_key', 'OPENAI_API_KEY')
values['openai_organization'] = values['openai_organization'] or os.getenv(
'OPENAI_ORG_ID') or os.getenv('OPENAI_ORGANIZATION')
values['openai_api_base'] = values['openai_api_base'] or os.getenv(
'OPENAI_API_BASE')
values['openai_proxy'] = get_from_dict_or_env(values, 'openai_proxy',
'OPENAI_PROXY', default='')
try:
import openai
except ImportError:
raise ImportError(
'Could not import openai python package. Please install it with `pip install openai`.'
)
if is_openai_v1():
client_params = {'api_key': values['openai_api_key'],
'organization': values['openai_organization'], 'base_url':
values['openai_api_base'], 'timeout': values['request_timeout'],
'max_retries': values['max_retries'], 'default_headers': values
['default_headers'], 'default_query': values['default_query'],
'http_client': values['http_client']}
if not values.get('client'):
values['client'] = openai.OpenAI(**client_params).chat.completions
if not values.get('async_client'):
values['async_client'] = openai.AsyncOpenAI(**client_params
).chat.completions
elif not values.get('client'):
values['client'] = openai.ChatCompletion
else:
pass
return values
|
Validate that api key and python package exists in environment.
|
delete_documents_with_document_id
|
"""Delete documents based on their IDs.
Args:
id_list: List of document IDs.
Returns:
Whether the deletion was successful or not.
"""
if id_list is None or len(id_list) == 0:
return True
from alibabacloud_ha3engine_vector import models
delete_doc_list = []
for doc_id in id_list:
delete_doc_list.append({'fields': {self.config.field_name_mapping['id']:
doc_id}, 'cmd': 'delete'})
delete_request = models.PushDocumentsRequest(self.options_headers,
delete_doc_list)
try:
delete_response = self.ha3_engine_client.push_documents(self.config.
opt_table_name, self.config.field_name_mapping['id'], delete_request)
json_response = json.loads(delete_response.body)
return json_response['status'] == 'OK'
except Exception as e:
logger.error(
f'delete doc from :{self.config.endpoint} instance_id:{self.config.instance_id} failed.'
, e)
raise e
|
def delete_documents_with_document_id(self, id_list: List[str]) ->bool:
    """Delete documents based on their IDs.
    Args:
        id_list: List of document IDs.
    Returns:
        Whether the deletion was successful or not.
    """
    # Nothing to delete is treated as a successful (no-op) deletion.
    if not id_list:
        return True
    from alibabacloud_ha3engine_vector import models
    id_field = self.config.field_name_mapping['id']
    delete_doc_list = [{'fields': {id_field: doc_id}, 'cmd': 'delete'} for
        doc_id in id_list]
    delete_request = models.PushDocumentsRequest(self.options_headers,
        delete_doc_list)
    try:
        response = self.ha3_engine_client.push_documents(self.config.
            opt_table_name, id_field, delete_request)
        return json.loads(response.body)['status'] == 'OK'
    except Exception as e:
        logger.error(
            f'delete doc from :{self.config.endpoint} instance_id:{self.config.instance_id} failed.'
            , e)
        raise e
|
Delete documents based on their IDs.
Args:
id_list: List of document IDs.
Returns:
Whether the deletion was successful or not.
|
test_simple_action_strlist_w_emb
|
str1 = 'test1'
str2 = 'test2'
str3 = 'test3'
encoded_str1 = base.stringify_embedding(list(encoded_keyword + str1))
encoded_str2 = base.stringify_embedding(list(encoded_keyword + str2))
encoded_str3 = base.stringify_embedding(list(encoded_keyword + str3))
expected = [{'a_namespace': encoded_str1}, {'a_namespace': encoded_str2}, {
'a_namespace': encoded_str3}]
assert base.embed(base.Embed([str1, str2, str3]), MockEncoder(), 'a_namespace'
) == expected
expected_embed_and_keep = [{'a_namespace': str1 + ' ' + encoded_str1}, {
'a_namespace': str2 + ' ' + encoded_str2}, {'a_namespace': str3 + ' ' +
encoded_str3}]
assert base.embed(base.EmbedAndKeep([str1, str2, str3]), MockEncoder(),
'a_namespace') == expected_embed_and_keep
|
@pytest.mark.requires('vowpal_wabbit_next')
def test_simple_action_strlist_w_emb() ->None:
    """A list of strings embeds to one namespaced encoding per string."""
    raw = ['test1', 'test2', 'test3']
    encodings = [base.stringify_embedding(list(encoded_keyword + s)) for s in
        raw]
    expected = [{'a_namespace': enc} for enc in encodings]
    assert base.embed(base.Embed(raw), MockEncoder(), 'a_namespace'
        ) == expected
    expected_embed_and_keep = [{'a_namespace': s + ' ' + enc} for s, enc in
        zip(raw, encodings)]
    assert base.embed(base.EmbedAndKeep(raw), MockEncoder(), 'a_namespace'
        ) == expected_embed_and_keep
| null |
lazy_load
|
"""Lazily load documents."""
if self.web_path:
blob = Blob.from_data(open(self.file_path, 'rb').read(), path=self.web_path
)
else:
blob = Blob.from_path(self.file_path)
yield from self.parser.parse_folder(blob)
|
def lazy_load(self) ->Iterator[Document]:
    """Lazily load documents.

    Yields:
        Documents parsed from ``self.file_path`` (tagged with
        ``self.web_path`` as the source path when one is set).
    """
    if self.web_path:
        # Close the file handle deterministically; the original relied on a
        # bare open(...).read(), leaking the handle until GC.
        with open(self.file_path, 'rb') as f:
            blob = Blob.from_data(f.read(), path=self.web_path)
    else:
        blob = Blob.from_path(self.file_path)
    yield from self.parser.parse_folder(blob)
|
Lazily load documents.
|
get_tools
|
"""Get the tools in the toolkit."""
return [O365SearchEvents(), O365CreateDraftMessage(), O365SearchEmails(),
O365SendEvent(), O365SendMessage()]
|
def get_tools(self) ->List[BaseTool]:
    """Get the tools in the toolkit."""
    tools: List[BaseTool] = [O365SearchEvents(), O365CreateDraftMessage(),
        O365SearchEmails(), O365SendEvent(), O365SendMessage()]
    return tools
|
Get the tools in the toolkit.
|
test_llamacpp_streaming_callback
|
"""Test that streaming correctly invokes on_llm_new_token callback."""
MAX_TOKENS = 5
OFF_BY_ONE = 1
callback_handler = FakeCallbackHandler()
llm = LlamaCpp(model_path=get_model(), callbacks=[callback_handler],
verbose=True, max_tokens=MAX_TOKENS)
llm("Q: Can you count to 10? A:'1, ")
assert callback_handler.llm_streams <= MAX_TOKENS + OFF_BY_ONE
|
def test_llamacpp_streaming_callback() ->None:
    """Test that streaming correctly invokes on_llm_new_token callback."""
    max_tokens, off_by_one = 5, 1
    handler = FakeCallbackHandler()
    llm = LlamaCpp(model_path=get_model(), callbacks=[handler], verbose=
        True, max_tokens=max_tokens)
    llm("Q: Can you count to 10? A:'1, ")
    assert handler.llm_streams <= max_tokens + off_by_one
|
Test that streaming correctly invokes on_llm_new_token callback.
|
__add__
|
"""Override the + operator to allow for combining prompt templates."""
if isinstance(other, PromptTemplate):
if self.template_format != 'f-string':
raise ValueError(
'Adding prompt templates only supported for f-strings.')
if other.template_format != 'f-string':
raise ValueError(
'Adding prompt templates only supported for f-strings.')
input_variables = list(set(self.input_variables) | set(other.
input_variables))
template = self.template + other.template
validate_template = self.validate_template and other.validate_template
partial_variables = {k: v for k, v in self.partial_variables.items()}
for k, v in other.partial_variables.items():
if k in partial_variables:
raise ValueError('Cannot have same variable partialed twice.')
else:
partial_variables[k] = v
return PromptTemplate(template=template, input_variables=
input_variables, partial_variables=partial_variables,
template_format='f-string', validate_template=validate_template)
elif isinstance(other, str):
prompt = PromptTemplate.from_template(other)
return self + prompt
else:
raise NotImplementedError(f'Unsupported operand type for +: {type(other)}')
|
def __add__(self, other: Any) ->PromptTemplate:
    """Override the + operator to allow for combining prompt templates."""
    if isinstance(other, PromptTemplate):
        # Concatenation is only defined when BOTH operands are f-string
        # templates; other formats cannot be merged by appending text.
        if self.template_format != 'f-string':
            raise ValueError(
                'Adding prompt templates only supported for f-strings.')
        if other.template_format != 'f-string':
            raise ValueError(
                'Adding prompt templates only supported for f-strings.')
        # Union of both templates' input variables (duplicates collapse).
        input_variables = list(set(self.input_variables) | set(other.
            input_variables))
        template = self.template + other.template
        # Validate the combined template only if both operands requested it.
        validate_template = self.validate_template and other.validate_template
        partial_variables = {k: v for k, v in self.partial_variables.items()}
        for k, v in other.partial_variables.items():
            # The same variable may not be partialed on both sides.
            if k in partial_variables:
                raise ValueError('Cannot have same variable partialed twice.')
            else:
                partial_variables[k] = v
        return PromptTemplate(template=template, input_variables=
            input_variables, partial_variables=partial_variables,
            template_format='f-string', validate_template=validate_template)
    elif isinstance(other, str):
        # Promote the raw string to a template, then retry the addition.
        prompt = PromptTemplate.from_template(other)
        return self + prompt
    else:
        raise NotImplementedError(
            f'Unsupported operand type for +: {type(other)}')
|
Override the + operator to allow for combining prompt templates.
|
test_drop
|
"""
Destroy the vector store
"""
self.vectorstore.drop()
|
def test_drop(self) ->None:
    """Tear down the vector store backing this test."""
    self.vectorstore.drop()
|
Destroy the vector store
|
_llm_type
|
return 'vertexai'
|
@property
def _llm_type(self) ->str:
    """Identifier of this LLM type."""
    return 'vertexai'
| null |
test_prompt_invalid_template_format
|
"""Test initializing a prompt with invalid template format."""
template = 'This is a {foo} test.'
input_variables = ['foo']
with pytest.raises(ValueError):
PromptTemplate(input_variables=input_variables, template=template,
template_format='bar')
|
def test_prompt_invalid_template_format() ->None:
    """An unknown template_format must be rejected at construction time."""
    with pytest.raises(ValueError):
        PromptTemplate(input_variables=['foo'], template=
            'This is a {foo} test.', template_format='bar')
|
Test initializing a prompt with invalid template format.
|
buffer
|
"""Access chat memory messages."""
return self.chat_memory.messages
|
@property
def buffer(self) ->List[BaseMessage]:
    """Expose the messages currently stored in chat memory."""
    messages = self.chat_memory.messages
    return messages
|
Access chat memory messages.
|
get_table_names
|
"""Get names of tables available."""
warnings.warn(
'This method is deprecated - please use `get_usable_table_names`.')
return self.get_usable_table_names()
|
def get_table_names(self) ->Iterable[str]:
    """Deprecated alias for `get_usable_table_names`."""
    warnings.warn(
        'This method is deprecated - please use `get_usable_table_names`.')
    names = self.get_usable_table_names()
    return names
|
Get names of tables available.
|
test_faiss_invalid_normalize_fn
|
"""Test the similarity search with normalized similarities."""
texts = ['foo', 'bar', 'baz']
docsearch = FAISS.from_texts(texts, FakeEmbeddings(), relevance_score_fn=lambda
_: 2.0)
with pytest.warns(Warning, match='scores must be between'):
docsearch.similarity_search_with_relevance_scores('foo', k=1)
|
@pytest.mark.requires('faiss')
def test_faiss_invalid_normalize_fn() ->None:
    """Test the similarity search with normalized similarities."""
    corpus = ['foo', 'bar', 'baz']
    store = FAISS.from_texts(corpus, FakeEmbeddings(), relevance_score_fn=
        lambda _: 2.0)
    with pytest.warns(Warning, match='scores must be between'):
        store.similarity_search_with_relevance_scores('foo', k=1)
|
Test the similarity search with normalized similarities.
|
_import_requests_tool_RequestsPutTool
|
from langchain_community.tools.requests.tool import RequestsPutTool
return RequestsPutTool
|
def _import_requests_tool_RequestsPutTool() ->Any:
    """Lazily import and return the ``RequestsPutTool`` class."""
    from langchain_community.tools.requests.tool import RequestsPutTool
    return RequestsPutTool
| null |
headers
|
return {'Accept': 'application/vnd.github+json', 'Authorization':
f'Bearer {self.access_token}'}
|
@property
def headers(self) ->Dict[str, str]:
    """HTTP headers for authenticated GitHub REST API requests."""
    token = self.access_token
    return {'Accept': 'application/vnd.github+json', 'Authorization':
        f'Bearer {token}'}
| null |
__del__
|
if hasattr(self, 'temp_file'):
self.temp_file.close()
|
def __del__(self) ->None:
    # temp_file may never have been created (e.g. __init__ failed early),
    # so guard with hasattr before closing.
    if hasattr(self, 'temp_file'):
        self.temp_file.close()
| null |
__init__
|
"""Initialize the loader.
Args:
file_path: A file, url or s3 path for input file
textract_features: Features to be used for extraction, each feature
should be passed as a str that conforms to the enum
`Textract_Features`, see `amazon-textract-caller` pkg
client: boto3 textract client (Optional)
credentials_profile_name: AWS profile name, if not default (Optional)
region_name: AWS region, eg us-east-1 (Optional)
endpoint_url: endpoint url for the textract service (Optional)
"""
super().__init__(file_path, headers=headers)
try:
import textractcaller as tc
except ImportError:
raise ModuleNotFoundError(
'Could not import amazon-textract-caller python package. Please install it with `pip install amazon-textract-caller`.'
)
if textract_features:
features = [tc.Textract_Features[x] for x in textract_features]
else:
features = []
if credentials_profile_name or region_name or endpoint_url:
try:
import boto3
if credentials_profile_name is not None:
session = boto3.Session(profile_name=credentials_profile_name)
else:
session = boto3.Session()
client_params = {}
if region_name:
client_params['region_name'] = region_name
if endpoint_url:
client_params['endpoint_url'] = endpoint_url
client = session.client('textract', **client_params)
except ImportError:
raise ModuleNotFoundError(
'Could not import boto3 python package. Please install it with `pip install boto3`.'
)
except Exception as e:
raise ValueError(
'Could not load credentials to authenticate with AWS client. Please check that credentials in the specified profile name are valid.'
) from e
self.parser = AmazonTextractPDFParser(textract_features=features, client=client
)
|
def __init__(self, file_path: str, textract_features: Optional[Sequence[str
    ]]=None, client: Optional[Any]=None, credentials_profile_name: Optional
    [str]=None, region_name: Optional[str]=None, endpoint_url: Optional[str
    ]=None, headers: Optional[Dict]=None) ->None:
    """Initialize the loader.
    Args:
        file_path: A file, url or s3 path for input file
        textract_features: Features to be used for extraction, each feature
                           should be passed as a str that conforms to the enum
                           `Textract_Features`, see `amazon-textract-caller` pkg
        client: boto3 textract client (Optional)
        credentials_profile_name: AWS profile name, if not default (Optional)
        region_name: AWS region, eg us-east-1 (Optional)
        endpoint_url: endpoint url for the textract service (Optional)

    Raises:
        ModuleNotFoundError: if amazon-textract-caller or boto3 are missing.
        ValueError: if AWS credentials cannot be loaded for the profile.
    """
    super().__init__(file_path, headers=headers)
    try:
        import textractcaller as tc
    except ImportError:
        raise ModuleNotFoundError(
            'Could not import amazon-textract-caller python package. Please install it with `pip install amazon-textract-caller`.'
            )
    # Map feature names (strings) onto Textract_Features enum members.
    if textract_features:
        features = [tc.Textract_Features[x] for x in textract_features]
    else:
        features = []
    # Only build a boto3 textract client when an AWS-specific option is
    # given; otherwise the caller-provided `client` (possibly None) is used.
    if credentials_profile_name or region_name or endpoint_url:
        try:
            import boto3
            if credentials_profile_name is not None:
                session = boto3.Session(profile_name=credentials_profile_name)
            else:
                # Default session: credentials resolved from env/config.
                session = boto3.Session()
            client_params = {}
            if region_name:
                client_params['region_name'] = region_name
            if endpoint_url:
                client_params['endpoint_url'] = endpoint_url
            client = session.client('textract', **client_params)
        except ImportError:
            raise ModuleNotFoundError(
                'Could not import boto3 python package. Please install it with `pip install boto3`.'
                )
        except Exception as e:
            raise ValueError(
                'Could not load credentials to authenticate with AWS client. Please check that credentials in the specified profile name are valid.'
                ) from e
    self.parser = AmazonTextractPDFParser(textract_features=features,
        client=client)
|
Initialize the loader.
Args:
file_path: A file, url or s3 path for input file
textract_features: Features to be used for extraction, each feature
should be passed as a str that conforms to the enum
`Textract_Features`, see `amazon-textract-caller` pkg
client: boto3 textract client (Optional)
credentials_profile_name: AWS profile name, if not default (Optional)
region_name: AWS region, eg us-east-1 (Optional)
endpoint_url: endpoint url for the textract service (Optional)
|
lazy_load
|
"""
Lazy load the chat sessions from the iMessage chat.db
and yield them in the required format.
Yields:
ChatSession: Loaded chat session.
"""
import sqlite3
try:
conn = sqlite3.connect(self.db_path)
except sqlite3.OperationalError as e:
raise ValueError(
f"""Could not open iMessage DB file {self.db_path}.
Make sure your terminal emulator has disk access to this file.
You can either copy the DB file to an accessible location or grant full disk access for your terminal emulator. You can grant full disk access for your terminal emulator in System Settings > Security and Privacy > Full Disk Access."""
) from e
cursor = conn.cursor()
query = """SELECT name FROM sqlite_master
WHERE type='table' AND name='chat_handle_join';"""
cursor.execute(query)
is_chat_handle_join_exists = cursor.fetchone()
query = """SELECT chat_id
FROM message
JOIN chat_message_join ON message.ROWID = chat_message_join.message_id
GROUP BY chat_id
ORDER BY MAX(date) DESC;"""
cursor.execute(query)
chat_ids = [row[0] for row in cursor.fetchall()]
for chat_id in chat_ids:
yield self._load_single_chat_session(cursor, is_chat_handle_join_exists,
chat_id)
conn.close()
|
def lazy_load(self) ->Iterator[ChatSession]:
    """
    Lazy load the chat sessions from the iMessage chat.db
    and yield them in the required format.
    Yields:
        ChatSession: Loaded chat session.
    """
    import sqlite3
    try:
        conn = sqlite3.connect(self.db_path)
    except sqlite3.OperationalError as e:
        raise ValueError(
            f"""Could not open iMessage DB file {self.db_path}.
 Make sure your terminal emulator has disk access to this file.
   You can either copy the DB file to an accessible location or grant full disk access for your terminal emulator. You can grant full disk access for your terminal emulator in System Settings > Security and Privacy > Full Disk Access."""
            ) from e
    cursor = conn.cursor()
    # Older chat.db schemas lack the chat_handle_join table; record whether
    # it exists so per-chat loading can adapt.
    query = """SELECT name FROM sqlite_master
        WHERE type='table' AND name='chat_handle_join';"""
    cursor.execute(query)
    is_chat_handle_join_exists = cursor.fetchone()
    # Chats ordered by most recent message first.
    query = """SELECT chat_id
    FROM message
    JOIN chat_message_join ON message.ROWID = chat_message_join.message_id
    GROUP BY chat_id
    ORDER BY MAX(date) DESC;"""
    cursor.execute(query)
    chat_ids = [row[0] for row in cursor.fetchall()]
    for chat_id in chat_ids:
        yield self._load_single_chat_session(cursor,
            is_chat_handle_join_exists, chat_id)
    # NOTE(review): conn is only closed after full iteration; an abandoned
    # generator leaves the connection to GC — confirm acceptable.
    conn.close()
|
Lazy load the chat sessions from the iMessage chat.db
and yield them in the required format.
Yields:
ChatSession: Loaded chat session.
|
_call
|
_run_manager = run_manager or CallbackManagerForChainRun.get_noop_manager()
_input = inputs[self.input_key]
color_mapping = get_color_mapping([str(i) for i in range(len(self.chains))])
for i, chain in enumerate(self.chains):
_input = chain.run(_input, callbacks=_run_manager.get_child(
f'step_{i + 1}'))
if self.strip_outputs:
_input = _input.strip()
_run_manager.on_text(_input, color=color_mapping[str(i)], end='\n',
verbose=self.verbose)
return {self.output_key: _input}
|
def _call(self, inputs: Dict[str, str], run_manager: Optional[
    CallbackManagerForChainRun]=None) ->Dict[str, str]:
    """Run the chains sequentially, feeding each output into the next."""
    _run_manager = run_manager or CallbackManagerForChainRun.get_noop_manager()
    _input = inputs[self.input_key]
    # One distinct color per sub-chain for readable verbose logging.
    color_mapping = get_color_mapping([str(i) for i in range(len(self.chains))]
        )
    for i, chain in enumerate(self.chains):
        _input = chain.run(_input, callbacks=_run_manager.get_child(
            f'step_{i + 1}'))
        if self.strip_outputs:
            _input = _input.strip()
        _run_manager.on_text(_input, color=color_mapping[str(i)], end='\n',
            verbose=self.verbose)
    return {self.output_key: _input}
| null |
_get_verbosity
|
from langchain_core.globals import get_verbose
return get_verbose()
|
def _get_verbosity() ->bool:
    """Return the current LangChain global verbosity flag."""
    from langchain_core.globals import get_verbose
    verbose = get_verbose()
    return verbose
| null |
_get_relevant_documents
|
return self.retriever.get_relevant_documents(query, run_manager=run_manager
.get_child(), **kwargs)
|
def _get_relevant_documents(self, query: str, *, run_manager:
    CallbackManagerForRetrieverRun, **kwargs: Any) ->List[Document]:
    """Delegate retrieval to the wrapped retriever with a child callback."""
    child_manager = run_manager.get_child()
    return self.retriever.get_relevant_documents(query, run_manager=
        child_manager, **kwargs)
| null |
ignore_agent
|
"""Whether to ignore agent callbacks."""
return self.ignore_agent_
|
@property
def ignore_agent(self) ->bool:
    """Whether to ignore agent callbacks."""
    flag = self.ignore_agent_
    return flag
|
Whether to ignore agent callbacks.
|
_import_searchapi
|
from langchain_community.utilities.searchapi import SearchApiAPIWrapper
return SearchApiAPIWrapper
|
def _import_searchapi() ->Any:
    """Lazily import the SearchApi wrapper to avoid a hard dependency."""
    from langchain_community.utilities.searchapi import SearchApiAPIWrapper
    return SearchApiAPIWrapper
| null |
_extract_images_from_page
|
"""Extract images from page and get the text with RapidOCR."""
if not self.extract_images or '/XObject' not in page['/Resources'].keys():
return ''
xObject = page['/Resources']['/XObject'].get_object()
images = []
for obj in xObject:
if xObject[obj]['/Subtype'] == '/Image':
if xObject[obj]['/Filter'][1:] in _PDF_FILTER_WITHOUT_LOSS:
height, width = xObject[obj]['/Height'], xObject[obj]['/Width']
images.append(np.frombuffer(xObject[obj].get_data(), dtype=np.
uint8).reshape(height, width, -1))
elif xObject[obj]['/Filter'][1:] in _PDF_FILTER_WITH_LOSS:
images.append(xObject[obj].get_data())
else:
warnings.warn('Unknown PDF Filter!')
return extract_from_images_with_rapidocr(images)
|
def _extract_images_from_page(self, page: pypdf._page.PageObject) ->str:
    """Extract images from page and get the text with RapidOCR."""
    # Nothing to do if image extraction is disabled or the page embeds no
    # XObjects at all.
    if not self.extract_images or '/XObject' not in page['/Resources'].keys():
        return ''
    xObject = page['/Resources']['/XObject'].get_object()
    images = []
    for obj in xObject:
        if xObject[obj]['/Subtype'] == '/Image':
            # Lossless filters keep raw pixel bytes: rebuild an HxWxC array.
            # Lossy filters (e.g. JPEG) are passed through as encoded bytes
            # for the OCR helper to decode itself.
            if xObject[obj]['/Filter'][1:] in _PDF_FILTER_WITHOUT_LOSS:
                height, width = xObject[obj]['/Height'], xObject[obj]['/Width']
                images.append(np.frombuffer(xObject[obj].get_data(), dtype=
                    np.uint8).reshape(height, width, -1))
            elif xObject[obj]['/Filter'][1:] in _PDF_FILTER_WITH_LOSS:
                images.append(xObject[obj].get_data())
            else:
                warnings.warn('Unknown PDF Filter!')
    return extract_from_images_with_rapidocr(images)
|
Extract images from page and get the text with RapidOCR.
|
test__convert_message_to_dict_system
|
message = SystemMessage(content='foo')
result = _convert_message_to_dict(message)
expected_output = {'role': 'system', 'content': 'foo'}
assert result == expected_output
|
def test__convert_message_to_dict_system() ->None:
    """A SystemMessage converts to a plain role/content dict."""
    result = _convert_message_to_dict(SystemMessage(content='foo'))
    assert result == {'role': 'system', 'content': 'foo'}
| null |
parse_iter
|
"""Parse the output of an LLM call."""
raise NotImplementedError
|
def parse_iter(self, text: str) ->Iterator[re.Match]:
    """Parse the output of an LLM call.

    Abstract: subclasses must provide the implementation.
    """
    raise NotImplementedError
|
Parse the output of an LLM call.
|
on_chain_error
|
"""Run when chain errors."""
self.step += 1
self.errors += 1
|
def on_chain_error(self, error: BaseException, **kwargs: Any) ->None:
    """Run when chain errors: advance the step counter and count the error."""
    self.step = self.step + 1
    self.errors = self.errors + 1
|
Run when chain errors.
|
test_edenai_call
|
"""Test simple call to edenai's speech to text endpoint."""
speech2text = EdenAiSpeechToTextTool(providers=['amazon'])
output = speech2text(
'https://audio-samples.github.io/samples/mp3/blizzard_unconditional/sample-0.mp3'
)
assert speech2text.name == 'edenai_speech_to_text'
assert speech2text.feature == 'audio'
assert speech2text.subfeature == 'speech_to_text_async'
assert isinstance(output, str)
|
def test_edenai_call() ->None:
    """Test simple call to edenai's speech to text endpoint."""
    tool = EdenAiSpeechToTextTool(providers=['amazon'])
    result = tool(
        'https://audio-samples.github.io/samples/mp3/blizzard_unconditional/sample-0.mp3'
        )
    assert tool.name == 'edenai_speech_to_text'
    assert tool.feature == 'audio'
    assert tool.subfeature == 'speech_to_text_async'
    assert isinstance(result, str)
|
Test simple call to edenai's speech to text endpoint.
|
test_from_documents
|
input_docs = [Document(page_content='I have a pen.'), Document(page_content
='Do you have a pen?'), Document(page_content='I have a bag.')]
tfidf_retriever = TFIDFRetriever.from_documents(documents=input_docs)
assert len(tfidf_retriever.docs) == 3
assert tfidf_retriever.tfidf_array.toarray().shape == (3, 5)
|
@pytest.mark.requires('sklearn')
def test_from_documents() ->None:
    """TFIDFRetriever.from_documents indexes every input document."""
    texts = ['I have a pen.', 'Do you have a pen?', 'I have a bag.']
    docs = [Document(page_content=text) for text in texts]
    retriever = TFIDFRetriever.from_documents(documents=docs)
    assert len(retriever.docs) == 3
    assert retriever.tfidf_array.toarray().shape == (3, 5)
| null |
on_chat_model_start
|
if self.__has_valid_config is False:
return
try:
user_id = _get_user_id(metadata)
user_props = _get_user_props(metadata)
params = kwargs.get('invocation_params', {})
params.update(serialized.get('kwargs', {}))
name = params.get('model') or params.get('model_name') or params.get(
'model_id')
if not name and 'anthropic' in params.get('_type'):
name = 'claude-2'
extra = {param: params.get(param) for param in PARAMS_TO_CAPTURE if
params.get(param) is not None}
input = _parse_lc_messages(messages[0])
self.__track_event('llm', 'start', user_id=user_id, run_id=str(run_id),
parent_run_id=str(parent_run_id) if parent_run_id else None, name=
name, input=input, tags=tags, extra=extra, metadata=metadata,
user_props=user_props, app_id=self.__app_id)
except Exception as e:
logger.error(f'[LLMonitor] An error occurred in on_chat_model_start: {e}')
|
def on_chat_model_start(self, serialized: Dict[str, Any], messages: List[
    List[BaseMessage]], *, run_id: UUID, parent_run_id: Union[UUID, None]=
    None, tags: Union[List[str], None]=None, metadata: Union[Dict[str, Any],
    None]=None, **kwargs: Any) ->Any:
    """Report the start of a chat-model run to LLMonitor.

    Gathers the model name and capture-worthy invocation params, parses the
    first message list, and emits an 'llm start' tracking event. Any failure
    is logged and swallowed so tracking never breaks the actual run.
    """
    if self.__has_valid_config is False:
        return
    try:
        user_id = _get_user_id(metadata)
        user_props = _get_user_props(metadata)
        params = kwargs.get('invocation_params', {})
        params.update(serialized.get('kwargs', {}))
        name = params.get('model') or params.get('model_name') or params.get(
            'model_id')
        # Bug fix: params.get('_type') may be None, and `'anthropic' in None`
        # raised TypeError (silently logged by the except below), dropping
        # the event whenever '_type' was missing and no model name was found.
        if not name and 'anthropic' in (params.get('_type') or ''):
            name = 'claude-2'
        extra = {param: params.get(param) for param in PARAMS_TO_CAPTURE if
            params.get(param) is not None}
        input = _parse_lc_messages(messages[0])
        self.__track_event('llm', 'start', user_id=user_id, run_id=str(
            run_id), parent_run_id=str(parent_run_id) if parent_run_id else
            None, name=name, input=input, tags=tags, extra=extra, metadata=
            metadata, user_props=user_props, app_id=self.__app_id)
    except Exception as e:
        logger.error(
            f'[LLMonitor] An error occurred in on_chat_model_start: {e}')
| null |
_get_mock_page_restrictions
|
return {'read': {'operation': 'read', 'restrictions': {'user': {'results':
[], 'start': 0, 'limit': 200, 'size': 0}, 'group': {'results': [],
'start': 0, 'limit': 200, 'size': 0}}, '_expandable': {'content':
f'/rest/api/content/{page_id}'}, '_links': {'self':
f'{self.CONFLUENCE_URL}/rest/api/content/{page_id}/restriction/byOperation/read'
}}, 'update': {'operation': 'update', 'restrictions': {'user': {
'results': [], 'start': 0, 'limit': 200, 'size': 0}, 'group': {
'results': [], 'start': 0, 'limit': 200, 'size': 0}}, '_expandable': {
'content': f'/rest/api/content/{page_id}'}, '_links': {'self':
f'{self.CONFLUENCE_URL}/rest/api/content/{page_id}/restriction/byOperation/update'
}}, '_links': {'self':
f'{self.CONFLUENCE_URL}/rest/api/content/{page_id}/restriction/byOperation'
, 'base': self.CONFLUENCE_URL, 'context': '/wiki'}}
|
def _get_mock_page_restrictions(self, page_id: str) ->Dict:
    """Build a mock Confluence page-restrictions payload for *page_id*."""
    empty_results = {'results': [], 'start': 0, 'limit': 200, 'size': 0}

    def operation_entry(op: str) ->Dict:
        # One 'read'/'update' entry with empty user and group restrictions.
        return {'operation': op, 'restrictions': {'user': dict(
            empty_results), 'group': dict(empty_results)}, '_expandable':
            {'content': f'/rest/api/content/{page_id}'}, '_links': {'self':
            f'{self.CONFLUENCE_URL}/rest/api/content/{page_id}/restriction/byOperation/{op}'
            }}
    return {'read': operation_entry('read'), 'update': operation_entry(
        'update'), '_links': {'self':
        f'{self.CONFLUENCE_URL}/rest/api/content/{page_id}/restriction/byOperation'
        , 'base': self.CONFLUENCE_URL, 'context': '/wiki'}}
| null |
__init__
|
"""
Initialize the object for file processing with Azure Document Intelligence
(formerly Form Recognizer).
This constructor initializes a DocumentIntelligenceParser object to be used
for parsing files using the Azure Document Intelligence API. The load method
generates a Document node including metadata (source blob and page number)
for each page.
Parameters:
-----------
file_path : str
The path to the file that needs to be parsed.
client: Any
A DocumentAnalysisClient to perform the analysis of the blob
model : str
The model name or ID to be used for form recognition in Azure.
Examples:
---------
>>> obj = DocumentIntelligenceLoader(
... file_path="path/to/file",
... client=client,
... model="prebuilt-document"
... )
"""
self.parser = DocumentIntelligenceParser(client=client, model=model)
super().__init__(file_path, headers=headers)
|
def __init__(self, file_path: str, client: Any, model: str=
    'prebuilt-document', headers: Optional[Dict]=None) ->None:
    """
    Initialize the object for file processing with Azure Document Intelligence
    (formerly Form Recognizer).
    This constructor initializes a DocumentIntelligenceParser object to be used
    for parsing files using the Azure Document Intelligence API. The load method
    generates a Document node including metadata (source blob and page number)
    for each page.
    Parameters:
    -----------
    file_path : str
        The path to the file that needs to be parsed.
    client: Any
        A DocumentAnalysisClient to perform the analysis of the blob
    model : str
        The model name or ID to be used for form recognition in Azure.
    Examples:
    ---------
    >>> obj = DocumentIntelligenceLoader(
    ...     file_path="path/to/file",
    ...     client=client,
    ...     model="prebuilt-document"
    ... )
    """
    # The parser does the actual Azure analysis; the base loader only keeps
    # track of the file path and request headers.
    self.parser = DocumentIntelligenceParser(client=client, model=model)
    super().__init__(file_path, headers=headers)
|
Initialize the object for file processing with Azure Document Intelligence
(formerly Form Recognizer).
This constructor initializes a DocumentIntelligenceParser object to be used
for parsing files using the Azure Document Intelligence API. The load method
generates a Document node including metadata (source blob and page number)
for each page.
Parameters:
-----------
file_path : str
The path to the file that needs to be parsed.
client: Any
A DocumentAnalysisClient to perform the analysis of the blob
model : str
The model name or ID to be used for form recognition in Azure.
Examples:
---------
>>> obj = DocumentIntelligenceLoader(
... file_path="path/to/file",
... client=client,
... model="prebuilt-document"
... )
|
__init__
|
raise ValueError('Deprecated,TinyAsyncGradientEmbeddingClient was removed.')
|
def __init__(self, *args, **kwargs) ->None:
    """Always raise: this client has been removed."""
    raise ValueError('Deprecated,TinyAsyncGradientEmbeddingClient was removed.'
        )
| null |
fake_retriever_v2
|
return FakeRetrieverV2()
|
@pytest.fixture
def fake_retriever_v2() ->BaseRetriever:
    """Provide a fresh FakeRetrieverV2 instance for each test."""
    return FakeRetrieverV2()
| null |
_cleanup_unnecessary_items
|
fields = self.json_result_fields if self.json_result_fields is not None else []
if len(fields) > 0:
for k, v in list(d.items()):
if isinstance(v, dict):
self._cleanup_unnecessary_items(v)
if len(v) == 0:
del d[k]
elif k not in fields:
del d[k]
if 'xpath' in d:
del d['xpath']
if 'position' in d:
del d['position']
if 'rectangle' in d:
del d['rectangle']
for k, v in list(d.items()):
if isinstance(v, dict):
self._cleanup_unnecessary_items(v)
return d
|
def _cleanup_unnecessary_items(self, d: dict) ->dict:
    """Recursively prune *d* in place, keeping only configured result fields.

    Also strips layout-only keys ('xpath', 'position', 'rectangle') at every
    level. Returns the same (mutated) dict.
    """
    fields = (self.json_result_fields if self.json_result_fields is not
        None else [])
    # First pass (only when a field whitelist exists): recurse into nested
    # dicts, drop any that become empty, and drop non-whitelisted leaves.
    if len(fields) > 0:
        for k, v in list(d.items()):
            if isinstance(v, dict):
                self._cleanup_unnecessary_items(v)
                if len(v) == 0:
                    del d[k]
            elif k not in fields:
                del d[k]
    # Layout metadata is never part of the JSON result.
    if 'xpath' in d:
        del d['xpath']
    if 'position' in d:
        del d['position']
    if 'rectangle' in d:
        del d['rectangle']
    # Second pass: recurse unconditionally so layout keys are removed from
    # nested dicts even when no whitelist is configured.
    for k, v in list(d.items()):
        if isinstance(v, dict):
            self._cleanup_unnecessary_items(v)
    return d
| null |
get_tokens
|
tiktoken = _import_tiktoken()
return len(tiktoken.get_encoding('cl100k_base').encode(text))
|
def get_tokens(text: str) ->int:
    """Count cl100k_base tokens in *text* using tiktoken."""
    tiktoken = _import_tiktoken()
    encoding = tiktoken.get_encoding('cl100k_base')
    return len(encoding.encode(text))
| null |
retriever
|
return search.run(query)
|
def retriever(query):
    # Thin adapter exposing the module-level `search` tool as a retriever fn.
    return search.run(query)
| null |
_similarity_search_with_relevance_scores
|
"""Return docs and their similarity scores on a scale from 0 to 1."""
score_threshold = kwargs.pop('score_threshold', None)
relevance_score_fn = self._select_relevance_score_fn()
if relevance_score_fn is None:
raise ValueError(
'normalize_score_fn must be provided to ScaNN constructor to normalize scores'
)
docs_and_scores = self.similarity_search_with_score(query, k=k, filter=
filter, fetch_k=fetch_k, **kwargs)
docs_and_rel_scores = [(doc, relevance_score_fn(score)) for doc, score in
docs_and_scores]
if score_threshold is not None:
docs_and_rel_scores = [(doc, similarity) for doc, similarity in
docs_and_rel_scores if similarity >= score_threshold]
return docs_and_rel_scores
|
def _similarity_search_with_relevance_scores(self, query: str, k: int=4,
    filter: Optional[Dict[str, Any]]=None, fetch_k: int=20, **kwargs: Any
    ) ->List[Tuple[Document, float]]:
    """Return docs and their similarity scores on a scale from 0 to 1.

    Raw scores from ``similarity_search_with_score`` are mapped through the
    configured relevance function; pass ``score_threshold`` in kwargs to
    drop results below that normalized value.
    """
    threshold = kwargs.pop('score_threshold', None)
    normalize = self._select_relevance_score_fn()
    if normalize is None:
        raise ValueError(
            'normalize_score_fn must be provided to ScaNN constructor to normalize scores'
            )
    raw_results = self.similarity_search_with_score(query, k=k, filter=
        filter, fetch_k=fetch_k, **kwargs)
    scored = [(doc, normalize(score)) for doc, score in raw_results]
    if threshold is None:
        return scored
    return [(doc, relevance) for doc, relevance in scored if relevance >=
        threshold]
|
Return docs and their similarity scores on a scale from 0 to 1.
|
test_json_equality_evaluator_evaluate_strings_custom_operator_equal
|
def operator(x: dict, y: dict) ->bool:
return x['a'] == y['a']
evaluator = JsonEqualityEvaluator(operator=operator)
prediction = '{"a": 1, "b": 2}'
reference = '{"a": 1, "c": 3}'
result = evaluator.evaluate_strings(prediction=prediction, reference=reference)
assert result == {'score': True}
|
def test_json_equality_evaluator_evaluate_strings_custom_operator_equal(
    ) ->None:
    """A custom operator comparing only key 'a' should report equality."""

    def compare_a_only(x: dict, y: dict) ->bool:
        # Ignore every key except 'a'.
        return x['a'] == y['a']
    evaluator = JsonEqualityEvaluator(operator=compare_a_only)
    result = evaluator.evaluate_strings(prediction='{"a": 1, "b": 2}',
        reference='{"a": 1, "c": 3}')
    assert result == {'score': True}
| null |
on_llm_start
|
"""Run when LLM starts."""
self.step += 1
self.llm_starts += 1
self.starts += 1
resp: Dict[str, Any] = {}
resp.update({'action': 'on_llm_start'})
resp.update(flatten_dict(serialized))
resp.update(self.get_custom_callback_meta())
prompt_responses = []
for prompt in prompts:
prompt_responses.append(prompt)
resp.update({'prompts': prompt_responses})
self.deck.append(self.markdown_renderer().to_html('### LLM Start'))
self.deck.append(self.table_renderer().to_html(self.pandas.DataFrame([resp]
)) + '\n')
|
def on_llm_start(self, serialized: Dict[str, Any], prompts: List[str], **
    kwargs: Any) ->None:
    """Run when LLM starts.

    Updates the step/start counters, then renders a record of the
    serialized LLM config plus the prompts as a markdown header and an
    HTML table appended to ``self.deck``.
    """
    self.step += 1
    self.llm_starts += 1
    self.starts += 1
    record: Dict[str, Any] = {'action': 'on_llm_start'}
    record.update(flatten_dict(serialized))
    record.update(self.get_custom_callback_meta())
    record.update({'prompts': list(prompts)})
    self.deck.append(self.markdown_renderer().to_html('### LLM Start'))
    self.deck.append(self.table_renderer().to_html(self.pandas.DataFrame(
        [record])) + '\n')
|
Run when LLM starts.
|
_parse_message
|
return {'role': role, 'text': text}
|
def _parse_message(role: str, text: str) ->Dict:
return {'role': role, 'text': text}
| null |
_llm_type
|
return 'vicuna-style'
|
@property
def _llm_type(self) ->str:
return 'vicuna-style'
| null |
_add_newlines_before_ha
|
new_text = input_text
for word in ['Human:', 'Assistant:']:
new_text = new_text.replace(word, '\n\n' + word)
for i in range(2):
new_text = new_text.replace('\n\n\n' + word, '\n\n' + word)
return new_text
|
def _add_newlines_before_ha(input_text: str) ->str:
new_text = input_text
for word in ['Human:', 'Assistant:']:
new_text = new_text.replace(word, '\n\n' + word)
for i in range(2):
new_text = new_text.replace('\n\n\n' + word, '\n\n' + word)
return new_text
| null |
test_structured_single_str_decorator_no_infer_schema
|
"""Test functionality with structured arguments parsed as a decorator."""
@tool(infer_schema=False)
def unstructured_tool_input(tool_input: str) ->str:
"""Return the arguments directly."""
assert isinstance(tool_input, str)
return f'{tool_input}'
assert isinstance(unstructured_tool_input, BaseTool)
assert unstructured_tool_input.args_schema is None
assert unstructured_tool_input.run('foo') == 'foo'
|
def test_structured_single_str_decorator_no_infer_schema() ->None:
    """Test functionality with structured arguments parsed as a decorator."""
    # With infer_schema=False the decorator must not build an args schema;
    # the tool accepts one raw string argument instead.
    @tool(infer_schema=False)
    def unstructured_tool_input(tool_input: str) ->str:
        """Return the arguments directly."""
        assert isinstance(tool_input, str)
        return f'{tool_input}'
    assert isinstance(unstructured_tool_input, BaseTool)
    assert unstructured_tool_input.args_schema is None
    # The raw string round-trips through run() unchanged.
    assert unstructured_tool_input.run('foo') == 'foo'
|
Test functionality with structured arguments parsed as a decorator.
|
__init__
|
"""Initialize with Marqo client."""
try:
import marqo
except ImportError:
raise ImportError(
'Could not import marqo python package. Please install it with `pip install marqo`.'
)
if not isinstance(client, marqo.Client):
raise ValueError(
f'client should be an instance of marqo.Client, got {type(client)}')
self._client = client
self._index_name = index_name
self._add_documents_settings = {
} if add_documents_settings is None else add_documents_settings
self._searchable_attributes = searchable_attributes
self.page_content_builder = page_content_builder
self.tensor_fields = ['text']
self._document_batch_size = 1024
|
def __init__(self, client: marqo.Client, index_name: str,
    add_documents_settings: Optional[Dict[str, Any]]=None,
    searchable_attributes: Optional[List[str]]=None, page_content_builder:
    Optional[Callable[[Dict[str, Any]], str]]=None):
    """Initialize with Marqo client.

    Raises ImportError when the optional ``marqo`` package is missing and
    ValueError when ``client`` is not a ``marqo.Client``.
    """
    try:
        import marqo
    except ImportError:
        raise ImportError(
            'Could not import marqo python package. Please install it with `pip install marqo`.'
            )
    if not isinstance(client, marqo.Client):
        raise ValueError(
            f'client should be an instance of marqo.Client, got {type(client)}'
            )
    # Keep the caller's settings object when one was given; otherwise use a
    # fresh dict (never a shared default).
    if add_documents_settings is None:
        add_documents_settings = {}
    self._client = client
    self._index_name = index_name
    self._add_documents_settings = add_documents_settings
    self._searchable_attributes = searchable_attributes
    self.page_content_builder = page_content_builder
    # Marqo embeds the 'text' field; documents are pushed in batches.
    self.tensor_fields = ['text']
    self._document_batch_size = 1024
|
Initialize with Marqo client.
|
test_fireworks_streaming_stop_words
|
"""Test streaming tokens with stop words."""
last_token = ''
for token in chat.stream("I'm Pickle Rick", stop=[',']):
last_token = cast(str, token.content)
assert isinstance(token.content, str)
assert last_token[-1] == ','
|
@pytest.mark.scheduled
def test_fireworks_streaming_stop_words(chat: ChatFireworks) ->None:
    """Streaming with a stop word should end on that stop token."""
    final = ''
    for chunk in chat.stream("I'm Pickle Rick", stop=[',']):
        final = cast(str, chunk.content)
        assert isinstance(chunk.content, str)
    # The last streamed token must terminate at the stop character.
    assert final[-1] == ','
|
Test streaming tokens with stop words.
|
_results_to_docs
|
return [doc for doc, _ in _results_to_docs_and_scores(results)]
|
def _results_to_docs(results: Any) ->List[Document]:
    """Drop the scores from (doc, score) pairs, keeping only the documents."""
    docs_and_scores = _results_to_docs_and_scores(results)
    return [doc for doc, _score in docs_and_scores]
| null |
_import_playwright_NavigateTool
|
from langchain_community.tools.playwright import NavigateTool
return NavigateTool
|
def _import_playwright_NavigateTool() ->Any:
    """Lazily import and return NavigateTool.

    Deferred so the optional playwright dependency is only required when
    the tool is actually requested.
    """
    from langchain_community.tools.playwright import NavigateTool
    return NavigateTool
| null |
test_refresh_schema
|
self.conn.execute(
'CREATE NODE TABLE Person (name STRING, birthDate STRING, PRIMARY KEY(name))'
)
self.conn.execute('CREATE REL TABLE ActedIn (FROM Person TO Movie)')
self.kuzu_graph.refresh_schema()
schema = self.kuzu_graph.get_schema
self.assertEqual(schema, EXPECTED_SCHEMA)
|
def test_refresh_schema(self) ->None:
    """After creating tables, refresh_schema should match the snapshot."""
    ddl_statements = [
        'CREATE NODE TABLE Person (name STRING, birthDate STRING, PRIMARY KEY(name))'
        , 'CREATE REL TABLE ActedIn (FROM Person TO Movie)']
    for statement in ddl_statements:
        self.conn.execute(statement)
    self.kuzu_graph.refresh_schema()
    self.assertEqual(self.kuzu_graph.get_schema, EXPECTED_SCHEMA)
| null |
test_deeplake_with_metadatas
|
"""Test end to end construction and search."""
texts = ['foo', 'bar', 'baz']
metadatas = [{'page': str(i)} for i in range(len(texts))]
docsearch = DeepLake.from_texts(dataset_path='mem://test_path', texts=texts,
embedding=FakeEmbeddings(), metadatas=metadatas)
output = docsearch.similarity_search('foo', k=1)
assert output == [Document(page_content='foo', metadata={'page': '0'})]
|
def test_deeplake_with_metadatas() ->None:
    """Test end to end construction and search."""
    texts = ['foo', 'bar', 'baz']
    metadatas = [{'page': str(page)} for page in range(len(texts))]
    store = DeepLake.from_texts(dataset_path='mem://test_path', texts=
        texts, embedding=FakeEmbeddings(), metadatas=metadatas)
    hits = store.similarity_search('foo', k=1)
    # The top hit must be the exact-match text with its metadata intact.
    assert hits == [Document(page_content='foo', metadata={'page': '0'})]
|
Test end to end construction and search.
|
parse
|
try:
expected_keys = ['query', 'filter']
allowed_keys = ['query', 'filter', 'limit']
parsed = parse_and_check_json_markdown(text, expected_keys)
if len(parsed['query']) == 0:
parsed['query'] = ' '
if parsed['filter'] == 'NO_FILTER' or not parsed['filter']:
parsed['filter'] = None
else:
parsed['filter'] = self.ast_parse(parsed['filter'])
if not parsed.get('limit'):
parsed.pop('limit', None)
return StructuredQuery(**{k: v for k, v in parsed.items() if k in
allowed_keys})
except Exception as e:
raise OutputParserException(
f'Parsing text\n{text}\n raised following error:\n{e}')
|
def parse(self, text: str) ->StructuredQuery:
    """Parse a markdown-JSON structured query into a StructuredQuery.

    Expects a JSON object with `query` and `filter` keys and an optional
    `limit`.  An empty query becomes a single space, `NO_FILTER` (or any
    falsy filter) becomes None, and a missing/falsy limit is dropped.
    Any failure is re-raised as OutputParserException.
    """
    try:
        parsed = parse_and_check_json_markdown(text, ['query', 'filter'])
        if len(parsed['query']) == 0:
            parsed['query'] = ' '
        raw_filter = parsed['filter']
        if raw_filter == 'NO_FILTER' or not raw_filter:
            parsed['filter'] = None
        else:
            parsed['filter'] = self.ast_parse(raw_filter)
        if not parsed.get('limit'):
            parsed.pop('limit', None)
        allowed = ('query', 'filter', 'limit')
        return StructuredQuery(**{key: value for key, value in parsed.
            items() if key in allowed})
    except Exception as e:
        raise OutputParserException(
            f'Parsing text\n{text}\n raised following error:\n{e}')
| null |
get_description
|
template: str = (
'Useful for when you need to answer questions about {name} and the sources used to construct the answer. Whenever you need information about {description} you should ALWAYS use this. Input should be a fully formed question. Output is a json serialized dictionary with keys `answer` and `sources`. Only use this tool if the user explicitly asks for sources.'
)
return template.format(name=name, description=description)
|
@staticmethod
def get_description(name: str, description: str) ->str:
    """Build the tool description advertising sourced-answer output."""
    return (
        f'Useful for when you need to answer questions about {name} and the sources used to construct the answer. Whenever you need information about {description} you should ALWAYS use this. Input should be a fully formed question. Output is a json serialized dictionary with keys `answer` and `sources`. Only use this tool if the user explicitly asks for sources.'
        )
| null |
crawl
|
page = self.page
page_element_buffer = self.page_element_buffer
start = time.time()
page_state_as_text = []
device_pixel_ratio: float = page.evaluate('window.devicePixelRatio')
if platform == 'darwin' and device_pixel_ratio == 1:
device_pixel_ratio = 2
win_upper_bound: float = page.evaluate('window.pageYOffset')
win_left_bound: float = page.evaluate('window.pageXOffset')
win_width: float = page.evaluate('window.screen.width')
win_height: float = page.evaluate('window.screen.height')
win_right_bound: float = win_left_bound + win_width
win_lower_bound: float = win_upper_bound + win_height
percentage_progress_start = 1
percentage_progress_end = 2
page_state_as_text.append({'x': 0, 'y': 0, 'text':
'[scrollbar {:0.2f}-{:0.2f}%]'.format(round(percentage_progress_start,
2), round(percentage_progress_end))})
tree = self.client.send('DOMSnapshot.captureSnapshot', {'computedStyles': [
], 'includeDOMRects': True, 'includePaintOrder': True})
strings: Dict[int, str] = tree['strings']
document: Dict[str, Any] = tree['documents'][0]
nodes: Dict[str, Any] = document['nodes']
backend_node_id: Dict[int, int] = nodes['backendNodeId']
attributes: Dict[int, Dict[int, Any]] = nodes['attributes']
node_value: Dict[int, int] = nodes['nodeValue']
parent: Dict[int, int] = nodes['parentIndex']
node_names: Dict[int, int] = nodes['nodeName']
is_clickable: Set[int] = set(nodes['isClickable']['index'])
input_value: Dict[str, Any] = nodes['inputValue']
input_value_index: List[int] = input_value['index']
input_value_values: List[int] = input_value['value']
layout: Dict[str, Any] = document['layout']
layout_node_index: List[int] = layout['nodeIndex']
bounds: Dict[int, List[float]] = layout['bounds']
cursor: int = 0
child_nodes: Dict[str, List[Dict[str, Any]]] = {}
elements_in_view_port: List[ElementInViewPort] = []
anchor_ancestry: Dict[str, Tuple[bool, Optional[int]]] = {'-1': (False, None)}
button_ancestry: Dict[str, Tuple[bool, Optional[int]]] = {'-1': (False, None)}
def convert_name(node_name: Optional[str], has_click_handler: Optional[bool]
) ->str:
if node_name == 'a':
return 'link'
if node_name == 'input':
return 'input'
if node_name == 'img':
return 'img'
if node_name == 'button' or has_click_handler:
return 'button'
else:
return 'text'
def find_attributes(attributes: Dict[int, Any], keys: List[str]) ->Dict[str,
str]:
values = {}
for [key_index, value_index] in zip(*((iter(attributes),) * 2)):
if value_index < 0:
continue
key = strings[key_index]
value = strings[value_index]
if key in keys:
values[key] = value
keys.remove(key)
if not keys:
return values
return values
def add_to_hash_tree(hash_tree: Dict[str, Tuple[bool, Optional[int]]], tag:
str, node_id: int, node_name: Optional[str], parent_id: int) ->Tuple[
bool, Optional[int]]:
parent_id_str = str(parent_id)
if not parent_id_str in hash_tree:
parent_name = strings[node_names[parent_id]].lower()
grand_parent_id = parent[parent_id]
add_to_hash_tree(hash_tree, tag, parent_id, parent_name,
grand_parent_id)
is_parent_desc_anchor, anchor_id = hash_tree[parent_id_str]
if node_name == tag:
value: Tuple[bool, Optional[int]] = (True, node_id)
elif is_parent_desc_anchor:
value = True, anchor_id
else:
value = False, None
hash_tree[str(node_id)] = value
return value
for index, node_name_index in enumerate(node_names):
node_parent = parent[index]
node_name: Optional[str] = strings[node_name_index].lower()
is_ancestor_of_anchor, anchor_id = add_to_hash_tree(anchor_ancestry,
'a', index, node_name, node_parent)
is_ancestor_of_button, button_id = add_to_hash_tree(button_ancestry,
'button', index, node_name, node_parent)
try:
cursor = layout_node_index.index(index)
except:
continue
if node_name in black_listed_elements:
continue
[x, y, width, height] = bounds[cursor]
x /= device_pixel_ratio
y /= device_pixel_ratio
width /= device_pixel_ratio
height /= device_pixel_ratio
elem_left_bound = x
elem_top_bound = y
elem_right_bound = x + width
elem_lower_bound = y + height
partially_is_in_viewport = (elem_left_bound < win_right_bound and
elem_right_bound >= win_left_bound and elem_top_bound <
win_lower_bound and elem_lower_bound >= win_upper_bound)
if not partially_is_in_viewport:
continue
meta_data: List[str] = []
element_attributes = find_attributes(attributes[index], ['type',
'placeholder', 'aria-label', 'title', 'alt'])
ancestor_exception = is_ancestor_of_anchor or is_ancestor_of_button
ancestor_node_key = None if not ancestor_exception else str(anchor_id
) if is_ancestor_of_anchor else str(button_id)
ancestor_node = None if not ancestor_exception else child_nodes.setdefault(
str(ancestor_node_key), [])
if node_name == '#text' and ancestor_exception and ancestor_node:
text = strings[node_value[index]]
if text == '|' or text == '•':
continue
ancestor_node.append({'type': 'type', 'value': text})
else:
if node_name == 'input' and element_attributes.get('type'
) == 'submit' or node_name == 'button':
node_name = 'button'
element_attributes.pop('type', None)
for key in element_attributes:
if ancestor_exception and ancestor_node:
ancestor_node.append({'type': 'attribute', 'key': key,
'value': element_attributes[key]})
else:
meta_data.append(element_attributes[key])
element_node_value = None
if node_value[index] >= 0:
element_node_value = strings[node_value[index]]
if element_node_value == '|':
continue
elif node_name == 'input' and index in input_value_index and element_node_value is None:
node_input_text_index = input_value_index.index(index)
text_index = input_value_values[node_input_text_index]
if node_input_text_index >= 0 and text_index >= 0:
element_node_value = strings[text_index]
if ancestor_exception and (node_name != 'a' and node_name != 'button'):
continue
elements_in_view_port.append({'node_index': str(index),
'backend_node_id': backend_node_id[index], 'node_name': node_name,
'node_value': element_node_value, 'node_meta': meta_data,
'is_clickable': index in is_clickable, 'origin_x': int(x),
'origin_y': int(y), 'center_x': int(x + width / 2), 'center_y': int
(y + height / 2)})
elements_of_interest = []
id_counter = 0
for element in elements_in_view_port:
node_index = element.get('node_index')
node_name = element.get('node_name')
element_node_value = element.get('node_value')
node_is_clickable = element.get('is_clickable')
node_meta_data: Optional[List[str]] = element.get('node_meta')
inner_text = f'{element_node_value} ' if element_node_value else ''
meta = ''
if node_index in child_nodes:
for child in child_nodes[node_index]:
entry_type = child.get('type')
entry_value = child.get('value')
if entry_type == 'attribute' and node_meta_data:
entry_key = child.get('key')
node_meta_data.append(f'{entry_key}="{entry_value}"')
else:
inner_text += f'{entry_value} '
if node_meta_data:
meta_string = ' '.join(node_meta_data)
meta = f' {meta_string}'
if inner_text != '':
inner_text = f'{inner_text.strip()}'
converted_node_name = convert_name(node_name, node_is_clickable)
if ((converted_node_name != 'button' or meta == '') and
converted_node_name != 'link' and converted_node_name != 'input' and
converted_node_name != 'img' and converted_node_name != 'textarea'
) and inner_text.strip() == '':
continue
page_element_buffer[id_counter] = element
if inner_text != '':
elements_of_interest.append(
f'<{converted_node_name} id={id_counter}{meta}>{inner_text}</{converted_node_name}>'
)
else:
elements_of_interest.append(
f'<{converted_node_name} id={id_counter}{meta}/>')
id_counter += 1
print('Parsing time: {:0.2f} seconds'.format(time.time() - start))
return elements_of_interest
|
def crawl(self) ->List[str]:
    """Snapshot the page's DOM via CDP and return simplified element strings.

    Pulls a flattened DOM/layout dump with ``DOMSnapshot.captureSnapshot``,
    keeps only nodes at least partially inside the viewport, folds text and
    attributes of anchor/button descendants into their ancestor element,
    and emits one ``<tag id=N ...>text</tag>`` string per element of
    interest.  Side effect: fills ``self.page_element_buffer`` keyed by the
    emitted id so later actions can resolve ids back to concrete elements.
    """
    page = self.page
    page_element_buffer = self.page_element_buffer
    start = time.time()
    page_state_as_text = []
    device_pixel_ratio: float = page.evaluate('window.devicePixelRatio')
    # NOTE(review): macOS can report ratio 1 while the snapshot coordinates
    # are doubled; forcing 2 assumes a Retina display — confirm on
    # non-Retina Macs.
    if platform == 'darwin' and device_pixel_ratio == 1:
        device_pixel_ratio = 2
    # Viewport rectangle in page coordinates.
    win_upper_bound: float = page.evaluate('window.pageYOffset')
    win_left_bound: float = page.evaluate('window.pageXOffset')
    win_width: float = page.evaluate('window.screen.width')
    win_height: float = page.evaluate('window.screen.height')
    win_right_bound: float = win_left_bound + win_width
    win_lower_bound: float = win_upper_bound + win_height
    percentage_progress_start = 1
    percentage_progress_end = 2
    page_state_as_text.append({'x': 0, 'y': 0, 'text':
        '[scrollbar {:0.2f}-{:0.2f}%]'.format(round(
        percentage_progress_start, 2), round(percentage_progress_end))})
    tree = self.client.send('DOMSnapshot.captureSnapshot', {
        'computedStyles': [], 'includeDOMRects': True, 'includePaintOrder':
        True})
    # The snapshot is column-oriented: parallel arrays indexed by node, with
    # all text interned in the shared `strings` table.
    strings: Dict[int, str] = tree['strings']
    document: Dict[str, Any] = tree['documents'][0]
    nodes: Dict[str, Any] = document['nodes']
    backend_node_id: Dict[int, int] = nodes['backendNodeId']
    attributes: Dict[int, Dict[int, Any]] = nodes['attributes']
    node_value: Dict[int, int] = nodes['nodeValue']
    parent: Dict[int, int] = nodes['parentIndex']
    node_names: Dict[int, int] = nodes['nodeName']
    is_clickable: Set[int] = set(nodes['isClickable']['index'])
    input_value: Dict[str, Any] = nodes['inputValue']
    input_value_index: List[int] = input_value['index']
    input_value_values: List[int] = input_value['value']
    layout: Dict[str, Any] = document['layout']
    layout_node_index: List[int] = layout['nodeIndex']
    bounds: Dict[int, List[float]] = layout['bounds']
    cursor: int = 0
    child_nodes: Dict[str, List[Dict[str, Any]]] = {}
    elements_in_view_port: List[ElementInViewPort] = []
    # Memoized maps: node-id -> (is descendant of <a>/<button>, ancestor id).
    anchor_ancestry: Dict[str, Tuple[bool, Optional[int]]] = {'-1': (False,
        None)}
    button_ancestry: Dict[str, Tuple[bool, Optional[int]]] = {'-1': (False,
        None)}

    # Map a raw tag (plus clickability) to the simplified output tag.
    def convert_name(node_name: Optional[str], has_click_handler: Optional[
        bool]) ->str:
        if node_name == 'a':
            return 'link'
        if node_name == 'input':
            return 'input'
        if node_name == 'img':
            return 'img'
        if node_name == 'button' or has_click_handler:
            return 'button'
        else:
            return 'text'

    # Decode the flat [key_index, value_index, ...] attribute array,
    # returning only the requested keys (stops early once all are found).
    def find_attributes(attributes: Dict[int, Any], keys: List[str]) ->Dict[
        str, str]:
        values = {}
        for [key_index, value_index] in zip(*((iter(attributes),) * 2)):
            if value_index < 0:
                continue
            key = strings[key_index]
            value = strings[value_index]
            if key in keys:
                values[key] = value
                keys.remove(key)
                if not keys:
                    return values
        return values

    # Recursively record whether node_id is (or descends from) a `tag`
    # element, caching results for the whole ancestor chain.
    def add_to_hash_tree(hash_tree: Dict[str, Tuple[bool, Optional[int]]],
        tag: str, node_id: int, node_name: Optional[str], parent_id: int
        ) ->Tuple[bool, Optional[int]]:
        parent_id_str = str(parent_id)
        if not parent_id_str in hash_tree:
            parent_name = strings[node_names[parent_id]].lower()
            grand_parent_id = parent[parent_id]
            add_to_hash_tree(hash_tree, tag, parent_id, parent_name,
                grand_parent_id)
        is_parent_desc_anchor, anchor_id = hash_tree[parent_id_str]
        if node_name == tag:
            value: Tuple[bool, Optional[int]] = (True, node_id)
        elif is_parent_desc_anchor:
            value = True, anchor_id
        else:
            value = False, None
        hash_tree[str(node_id)] = value
        return value
    # Pass 1: collect viewport-visible elements, folding anchor/button
    # descendants into their ancestor's child_nodes entry.
    for index, node_name_index in enumerate(node_names):
        node_parent = parent[index]
        node_name: Optional[str] = strings[node_name_index].lower()
        is_ancestor_of_anchor, anchor_id = add_to_hash_tree(anchor_ancestry,
            'a', index, node_name, node_parent)
        is_ancestor_of_button, button_id = add_to_hash_tree(button_ancestry,
            'button', index, node_name, node_parent)
        try:
            cursor = layout_node_index.index(index)
        except:
            # Bare except: nodes without a layout entry (not rendered) are
            # skipped; .index raising ValueError is the expected case here.
            continue
        if node_name in black_listed_elements:
            continue
        [x, y, width, height] = bounds[cursor]
        # Convert device pixels to CSS pixels.
        x /= device_pixel_ratio
        y /= device_pixel_ratio
        width /= device_pixel_ratio
        height /= device_pixel_ratio
        elem_left_bound = x
        elem_top_bound = y
        elem_right_bound = x + width
        elem_lower_bound = y + height
        partially_is_in_viewport = (elem_left_bound < win_right_bound and
            elem_right_bound >= win_left_bound and elem_top_bound <
            win_lower_bound and elem_lower_bound >= win_upper_bound)
        if not partially_is_in_viewport:
            continue
        meta_data: List[str] = []
        element_attributes = find_attributes(attributes[index], ['type',
            'placeholder', 'aria-label', 'title', 'alt'])
        ancestor_exception = is_ancestor_of_anchor or is_ancestor_of_button
        ancestor_node_key = None if not ancestor_exception else str(anchor_id
            ) if is_ancestor_of_anchor else str(button_id)
        ancestor_node = (None if not ancestor_exception else child_nodes.
            setdefault(str(ancestor_node_key), []))
        if node_name == '#text' and ancestor_exception and ancestor_node:
            text = strings[node_value[index]]
            # Decorative separators carry no information.
            if text == '|' or text == '•':
                continue
            ancestor_node.append({'type': 'type', 'value': text})
        else:
            # Treat submit inputs as buttons and drop the redundant type attr.
            if node_name == 'input' and element_attributes.get('type'
                ) == 'submit' or node_name == 'button':
                node_name = 'button'
                element_attributes.pop('type', None)
            for key in element_attributes:
                if ancestor_exception and ancestor_node:
                    ancestor_node.append({'type': 'attribute', 'key': key,
                        'value': element_attributes[key]})
                else:
                    meta_data.append(element_attributes[key])
        element_node_value = None
        if node_value[index] >= 0:
            element_node_value = strings[node_value[index]]
            if element_node_value == '|':
                continue
        elif node_name == 'input' and index in input_value_index and element_node_value is None:
            # Inputs expose their current value through the inputValue arrays.
            node_input_text_index = input_value_index.index(index)
            text_index = input_value_values[node_input_text_index]
            if node_input_text_index >= 0 and text_index >= 0:
                element_node_value = strings[text_index]
        # Descendants of anchors/buttons were folded into the ancestor above;
        # only the anchor/button element itself is emitted.
        if ancestor_exception and (node_name != 'a' and node_name != 'button'):
            continue
        elements_in_view_port.append({'node_index': str(index),
            'backend_node_id': backend_node_id[index], 'node_name':
            node_name, 'node_value': element_node_value, 'node_meta':
            meta_data, 'is_clickable': index in is_clickable, 'origin_x':
            int(x), 'origin_y': int(y), 'center_x': int(x + width / 2),
            'center_y': int(y + height / 2)})
    # Pass 2: render each collected element as a simplified tag string,
    # merging in the folded child text/attributes.
    elements_of_interest = []
    id_counter = 0
    for element in elements_in_view_port:
        node_index = element.get('node_index')
        node_name = element.get('node_name')
        element_node_value = element.get('node_value')
        node_is_clickable = element.get('is_clickable')
        node_meta_data: Optional[List[str]] = element.get('node_meta')
        inner_text = f'{element_node_value} ' if element_node_value else ''
        meta = ''
        if node_index in child_nodes:
            for child in child_nodes[node_index]:
                entry_type = child.get('type')
                entry_value = child.get('value')
                if entry_type == 'attribute' and node_meta_data:
                    entry_key = child.get('key')
                    node_meta_data.append(f'{entry_key}="{entry_value}"')
                else:
                    inner_text += f'{entry_value} '
        if node_meta_data:
            meta_string = ' '.join(node_meta_data)
            meta = f' {meta_string}'
        if inner_text != '':
            inner_text = f'{inner_text.strip()}'
        converted_node_name = convert_name(node_name, node_is_clickable)
        # Drop plain text-ish elements with no visible text or metadata.
        if ((converted_node_name != 'button' or meta == '') and
            converted_node_name != 'link' and converted_node_name !=
            'input' and converted_node_name != 'img' and
            converted_node_name != 'textarea') and inner_text.strip() == '':
            continue
        page_element_buffer[id_counter] = element
        if inner_text != '':
            elements_of_interest.append(
                f'<{converted_node_name} id={id_counter}{meta}>{inner_text}</{converted_node_name}>'
                )
        else:
            elements_of_interest.append(
                f'<{converted_node_name} id={id_counter}{meta}/>')
        id_counter += 1
    print('Parsing time: {:0.2f} seconds'.format(time.time() - start))
    return elements_of_interest
| null |
format_prompt
|
"""Create Prompt Value."""
|
@abstractmethod
def format_prompt(self, **kwargs: Any) ->PromptValue:
    """Create Prompt Value.

    Subclasses fill the template with the given keyword arguments and
    return a ``PromptValue``.
    """
|
Create Prompt Value.
|
parse_pull_requests
|
"""
Extracts title and number from each Issue and puts them in a dictionary
Parameters:
issues(List[Issue]): A list of Github Issue objects
Returns:
List[dict]: A dictionary of issue titles and numbers
"""
parsed = []
for pr in pull_requests:
parsed.append({'title': pr.title, 'number': pr.number, 'commits': str(
pr.commits), 'comments': str(pr.comments)})
return parsed
|
def parse_pull_requests(self, pull_requests: List[PullRequest]) ->List[dict]:
    """
    Extract summary fields from each pull request.

    Parameters:
        pull_requests(List[PullRequest]): A list of Github PullRequest objects

    Returns:
        List[dict]: One dict per PR containing its title, number, and
        stringified commit and comment counts
    """
    return [{'title': pr.title, 'number': pr.number, 'commits': str(pr.
        commits), 'comments': str(pr.comments)} for pr in pull_requests]
|
Extracts title and number from each Issue and puts them in a dictionary
Parameters:
issues(List[Issue]): A list of Github Issue objects
Returns:
List[dict]: A dictionary of issue titles and numbers
|
_embed_documents
|
"""Embed search docs."""
if isinstance(self._embedding, Embeddings):
return self._embedding.embed_documents(list(texts))
return [self._embedding(t) for t in texts]
|
def _embed_documents(self, texts: Iterable[str]) ->List[List[float]]:
    """Embed search docs.

    Uses the Embeddings interface when available, otherwise treats the
    stored embedding as a plain callable applied per text.
    """
    embedding = self._embedding
    if isinstance(embedding, Embeddings):
        return embedding.embed_documents(list(texts))
    return [embedding(text) for text in texts]
|
Embed search docs.
|
test_run_success
|
responses.add(responses.POST, api_client.outline_instance_url + api_client.
outline_search_endpoint, json=OUTLINE_SUCCESS_RESPONSE, status=200)
docs = api_client.run('Testing')
assert_docs(docs, all_meta=False)
|
@responses.activate
def test_run_success(api_client: OutlineAPIWrapper) ->None:
    """A 200 search response should yield fully parsed documents."""
    search_url = (api_client.outline_instance_url + api_client.
        outline_search_endpoint)
    responses.add(responses.POST, search_url, json=
        OUTLINE_SUCCESS_RESPONSE, status=200)
    assert_docs(api_client.run('Testing'), all_meta=False)
| null |
test_eval_chain
|
"""Test a simple eval chain."""
example = {'query': "What's my name", 'answer': 'John Doe'}
prediction = {'result': 'John Doe'}
fake_qa_eval_chain = QAEvalChain.from_llm(FakeLLM())
outputs = fake_qa_eval_chain.evaluate([example, example], [prediction,
prediction])
assert outputs[0] == outputs[1]
assert fake_qa_eval_chain.output_key in outputs[0]
assert outputs[0][fake_qa_eval_chain.output_key] == 'foo'
|
@pytest.mark.skipif(sys.platform.startswith('win'), reason=
    'Test not supported on Windows')
def test_eval_chain() ->None:
    """Test a simple eval chain."""
    example = {'query': "What's my name", 'answer': 'John Doe'}
    prediction = {'result': 'John Doe'}
    chain = QAEvalChain.from_llm(FakeLLM())
    outputs = chain.evaluate([example, example], [prediction, prediction])
    # Identical inputs must grade identically, and the fake LLM's canned
    # output must land under the chain's output key.
    assert outputs[0] == outputs[1]
    assert chain.output_key in outputs[0]
    assert outputs[0][chain.output_key] == 'foo'
|
Test a simple eval chain.
|
test_qdrant_max_marginal_relevance_search
|
"""Test end to end construction and MRR search."""
from qdrant_client import models
filter = models.Filter(must=[models.FieldCondition(key=
f'{metadata_payload_key}.page', match=models.MatchValue(value=2))])
texts = ['foo', 'bar', 'baz']
metadatas = [{'page': i} for i in range(len(texts))]
docsearch = Qdrant.from_texts(texts, ConsistentFakeEmbeddings(), metadatas=
metadatas, location=':memory:', content_payload_key=content_payload_key,
metadata_payload_key=metadata_payload_key, batch_size=batch_size,
vector_name=vector_name, distance_func='EUCLID')
output = docsearch.max_marginal_relevance_search('foo', k=2, fetch_k=3,
lambda_mult=0.0)
assert output == [Document(page_content='foo', metadata={'page': 0}),
Document(page_content='baz', metadata={'page': 2})]
output = docsearch.max_marginal_relevance_search('foo', k=2, fetch_k=3,
lambda_mult=0.0, filter=filter)
assert output == [Document(page_content='baz', metadata={'page': 2})]
|
@pytest.mark.parametrize('batch_size', [1, 64])
@pytest.mark.parametrize('content_payload_key', [Qdrant.CONTENT_KEY,
    'test_content'])
@pytest.mark.parametrize('metadata_payload_key', [Qdrant.METADATA_KEY,
    'test_metadata'])
@pytest.mark.parametrize('vector_name', [None, 'my-vector'])
def test_qdrant_max_marginal_relevance_search(batch_size: int,
    content_payload_key: str, metadata_payload_key: str, vector_name:
    Optional[str]) ->None:
    """Test end to end construction and MRR search."""
    from qdrant_client import models
    page_filter = models.Filter(must=[models.FieldCondition(key=
        f'{metadata_payload_key}.page', match=models.MatchValue(value=2))])
    texts = ['foo', 'bar', 'baz']
    metadatas = [{'page': page} for page in range(len(texts))]
    docsearch = Qdrant.from_texts(texts, ConsistentFakeEmbeddings(),
        metadatas=metadatas, location=':memory:', content_payload_key=
        content_payload_key, metadata_payload_key=metadata_payload_key,
        batch_size=batch_size, vector_name=vector_name, distance_func='EUCLID')
    # lambda_mult=0 maximizes diversity, so the second hit skips 'bar'.
    unfiltered = docsearch.max_marginal_relevance_search('foo', k=2,
        fetch_k=3, lambda_mult=0.0)
    assert unfiltered == [Document(page_content='foo', metadata={'page': 0
        }), Document(page_content='baz', metadata={'page': 2})]
    filtered = docsearch.max_marginal_relevance_search('foo', k=2, fetch_k
        =3, lambda_mult=0.0, filter=page_filter)
    assert filtered == [Document(page_content='baz', metadata={'page': 2})]
|
Test end to end construction and MRR search.
|
_load_vector_db_qa
|
if 'vectorstore' in kwargs:
vectorstore = kwargs.pop('vectorstore')
else:
raise ValueError('`vectorstore` must be present.')
if 'combine_documents_chain' in config:
combine_documents_chain_config = config.pop('combine_documents_chain')
combine_documents_chain = load_chain_from_config(
combine_documents_chain_config)
elif 'combine_documents_chain_path' in config:
combine_documents_chain = load_chain(config.pop(
'combine_documents_chain_path'))
else:
raise ValueError(
'One of `combine_documents_chain` or `combine_documents_chain_path` must be present.'
)
return VectorDBQA(combine_documents_chain=combine_documents_chain,
vectorstore=vectorstore, **config)
|
def _load_vector_db_qa(config: dict, **kwargs: Any) ->VectorDBQA:
    """Build a ``VectorDBQA`` chain from a serialized chain config.

    The vector store is always supplied by the caller through ``kwargs``;
    the combine-documents chain is loaded either inline from the config or
    from a referenced path.
    """
    if 'vectorstore' not in kwargs:
        raise ValueError('`vectorstore` must be present.')
    vectorstore = kwargs.pop('vectorstore')
    if 'combine_documents_chain' in config:
        combine_documents_chain = load_chain_from_config(config.pop(
            'combine_documents_chain'))
    elif 'combine_documents_chain_path' in config:
        combine_documents_chain = load_chain(config.pop(
            'combine_documents_chain_path'))
    else:
        raise ValueError(
            'One of `combine_documents_chain` or `combine_documents_chain_path` must be present.'
            )
    return VectorDBQA(combine_documents_chain=combine_documents_chain,
        vectorstore=vectorstore, **config)
| null |
_generate
|
should_stream = stream if stream is not None else self.streaming
if should_stream:
stream_iter = self._stream(messages, stop=stop, run_manager=run_manager,
**kwargs)
return generate_from_stream(stream_iter)
payload = self._build_payload(messages)
response = self._client.chat(payload)
return self._create_chat_result(response)
|
def _generate(self, messages: List[BaseMessage], stop: Optional[List[str]]=
    None, run_manager: Optional[CallbackManagerForLLMRun]=None, stream:
    Optional[bool]=None, **kwargs: Any) ->ChatResult:
    """Produce a chat completion for ``messages``.

    When streaming is requested (explicitly via ``stream``, or by the
    instance-level ``streaming`` flag when ``stream`` is None), the result
    is assembled from streamed chunks; otherwise a single blocking chat
    call is made.
    """
    use_stream = self.streaming if stream is None else stream
    if use_stream:
        chunk_iter = self._stream(messages, stop=stop, run_manager=
            run_manager, **kwargs)
        return generate_from_stream(chunk_iter)
    request_payload = self._build_payload(messages)
    raw_response = self._client.chat(request_payload)
    return self._create_chat_result(raw_response)
| null |
_delete_all
|
"""Delete all records in the table."""
while True:
r = self._client.data().query(self._table_name, payload={'columns': ['id']}
)
if r.status_code != 200:
raise Exception(f'Error running query: {r.status_code} {r}')
ids = [rec['id'] for rec in r['records']]
if len(ids) == 0:
break
operations = [{'delete': {'table': self._table_name, 'id': id}} for id in
ids]
self._client.records().transaction(payload={'operations': operations})
|
def _delete_all(self) ->None:
    """Delete all records in the table.

    Repeatedly queries for record ids and deletes them in batched
    transactions until the query returns no more records.
    """
    while True:
        response = self._client.data().query(self._table_name, payload={
            'columns': ['id']})
        if response.status_code != 200:
            raise Exception(f'Error running query: {response.status_code} {response}')
        record_ids = [record['id'] for record in response['records']]
        if not record_ids:
            break
        deletions = [{'delete': {'table': self._table_name, 'id':
            record_id}} for record_id in record_ids]
        self._client.records().transaction(payload={'operations': deletions})
|
Delete all records in the table.
|
is_llm
|
"""Check if the language model is a LLM.
Args:
llm: Language model to check.
Returns:
True if the language model is a BaseLLM model, False otherwise.
"""
return isinstance(llm, BaseLLM)
|
def is_llm(llm: BaseLanguageModel) ->bool:
    """Check whether a language model is a completion-style LLM.

    Args:
        llm: Language model to check.

    Returns:
        True if the language model is a BaseLLM model, False otherwise.
    """
    return isinstance(llm, BaseLLM)
|
Check if the language model is a LLM.
Args:
llm: Language model to check.
Returns:
True if the language model is a BaseLLM model, False otherwise.
|
message_chunk_to_message
|
if not isinstance(chunk, BaseMessageChunk):
return chunk
return chunk.__class__.__mro__[1](**{k: v for k, v in chunk.__dict__.items(
) if k != 'type'})
|
def message_chunk_to_message(chunk: BaseMessageChunk) ->BaseMessage:
    """Convert a message chunk into its full (non-chunk) message form.

    Non-chunk inputs are returned unchanged.  For chunks, the direct base
    class in the MRO (the corresponding full-message class) is instantiated
    with the chunk's attributes, minus the chunk-specific ``type`` field.
    """
    if not isinstance(chunk, BaseMessageChunk):
        return chunk
    attrs = {key: value for key, value in chunk.__dict__.items() if key !=
        'type'}
    full_message_cls = chunk.__class__.__mro__[1]
    return full_message_cls(**attrs)
| null |
max_marginal_relevance_search
|
"""Perform a search and return results that are reordered by MMR.
Args:
query (str): The text being searched.
k (int, optional): How many results to give. Defaults to 4.
fetch_k (int, optional): Total results to select k from.
Defaults to 20.
lambda_mult: Number between 0 and 1 that determines the degree
of diversity among the results with 0 corresponding
to maximum diversity and 1 to minimum diversity.
Defaults to 0.5
param (dict, optional): The search params for the specified index.
Defaults to None.
expr (str, optional): Filtering expression. Defaults to None.
timeout (int, optional): How long to wait before timeout error.
Defaults to None.
kwargs: Collection.search() keyword arguments.
Returns:
List[Document]: Document results for search.
"""
if self.col is None:
logger.debug('No existing collection to search.')
return []
embedding = self.embedding_func.embed_query(query)
return self.max_marginal_relevance_search_by_vector(embedding=embedding, k=
k, fetch_k=fetch_k, lambda_mult=lambda_mult, param=param, expr=expr,
timeout=timeout, **kwargs)
|
def max_marginal_relevance_search(self, query: str, k: int=4, fetch_k: int=
    20, lambda_mult: float=0.5, param: Optional[dict]=None, expr: Optional[
    str]=None, timeout: Optional[int]=None, **kwargs: Any) ->List[Document]:
    """Embed ``query`` and run a maximal-marginal-relevance (MMR) search.

    Args:
        query (str): The text being searched.
        k (int, optional): How many results to give. Defaults to 4.
        fetch_k (int, optional): Total results to select k from.
            Defaults to 20.
        lambda_mult: Number between 0 and 1 that determines the degree
            of diversity among the results with 0 corresponding
            to maximum diversity and 1 to minimum diversity.
            Defaults to 0.5
        param (dict, optional): The search params for the specified index.
            Defaults to None.
        expr (str, optional): Filtering expression. Defaults to None.
        timeout (int, optional): How long to wait before timeout error.
            Defaults to None.
        kwargs: Collection.search() keyword arguments.

    Returns:
        List[Document]: Document results for search.
    """
    # Without a backing collection there is nothing to search.
    if self.col is None:
        logger.debug('No existing collection to search.')
        return []
    # Embed the query once, then delegate to the vector-based MMR search.
    query_embedding = self.embedding_func.embed_query(query)
    return self.max_marginal_relevance_search_by_vector(embedding=
        query_embedding, k=k, fetch_k=fetch_k, lambda_mult=lambda_mult,
        param=param, expr=expr, timeout=timeout, **kwargs)
|
Perform a search and return results that are reordered by MMR.
Args:
query (str): The text being searched.
k (int, optional): How many results to give. Defaults to 4.
fetch_k (int, optional): Total results to select k from.
Defaults to 20.
lambda_mult: Number between 0 and 1 that determines the degree
of diversity among the results with 0 corresponding
to maximum diversity and 1 to minimum diversity.
Defaults to 0.5
param (dict, optional): The search params for the specified index.
Defaults to None.
expr (str, optional): Filtering expression. Defaults to None.
timeout (int, optional): How long to wait before timeout error.
Defaults to None.
kwargs: Collection.search() keyword arguments.
Returns:
List[Document]: Document results for search.
|
__del__
|
"""Ensure the client streaming connection is properly shutdown"""
self.client.close()
|
def __del__(self):
    """Ensure the client streaming connection is properly shutdown.

    Defensive by design: ``__init__`` may have raised before
    ``self.client`` was assigned, and a finalizer must never raise, so
    only close a client that actually exists.
    """
    client = getattr(self, 'client', None)
    if client is not None:
        client.close()
|
Ensure the client streaming connection is properly shutdown
|
modify_serialized_iterative
|
"""Utility to modify the serialized field of a list of runs dictionaries.
removes any keys that match the exact_keys and any keys that contain any of the
partial_keys.
recursively moves the dictionaries under the kwargs key to the top level.
changes the "id" field to a string "_kind" field that tells WBTraceTree how to
visualize the run. promotes the "serialized" field to the top level.
:param runs: The list of runs to modify.
:param exact_keys: A tuple of keys to remove from the serialized field.
:param partial_keys: A tuple of partial keys to remove from the serialized
field.
:return: The modified list of runs.
"""
def remove_exact_and_partial_keys(obj: Dict[str, Any]) ->Dict[str, Any]:
"""Recursively removes exact and partial keys from a dictionary.
:param obj: The dictionary to remove keys from.
:return: The modified dictionary.
"""
if isinstance(obj, dict):
obj = {k: v for k, v in obj.items() if k not in exact_keys and not
any(partial in k for partial in partial_keys)}
for k, v in obj.items():
obj[k] = remove_exact_and_partial_keys(v)
elif isinstance(obj, list):
obj = [remove_exact_and_partial_keys(x) for x in obj]
return obj
def handle_id_and_kwargs(obj: Dict[str, Any], root: bool=False) ->Dict[str, Any
]:
"""Recursively handles the id and kwargs fields of a dictionary.
changes the id field to a string "_kind" field that tells WBTraceTree how
to visualize the run. recursively moves the dictionaries under the kwargs
key to the top level.
:param obj: a run dictionary with id and kwargs fields.
:param root: whether this is the root dictionary or the serialized
dictionary.
:return: The modified dictionary.
"""
if isinstance(obj, dict):
if ('id' in obj or 'name' in obj) and not root:
_kind = obj.get('id')
if not _kind:
_kind = [obj.get('name')]
obj['_kind'] = _kind[-1]
obj.pop('id', None)
obj.pop('name', None)
if 'kwargs' in obj:
kwargs = obj.pop('kwargs')
for k, v in kwargs.items():
obj[k] = v
for k, v in obj.items():
obj[k] = handle_id_and_kwargs(v)
elif isinstance(obj, list):
obj = [handle_id_and_kwargs(x) for x in obj]
return obj
def transform_serialized(serialized: Dict[str, Any]) ->Dict[str, Any]:
"""Transforms the serialized field of a run dictionary to be compatible
with WBTraceTree.
:param serialized: The serialized field of a run dictionary.
:return: The transformed serialized field.
"""
serialized = handle_id_and_kwargs(serialized, root=True)
serialized = remove_exact_and_partial_keys(serialized)
return serialized
def transform_run(run: Dict[str, Any]) ->Dict[str, Any]:
"""Transforms a run dictionary to be compatible with WBTraceTree.
:param run: The run dictionary to transform.
:return: The transformed run dictionary.
"""
transformed_dict = transform_serialized(run)
serialized = transformed_dict.pop('serialized')
for k, v in serialized.items():
transformed_dict[k] = v
_kind = transformed_dict.get('_kind', None)
name = transformed_dict.pop('name', None)
exec_ord = transformed_dict.pop('execution_order', None)
if not name:
name = _kind
output_dict = {f'{exec_ord}_{name}': transformed_dict}
return output_dict
return list(map(transform_run, runs))
|
def modify_serialized_iterative(self, runs: List[Dict[str, Any]],
    exact_keys: Tuple[str, ...]=(), partial_keys: Tuple[str, ...]=()) ->List[
    Dict[str, Any]]:
    """Rewrite a list of run dictionaries into a WBTraceTree-friendly shape.

    For every run this:
      * strips keys listed in ``exact_keys`` and keys containing any
        substring in ``partial_keys``;
      * flattens each nested ``kwargs`` dictionary into its parent;
      * replaces nested ``id``/``name`` fields with a ``_kind`` marker that
        tells WBTraceTree how to visualize the component;
      * promotes the ``serialized`` payload to the top level and keys the
        run by ``"<execution_order>_<name>"``.

    :param runs: The list of runs to modify.
    :param exact_keys: Keys to remove from the serialized field.
    :param partial_keys: Key substrings to remove from the serialized field.
    :return: The modified list of runs.
    """

    def prune(node: Any) ->Any:
        """Recursively drop exact/partial key matches from nested dicts."""
        if isinstance(node, dict):
            kept = {}
            for key, value in node.items():
                if key in exact_keys:
                    continue
                if any(fragment in key for fragment in partial_keys):
                    continue
                kept[key] = prune(value)
            return kept
        if isinstance(node, list):
            return [prune(item) for item in node]
        return node

    def flatten(node: Any, root: bool=False) ->Any:
        """Recursively turn ``id``/``name`` into ``_kind`` and inline kwargs.

        The root run dict keeps its own id/name; only nested serialized
        components are rewritten to the "_kind" marker.
        """
        if isinstance(node, dict):
            if not root and ('id' in node or 'name' in node):
                # "id" is a list of path segments; its last segment names
                # the component class that WBTraceTree visualizes.
                kind_path = node.get('id') or [node.get('name')]
                node['_kind'] = kind_path[-1]
                node.pop('id', None)
                node.pop('name', None)
                if 'kwargs' in node:
                    node.update(node.pop('kwargs'))
            for key, value in node.items():
                node[key] = flatten(value)
            return node
        if isinstance(node, list):
            return [flatten(item) for item in node]
        return node

    def rebuild(run: Dict[str, Any]) ->Dict[str, Any]:
        """Transform one run and key it by execution order and name."""
        cleaned = prune(flatten(run, root=True))
        # Promote everything under "serialized" to the run's top level.
        cleaned.update(cleaned.pop('serialized'))
        kind = cleaned.get('_kind', None)
        # Fall back to the component kind when the run has no usable name.
        run_name = cleaned.pop('name', None) or kind
        exec_order = cleaned.pop('execution_order', None)
        # Key by execution order so sibling runs sort chronologically.
        return {f'{exec_order}_{run_name}': cleaned}
    return [rebuild(run) for run in runs]
|
Utility to modify the serialized field of a list of runs dictionaries.
removes any keys that match the exact_keys and any keys that contain any of the
partial_keys.
recursively moves the dictionaries under the kwargs key to the top level.
changes the "id" field to a string "_kind" field that tells WBTraceTree how to
visualize the run. promotes the "serialized" field to the top level.
:param runs: The list of runs to modify.
:param exact_keys: A tuple of keys to remove from the serialized field.
:param partial_keys: A tuple of partial keys to remove from the serialized
field.
:return: The modified list of runs.
|
remove_html_tags
|
from bs4 import BeautifulSoup
soup = BeautifulSoup(html_string, 'html.parser')
return soup.get_text()
|
def remove_html_tags(self, html_string: str) ->str:
    """Strip all HTML markup from ``html_string`` and return the text content."""
    from bs4 import BeautifulSoup
    parsed = BeautifulSoup(html_string, 'html.parser')
    return parsed.get_text()
| null |
__init__
|
"""Initialize callback handler."""
super().__init__()
self.model_id = model_id
self.model_version = model_version
self.space_key = SPACE_KEY
self.api_key = API_KEY
self.prompt_records: List[str] = []
self.response_records: List[str] = []
self.prediction_ids: List[str] = []
self.pred_timestamps: List[int] = []
self.response_embeddings: List[float] = []
self.prompt_embeddings: List[float] = []
self.prompt_tokens = 0
self.completion_tokens = 0
self.total_tokens = 0
self.step = 0
from arize.pandas.embeddings import EmbeddingGenerator, UseCases
from arize.pandas.logger import Client
self.generator = EmbeddingGenerator.from_use_case(use_case=UseCases.NLP.
SEQUENCE_CLASSIFICATION, model_name='distilbert-base-uncased',
tokenizer_max_length=512, batch_size=256)
self.arize_client = Client(space_key=SPACE_KEY, api_key=API_KEY)
if SPACE_KEY == 'SPACE_KEY' or API_KEY == 'API_KEY':
raise ValueError('❌ CHANGE SPACE AND API KEYS')
else:
print('✅ Arize client setup done! Now you can start using Arize!')
|
def __init__(self, model_id: Optional[str]=None, model_version: Optional[
    str]=None, SPACE_KEY: Optional[str]=None, API_KEY: Optional[str]=None
    ) ->None:
    """Initialize callback handler.

    Args:
        model_id: Identifier of the model to log predictions under.
        model_version: Version string of the model to log predictions under.
        SPACE_KEY: Arize space key; the literal placeholder 'SPACE_KEY'
            is rejected.
        API_KEY: Arize API key; the literal placeholder 'API_KEY'
            is rejected.

    Raises:
        ValueError: If either credential is still a placeholder value.
    """
    super().__init__()
    # Fail fast on placeholder credentials BEFORE building the embedding
    # generator and Arize client; previously this was validated only after
    # the client had already been constructed.
    if SPACE_KEY == 'SPACE_KEY' or API_KEY == 'API_KEY':
        raise ValueError('❌ CHANGE SPACE AND API KEYS')
    self.model_id = model_id
    self.model_version = model_version
    self.space_key = SPACE_KEY
    self.api_key = API_KEY
    # Accumulators for prompt/response pairs and their metadata, populated
    # by the logging callbacks.
    self.prompt_records: List[str] = []
    self.response_records: List[str] = []
    self.prediction_ids: List[str] = []
    self.pred_timestamps: List[int] = []
    self.response_embeddings: List[float] = []
    self.prompt_embeddings: List[float] = []
    # Running token-usage counters across calls.
    self.prompt_tokens = 0
    self.completion_tokens = 0
    self.total_tokens = 0
    self.step = 0
    # Function-local import avoids a hard dependency on arize at module
    # import time.
    from arize.pandas.embeddings import EmbeddingGenerator, UseCases
    from arize.pandas.logger import Client
    self.generator = EmbeddingGenerator.from_use_case(use_case=UseCases.NLP
        .SEQUENCE_CLASSIFICATION, model_name='distilbert-base-uncased',
        tokenizer_max_length=512, batch_size=256)
    self.arize_client = Client(space_key=SPACE_KEY, api_key=API_KEY)
    print('✅ Arize client setup done! Now you can start using Arize!')
|
Initialize callback handler.
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.