method_name
stringlengths 1
78
| method_body
stringlengths 3
9.66k
| full_code
stringlengths 31
10.7k
| docstring
stringlengths 4
4.74k
⌀ |
|---|---|---|---|
load_ts_git_dataset
|
json_url = 'https://s3.amazonaws.com/assets.timescale.com/ai/ts_git_log.json'
tmp_file = 'ts_git_log.json'
temp_dir = tempfile.gettempdir()
json_file_path = os.path.join(temp_dir, tmp_file)
if not os.path.exists(json_file_path):
response = requests.get(json_url)
if response.status_code == 200:
with open(json_file_path, 'w') as json_file:
json_file.write(response.text)
else:
print(
f'Failed to download JSON file. Status code: {response.status_code}'
)
loader = JSONLoader(file_path=json_file_path, jq_schema='.commit_history[]',
text_content=False, metadata_func=extract_metadata)
documents = loader.load()
documents = [doc for doc in documents if doc.metadata['date'] is not None]
if num_records > 0:
documents = documents[:num_records]
text_splitter = CharacterTextSplitter(chunk_size=1000, chunk_overlap=200)
docs = text_splitter.split_documents(documents)
embeddings = OpenAIEmbeddings()
TimescaleVector.from_documents(embedding=embeddings, ids=[doc.metadata['id'
] for doc in docs], documents=docs, collection_name=collection_name,
service_url=service_url, time_partition_interval=partition_interval)
|
def load_ts_git_dataset(service_url, collection_name='timescale_commits',
    num_records: int=500, partition_interval=timedelta(days=7)):
    """Load the TimescaleDB git-log dataset into a TimescaleVector collection.

    Downloads the commit-history JSON once (cached in the temp directory),
    drops commits without a date, optionally truncates to ``num_records``,
    chunks the documents, and stores OpenAI embeddings in TimescaleVector.
    """
    json_url = (
        'https://s3.amazonaws.com/assets.timescale.com/ai/ts_git_log.json')
    json_file_path = os.path.join(tempfile.gettempdir(), 'ts_git_log.json')
    # Download only when no cached copy exists in the temp directory.
    if not os.path.exists(json_file_path):
        response = requests.get(json_url)
        if response.status_code == 200:
            with open(json_file_path, 'w') as fh:
                fh.write(response.text)
        else:
            print(
                f'Failed to download JSON file. Status code: {response.status_code}'
                )
    loader = JSONLoader(file_path=json_file_path, jq_schema=
        '.commit_history[]', text_content=False, metadata_func=extract_metadata
        )
    # Discard commits with no usable date before truncating.
    records = [doc for doc in loader.load() if doc.metadata['date'] is not None
        ]
    if num_records > 0:
        records = records[:num_records]
    splitter = CharacterTextSplitter(chunk_size=1000, chunk_overlap=200)
    chunks = splitter.split_documents(records)
    TimescaleVector.from_documents(embedding=OpenAIEmbeddings(), ids=[chunk
        .metadata['id'] for chunk in chunks], documents=chunks,
        collection_name=collection_name, service_url=service_url,
        time_partition_interval=partition_interval)
| null |
on_llm_start
|
"""Run when LLM starts."""
aim = import_aim()
self.step += 1
self.llm_starts += 1
self.starts += 1
resp = {'action': 'on_llm_start'}
resp.update(self.get_custom_callback_meta())
prompts_res = deepcopy(prompts)
self._run.track([aim.Text(prompt) for prompt in prompts_res], name=
'on_llm_start', context=resp)
|
def on_llm_start(self, serialized: Dict[str, Any], prompts: List[str], **
    kwargs: Any) ->None:
    """Record the start of an LLM run and track its prompts with Aim."""
    aim = import_aim()
    # Advance the shared counters kept on this callback handler.
    self.step += 1
    self.llm_starts += 1
    self.starts += 1
    context = {'action': 'on_llm_start'}
    context.update(self.get_custom_callback_meta())
    # Copy the prompts so later mutation by the caller cannot affect the log.
    tracked = deepcopy(prompts)
    self._run.track([aim.Text(text) for text in tracked], name=
        'on_llm_start', context=context)
|
Run when LLM starts.
|
_run
|
logging.getLogger(__name__)
try:
result = self.client.conversations_history(channel=channel_id)
messages = result['messages']
filtered_messages = [{key: message[key] for key in ('user', 'text',
'ts')} for message in messages if 'user' in message and 'text' in
message and 'ts' in message]
return json.dumps(filtered_messages)
except Exception as e:
return 'Error creating conversation: {}'.format(e)
|
def _run(self, channel_id: str, run_manager: Optional[
    CallbackManagerForToolRun]=None) ->str:
    """Fetch the message history of a Slack channel.

    Args:
        channel_id: ID of the channel whose history is retrieved.
        run_manager: Optional callback manager (unused).

    Returns:
        A JSON string of messages, each reduced to its ``user``, ``text``
        and ``ts`` fields, or an error string on failure.
    """
    logging.getLogger(__name__)
    try:
        result = self.client.conversations_history(channel=channel_id)
        messages = result['messages']
        # Keep only well-formed messages and strip each down to the three
        # fields callers need.
        filtered_messages = [{key: message[key] for key in ('user', 'text',
            'ts')} for message in messages if 'user' in message and 'text' in
            message and 'ts' in message]
        return json.dumps(filtered_messages)
    except Exception as e:
        # Bug fix: this tool *reads* channel history; the old message
        # wrongly claimed it was "creating" a conversation.
        return 'Error getting conversation history: {}'.format(e)
| null |
test_unstructured_api_file_io_loader
|
"""Test unstructured loader."""
file_path = os.path.join(EXAMPLE_DOCS_DIRECTORY, 'layout-parser-paper.pdf')
with open(file_path, 'rb') as f:
loader = UnstructuredAPIFileIOLoader(file=f, api_key='FAKE_API_KEY',
strategy='fast', mode='elements', file_filename=file_path)
docs = loader.load()
assert len(docs) > 1
|
def test_unstructured_api_file_io_loader() ->None:
    """Test unstructured loader."""
    pdf_path = os.path.join(EXAMPLE_DOCS_DIRECTORY, 'layout-parser-paper.pdf')
    with open(pdf_path, 'rb') as pdf_file:
        loader = UnstructuredAPIFileIOLoader(file=pdf_file, api_key=
            'FAKE_API_KEY', strategy='fast', mode='elements',
            file_filename=pdf_path)
        docs = loader.load()
    assert len(docs) > 1
|
Test unstructured loader.
|
remove
|
"""
Removes the specified package from the current LangServe app.
"""
project_root = get_package_root(project_dir)
project_pyproject = project_root / 'pyproject.toml'
package_root = project_root / 'packages'
remove_deps: List[str] = []
for api_path in api_paths:
package_dir = package_root / api_path
if not package_dir.exists():
typer.echo(f'Package {api_path} does not exist. Skipping...')
continue
try:
pyproject = package_dir / 'pyproject.toml'
langserve_export = get_langserve_export(pyproject)
typer.echo(f"Removing {langserve_export['package_name']}...")
shutil.rmtree(package_dir)
remove_deps.append(api_path)
except Exception:
pass
try:
remove_dependencies_from_pyproject_toml(project_pyproject, remove_deps)
except Exception:
typer.echo('Failed to remove dependencies from pyproject.toml.')
|
@app_cli.command()
def remove(api_paths: Annotated[List[str], typer.Argument(help=
    'The API paths to remove')], *, project_dir: Annotated[Optional[Path],
    typer.Option(help='The project directory')]=None):
    """
    Removes the specified package from the current LangServe app.
    """
    project_root = get_package_root(project_dir)
    project_pyproject = project_root / 'pyproject.toml'
    package_root = project_root / 'packages'
    remove_deps: List[str] = []
    for api_path in api_paths:
        package_dir = package_root / api_path
        if not package_dir.exists():
            typer.echo(f'Package {api_path} does not exist. Skipping...')
            continue
        try:
            pyproject = package_dir / 'pyproject.toml'
            langserve_export = get_langserve_export(pyproject)
            typer.echo(f"Removing {langserve_export['package_name']}...")
            shutil.rmtree(package_dir)
            remove_deps.append(api_path)
        except Exception:
            # Previously this failure was swallowed silently; surface it so
            # the user knows the package was not fully removed.
            typer.echo(f'Failed to remove {api_path}. Skipping...')
    try:
        remove_dependencies_from_pyproject_toml(project_pyproject, remove_deps)
    except Exception:
        typer.echo('Failed to remove dependencies from pyproject.toml.')
|
Removes the specified package from the current LangServe app.
|
is_lc_serializable
|
"""Return whether this model can be serialized by Langchain."""
return False
|
@classmethod
def is_lc_serializable(cls) ->bool:
    """Return whether this model can be serialized by Langchain."""
    # This model explicitly opts out of LangChain serialization.
    return False
|
Return whether this model can be serialized by Langchain.
|
test_get_internal_transaction
|
account_address = '0x9dd134d14d1e65f84b706d6f205cd5b1cd03a46b'
loader = EtherscanLoader(account_address, filter='internal_transaction')
result = loader.load()
assert len(result) > 0, 'No transactions returned'
|
@pytest.mark.skipif(not etherscan_key_set, reason=
    'Etherscan API key not provided.')
def test_get_internal_transaction() ->None:
    """Internal transactions should be returned for a known account."""
    address = '0x9dd134d14d1e65f84b706d6f205cd5b1cd03a46b'
    docs = EtherscanLoader(address, filter='internal_transaction').load()
    assert len(docs) > 0, 'No transactions returned'
| null |
_import_google_serper
|
from langchain_community.utilities.google_serper import GoogleSerperAPIWrapper
return GoogleSerperAPIWrapper
|
def _import_google_serper() ->Any:
    """Lazily import and return the ``GoogleSerperAPIWrapper`` class."""
    from langchain_community.utilities.google_serper import GoogleSerperAPIWrapper
    return GoogleSerperAPIWrapper
| null |
test_create_action_payload_with_params
|
"""Test that the action payload with params is being created correctly."""
tool = ZapierNLARunAction(action_id='test', zapier_description='test',
params_schema={'test': 'test'}, api_wrapper=ZapierNLAWrapper(
zapier_nla_api_key='test'))
payload = tool.api_wrapper._create_action_payload('some instructions', {
'test': 'test'}, preview_only=True)
assert payload['instructions'] == 'some instructions'
assert payload['preview_only'] is True
assert payload['test'] == 'test'
|
def test_create_action_payload_with_params() ->None:
    """Test that the action payload with params is being created correctly."""
    wrapper = ZapierNLAWrapper(zapier_nla_api_key='test')
    tool = ZapierNLARunAction(action_id='test', zapier_description='test',
        params_schema={'test': 'test'}, api_wrapper=wrapper)
    payload = tool.api_wrapper._create_action_payload('some instructions',
        {'test': 'test'}, preview_only=True)
    assert payload['instructions'] == 'some instructions'
    assert payload['preview_only'] is True
    assert payload['test'] == 'test'
|
Test that the action payload with params is being created correctly.
|
get_files_from_directory
|
"""
Recursively fetches files from a directory in the repo.
Parameters:
directory_path (str): Path to the directory
Returns:
str: List of file paths, or an error message.
"""
from github import GithubException
files: List[str] = []
try:
contents = self.github_repo_instance.get_contents(directory_path, ref=
self.active_branch)
except GithubException as e:
return f'Error: status code {e.status}, {e.message}'
for content in contents:
if content.type == 'dir':
files.extend(self.get_files_from_directory(content.path))
else:
files.append(content.path)
return str(files)
|
def get_files_from_directory(self, directory_path: str) ->str:
    """
    Recursively fetches files from a directory in the repo.
    Parameters:
        directory_path (str): Path to the directory
    Returns:
        str: List of file paths, or an error message.
    """
    from github import GithubException
    files: List[str] = []
    try:
        contents = self.github_repo_instance.get_contents(directory_path,
            ref=self.active_branch)
    except GithubException as e:
        # Bug fix: GithubException exposes ``status`` and ``data`` but has
        # no ``message`` attribute, so the old handler itself raised
        # AttributeError instead of returning the error string.
        return f'Error: status code {e.status}, {e.data}'
    # Recurse into subdirectories; collect plain file paths.
    for content in contents:
        if content.type == 'dir':
            files.extend(self.get_files_from_directory(content.path))
        else:
            files.append(content.path)
    return str(files)
|
Recursively fetches files from a directory in the repo.
Parameters:
directory_path (str): Path to the directory
Returns:
str: List of file paths, or an error message.
|
_invocation_params
|
if is_openai_v1():
openai_args: Dict = {'model': self.model, **self.model_kwargs}
else:
openai_args = {'model': self.model, 'request_timeout': self.
request_timeout, 'headers': self.headers, 'api_key': self.
openai_api_key, 'organization': self.openai_organization,
'api_base': self.openai_api_base, 'api_type': self.openai_api_type,
'api_version': self.openai_api_version, **self.model_kwargs}
if self.openai_api_type in ('azure', 'azure_ad', 'azuread'):
openai_args['engine'] = self.deployment
if self.openai_proxy:
try:
import openai
except ImportError:
raise ImportError(
'Could not import openai python package. Please install it with `pip install openai`.'
)
openai.proxy = {'http': self.openai_proxy, 'https': self.openai_proxy}
return openai_args
|
@property
def _invocation_params(self) ->Dict[str, Any]:
    """Assemble the keyword arguments for the underlying OpenAI call."""
    if is_openai_v1():
        # openai>=1.0 clients carry connection settings themselves.
        args: Dict = {'model': self.model, **self.model_kwargs}
    else:
        args = {'model': self.model, 'request_timeout': self.
            request_timeout, 'headers': self.headers, 'api_key': self.
            openai_api_key, 'organization': self.openai_organization,
            'api_base': self.openai_api_base, 'api_type': self.
            openai_api_type, 'api_version': self.openai_api_version, **
            self.model_kwargs}
    if self.openai_api_type in ('azure', 'azure_ad', 'azuread'):
        # Azure deployments are addressed via the ``engine`` argument.
        args['engine'] = self.deployment
    if self.openai_proxy:
        try:
            import openai
        except ImportError:
            raise ImportError(
                'Could not import openai python package. Please install it with `pip install openai`.'
                )
        openai.proxy = {'http': self.openai_proxy, 'https': self.openai_proxy}
    return args
| null |
get_default_prompt
|
human_template = (
'Given this based_on "{rl_chain_selected_based_on}" as the most important attribute, rank how good or bad this text is: "{rl_chain_selected}".'
)
human_message_prompt = HumanMessagePromptTemplate.from_template(human_template)
default_system_prompt = AutoSelectionScorer.get_default_system_prompt()
chat_prompt = ChatPromptTemplate.from_messages([default_system_prompt,
human_message_prompt])
return chat_prompt
|
@staticmethod
def get_default_prompt() ->ChatPromptTemplate:
    """Build the default scoring prompt from the system and human templates."""
    human_template = (
        'Given this based_on "{rl_chain_selected_based_on}" as the most important attribute, rank how good or bad this text is: "{rl_chain_selected}".'
        )
    messages = [AutoSelectionScorer.get_default_system_prompt(),
        HumanMessagePromptTemplate.from_template(human_template)]
    return ChatPromptTemplate.from_messages(messages)
| null |
test_psychic_loader_load_data
|
mock_get_documents_response = MagicMock()
mock_get_documents_response.documents = [self._get_mock_document('123'),
self._get_mock_document('456')]
mock_get_documents_response.next_page_cursor = None
mock_psychic.get_documents.return_value = mock_get_documents_response
psychic_loader = self._get_mock_psychic_loader(mock_psychic)
documents = psychic_loader.load()
assert mock_psychic.get_documents.call_count == 1
assert len(documents) == 2
assert all(isinstance(doc, Document) for doc in documents)
assert documents[0].page_content == 'Content 123'
assert documents[1].page_content == 'Content 456'
|
def test_psychic_loader_load_data(self, mock_psychic: MagicMock) ->None:
    """Loader should convert every mocked Psychic record into a Document."""
    response = MagicMock()
    response.documents = [self._get_mock_document('123'), self.
        _get_mock_document('456')]
    response.next_page_cursor = None
    mock_psychic.get_documents.return_value = response
    loader = self._get_mock_psychic_loader(mock_psychic)
    documents = loader.load()
    assert mock_psychic.get_documents.call_count == 1
    assert len(documents) == 2
    assert all(isinstance(doc, Document) for doc in documents)
    assert documents[0].page_content == 'Content 123'
    assert documents[1].page_content == 'Content 456'
| null |
get_graph
|
from langchain_core.runnables.graph import Graph
graph = Graph()
for step in self.steps:
current_last_node = graph.last_node()
step_graph = step.get_graph(config)
if step is not self.first:
step_graph.trim_first_node()
if step is not self.last:
step_graph.trim_last_node()
graph.extend(step_graph)
step_first_node = step_graph.first_node()
if not step_first_node:
raise ValueError(f'Runnable {step} has no first node')
if current_last_node:
graph.add_edge(current_last_node, step_first_node)
return graph
|
def get_graph(self, config: Optional[RunnableConfig]=None) ->Graph:
    """Build a single Graph for this sequence by stitching together the
    subgraphs of its steps.

    Interior steps have their synthetic boundary nodes trimmed so that
    consecutive steps connect directly; an edge is added from the tail of
    each merged subgraph to the head of the next.
    """
    from langchain_core.runnables.graph import Graph
    graph = Graph()
    for step in self.steps:
        # Remember where the combined graph currently ends before merging.
        current_last_node = graph.last_node()
        step_graph = step.get_graph(config)
        # Keep the outermost input/output nodes only at the chain's ends.
        if step is not self.first:
            step_graph.trim_first_node()
        if step is not self.last:
            step_graph.trim_last_node()
        graph.extend(step_graph)
        step_first_node = step_graph.first_node()
        if not step_first_node:
            raise ValueError(f'Runnable {step} has no first node')
        if current_last_node:
            # Connect the previous step's tail to this step's head.
            graph.add_edge(current_last_node, step_first_node)
    return graph
| null |
_import_surrealdb
|
from langchain_community.vectorstores.surrealdb import SurrealDBStore
return SurrealDBStore
|
def _import_surrealdb() ->Any:
    """Lazily import and return the ``SurrealDBStore`` class."""
    from langchain_community.vectorstores.surrealdb import SurrealDBStore
    return SurrealDBStore
| null |
resolve_criteria
|
"""Resolve the criteria to evaluate.
Parameters
----------
criteria : CRITERIA_TYPE
The criteria to evaluate the runs against. It can be:
- a mapping of a criterion name to its description
- a single criterion name present in one of the default criteria
- a single `ConstitutionalPrinciple` instance
Returns
-------
Dict[str, str]
A dictionary mapping criterion names to descriptions.
Examples
--------
>>> criterion = "relevance"
>>> CriteriaEvalChain.resolve_criteria(criteria)
{'relevance': 'Is the submission referring to a real quote from the text?'}
"""
return resolve_criteria(criteria)
|
@classmethod
def resolve_criteria(cls, criteria: Optional[Union[CRITERIA_TYPE, str]]
    ) ->Dict[str, str]:
    """Resolve the criteria to evaluate.
    Parameters
    ----------
    criteria : CRITERIA_TYPE
        The criteria to evaluate the runs against. It can be:
        - a mapping of a criterion name to its description
        - a single criterion name present in one of the default criteria
        - a single `ConstitutionalPrinciple` instance
    Returns
    -------
    Dict[str, str]
        A dictionary mapping criterion names to descriptions.
    Examples
    --------
    >>> criterion = "relevance"
    >>> CriteriaEvalChain.resolve_criteria(criterion)
    {'relevance': 'Is the submission referring to a real quote from the text?'}
    """
    # Delegates to the module-level resolver shared by all evaluators.
    return resolve_criteria(criteria)
|
Resolve the criteria to evaluate.
Parameters
----------
criteria : CRITERIA_TYPE
The criteria to evaluate the runs against. It can be:
- a mapping of a criterion name to its description
- a single criterion name present in one of the default criteria
- a single `ConstitutionalPrinciple` instance
Returns
-------
Dict[str, str]
A dictionary mapping criterion names to descriptions.
Examples
--------
>>> criterion = "relevance"
>>> CriteriaEvalChain.resolve_criteria(criteria)
{'relevance': 'Is the submission referring to a real quote from the text?'}
|
test_replicate_model_kwargs
|
"""Test simple non-streaming call to Replicate."""
llm = Replicate(model=TEST_MODEL, model_kwargs={'max_length': 100,
'temperature': 0.01})
long_output = llm('What is LangChain')
llm = Replicate(model=TEST_MODEL, model_kwargs={'max_length': 10,
'temperature': 0.01})
short_output = llm('What is LangChain')
assert len(short_output) < len(long_output)
assert llm.model_kwargs == {'max_length': 10, 'temperature': 0.01}
|
def test_replicate_model_kwargs() ->None:
    """Test simple non-streaming call to Replicate."""
    long_llm = Replicate(model=TEST_MODEL, model_kwargs={'max_length': 100,
        'temperature': 0.01})
    long_output = long_llm('What is LangChain')
    llm = Replicate(model=TEST_MODEL, model_kwargs={'max_length': 10,
        'temperature': 0.01})
    short_output = llm('What is LangChain')
    assert len(short_output) < len(long_output)
    assert llm.model_kwargs == {'max_length': 10, 'temperature': 0.01}
|
Test simple non-streaming call to Replicate.
|
__init__
|
self.dimension = dimension
|
def __init__(self, dimension: int) ->None:
    """Remember the embedding dimensionality for later use."""
    self.dimension = dimension
| null |
get_structured_schema
|
"""Returns the schema of the Graph database"""
return {}
|
@property
def get_structured_schema(self) ->Dict[str, Any]:
    """Returns the schema of the Graph database"""
    # No structured schema is available for this graph implementation.
    return {}
|
Returns the schema of the Graph database
|
validate_environment
|
"""Validates that the OpaquePrompts API key and the Python package exist."""
try:
import opaqueprompts as op
except ImportError:
raise ImportError(
'Could not import the `opaqueprompts` Python package, please install it with `pip install opaqueprompts`.'
)
if op.__package__ is None:
raise ValueError(
'Could not properly import `opaqueprompts`, opaqueprompts.__package__ is None.'
)
api_key = get_from_dict_or_env(values, 'opaqueprompts_api_key',
'OPAQUEPROMPTS_API_KEY', default='')
if not api_key:
raise ValueError(
'Could not find OPAQUEPROMPTS_API_KEY in the environment. Please set it to your OpaquePrompts API key.You can get it by creating an account on the OpaquePrompts website: https://opaqueprompts.opaque.co/ .'
)
return values
|
@root_validator()
def validate_environment(cls, values: Dict) ->Dict:
    """Validates that the OpaquePrompts API key and the Python package exist.

    Raises:
        ImportError: If the ``opaqueprompts`` package is missing.
        ValueError: If the package is broken or no API key is configured.
    """
    try:
        import opaqueprompts as op
    except ImportError:
        raise ImportError(
            'Could not import the `opaqueprompts` Python package, please install it with `pip install opaqueprompts`.'
            )
    if op.__package__ is None:
        raise ValueError(
            'Could not properly import `opaqueprompts`, opaqueprompts.__package__ is None.'
            )
    api_key = get_from_dict_or_env(values, 'opaqueprompts_api_key',
        'OPAQUEPROMPTS_API_KEY', default='')
    if not api_key:
        # Message fix: the old text ran "API key.You" together without a
        # space between the sentences.
        raise ValueError(
            'Could not find OPAQUEPROMPTS_API_KEY in the environment. Please set it to your OpaquePrompts API key. You can get it by creating an account on the OpaquePrompts website: https://opaqueprompts.opaque.co/ .'
            )
    return values
|
Validates that the OpaquePrompts API key and the Python package exist.
|
test_connect_arangodb
|
"""Test that the ArangoDB database is correctly instantiated and connected."""
graph = ArangoGraph(get_arangodb_client())
sample_aql_result = graph.query("RETURN 'hello world'")
assert ['hello_world'] == sample_aql_result
|
def test_connect_arangodb() ->None:
    """Test that the ArangoDB database is correctly instantiated and connected."""
    graph = ArangoGraph(get_arangodb_client())
    sample_aql_result = graph.query("RETURN 'hello world'")
    # Bug fix: the AQL above returns the literal 'hello world' (with a
    # space); the old assertion compared against 'hello_world' and could
    # never pass.
    assert ['hello world'] == sample_aql_result
|
Test that the ArangoDB database is correctly instantiated and connected.
|
test_ai_endpoints_invoke
|
"""Test invoke tokens."""
llm = ChatNVIDIA(model='llama2_13b', max_tokens=60)
result = llm.invoke("I'm Pickle Rick", config=dict(tags=['foo']))
assert isinstance(result.content, str)
|
def test_ai_endpoints_invoke() ->None:
    """Test invoke tokens."""
    chat = ChatNVIDIA(model='llama2_13b', max_tokens=60)
    response = chat.invoke("I'm Pickle Rick", config=dict(tags=['foo']))
    assert isinstance(response.content, str)
|
Test invoke tokens.
|
_call
|
"""Call the textgen web API and return the output.
Args:
prompt: The prompt to use for generation.
stop: A list of strings to stop generation when encountered.
Returns:
The generated text.
Example:
.. code-block:: python
from langchain_community.llms import TextGen
llm = TextGen(model_url="http://localhost:5000")
llm("Write a story about llamas.")
"""
if self.streaming:
combined_text_output = ''
for chunk in self._stream(prompt=prompt, stop=stop, run_manager=
run_manager, **kwargs):
combined_text_output += chunk.text
result = combined_text_output
else:
url = f'{self.model_url}/api/v1/generate'
params = self._get_parameters(stop)
request = params.copy()
request['prompt'] = prompt
response = requests.post(url, json=request)
if response.status_code == 200:
result = response.json()['results'][0]['text']
else:
print(f'ERROR: response: {response}')
result = ''
return result
|
def _call(self, prompt: str, stop: Optional[List[str]]=None, run_manager:
    Optional[CallbackManagerForLLMRun]=None, **kwargs: Any) ->str:
    """Call the textgen web API and return the output.
    Args:
        prompt: The prompt to use for generation.
        stop: A list of strings to stop generation when encountered.
    Returns:
        The generated text.
    Example:
        .. code-block:: python
            from langchain_community.llms import TextGen
            llm = TextGen(model_url="http://localhost:5000")
            llm("Write a story about llamas.")
    """
    if self.streaming:
        # Stream mode: concatenate the chunks as they arrive.
        pieces = [chunk.text for chunk in self._stream(prompt=prompt, stop
            =stop, run_manager=run_manager, **kwargs)]
        return ''.join(pieces)
    # Blocking mode: one POST to the generate endpoint.
    url = f'{self.model_url}/api/v1/generate'
    request = self._get_parameters(stop).copy()
    request['prompt'] = prompt
    response = requests.post(url, json=request)
    if response.status_code != 200:
        print(f'ERROR: response: {response}')
        return ''
    return response.json()['results'][0]['text']
|
Call the textgen web API and return the output.
Args:
prompt: The prompt to use for generation.
stop: A list of strings to stop generation when encountered.
Returns:
The generated text.
Example:
.. code-block:: python
from langchain_community.llms import TextGen
llm = TextGen(model_url="http://localhost:5000")
llm("Write a story about llamas.")
|
__init__
|
from azure.ai.documentintelligence import DocumentIntelligenceClient
from azure.core.credentials import AzureKeyCredential
kwargs = {}
if api_version is not None:
kwargs['api_version'] = api_version
self.client = DocumentIntelligenceClient(endpoint=api_endpoint, credential=
AzureKeyCredential(api_key), headers={'x-ms-useragent':
'langchain-parser/1.0.0'}, **kwargs)
self.api_model = api_model
self.mode = mode
assert self.mode in ['single', 'page', 'object', 'markdown']
|
def __init__(self, api_endpoint: str, api_key: str, api_version: Optional[
    str]=None, api_model: str='prebuilt-layout', mode: str='markdown'):
    """Create an Azure Document Intelligence parser.

    Args:
        api_endpoint: Azure Document Intelligence endpoint URL.
        api_key: API key for the endpoint.
        api_version: Optional service API version override.
        api_model: Model name, e.g. 'prebuilt-layout'.
        mode: Output mode; one of 'single', 'page', 'object' or 'markdown'.

    Raises:
        ValueError: If ``mode`` is not a supported value.
    """
    # Validate with a real exception before doing any work: the old
    # ``assert`` would be stripped under ``python -O`` and silently let an
    # invalid mode through.
    if mode not in ('single', 'page', 'object', 'markdown'):
        raise ValueError(f'Invalid mode: {mode}')
    from azure.ai.documentintelligence import DocumentIntelligenceClient
    from azure.core.credentials import AzureKeyCredential
    kwargs = {}
    if api_version is not None:
        kwargs['api_version'] = api_version
    self.client = DocumentIntelligenceClient(endpoint=api_endpoint,
        credential=AzureKeyCredential(api_key), headers={'x-ms-useragent':
        'langchain-parser/1.0.0'}, **kwargs)
    self.api_model = api_model
    self.mode = mode
| null |
similarity_search_test
|
"""Test end to end construction and search."""
embeddings: OpenAIEmbeddings = OpenAIEmbeddings(model=model, chunk_size=1)
vector_store: AzureSearch = AzureSearch(azure_search_endpoint=
vector_store_address, azure_search_key=vector_store_password,
index_name=index_name, embedding_function=embeddings.embed_query)
vector_store.add_texts(['Test 1', 'Test 2', 'Test 3'], [{'title': 'Title 1',
'any_metadata': 'Metadata 1'}, {'title': 'Title 2', 'any_metadata':
'Metadata 2'}, {'title': 'Title 3', 'any_metadata': 'Metadata 3'}])
time.sleep(1)
res = vector_store.similarity_search(query='Test 1', k=3)
assert len(res) == 3
|
@pytest.fixture
def similarity_search_test() ->None:
    """Test end to end construction and search."""
    # NOTE(review): this function asserts like a test but is registered as a
    # pytest *fixture*, so pytest will not collect or run it as a test —
    # confirm whether @pytest.fixture is intentional here.
    embeddings: OpenAIEmbeddings = OpenAIEmbeddings(model=model, chunk_size=1)
    vector_store: AzureSearch = AzureSearch(azure_search_endpoint=
        vector_store_address, azure_search_key=vector_store_password,
        index_name=index_name, embedding_function=embeddings.embed_query)
    vector_store.add_texts(['Test 1', 'Test 2', 'Test 3'], [{'title':
        'Title 1', 'any_metadata': 'Metadata 1'}, {'title': 'Title 2',
        'any_metadata': 'Metadata 2'}, {'title': 'Title 3', 'any_metadata':
        'Metadata 3'}])
    # Give the Azure Search index a moment to become consistent before
    # querying.
    time.sleep(1)
    res = vector_store.similarity_search(query='Test 1', k=3)
    assert len(res) == 3
|
Test end to end construction and search.
|
_is_jupyter_environment
|
try:
from IPython import get_ipython
res = get_ipython()
return get_ipython() is not None and 'zmqshell' in str(type(res))
except ImportError:
return False
|
def _is_jupyter_environment() ->bool:
    """Return True when running inside a Jupyter (ZMQ-kernel) session."""
    try:
        from IPython import get_ipython
        shell = get_ipython()
        # Fix: the original called get_ipython() a second time for the None
        # check; reuse the shell object obtained above.
        return shell is not None and 'zmqshell' in str(type(shell))
    except ImportError:
        # IPython not installed -> certainly not a notebook.
        return False
| null |
awrapper
|
async def afunc(*args: Any, **kwargs: Any) ->Any:
return func(*args, **kwargs)
return afunc
|
def awrapper(func: Callable) ->Callable[..., Awaitable[Any]]:
    """Wrap a synchronous callable so it can be awaited.

    Improvement: ``functools.wraps`` preserves the wrapped function's name,
    docstring and other metadata on the returned coroutine function.
    """
    from functools import wraps

    @wraps(func)
    async def afunc(*args: Any, **kwargs: Any) ->Any:
        return func(*args, **kwargs)
    return afunc
| null |
test_from_texts_with_metadatas_cosine_distance
|
texts = ['Dogs are tough.', 'Cats have fluff.', 'What is a sandwich?',
'The fence is purple.']
metadatas = [{'a': 1}, {'b': 1}, {'c': 1}, {'d': 1, 'e': 2}]
vectorstore = AzureCosmosDBVectorSearch.from_texts(texts,
azure_openai_embeddings, metadatas=metadatas, collection=collection,
index_name=INDEX_NAME)
vectorstore.create_index(num_lists, dimensions, similarity_algorithm)
sleep(2)
output = vectorstore.similarity_search('Sandwich', k=1)
assert output
assert output[0].page_content == 'What is a sandwich?'
assert output[0].metadata['c'] == 1
vectorstore.delete_index()
|
def test_from_texts_with_metadatas_cosine_distance(self,
    azure_openai_embeddings: OpenAIEmbeddings, collection: Any) ->None:
    """Round-trip texts with metadata and verify a cosine-distance search."""
    texts = ['Dogs are tough.', 'Cats have fluff.', 'What is a sandwich?',
        'The fence is purple.']
    metadatas = [{'a': 1}, {'b': 1}, {'c': 1}, {'d': 1, 'e': 2}]
    store = AzureCosmosDBVectorSearch.from_texts(texts,
        azure_openai_embeddings, metadatas=metadatas, collection=collection,
        index_name=INDEX_NAME)
    store.create_index(num_lists, dimensions, similarity_algorithm)
    sleep(2)
    hits = store.similarity_search('Sandwich', k=1)
    assert hits
    assert hits[0].page_content == 'What is a sandwich?'
    assert hits[0].metadata['c'] == 1
    store.delete_index()
| null |
test_huggingface_endpoint_text2text_generation
|
"""Test valid call to HuggingFace text2text model."""
llm = HuggingFaceEndpoint(endpoint_url='', task='text2text-generation')
output = llm('The capital of New York is')
assert output == 'Albany'
|
@unittest.skip(
    'This test requires an inference endpoint. Tested with Hugging Face endpoints'
    )
def test_huggingface_endpoint_text2text_generation() ->None:
    """Test valid call to HuggingFace text2text model."""
    endpoint_llm = HuggingFaceEndpoint(endpoint_url='', task=
        'text2text-generation')
    answer = endpoint_llm('The capital of New York is')
    assert answer == 'Albany'
|
Test valid call to HuggingFace text2text model.
|
_wait_until_collection_created
|
"""Sleeps until the collection for this message history is ready
to be queried
"""
self._wait_until(lambda : self._collection_is_ready(),
RocksetChatMessageHistory.CREATE_TIMEOUT_MS)
|
def _wait_until_collection_created(self) ->None:
    """Sleeps until the collection for this message history is ready
    to be queried
    """
    # Pass the bound method directly; wrapping it in a lambda added nothing.
    self._wait_until(self._collection_is_ready,
        RocksetChatMessageHistory.CREATE_TIMEOUT_MS)
|
Sleeps until the collection for this message history is ready
to be queried
|
delete
|
"""Delete by vector IDs.
Args:
ids: List of ids to delete.
"""
if ids is None:
raise ValueError('No ids provided to delete.')
for id in ids:
self.client.delete(index=self.index_name, id=id)
|
def delete(self, ids: Optional[List[str]]=None, **kwargs: Any) ->None:
    """Delete by vector IDs.
    Args:
        ids: List of ids to delete.
    """
    if ids is None:
        raise ValueError('No ids provided to delete.')
    # Issue one delete request per document id against the backing index.
    for doc_id in ids:
        self.client.delete(index=self.index_name, id=doc_id)
|
Delete by vector IDs.
Args:
ids: List of ids to delete.
|
truncate_single
|
"""Utility to truncate a single run dictionary to only keep the specified
keys.
:param run: The run dictionary to truncate.
:return: The truncated run dictionary
"""
new_dict = {}
for key in run:
if key in keep_keys:
new_dict[key] = run.get(key)
return new_dict
|
def truncate_single(run: Dict[str, Any]) ->Dict[str, Any]:
    """Utility to truncate a single run dictionary to only keep the specified
    keys.
    :param run: The run dictionary to truncate.
    :return: The truncated run dictionary
    """
    # ``keep_keys`` comes from the enclosing scope.
    return {key: value for key, value in run.items() if key in keep_keys}
|
Utility to truncate a single run dictionary to only keep the specified
keys.
:param run: The run dictionary to truncate.
:return: The truncated run dictionary
|
_run
|
"""Use the tool."""
return self.api_wrapper.run(query=query, sort=sort, time_filter=time_filter,
subreddit=subreddit, limit=int(limit))
|
def _run(self, query: str, sort: str, time_filter: str, subreddit: str,
    limit: str, run_manager: Optional[CallbackManagerForToolRun]=None) ->str:
    """Use the tool."""
    # ``limit`` arrives as a string from the tool schema; coerce it here.
    parsed_limit = int(limit)
    return self.api_wrapper.run(query=query, sort=sort, time_filter=
        time_filter, subreddit=subreddit, limit=parsed_limit)
|
Use the tool.
|
test_load_converts_dataframe_columns_to_document_metadata
|
loader = PolarsDataFrameLoader(sample_data_frame)
docs = loader.load()
for i, doc in enumerate(docs):
df: pl.DataFrame = sample_data_frame[i]
assert df is not None
assert doc.metadata['author'] == df.select('author').item()
assert doc.metadata['date'] == df.select('date').item()
|
def test_load_converts_dataframe_columns_to_document_metadata(sample_data_frame
    : pl.DataFrame) ->None:
    """Each loaded Document should carry its source row's columns as metadata.

    NOTE(review): indexing ``sample_data_frame[i]`` assumes the loader emits
    documents in the same order as the frame's rows — confirm if flaky.
    """
    loader = PolarsDataFrameLoader(sample_data_frame)
    docs = loader.load()
    for i, doc in enumerate(docs):
        df: pl.DataFrame = sample_data_frame[i]
        assert df is not None
        assert doc.metadata['author'] == df.select('author').item()
        assert doc.metadata['date'] == df.select('date').item()
| null |
__init__
|
"""Initialize DocusaurusLoader
Args:
url: The base URL of the Docusaurus website.
custom_html_tags: Optional custom html tags to extract content from pages.
kwargs: Additional args to extend the underlying SitemapLoader, for example:
filter_urls, blocksize, meta_function, is_local, continue_on_failure
"""
if not kwargs.get('is_local'):
url = f'{url}/sitemap.xml'
self.custom_html_tags = custom_html_tags or ['main article']
super().__init__(url, parsing_function=kwargs.get('parsing_function') or
self._parsing_function, **kwargs)
|
def __init__(self, url: str, custom_html_tags: Optional[List[str]]=None, **
    kwargs: Any):
    """Initialize DocusaurusLoader
    Args:
        url: The base URL of the Docusaurus website.
        custom_html_tags: Optional custom html tags to extract content from pages.
        kwargs: Additional args to extend the underlying SitemapLoader, for example:
            filter_urls, blocksize, meta_function, is_local, continue_on_failure
    """
    # Remote sites are crawled via their sitemap; local paths are used as-is.
    if not kwargs.get('is_local'):
        url = f'{url}/sitemap.xml'
    self.custom_html_tags = custom_html_tags or ['main article']
    # Pop (not get) the caller-supplied parsing_function so it is not passed
    # twice — once explicitly and once via **kwargs — which would raise
    # ``TypeError: got multiple values for keyword argument``.
    parsing_function = kwargs.pop('parsing_function', None) or self._parsing_function
    super().__init__(url, parsing_function=parsing_function, **kwargs)
|
Initialize DocusaurusLoader
Args:
url: The base URL of the Docusaurus website.
custom_html_tags: Optional custom html tags to extract content from pages.
kwargs: Additional args to extend the underlying SitemapLoader, for example:
filter_urls, blocksize, meta_function, is_local, continue_on_failure
|
_collect_metadata
|
"""Collect metadata from Redis.
Method ensures that there isn't a mismatch between the metadata
and the index schema passed to this class by the user or generated
by this class.
Args:
result (Document): redis.commands.search.Document object returned
from Redis.
Returns:
Dict[str, Any]: Collected metadata.
"""
meta = {}
for key in self._schema.metadata_keys:
try:
meta[key] = getattr(result, key)
except AttributeError:
logger.warning(f'Metadata key {key} not found in metadata. ' +
'Setting to None. \n' +
'Metadata fields defined for this instance: ' +
f'{self._schema.metadata_keys}')
meta[key] = None
return meta
|
def _collect_metadata(self, result: 'Document') ->Dict[str, Any]:
"""Collect metadata from Redis.
Method ensures that there isn't a mismatch between the metadata
and the index schema passed to this class by the user or generated
by this class.
Args:
result (Document): redis.commands.search.Document object returned
from Redis.
Returns:
Dict[str, Any]: Collected metadata.
"""
meta = {}
for key in self._schema.metadata_keys:
try:
meta[key] = getattr(result, key)
except AttributeError:
logger.warning(f'Metadata key {key} not found in metadata. ' +
'Setting to None. \n' +
'Metadata fields defined for this instance: ' +
f'{self._schema.metadata_keys}')
meta[key] = None
return meta
|
Collect metadata from Redis.
Method ensures that there isn't a mismatch between the metadata
and the index schema passed to this class by the user or generated
by this class.
Args:
result (Document): redis.commands.search.Document object returned
from Redis.
Returns:
Dict[str, Any]: Collected metadata.
|
__init_subclass__
|
"""Create the definition of the new tool class."""
super().__init_subclass__(**kwargs)
args_schema_type = cls.__annotations__.get('args_schema', None)
if args_schema_type is not None:
if args_schema_type is None or args_schema_type == BaseModel:
typehint_mandate = """
class ChildTool(BaseTool):
...
args_schema: Type[BaseModel] = SchemaClass
..."""
name = cls.__name__
raise SchemaAnnotationError(
f"""Tool definition for {name} must include valid type annotations for argument 'args_schema' to behave as expected.
Expected annotation of 'Type[BaseModel]' but got '{args_schema_type}'.
Expected class looks like:
{typehint_mandate}"""
)
|
def __init_subclass__(cls, **kwargs: Any) ->None:
    """Create the definition of the new tool class.

    Validates that subclasses annotate ``args_schema`` with a concrete
    ``Type[BaseModel]`` subclass rather than ``BaseModel`` itself.

    Raises:
        SchemaAnnotationError: If the ``args_schema`` annotation is exactly
            ``BaseModel``.
    """
    super().__init_subclass__(**kwargs)
    args_schema_type = cls.__annotations__.get('args_schema', None)
    # The original also re-tested `args_schema_type is None` inside this
    # branch, which is unreachable; only the BaseModel comparison matters.
    if args_schema_type is not None and args_schema_type == BaseModel:
        typehint_mandate = """
class ChildTool(BaseTool):
    ...
    args_schema: Type[BaseModel] = SchemaClass
    ..."""
        name = cls.__name__
        raise SchemaAnnotationError(
            f"""Tool definition for {name} must include valid type annotations for argument 'args_schema' to behave as expected.
Expected annotation of 'Type[BaseModel]' but got '{args_schema_type}'.
Expected class looks like:
{typehint_mandate}"""
            )
|
Create the definition of the new tool class.
|
validate_environment
|
"""Validate that api key and python package exists in environment."""
huggingface_api_key = convert_to_secret_str(get_from_dict_or_env(values,
'huggingface_api_key', 'HUGGINGFACE_API_KEY'))
try:
from petals import AutoDistributedModelForCausalLM
from transformers import AutoTokenizer
model_name = values['model_name']
values['tokenizer'] = AutoTokenizer.from_pretrained(model_name)
values['client'] = AutoDistributedModelForCausalLM.from_pretrained(
model_name)
values['huggingface_api_key'] = huggingface_api_key.get_secret_value()
except ImportError:
raise ImportError(
'Could not import transformers or petals python package.Please install with `pip install -U transformers petals`.'
)
return values
|
@root_validator()
def validate_environment(cls, values: Dict) ->Dict:
    """Validate that api key and python package exists in environment.

    Resolves the Hugging Face API key from the values dict or the
    ``HUGGINGFACE_API_KEY`` environment variable, then loads the tokenizer
    and distributed model for ``values['model_name']``.

    Raises:
        ImportError: If ``transformers`` or ``petals`` is not installed.
    """
    huggingface_api_key = convert_to_secret_str(get_from_dict_or_env(values,
        'huggingface_api_key', 'HUGGINGFACE_API_KEY'))
    try:
        from petals import AutoDistributedModelForCausalLM
        from transformers import AutoTokenizer
        model_name = values['model_name']
        values['tokenizer'] = AutoTokenizer.from_pretrained(model_name)
        values['client'] = AutoDistributedModelForCausalLM.from_pretrained(
            model_name)
        values['huggingface_api_key'] = huggingface_api_key.get_secret_value()
    except ImportError:
        # Fixed: the original message ran the sentences together
        # ("package.Please") — add the missing space.
        raise ImportError(
            'Could not import transformers or petals python package. '
            'Please install with `pip install -U transformers petals`.'
            )
    return values
|
Validate that api key and python package exists in environment.
|
_prepare
|
config = ensure_config(config)
which = config.get('configurable', {}).get(self.which.id, self.default_key)
if self.prefix_keys:
config = cast(RunnableConfig, {**config, 'configurable': {
_strremoveprefix(k, f'{self.which.id}=={which}/'): v for k, v in
config.get('configurable', {}).items()}})
if which == self.default_key:
return self.default, config
elif which in self.alternatives:
alt = self.alternatives[which]
if isinstance(alt, Runnable):
return alt, config
else:
return alt(), config
else:
raise ValueError(f'Unknown alternative: {which}')
|
def _prepare(self, config: Optional[RunnableConfig]=None) ->Tuple[Runnable[
    Input, Output], RunnableConfig]:
    """Resolve which runnable alternative to use for this invocation.

    Reads the selection key (``self.which.id``) from the config's
    ``configurable`` section, falling back to ``self.default_key``.

    Returns:
        The chosen runnable and the (possibly rewritten) config.

    Raises:
        ValueError: If the configured alternative name is unknown.
    """
    config = ensure_config(config)
    which = config.get('configurable', {}).get(self.which.id, self.default_key)
    if self.prefix_keys:
        # Strip the '<which.id>==<choice>/' namespace prefix from the
        # configurable keys so the selected alternative sees plain key names.
        config = cast(RunnableConfig, {**config, 'configurable': {
            _strremoveprefix(k, f'{self.which.id}=={which}/'): v for k, v in
            config.get('configurable', {}).items()}})
    if which == self.default_key:
        return self.default, config
    elif which in self.alternatives:
        alt = self.alternatives[which]
        if isinstance(alt, Runnable):
            return alt, config
        else:
            # Alternatives may be stored lazily as factories; build on demand.
            return alt(), config
    else:
        raise ValueError(f'Unknown alternative: {which}')
| null |
_delete_previous
|
stmt = select(self.cache_schema.response).where(self.cache_schema.
prompt_md5 == self.get_md5(prompt)).where(self.cache_schema.llm ==
llm_string).where(self.cache_schema.prompt == prompt).order_by(self.
cache_schema.idx)
with Session(self.engine) as session, session.begin():
rows = session.execute(stmt).fetchall()
for item in rows:
session.delete(item)
|
def _delete_previous(self, prompt: str, llm_string: str) ->None:
    """Delete all cached generations stored for this (prompt, llm_string) pair.

    Args:
        prompt: The prompt whose cached rows should be removed.
        llm_string: The serialized LLM configuration forming the cache key.
    """
    # Select the full mapped entities rather than only the `response` column:
    # Session.delete() requires ORM instances, and passing the Row tuples
    # produced by a column-only select raises an error.
    stmt = select(self.cache_schema).where(self.cache_schema.prompt_md5 ==
        self.get_md5(prompt)).where(self.cache_schema.llm == llm_string
        ).where(self.cache_schema.prompt == prompt).order_by(self.
        cache_schema.idx)
    with Session(self.engine) as session, session.begin():
        for item in session.execute(stmt).scalars().all():
            session.delete(item)
| null |
test_all_imports
|
assert set(__all__) == set(EXPECTED_ALL)
|
def test_all_imports() ->None:
    """The module's ``__all__`` must exactly match the expected export list."""
    assert set(__all__) == set(EXPECTED_ALL)
| null |
_load_sequential_chain
|
chain = SequentialChain(chains=[LLMChain(llm=llm, prompt=
create_assertions_prompt, output_key='assertions', verbose=verbose),
LLMChain(llm=llm, prompt=check_assertions_prompt, output_key=
'checked_assertions', verbose=verbose), LLMChain(llm=llm, prompt=
revised_summary_prompt, output_key='revised_summary', verbose=verbose),
LLMChain(llm=llm, output_key='all_true', prompt=are_all_true_prompt,
verbose=verbose)], input_variables=['summary'], output_variables=[
'all_true', 'revised_summary'], verbose=verbose)
return chain
|
def _load_sequential_chain(llm: BaseLanguageModel, create_assertions_prompt:
    PromptTemplate, check_assertions_prompt: PromptTemplate,
    revised_summary_prompt: PromptTemplate, are_all_true_prompt:
    PromptTemplate, verbose: bool=False) ->SequentialChain:
    """Assemble the four-step summarization-checker pipeline.

    Steps run in order: draft assertions, check them, revise the summary,
    then decide whether every assertion held.
    """
    steps = [
        LLMChain(llm=llm, prompt=create_assertions_prompt,
            output_key='assertions', verbose=verbose),
        LLMChain(llm=llm, prompt=check_assertions_prompt,
            output_key='checked_assertions', verbose=verbose),
        LLMChain(llm=llm, prompt=revised_summary_prompt,
            output_key='revised_summary', verbose=verbose),
        LLMChain(llm=llm, prompt=are_all_true_prompt,
            output_key='all_true', verbose=verbose),
    ]
    return SequentialChain(chains=steps, input_variables=['summary'],
        output_variables=['all_true', 'revised_summary'], verbose=verbose)
| null |
add_example
|
"""Adds an example to the selector."""
raise NotImplementedError()
|
def add_example(self, example: Dict[str, str]) ->Any:
    """Adds an example to the selector.

    Args:
        example: Mapping of input/output variable names to string values.

    Raises:
        NotImplementedError: Subclasses must override this method.
    """
    raise NotImplementedError()
|
Adds an example to the selector.
|
on_chat_model_start
|
"""Run when the chat model is started."""
llm_model = kwargs.get('invocation_params', {}).get('model', None)
if llm_model is not None:
self.metadata['model'] = llm_model
if len(messages) == 0:
return
for message in messages[0]:
role = self.message_role_model.SYSTEM
if message.type == 'human':
role = self.message_role_model.USER
elif message.type == 'system':
role = self.message_role_model.SYSTEM
elif message.type == 'ai':
role = self.message_role_model.ASSISTANT
self.messages.append(self.message_model(message=message.content, role=role)
)
|
def on_chat_model_start(self, serialized: Dict[str, Any], messages: List[
    List[BaseMessage]], *, run_id: UUID, **kwargs: Any) ->Any:
    """Run when the chat model is started."""
    llm_model = kwargs.get('invocation_params', {}).get('model', None)
    if llm_model is not None:
        self.metadata['model'] = llm_model
    if not messages:
        return
    role_by_type = {'human': self.message_role_model.USER, 'system':
        self.message_role_model.SYSTEM, 'ai': self.message_role_model.ASSISTANT}
    # Only the first message batch is recorded.
    for message in messages[0]:
        # Unknown message types fall back to the SYSTEM role.
        role = role_by_type.get(message.type, self.message_role_model.SYSTEM)
        self.messages.append(self.message_model(message=message.content,
            role=role))
|
Run when the chat model is started.
|
test_appx_search_with_boolean_filter
|
"""Test Approximate Search with Boolean Filter."""
boolean_filter_val = {'bool': {'must': [{'term': {'text': 'bar'}}]}}
docsearch = OpenSearchVectorSearch.from_texts(texts, FakeEmbeddings(),
opensearch_url=DEFAULT_OPENSEARCH_URL)
output = docsearch.similarity_search('foo', k=3, boolean_filter=
boolean_filter_val, subquery_clause='should')
assert output == [Document(page_content='bar')]
|
def test_appx_search_with_boolean_filter() ->None:
    """Test Approximate Search with Boolean Filter."""
    # Only documents matching the boolean filter ('bar') may be returned,
    # even though k=3 and the query text is 'foo'.
    boolean_filter_val = {'bool': {'must': [{'term': {'text': 'bar'}}]}}
    docsearch = OpenSearchVectorSearch.from_texts(texts, FakeEmbeddings(),
        opensearch_url=DEFAULT_OPENSEARCH_URL)
    output = docsearch.similarity_search('foo', k=3, boolean_filter=
        boolean_filter_val, subquery_clause='should')
    assert output == [Document(page_content='bar')]
|
Test Approximate Search with Boolean Filter.
|
is_lc_serializable
|
return False
|
@classmethod
def is_lc_serializable(cls) ->bool:
    """Return False: this class opts out of LangChain serialization."""
    return False
| null |
test_all_imports
|
assert set(__all__) == set(EXPECTED_ALL)
|
def test_all_imports() ->None:
    """The module's ``__all__`` must exactly match the expected export list."""
    assert set(__all__) == set(EXPECTED_ALL)
| null |
on_tool_end
|
"""Run when tool ends running.
Args:
output (str): The output of the tool.
"""
handle_event(self.handlers, 'on_tool_end', 'ignore_agent', output, run_id=
self.run_id, parent_run_id=self.parent_run_id, tags=self.tags, **kwargs)
|
def on_tool_end(self, output: str, **kwargs: Any) ->None:
    """Run when tool ends running.

    Dispatches the event to every registered handler; handlers flagged
    with ``ignore_agent`` are skipped by ``handle_event``.

    Args:
        output (str): The output of the tool.
    """
    handle_event(self.handlers, 'on_tool_end', 'ignore_agent', output,
        run_id=self.run_id, parent_run_id=self.parent_run_id, tags=self.
        tags, **kwargs)
|
Run when tool ends running.
Args:
output (str): The output of the tool.
|
_create_message
|
return [{'type': role, 'data': {'content': txt}}]
|
def _create_message(txt: str, role: str='human') ->List[dict]:
return [{'type': role, 'data': {'content': txt}}]
| null |
_import_bittensor
|
from langchain_community.llms.bittensor import NIBittensorLLM
return NIBittensorLLM
|
def _import_bittensor() ->Any:
    """Lazily import and return the ``NIBittensorLLM`` class.

    Deferred so importing this module does not require the dependency.
    """
    from langchain_community.llms.bittensor import NIBittensorLLM
    return NIBittensorLLM
| null |
test_multiple_items
|
"""Test that a string with multiple comma-separated items is parsed to a list."""
parser = CommaSeparatedListOutputParser()
text = 'foo, bar, baz'
expected = ['foo', 'bar', 'baz']
assert parser.parse_folder(text) == expected
assert add(parser.transform(t for t in text)) == expected
assert list(parser.transform(t for t in text)) == [[a] for a in expected]
assert list(parser.transform(t for t in text.splitlines(keepends=True))) == [[
a] for a in expected]
assert list(parser.transform(' ' + t if i > 0 else t for i, t in enumerate(
text.split(' ')))) == [[a] for a in expected]
assert list(parser.transform(iter([text]))) == [[a] for a in expected]
|
def test_multiple_items() ->None:
    """Test that a string with multiple comma-separated items is parsed to a list."""
    parser = CommaSeparatedListOutputParser()
    text = 'foo, bar, baz'
    expected = ['foo', 'bar', 'baz']
    # NOTE(review): `parse_folder` is unusual for an output parser —
    # presumably this should be `parse`; confirm against the parser's API.
    assert parser.parse_folder(text) == expected
    # Streaming variants: char-by-char, line-split, word-split, and whole
    # string must all yield the same items.
    assert add(parser.transform(t for t in text)) == expected
    assert list(parser.transform(t for t in text)) == [[a] for a in expected]
    assert list(parser.transform(t for t in text.splitlines(keepends=True))
        ) == [[a] for a in expected]
    assert list(parser.transform(' ' + t if i > 0 else t for i, t in
        enumerate(text.split(' ')))) == [[a] for a in expected]
    assert list(parser.transform(iter([text]))) == [[a] for a in expected]
|
Test that a string with multiple comma-separated items is parsed to a list.
|
_format_response
|
content = json.loads(response.content)
if not content:
return f"No Merriam-Webster definition was found for query '{query}'."
if isinstance(content[0], str):
result = f"No Merriam-Webster definition was found for query '{query}'.\n"
if len(content) > 1:
alternatives = [f'{i + 1}. {content[i]}' for i in range(len(content))]
result += 'You can try one of the following alternative queries:\n\n'
result += '\n'.join(alternatives)
else:
result += f"Did you mean '{content[0]}'?"
else:
result = self._format_definitions(query, content)
return result
|
def _format_response(self, query: str, response: requests.Response) ->str:
content = json.loads(response.content)
if not content:
return f"No Merriam-Webster definition was found for query '{query}'."
if isinstance(content[0], str):
result = (
f"No Merriam-Webster definition was found for query '{query}'.\n")
if len(content) > 1:
alternatives = [f'{i + 1}. {content[i]}' for i in range(len(
content))]
result += (
'You can try one of the following alternative queries:\n\n')
result += '\n'.join(alternatives)
else:
result += f"Did you mean '{content[0]}'?"
else:
result = self._format_definitions(query, content)
return result
| null |
get_parser
|
"""Override this method to associate a default parser with the class."""
raise NotImplementedError()
|
@staticmethod
def get_parser(**kwargs: Any) ->BaseBlobParser:
    """Override this method to associate a default parser with the class.

    Raises:
        NotImplementedError: Subclasses must provide a parser.
    """
    raise NotImplementedError()
|
Override this method to associate a default parser with the class.
|
test_get_doc_by_filter
|
"""Test on document retrieval with metadata filter."""
docs = store.get_documents(filter={'kind': 'fruit'})
kinds = [d.metadata['kind'] for d in docs]
assert 'fruit' in kinds
assert 'treat' not in kinds
assert 'planet' not in kinds
|
def test_get_doc_by_filter(self, store: BigQueryVectorSearch) ->None:
    """Test on document retrieval with metadata filter."""
    docs = store.get_documents(filter={'kind': 'fruit'})
    kinds = [d.metadata['kind'] for d in docs]
    # Only 'fruit' documents may come back; other kinds must be filtered out.
    assert 'fruit' in kinds
    assert 'treat' not in kinds
    assert 'planet' not in kinds
|
Test on document retrieval with metadata filter.
|
serve
|
"""
Starts the LangServe app.
"""
sys.path.append(str(Path.cwd()))
app_str = app if app is not None else 'app.server:app'
host_str = host if host is not None else '127.0.0.1'
import uvicorn
uvicorn.run(app_str, host=host_str, port=port if port is not None else 8000,
reload=True)
|
@app_cli.command()
def serve(*, port: Annotated[Optional[int], typer.Option(help=
    'The port to run the server on')]=None, host: Annotated[Optional[str],
    typer.Option(help='The host to run the server on')]=None, app:
    Annotated[Optional[str], typer.Option(help=
    'The app to run, e.g. `app.server:app`')]=None) ->None:
    """
    Starts the LangServe app.

    Defaults: app ``app.server:app``, host ``127.0.0.1``, port ``8000``.
    Runs uvicorn with auto-reload enabled.
    """
    # Make the current project importable by the uvicorn reloader.
    sys.path.append(str(Path.cwd()))
    app_str = app if app is not None else 'app.server:app'
    host_str = host if host is not None else '127.0.0.1'
    # Imported lazily so the CLI itself loads even without uvicorn installed.
    import uvicorn
    uvicorn.run(app_str, host=host_str, port=port if port is not None else
        8000, reload=True)
|
Starts the LangServe app.
|
_call
|
_run_manager = run_manager or CallbackManagerForChainRun.get_noop_manager()
callbacks = _run_manager.get_child()
route = self.router_chain.route(inputs, callbacks=callbacks)
_run_manager.on_text(str(route.destination) + ': ' + str(route.next_inputs),
verbose=self.verbose)
if not route.destination:
return self.default_chain(route.next_inputs, callbacks=callbacks)
elif route.destination in self.destination_chains:
return self.destination_chains[route.destination](route.next_inputs,
callbacks=callbacks)
elif self.silent_errors:
return self.default_chain(route.next_inputs, callbacks=callbacks)
else:
raise ValueError(
f"Received invalid destination chain name '{route.destination}'")
|
def _call(self, inputs: Dict[str, Any], run_manager: Optional[
    CallbackManagerForChainRun]=None) ->Dict[str, Any]:
    """Route the inputs to a destination chain and run it.

    Falls back to ``self.default_chain`` when the router names no
    destination, or (when ``silent_errors`` is set) when it names an
    unknown one.

    Raises:
        ValueError: If the destination is unknown and ``silent_errors``
            is False.
    """
    _run_manager = run_manager or CallbackManagerForChainRun.get_noop_manager()
    callbacks = _run_manager.get_child()
    route = self.router_chain.route(inputs, callbacks=callbacks)
    # Surface the routing decision in verbose output/callbacks.
    _run_manager.on_text(str(route.destination) + ': ' + str(route.
        next_inputs), verbose=self.verbose)
    if not route.destination:
        return self.default_chain(route.next_inputs, callbacks=callbacks)
    elif route.destination in self.destination_chains:
        return self.destination_chains[route.destination](route.next_inputs,
            callbacks=callbacks)
    elif self.silent_errors:
        return self.default_chain(route.next_inputs, callbacks=callbacks)
    else:
        raise ValueError(
            f"Received invalid destination chain name '{route.destination}'")
| null |
_run
|
"""
Runs a command in a subprocess and returns
the output.
Args:
command: The command to run
"""
try:
output = subprocess.run(command, shell=True, check=True, stdout=
subprocess.PIPE, stderr=subprocess.STDOUT).stdout.decode()
except subprocess.CalledProcessError as error:
if self.return_err_output:
return error.stdout.decode()
return str(error)
if self.strip_newlines:
output = output.strip()
return output
|
def _run(self, command: str) ->str:
    """
    Runs a command in a subprocess and returns
    the output.
    Args:
        command: The command to run

    Returns:
        Combined stdout/stderr text; on a non-zero exit, the captured
        error output (if ``return_err_output``) or the stringified
        exception.
    """
    # SECURITY: shell=True executes the string through the system shell.
    # This tool intentionally runs arbitrary commands — never feed it
    # untrusted input.
    try:
        output = subprocess.run(command, shell=True, check=True, stdout=
            subprocess.PIPE, stderr=subprocess.STDOUT).stdout.decode()
    except subprocess.CalledProcessError as error:
        if self.return_err_output:
            return error.stdout.decode()
        return str(error)
    if self.strip_newlines:
        output = output.strip()
    return output
|
Runs a command in a subprocess and returns
the output.
Args:
command: The command to run
|
from_llm_and_url
|
"""Instantiate the toolkit from an OpenAPI Spec URL"""
spec = OpenAPISpec.from_url(open_api_url)
return cls.from_llm_and_spec(llm=llm, spec=spec, requests=requests, verbose
=verbose, **kwargs)
|
@classmethod
def from_llm_and_url(cls, llm: BaseLanguageModel, open_api_url: str,
    requests: Optional[Requests]=None, verbose: bool=False, **kwargs: Any
    ) ->NLAToolkit:
    """Instantiate the toolkit from an OpenAPI Spec URL

    Args:
        llm: Language model used to build the NLA tools.
        open_api_url: URL of the OpenAPI spec to download and parse.
        requests: Optional pre-configured Requests wrapper.
        verbose: Whether the underlying chains log verbosely.
    """
    spec = OpenAPISpec.from_url(open_api_url)
    return cls.from_llm_and_spec(llm=llm, spec=spec, requests=requests,
        verbose=verbose, **kwargs)
|
Instantiate the toolkit from an OpenAPI Spec URL
|
transform
|
yield from self._transform_stream_with_config(input, self._transform,
config, **kwargs)
|
def transform(self, input: Iterator[Dict[str, Any]], config: Optional[
    RunnableConfig]=None, **kwargs: Any) ->Iterator[Dict[str, Any]]:
    """Stream transformed chunks, delegating to ``self._transform`` with
    config-aware callback handling."""
    yield from self._transform_stream_with_config(input, self._transform,
        config, **kwargs)
| null |
_run
|
from langchain.output_parsers.json import parse_json_markdown
try:
data = parse_json_markdown(text)
except json.JSONDecodeError as e:
raise e
data_params = data.get('params')
response = self.requests_wrapper.get(data['url'], params=data_params)
response = response[:self.response_length]
return self.llm_chain.predict(response=response, instructions=data[
'output_instructions']).strip()
|
def _run(self, text: str) ->str:
    """Parse the model-produced JSON, perform the GET request it describes,
    and summarize the (truncated) response with the LLM chain.

    Args:
        text: JSON (possibly inside a markdown code fence) with keys
            ``url``, optional ``params``, and ``output_instructions``.

    Returns:
        The LLM's stripped answer about the truncated response.
    """
    from langchain.output_parsers.json import parse_json_markdown
    # A json.JSONDecodeError from malformed input propagates unchanged; the
    # previous try/except only re-raised it verbatim and has been removed.
    data = parse_json_markdown(text)
    data_params = data.get('params')
    response = self.requests_wrapper.get(data['url'], params=data_params)
    # Truncate to keep the follow-up LLM prompt within context limits.
    response = response[:self.response_length]
    return self.llm_chain.predict(response=response, instructions=data[
        'output_instructions']).strip()
| null |
test_ifixit_loader
|
"""Test iFixit loader."""
web_path = 'https://www.ifixit.com/Guide/iPad+9+Battery+Replacement/151279'
loader = IFixitLoader(web_path)
assert loader.page_type == 'Guide'
assert loader.id == '151279'
assert loader.web_path == web_path
|
def test_ifixit_loader() ->None:
    """Test iFixit loader."""
    # The loader should parse the page type and numeric id out of the URL.
    web_path = 'https://www.ifixit.com/Guide/iPad+9+Battery+Replacement/151279'
    loader = IFixitLoader(web_path)
    assert loader.page_type == 'Guide'
    assert loader.id == '151279'
    assert loader.web_path == web_path
|
Test iFixit loader.
|
on_retriever_end
|
if parent_run_id is None:
self.increment()
|
def on_retriever_end(self, documents: Sequence[Document], *, run_id: UUID,
    parent_run_id: Optional[UUID]=None, **kwargs: Any) ->Any:
    """Call ``self.increment()`` only for top-level retriever runs
    (those without a parent run id)."""
    if parent_run_id is None:
        self.increment()
| null |
from_response_schemas
|
return cls(response_schemas=response_schemas)
|
@classmethod
def from_response_schemas(cls, response_schemas: List[ResponseSchema]
    ) ->StructuredOutputParser:
    """Construct a parser from a list of response schemas."""
    return cls(response_schemas=response_schemas)
| null |
match
|
if template is cls.ListWildcard:
return isinstance(value, list)
elif template is cls.StrWildcard:
return isinstance(value, str)
elif template is cls.DictWildcard:
return isinstance(value, dict)
elif template is cls.IntWildcard:
return isinstance(value, int)
elif template is cls.FloatWildcard:
return isinstance(value, float)
elif template is cls.ObjectWildcard:
return True
elif type(value) != type(template):
return False
elif isinstance(value, dict):
if len(value) != len(template):
return False
for k, v in value.items():
if k not in template or not cls.match(v, template[k]):
return False
return True
elif isinstance(value, list):
if len(value) != len(template):
return False
for i in range(len(value)):
if not cls.match(value[i], template[i]):
return False
return True
else:
return value == template
|
@classmethod
def match(cls, value: Any, template: Any) ->bool:
    """Return True if ``value`` structurally matches ``template``.

    Wildcard sentinels match by type (ObjectWildcard matches anything);
    dicts and lists are compared recursively, requiring equal size;
    everything else is compared by exact type and equality.
    """
    typed_wildcards = [(cls.ListWildcard, list), (cls.StrWildcard, str),
        (cls.DictWildcard, dict), (cls.IntWildcard, int), (cls.
        FloatWildcard, float)]
    for sentinel, expected_type in typed_wildcards:
        if template is sentinel:
            return isinstance(value, expected_type)
    if template is cls.ObjectWildcard:
        return True
    # Exact-type comparison (not isinstance): a bool never matches an int
    # template, a subclass never matches its base.
    if type(value) != type(template):
        return False
    if isinstance(value, dict):
        return len(value) == len(template) and all(k in template and cls.
            match(v, template[k]) for k, v in value.items())
    if isinstance(value, list):
        return len(value) == len(template) and all(cls.match(a, b) for a,
            b in zip(value, template))
    return value == template
| null |
_default_params
|
"""Get the default parameters for calling Cohere API."""
return {'max_new_tokens': self.max_new_tokens, 'temperature': self.
temperature, 'top_k': self.top_k, 'top_p': self.top_p,
'repetition_penalty': self.repetition_penalty}
|
@property
def _default_params(self) ->Dict[str, Any]:
"""Get the default parameters for calling Cohere API."""
return {'max_new_tokens': self.max_new_tokens, 'temperature': self.
temperature, 'top_k': self.top_k, 'top_p': self.top_p,
'repetition_penalty': self.repetition_penalty}
|
Get the default parameters for calling Cohere API.
|
get_client
|
"""Get a redis client from the connection url given. This helper accepts
urls for Redis server (TCP with/without TLS or UnixSocket) as well as
Redis Sentinel connections.
Redis Cluster is not supported.
Before creating a connection the existence of the database driver is checked
an and ValueError raised otherwise
To use, you should have the ``redis`` python package installed.
Example:
.. code-block:: python
from langchain_community.utilities.redis import get_client
redis_client = get_client(
redis_url="redis://username:password@localhost:6379"
index_name="my-index",
embedding_function=embeddings.embed_query,
)
To use a redis replication setup with multiple redis server and redis sentinels
set "redis_url" to "redis+sentinel://" scheme. With this url format a path is
needed holding the name of the redis service within the sentinels to get the
correct redis server connection. The default service name is "mymaster". The
optional second part of the path is the redis db number to connect to.
An optional username or password is used for booth connections to the rediserver
and the sentinel, different passwords for server and sentinel are not supported.
And as another constraint only one sentinel instance can be given:
Example:
.. code-block:: python
from langchain_community.utilities.redis import get_client
redis_client = get_client(
redis_url="redis+sentinel://username:password@sentinelhost:26379/mymaster/0"
index_name="my-index",
embedding_function=embeddings.embed_query,
)
"""
try:
import redis
except ImportError:
raise ImportError(
'Could not import redis python package. Please install it with `pip install redis>=4.1.0`.'
)
if redis_url.startswith('redis+sentinel'):
redis_client = _redis_sentinel_client(redis_url, **kwargs)
elif redis_url.startswith('rediss+sentinel'):
kwargs['ssl'] = True
if 'ssl_cert_reqs' not in kwargs:
kwargs['ssl_cert_reqs'] = 'none'
redis_client = _redis_sentinel_client(redis_url, **kwargs)
else:
redis_client = redis.from_url(redis_url, **kwargs)
if _check_for_cluster(redis_client):
redis_client.close()
redis_client = _redis_cluster_client(redis_url, **kwargs)
return redis_client
|
def get_client(redis_url: str, **kwargs: Any) ->RedisType:
    """Get a redis client from the connection url given. This helper accepts
    urls for Redis server (TCP with/without TLS or UnixSocket) as well as
    Redis Sentinel connections.
    Redis Cluster is not supported.
    Before creating a connection the existence of the database driver is checked
    and a ValueError is raised otherwise
    To use, you should have the ``redis`` python package installed.
    Example:
        .. code-block:: python
            from langchain_community.utilities.redis import get_client
            redis_client = get_client(
                redis_url="redis://username:password@localhost:6379"
                index_name="my-index",
                embedding_function=embeddings.embed_query,
            )
    To use a redis replication setup with multiple redis server and redis sentinels
    set "redis_url" to "redis+sentinel://" scheme. With this url format a path is
    needed holding the name of the redis service within the sentinels to get the
    correct redis server connection. The default service name is "mymaster". The
    optional second part of the path is the redis db number to connect to.
    An optional username or password is used for both connections to the redis
    server and the sentinel; different passwords for server and sentinel are not
    supported. And as another constraint only one sentinel instance can be given:
    Example:
        .. code-block:: python
            from langchain_community.utilities.redis import get_client
            redis_client = get_client(
                redis_url="redis+sentinel://username:password@sentinelhost:26379/mymaster/0"
                index_name="my-index",
                embedding_function=embeddings.embed_query,
            )
    """
    try:
        import redis
    except ImportError:
        raise ImportError(
            'Could not import redis python package. Please install it with `pip install redis>=4.1.0`.'
            )
    if redis_url.startswith('redis+sentinel'):
        redis_client = _redis_sentinel_client(redis_url, **kwargs)
    elif redis_url.startswith('rediss+sentinel'):
        # TLS sentinel variant: enable ssl and default to no certificate
        # verification unless the caller overrides ssl_cert_reqs.
        kwargs['ssl'] = True
        if 'ssl_cert_reqs' not in kwargs:
            kwargs['ssl_cert_reqs'] = 'none'
        redis_client = _redis_sentinel_client(redis_url, **kwargs)
    else:
        redis_client = redis.from_url(redis_url, **kwargs)
        # A plain URL may actually point at a cluster; detect that and
        # swap in a cluster client.
        if _check_for_cluster(redis_client):
            redis_client.close()
            redis_client = _redis_cluster_client(redis_url, **kwargs)
    return redis_client
|
Get a redis client from the connection url given. This helper accepts
urls for Redis server (TCP with/without TLS or UnixSocket) as well as
Redis Sentinel connections.
Redis Cluster is not supported.
Before creating a connection the existence of the database driver is checked
and a ValueError is raised otherwise
To use, you should have the ``redis`` python package installed.
Example:
.. code-block:: python
from langchain_community.utilities.redis import get_client
redis_client = get_client(
redis_url="redis://username:password@localhost:6379"
index_name="my-index",
embedding_function=embeddings.embed_query,
)
To use a redis replication setup with multiple redis server and redis sentinels
set "redis_url" to "redis+sentinel://" scheme. With this url format a path is
needed holding the name of the redis service within the sentinels to get the
correct redis server connection. The default service name is "mymaster". The
optional second part of the path is the redis db number to connect to.
An optional username or password is used for both connections to the redis server
and the sentinel; different passwords for server and sentinel are not supported.
And as another constraint only one sentinel instance can be given:
Example:
.. code-block:: python
from langchain_community.utilities.redis import get_client
redis_client = get_client(
redis_url="redis+sentinel://username:password@sentinelhost:26379/mymaster/0"
index_name="my-index",
embedding_function=embeddings.embed_query,
)
|
test_load_fail_wrong_split_name
|
"""Test that fails to load"""
with pytest.raises(ValidationError) as exc_info:
TensorflowDatasetLoader(dataset_name='mlqa/en', split_name=
'wrong_split_name', load_max_docs=MAX_DOCS,
sample_to_document_function=mlqaen_example_to_document)
assert 'Unknown split' in str(exc_info.value)
|
def test_load_fail_wrong_split_name() ->None:
    """Constructing the loader with an unknown split name must fail validation."""
    with pytest.raises(ValidationError) as exc_info:
        TensorflowDatasetLoader(
            dataset_name='mlqa/en',
            split_name='wrong_split_name',
            load_max_docs=MAX_DOCS,
            sample_to_document_function=mlqaen_example_to_document,
        )
    assert 'Unknown split' in str(exc_info.value)
|
Test that fails to load
|
test_add_texts
|
with mock.patch('nuclia.sdk.resource.NucliaResource.create', new_callable=
FakeCreate):
ndb = NucliaDB(knowledge_box='YOUR_KB_ID', local=False, api_key=
'YOUR_API_KEY')
assert ndb.is_local is False
ids = ndb.add_texts(['This is a new test', 'This is a second test'])
assert len(ids) == 2
|
def test_add_texts() ->None:
    """add_texts against a remote NucliaDB returns one id per input text."""
    with mock.patch('nuclia.sdk.resource.NucliaResource.create',
        new_callable=FakeCreate):
        ndb = NucliaDB(knowledge_box='YOUR_KB_ID', local=False, api_key=
            'YOUR_API_KEY')
        assert ndb.is_local is False
        texts = ['This is a new test', 'This is a second test']
        ids = ndb.add_texts(texts)
        assert len(ids) == len(texts)
| null |
poetry_conf
|
"""Load the pyproject.toml file."""
with open(PYPROJECT_TOML) as f:
return toml.load(f)['tool']['poetry']
|
@pytest.fixture()
def poetry_conf() ->Dict[str, Any]:
    """Load and return the `[tool.poetry]` section of pyproject.toml."""
    with open(PYPROJECT_TOML) as config_file:
        pyproject = toml.load(config_file)
    return pyproject['tool']['poetry']
|
Load the pyproject.toml file.
|
extract_dict_elements_from_component_fields
|
"""Extract elements from a dictionary.
Args:
data: The dictionary to extract elements from.
component: The component to extract elements from.
Returns:
A dictionary containing the elements from the input dictionary that are also
in the component.
"""
output = {}
for attribute in fields(component):
if attribute.name in data:
output[attribute.name] = data[attribute.name]
return output
|
def extract_dict_elements_from_component_fields(data: dict, component: Type
    [Component]) ->dict:
    """Extract elements from a dictionary.
    Args:
        data: The dictionary to extract elements from.
        component: The component to extract elements from.
    Returns:
        A dictionary containing the elements from the input dictionary that are also
        in the component.
    """
    # Keep only the keys of `data` that match a declared field of `component`.
    field_names = (attribute.name for attribute in fields(component))
    return {name: data[name] for name in field_names if name in data}
|
Extract elements from a dictionary.
Args:
data: The dictionary to extract elements from.
component: The component to extract elements from.
Returns:
A dictionary containing the elements from the input dictionary that are also
in the component.
|
test_telegram_chat_loader_html
|
_check_telegram_chat_loader(path)
|
@pytest.mark.skip(reason=
"requires bs4 but marking it as such doesn't seem to work")
@pytest.mark.parametrize('path', ['telegram_chat_json',
'telegram_chat_json.zip', 'telegram_chat_json/result.json'])
def test_telegram_chat_loader_html(path: str) ->None:
_check_telegram_chat_loader(path)
| null |
on_tool_start
|
"""Run when tool starts running."""
aim = import_aim()
self.step += 1
self.tool_starts += 1
self.starts += 1
resp = {'action': 'on_tool_start'}
resp.update(self.get_custom_callback_meta())
self._run.track(aim.Text(input_str), name='on_tool_start', context=resp)
|
def on_tool_start(self, serialized: Dict[str, Any], input_str: str, **
    kwargs: Any) ->None:
    """Record the tool's input in the Aim run when a tool starts."""
    aim = import_aim()
    # Bump the global step plus the tool/start counters before logging.
    self.step += 1
    self.tool_starts += 1
    self.starts += 1
    context = {'action': 'on_tool_start'}
    context.update(self.get_custom_callback_meta())
    self._run.track(aim.Text(input_str), name='on_tool_start', context=context)
|
Run when tool starts running.
|
mget
|
"""Get the values associated with the given keys."""
encoded_keys: List[str] = [self.key_encoder(key) for key in keys]
values = self.store.mget(encoded_keys)
return [(self.value_deserializer(value) if value is not None else value) for
value in values]
|
def mget(self, keys: Sequence[K]) ->List[Optional[V]]:
    """Get the values associated with the given keys."""
    # Encode keys, fetch the raw values, then decode only the hits
    # (missing keys come back as None and pass through untouched).
    raw_values = self.store.mget([self.key_encoder(key) for key in keys])
    decoded: List[Optional[V]] = []
    for raw in raw_values:
        decoded.append(raw if raw is None else self.value_deserializer(raw))
    return decoded
|
Get the values associated with the given keys.
|
on_llm_end
|
"""Run when LLM ends running."""
self.step += 1
self.llm_ends += 1
self.ends += 1
resp = self._init_resp()
resp.update({'action': 'on_llm_end'})
resp.update(flatten_dict(response.llm_output or {}))
resp.update(self.get_custom_callback_meta())
for generations in response.generations:
for generation in generations:
generation_resp = deepcopy(resp)
generation_resp.update(flatten_dict(generation.dict()))
generation_resp.update(self.analyze_text(generation.text))
self.on_llm_end_records.append(generation_resp)
self.action_records.append(generation_resp)
if self.stream_logs:
self.logger.report_text(generation_resp)
|
def on_llm_end(self, response: LLMResult, **kwargs: Any) ->None:
    """Record every generation produced by a finished LLM run."""
    self.step += 1
    self.llm_ends += 1
    self.ends += 1
    # Build the shared part of each record once, then specialize per generation.
    base = self._init_resp()
    base.update({'action': 'on_llm_end'})
    base.update(flatten_dict(response.llm_output or {}))
    base.update(self.get_custom_callback_meta())
    all_generations = [gen for batch in response.generations for gen in batch]
    for generation in all_generations:
        record = deepcopy(base)
        record.update(flatten_dict(generation.dict()))
        record.update(self.analyze_text(generation.text))
        self.on_llm_end_records.append(record)
        self.action_records.append(record)
        if self.stream_logs:
            self.logger.report_text(record)
|
Run when LLM ends running.
|
test_xinference_llm_
|
from xinference.client import RESTfulClient
endpoint, _ = setup
client = RESTfulClient(endpoint)
model_uid = client.launch_model(model_name='vicuna-v1.3',
model_size_in_billions=7, quantization='q4_0')
llm = Xinference(server_url=endpoint, model_uid=model_uid)
answer = llm(prompt='Q: What food can we try in the capital of France? A:')
assert isinstance(answer, str)
answer = llm(prompt='Q: where can we visit in the capital of France? A:',
generate_config={'max_tokens': 1024, 'stream': True})
assert isinstance(answer, str)
|
def test_xinference_llm_(setup: Tuple[str, str]) ->None:
    """Exercise an Xinference-served LLM in plain and streaming modes."""
    from xinference.client import RESTfulClient
    endpoint, _ = setup
    model_uid = RESTfulClient(endpoint).launch_model(model_name=
        'vicuna-v1.3', model_size_in_billions=7, quantization='q4_0')
    llm = Xinference(server_url=endpoint, model_uid=model_uid)
    plain_answer = llm(prompt=
        'Q: What food can we try in the capital of France? A:')
    assert isinstance(plain_answer, str)
    streamed_answer = llm(prompt=
        'Q: where can we visit in the capital of France? A:',
        generate_config={'max_tokens': 1024, 'stream': True})
    assert isinstance(streamed_answer, str)
| null |
online_process
|
"""Parses a blob lazily using online processing.
Args:
blob: a blob to parse.
enable_native_pdf_parsing: enable pdf embedded text extraction
field_mask: a comma-separated list of which fields to include in the
Document AI response.
suggested: "text,pages.pageNumber,pages.layout"
page_range: list of page numbers to parse. If `None`,
entire document will be parsed.
"""
try:
from google.cloud import documentai
from google.cloud.documentai_v1.types import IndividualPageSelector, OcrConfig, ProcessOptions
except ImportError as exc:
raise ImportError(
'documentai package not found, please install it with `pip install google-cloud-documentai`'
) from exc
try:
from google.cloud.documentai_toolbox.wrappers.page import _text_from_layout
except ImportError as exc:
raise ImportError(
'documentai_toolbox package not found, please install it with `pip install google-cloud-documentai-toolbox`'
) from exc
ocr_config = OcrConfig(enable_native_pdf_parsing=enable_native_pdf_parsing
) if enable_native_pdf_parsing else None
individual_page_selector = IndividualPageSelector(pages=page_range
) if page_range else None
response = self._client.process_document(documentai.ProcessRequest(name=
self._processor_name, gcs_document=documentai.GcsDocument(gcs_uri=blob.
path, mime_type=blob.mimetype or 'application/pdf'), process_options=
ProcessOptions(ocr_config=ocr_config, individual_page_selector=
individual_page_selector), skip_human_review=True, field_mask=field_mask))
yield from (Document(page_content=_text_from_layout(page.layout, response.
document.text), metadata={'page': page.page_number, 'source': blob.path
}) for page in response.document.pages)
|
def online_process(self, blob: Blob, enable_native_pdf_parsing: bool=True,
    field_mask: Optional[str]=None, page_range: Optional[List[int]]=None
    ) ->Iterator[Document]:
    """Parses a blob lazily using online processing.

    Args:
        blob: a blob to parse.
        enable_native_pdf_parsing: enable pdf embedded text extraction
        field_mask: a comma-separated list of which fields to include in the
            Document AI response.
            suggested: "text,pages.pageNumber,pages.layout"
        page_range: list of page numbers to parse. If `None`,
            entire document will be parsed.

    Yields:
        One ``Document`` per page of the Document AI response, with the page
        number and the blob path recorded in its metadata.

    Raises:
        ImportError: if the optional google-cloud-documentai or
            documentai-toolbox packages are not installed.
    """
    # Both Google packages are optional extras; fail with an actionable
    # install hint rather than a bare ModuleNotFoundError.
    try:
        from google.cloud import documentai
        from google.cloud.documentai_v1.types import IndividualPageSelector, OcrConfig, ProcessOptions
    except ImportError as exc:
        raise ImportError(
            'documentai package not found, please install it with `pip install google-cloud-documentai`'
            ) from exc
    try:
        from google.cloud.documentai_toolbox.wrappers.page import _text_from_layout
    except ImportError as exc:
        raise ImportError(
            'documentai_toolbox package not found, please install it with `pip install google-cloud-documentai-toolbox`'
            ) from exc
    # Only build the optional request pieces when the caller asked for them;
    # None leaves the server-side defaults in effect.
    ocr_config = OcrConfig(enable_native_pdf_parsing=enable_native_pdf_parsing
        ) if enable_native_pdf_parsing else None
    individual_page_selector = IndividualPageSelector(pages=page_range
        ) if page_range else None
    # Single synchronous (online) call; human review is skipped explicitly.
    response = self._client.process_document(documentai.ProcessRequest(name
        =self._processor_name, gcs_document=documentai.GcsDocument(gcs_uri=
        blob.path, mime_type=blob.mimetype or 'application/pdf'),
        process_options=ProcessOptions(ocr_config=ocr_config,
        individual_page_selector=individual_page_selector),
        skip_human_review=True, field_mask=field_mask))
    # Lazily materialize one Document per response page.
    yield from (Document(page_content=_text_from_layout(page.layout,
        response.document.text), metadata={'page': page.page_number,
        'source': blob.path}) for page in response.document.pages)
|
Parses a blob lazily using online processing.
Args:
blob: a blob to parse.
enable_native_pdf_parsing: enable pdf embedded text extraction
field_mask: a comma-separated list of which fields to include in the
Document AI response.
suggested: "text,pages.pageNumber,pages.layout"
page_range: list of page numbers to parse. If `None`,
entire document will be parsed.
|
_import_wolfram_alpha_tool
|
from langchain_community.tools.wolfram_alpha.tool import WolframAlphaQueryRun
return WolframAlphaQueryRun
|
def _import_wolfram_alpha_tool() ->Any:
    """Lazily import and return the Wolfram Alpha query tool class."""
    from langchain_community.tools.wolfram_alpha import tool as wolfram_tool
    return wolfram_tool.WolframAlphaQueryRun
| null |
embeddings
|
return self._embedding
|
@property
def embeddings(self) ->Optional[Embeddings]:
    """Access the embedding function backing this store, if one was set."""
    return self._embedding
| null |
on_llm_error
|
"""Run when LLM errors."""
self.step += 1
self.errors += 1
|
def on_llm_error(self, error: BaseException, **kwargs: Any) ->None:
    """Run when LLM errors."""
    # Advance the global step counter and record one more failed run;
    # the error object itself is not persisted by this handler.
    self.step = self.step + 1
    self.errors = self.errors + 1
|
Run when LLM errors.
|
test_merger_retriever_get_relevant_docs
|
"""Test get_relevant_docs."""
texts_group_a = ['This is a document about the Boston Celtics',
'Fly me to the moon is one of my favourite songs.I simply love going to the movies'
]
texts_group_b = ['This is a document about the Poenix Suns',
'The Boston Celtics won the game by 20 points',
'Real stupidity beats artificial intelligence every time. TP']
embeddings = OpenAIEmbeddings()
retriever_a = Chroma.from_texts(texts_group_a, embedding=embeddings
).as_retriever(search_kwargs={'k': 1})
retriever_b = Chroma.from_texts(texts_group_b, embedding=embeddings
).as_retriever(search_kwargs={'k': 1})
lotr = MergerRetriever(retrievers=[retriever_a, retriever_b])
actual = lotr.get_relevant_documents('Tell me about the Celtics')
assert len(actual) == 2
assert texts_group_a[0] in [d.page_content for d in actual]
assert texts_group_b[1] in [d.page_content for d in actual]
|
def test_merger_retriever_get_relevant_docs() ->None:
    """Test get_relevant_docs."""
    texts_group_a = ['This is a document about the Boston Celtics',
        'Fly me to the moon is one of my favourite songs.I simply love going to the movies'
        ]
    texts_group_b = ['This is a document about the Poenix Suns',
        'The Boston Celtics won the game by 20 points',
        'Real stupidity beats artificial intelligence every time. TP']
    embeddings = OpenAIEmbeddings()
    # One top-1 retriever per text group, merged by the Lord-of-the-Retrievers.
    retrievers = [Chroma.from_texts(group, embedding=embeddings).
        as_retriever(search_kwargs={'k': 1}) for group in (texts_group_a,
        texts_group_b)]
    lotr = MergerRetriever(retrievers=retrievers)
    merged = lotr.get_relevant_documents('Tell me about the Celtics')
    merged_contents = [doc.page_content for doc in merged]
    assert len(merged) == 2
    assert texts_group_a[0] in merged_contents
    assert texts_group_b[1] in merged_contents
|
Test get_relevant_docs.
|
assert_docs
|
for doc in docs:
assert doc.page_content
assert doc.metadata
main_meta = {'title', 'source'}
assert set(doc.metadata).issuperset(main_meta)
if all_meta:
assert len(set(doc.metadata)) > len(main_meta)
else:
assert len(set(doc.metadata)) == len(main_meta)
|
def assert_docs(docs: List[Document], all_meta: bool=False) ->None:
    """Check each document has content and the expected metadata keys.

    With ``all_meta`` the metadata must carry extra keys beyond the required
    ones; otherwise it must carry exactly the required keys.
    """
    required_meta = {'title', 'source'}
    for doc in docs:
        assert doc.page_content
        assert doc.metadata
        meta_keys = set(doc.metadata)
        assert meta_keys.issuperset(required_meta)
        if all_meta:
            assert len(meta_keys) > len(required_meta)
        else:
            assert len(meta_keys) == len(required_meta)
| null |
_import_bing_search
|
from langchain_community.utilities.bing_search import BingSearchAPIWrapper
return BingSearchAPIWrapper
|
def _import_bing_search() ->Any:
    """Lazily import and return the Bing search API wrapper class."""
    from langchain_community.utilities import bing_search
    return bing_search.BingSearchAPIWrapper
| null |
from_str
|
"""Parse an HTTP verb."""
try:
return cls(verb)
except ValueError:
raise ValueError(f'Invalid HTTP verb. Valid values are {cls.__members__}')
|
@classmethod
def from_str(cls, verb: str) ->HTTPVerb:
    """Parse an HTTP verb."""
    try:
        return cls(verb)
    except ValueError:
        message = f'Invalid HTTP verb. Valid values are {cls.__members__}'
        raise ValueError(message)
|
Parse an HTTP verb.
|
_get_execution_order
|
"""Get the execution order for a run."""
if parent_run_id is None:
return 1
parent_run = self.run_map.get(parent_run_id)
if parent_run is None:
logger.debug(f'Parent run with UUID {parent_run_id} not found.')
return 1
if parent_run.child_execution_order is None:
raise TracerException(
f'Parent run with UUID {parent_run_id} has no child execution order.')
return parent_run.child_execution_order + 1
|
def _get_execution_order(self, parent_run_id: Optional[str]=None) ->int:
"""Get the execution order for a run."""
if parent_run_id is None:
return 1
parent_run = self.run_map.get(parent_run_id)
if parent_run is None:
logger.debug(f'Parent run with UUID {parent_run_id} not found.')
return 1
if parent_run.child_execution_order is None:
raise TracerException(
f'Parent run with UUID {parent_run_id} has no child execution order.'
)
return parent_run.child_execution_order + 1
|
Get the execution order for a run.
|
_get_default_system_message
|
return SystemMessage(content=
'Do your best to answer the questions. Feel free to use any tools available to look up relevant information, only if necessary'
)
|
def _get_default_system_message() ->SystemMessage:
    """Build the fallback system prompt used when none is supplied."""
    default_prompt = (
        'Do your best to answer the questions. Feel free to use any tools available to look up relevant information, only if necessary'
        )
    return SystemMessage(content=default_prompt)
| null |
ignore_chain
|
"""Whether to ignore chain callbacks."""
return self.ignore_chain_
|
@property
def ignore_chain(self) ->bool:
    """Whether to ignore chain callbacks."""
    # Delegates to the backing flag so instances can toggle the behavior.
    return self.ignore_chain_
|
Whether to ignore chain callbacks.
|
lookup
|
"""Look up based on prompt and llm_string."""
rows = self._search_rows(prompt, llm_string)
if rows:
return [loads(row[0]) for row in rows]
return None
|
def lookup(self, prompt: str, llm_string: str) ->Optional[RETURN_VAL_TYPE]:
    """Look up cached generations for the given prompt and llm_string.

    Returns None on a cache miss; otherwise the deserialized values.
    """
    matching_rows = self._search_rows(prompt, llm_string)
    if not matching_rows:
        return None
    return [loads(row[0]) for row in matching_rows]
|
Look up based on prompt and llm_string.
|
ignore_llm
|
"""Whether to ignore LLM callbacks."""
return False
|
@property
def ignore_llm(self) ->bool:
    """Whether to ignore LLM callbacks."""
    # This handler always processes LLM events.
    return False
|
Whether to ignore LLM callbacks.
|
_collect_yaml_input
|
"""Collects and returns user input as a single string."""
lines = []
while True:
line = input()
if not line.strip():
break
if stop and any(seq in line for seq in stop):
break
lines.append(line)
yaml_string = '\n'.join(lines)
try:
message = _message_from_dict(yaml.safe_load(StringIO(yaml_string)))
if message is None:
return HumanMessage(content='')
if stop:
if isinstance(message.content, str):
message.content = enforce_stop_tokens(message.content, stop)
else:
raise ValueError('Cannot use when output is not a string.')
return message
except yaml.YAMLError:
raise ValueError('Invalid YAML string entered.')
except ValueError:
raise ValueError('Invalid message entered.')
|
def _collect_yaml_input(messages: List[BaseMessage], stop: Optional[List[
    str]]=None) ->BaseMessage:
    """Collects and returns user input as a single string.

    Reads lines from stdin until a blank line (or a line containing a stop
    sequence) is seen, parses the collected text as a YAML-encoded message,
    and truncates string content at the stop tokens when requested.

    Args:
        messages: Unused; accepted for interface parity with sibling
            input-collection helpers.
        stop: Optional stop sequences.
    Returns:
        The parsed message, or an empty HumanMessage if nothing was entered.
    Raises:
        ValueError: If the input is not valid YAML, is not a valid message,
            or has non-string content while stop sequences were given.
    """
    lines = []
    while True:
        line = input()
        if not line.strip():
            break
        if stop and any(seq in line for seq in stop):
            break
        lines.append(line)
    yaml_string = '\n'.join(lines)
    # Parse in two narrow steps so each failure maps to a precise error.
    # Previously one broad `except ValueError` wrapped the whole body, which
    # masked the deliberate "Cannot use when output is not a string." error
    # (and any ValueError from enforce_stop_tokens) behind the generic
    # "Invalid message entered." message.
    try:
        parsed = yaml.safe_load(StringIO(yaml_string))
    except yaml.YAMLError:
        raise ValueError('Invalid YAML string entered.')
    try:
        message = _message_from_dict(parsed)
    except ValueError:
        raise ValueError('Invalid message entered.')
    if message is None:
        return HumanMessage(content='')
    if stop:
        if isinstance(message.content, str):
            message.content = enforce_stop_tokens(message.content, stop)
        else:
            raise ValueError('Cannot use when output is not a string.')
    return message
|
Collects and returns user input as a single string.
|
send_pdf
|
with open(self.file_path, 'rb') as f:
files = {'file': f}
response = requests.post(self.url, headers=self._mathpix_headers, files
=files, data=self.data)
response_data = response.json()
if 'error' in response_data:
raise ValueError(f"Mathpix request failed: {response_data['error']}")
if 'pdf_id' in response_data:
pdf_id = response_data['pdf_id']
return pdf_id
else:
raise ValueError('Unable to send PDF to Mathpix.')
|
def send_pdf(self) ->str:
    """Upload the PDF to Mathpix and return the id of the processing job."""
    with open(self.file_path, 'rb') as pdf_file:
        response = requests.post(self.url, headers=self._mathpix_headers,
            files={'file': pdf_file}, data=self.data)
    response_data = response.json()
    # A response may signal failure either explicitly via an 'error' field
    # or implicitly by omitting the expected 'pdf_id'.
    if 'error' in response_data:
        raise ValueError(f"Mathpix request failed: {response_data['error']}")
    if 'pdf_id' not in response_data:
        raise ValueError('Unable to send PDF to Mathpix.')
    return response_data['pdf_id']
| null |
test_extract_paragraphs
|
bs_transformer = BeautifulSoupTransformer()
paragraphs_html = (
'<html><h1>Header</h1><p>First paragraph.</p><p>Second paragraph.</p><h1>Ignore at end</h1></html>'
)
documents = [Document(page_content=paragraphs_html)]
docs_transformed = bs_transformer.transform_documents(documents)
assert docs_transformed[0].page_content == 'First paragraph. Second paragraph.'
|
@pytest.mark.requires('bs4')
def test_extract_paragraphs() ->None:
    """Paragraph text is extracted while surrounding headers are dropped."""
    transformer = BeautifulSoupTransformer()
    paragraphs_html = (
        '<html><h1>Header</h1><p>First paragraph.</p><p>Second paragraph.</p><h1>Ignore at end</h1></html>'
        )
    transformed = transformer.transform_documents([Document(page_content=
        paragraphs_html)])
    assert transformed[0].page_content == 'First paragraph. Second paragraph.'
| null |
test_milvus_add_extra
|
"""Test end to end construction and MRR search."""
texts = ['foo', 'bar', 'baz']
metadatas = [{'page': i} for i in range(len(texts))]
docsearch = _milvus_from_texts(metadatas=metadatas)
docsearch.add_texts(texts, metadatas)
output = docsearch.similarity_search('foo', k=10)
assert len(output) == 6
|
def test_milvus_add_extra() ->None:
    """Test that add_texts on an existing store appends rather than replaces."""
    texts = ['foo', 'bar', 'baz']
    metadatas = [{'page': i} for i in range(len(texts))]
    docsearch = _milvus_from_texts(metadatas=metadatas)
    docsearch.add_texts(texts, metadatas)
    output = docsearch.similarity_search('foo', k=10)
    # 3 docs from construction + the same 3 added again = 6 total.
    assert len(output) == 6
|
Test end to end construction and MRR search.
|
example_memory
|
example_1 = ConversationBufferMemory(memory_key='foo')
example_2 = ConversationBufferMemory(memory_key='bar')
example_3 = ConversationBufferMemory(memory_key='bar')
return [example_1, example_2, example_3]
|
@pytest.fixture()
def example_memory() ->List[ConversationBufferMemory]:
    """Provide three memories, the last two sharing the same memory key."""
    memory_keys = ('foo', 'bar', 'bar')
    return [ConversationBufferMemory(memory_key=key) for key in memory_keys]
| null |
test_get_erc721_transaction
|
account_address = '0x9dd134d14d1e65f84b706d6f205cd5b1cd03a46b'
loader = EtherscanLoader(account_address, filter='erc721_transaction')
result = loader.load()
assert len(result) > 0, 'No transactions returned'
|
@pytest.mark.skipif(not etherscan_key_set, reason=
    'Etherscan API key not provided.')
def test_get_erc721_transaction() ->None:
    """Loading ERC-721 transactions for a known account yields documents."""
    loader = EtherscanLoader('0x9dd134d14d1e65f84b706d6f205cd5b1cd03a46b',
        filter='erc721_transaction')
    result = loader.load()
    assert len(result) > 0, 'No transactions returned'
| null |
__init__
|
self.completions = Completions()
|
def __init__(self) ->None:
    """Initialize with a fresh Completions sub-client."""
    self.completions = Completions()
| null |
test_faiss_mmr_with_metadatas_and_filter
|
texts = ['foo', 'foo', 'fou', 'foy']
metadatas = [{'page': i} for i in range(len(texts))]
docsearch = FAISS.from_texts(texts, FakeEmbeddings(), metadatas=metadatas)
query_vec = FakeEmbeddings().embed_query(text='foo')
output = docsearch.max_marginal_relevance_search_with_score_by_vector(query_vec
, k=10, lambda_mult=0.1, filter={'page': 1})
assert len(output) == 1
assert output[0][0] == Document(page_content='foo', metadata={'page': 1})
assert output[0][1] == 0.0
|
@pytest.mark.requires('faiss')
def test_faiss_mmr_with_metadatas_and_filter() ->None:
    """MMR with a metadata filter only returns the matching document."""
    texts = ['foo', 'foo', 'fou', 'foy']
    metadatas = [{'page': i} for i in range(len(texts))]
    docsearch = FAISS.from_texts(texts, FakeEmbeddings(), metadatas=metadatas)
    query_vec = FakeEmbeddings().embed_query(text='foo')
    output = docsearch.max_marginal_relevance_search_with_score_by_vector(
        query_vec, k=10, lambda_mult=0.1, filter={'page': 1})
    assert len(output) == 1
    top_doc, top_score = output[0]
    assert top_doc == Document(page_content='foo', metadata={'page': 1})
    assert top_score == 0.0
| null |
load_memory_variables
|
"""Return history buffer."""
return {self.memory_key: self.buffer}
|
def load_memory_variables(self, inputs: Dict[str, Any]) ->Dict[str, str]:
    """Return history buffer.

    Args:
        inputs: Chain inputs; unused here, accepted for interface parity.
    Returns:
        A single-entry mapping from the configured memory key to the buffer.
    """
    return {self.memory_key: self.buffer}
|
Return history buffer.
|
test_ai21_call_experimental
|
"""Test valid call to ai21 with an experimental model."""
llm = AI21(maxTokens=10, model='j1-grande-instruct')
output = llm('Say foo:')
assert isinstance(output, str)
|
def test_ai21_call_experimental() ->None:
    """Test valid call to ai21 with an experimental model."""
    llm = AI21(maxTokens=10, model='j1-grande-instruct')
    result = llm('Say foo:')
    assert isinstance(result, str)
|
Test valid call to ai21 with an experimental model.
|
_convert_newlines
|
"""Convert newline characters to markdown newline sequences
(space, space, newline).
"""
return text.replace('\n', ' \n')
|
def _convert_newlines(text: str) ->str:
    """Convert newline characters to markdown newline sequences
    (space, space, newline).
    """
    # Markdown renders a trailing double-space before a newline as a hard
    # line break, preserving the original line structure in rendered output.
    return text.replace('\n', '  \n')
|
Convert newline characters to markdown newline sequences
(space, space, newline).
|
failed
|
return any(task.failed() for task in self.tasks)
|
def failed(self) ->bool:
    """Return True when at least one task in this plan has failed."""
    for task in self.tasks:
        if task.failed():
            return True
    return False
| null |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.