| method_name | method_body | full_code | docstring |
|---|---|---|---|
test_nested_dict_throws
|
with pytest.raises(ValueError):
base.embed({'test_namespace': {'a': {'b': 1}}}, MockEncoder())
|
@pytest.mark.requires('vowpal_wabbit_next')
def test_nested_dict_throws() ->None:
with pytest.raises(ValueError):
base.embed({'test_namespace': {'a': {'b': 1}}}, MockEncoder())
| null |
input_keys
|
"""Input keys."""
return self.chain.input_keys
|
@property
def input_keys(self) ->List[str]:
"""Input keys."""
return self.chain.input_keys
|
Input keys.
|
__enter__
|
return self
|
def __enter__(self) ->WhyLabsCallbackHandler:
return self
| null |
_create_table_if_not_exists
|
create_table_query = f"""
CREATE TABLE IF NOT EXISTS {self.full_table_name} (
key TEXT PRIMARY KEY,
value TEXT
)
"""
with self.conn:
self.conn.execute(create_table_query)
|
def _create_table_if_not_exists(self) ->None:
create_table_query = f"""
CREATE TABLE IF NOT EXISTS {self.full_table_name} (
key TEXT PRIMARY KEY,
value TEXT
)
"""
with self.conn:
self.conn.execute(create_table_query)
| null |
test_requests_post_tool
|
tool = RequestsPostTool(requests_wrapper=mock_requests_wrapper)
input_text = '{"url": "https://example.com", "data": {"key": "value"}}'
assert tool.run(input_text) == "post {'key': 'value'}"
assert asyncio.run(tool.arun(input_text)) == "apost {'key': 'value'}"
|
def test_requests_post_tool(mock_requests_wrapper: TextRequestsWrapper) ->None:
tool = RequestsPostTool(requests_wrapper=mock_requests_wrapper)
input_text = '{"url": "https://example.com", "data": {"key": "value"}}'
assert tool.run(input_text) == "post {'key': 'value'}"
assert asyncio.run(tool.arun(input_text)) == "apost {'key': 'value'}"
| null |
_import_sqlitevss
|
from langchain_community.vectorstores.sqlitevss import SQLiteVSS
return SQLiteVSS
|
def _import_sqlitevss() ->Any:
from langchain_community.vectorstores.sqlitevss import SQLiteVSS
return SQLiteVSS
| null |
test_debug_is_settable_via_setter
|
from langchain_core.callbacks.manager import _get_debug
from langchain import globals
previous_value = globals._debug
previous_fn_reading = _get_debug()
assert previous_value == previous_fn_reading
set_debug(not previous_value)
new_value = globals._debug
new_fn_reading = _get_debug()
try:
assert new_value != previous_value
assert new_value == new_fn_reading
assert new_value == get_debug()
finally:
set_debug(previous_value)
|
def test_debug_is_settable_via_setter() ->None:
from langchain_core.callbacks.manager import _get_debug
from langchain import globals
previous_value = globals._debug
previous_fn_reading = _get_debug()
assert previous_value == previous_fn_reading
set_debug(not previous_value)
new_value = globals._debug
new_fn_reading = _get_debug()
try:
assert new_value != previous_value
assert new_value == new_fn_reading
assert new_value == get_debug()
finally:
set_debug(previous_value)
| null |
load
|
return self._get_resource()
|
def load(self) ->List[Document]:
return self._get_resource()
| null |
similarity_search_by_index
|
"""Return docs most similar to docstore_index.
Args:
docstore_index: Index of document in docstore
k: Number of Documents to return. Defaults to 4.
search_k: inspect up to search_k nodes, which defaults
to n_trees * n if not provided
Returns:
List of Documents most similar to the embedding.
"""
docs_and_scores = self.similarity_search_with_score_by_index(docstore_index,
k, search_k)
return [doc for doc, _ in docs_and_scores]
|
def similarity_search_by_index(self, docstore_index: int, k: int=4,
search_k: int=-1, **kwargs: Any) ->List[Document]:
"""Return docs most similar to docstore_index.
Args:
docstore_index: Index of document in docstore
k: Number of Documents to return. Defaults to 4.
search_k: inspect up to search_k nodes, which defaults
to n_trees * n if not provided
Returns:
List of Documents most similar to the embedding.
"""
docs_and_scores = self.similarity_search_with_score_by_index(docstore_index
, k, search_k)
return [doc for doc, _ in docs_and_scores]
|
Return docs most similar to docstore_index.
Args:
docstore_index: Index of document in docstore
k: Number of Documents to return. Defaults to 4.
search_k: inspect up to search_k nodes, which defaults
to n_trees * n if not provided
Returns:
List of Documents most similar to the embedding.
|
_import_searchapi_tool_SearchAPIResults
|
from langchain_community.tools.searchapi.tool import SearchAPIResults
return SearchAPIResults
|
def _import_searchapi_tool_SearchAPIResults() ->Any:
from langchain_community.tools.searchapi.tool import SearchAPIResults
return SearchAPIResults
| null |
add_texts
|
"""Run more texts through the embeddings and add to the vectorstore.
Args:
texts: Iterable of strings to add to the vectorstore.
metadatas: Optional list of metadatas associated with the texts.
kwargs: vectorstore specific parameters
Returns:
List of ids from adding the texts into the vectorstore.
"""
return list(texts)
|
def add_texts(self, texts: Iterable[str], metadatas: Optional[List[dict]]=
None, **kwargs: Any) ->List[str]:
"""Run more texts through the embeddings and add to the vectorstore.
Args:
texts: Iterable of strings to add to the vectorstore.
metadatas: Optional list of metadatas associated with the texts.
kwargs: vectorstore specific parameters
Returns:
List of ids from adding the texts into the vectorstore.
"""
return list(texts)
|
Run more texts through the embeddings and add to the vectorstore.
Args:
texts: Iterable of strings to add to the vectorstore.
metadatas: Optional list of metadatas associated with the texts.
kwargs: vectorstore specific parameters
Returns:
List of ids from adding the texts into the vectorstore.
|
select_examples
|
"""Select which examples to use based on semantic similarity."""
if self.input_keys:
input_variables = {key: input_variables[key] for key in self.input_keys}
query = ' '.join(sorted_values(input_variables))
example_docs = self.vectorstore.max_marginal_relevance_search(query, k=self
.k, fetch_k=self.fetch_k)
examples = [dict(e.metadata) for e in example_docs]
if self.example_keys:
examples = [{k: eg[k] for k in self.example_keys} for eg in examples]
return examples
|
def select_examples(self, input_variables: Dict[str, str]) ->List[dict]:
"""Select which examples to use based on semantic similarity."""
if self.input_keys:
input_variables = {key: input_variables[key] for key in self.input_keys
}
query = ' '.join(sorted_values(input_variables))
example_docs = self.vectorstore.max_marginal_relevance_search(query, k=
self.k, fetch_k=self.fetch_k)
examples = [dict(e.metadata) for e in example_docs]
if self.example_keys:
examples = [{k: eg[k] for k in self.example_keys} for eg in examples]
return examples
|
Select which examples to use based on semantic similarity.
|
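The selector above builds its query by joining input values in key-sorted order. A minimal stand-in for the sorted_values helper (the name follows langchain's prompt utilities, but this sketch is self-contained):

def sorted_values(values: dict) -> list:
    # Order values by key so the constructed query string is deterministic.
    return [values[key] for key in sorted(values)]

assert ' '.join(sorted_values({'b': 'world', 'a': 'hello'})) == 'hello world'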
mget
|
"""Get the values associated with the given keys.
Args:
keys (Sequence[str]): A sequence of keys.
Returns:
A sequence of optional values associated with the keys.
If a key is not found, the corresponding value will be None.
"""
return [self.store.get(key) for key in keys]
|
def mget(self, keys: Sequence[str]) ->List[Optional[V]]:
"""Get the values associated with the given keys.
Args:
keys (Sequence[str]): A sequence of keys.
Returns:
A sequence of optional values associated with the keys.
If a key is not found, the corresponding value will be None.
"""
return [self.store.get(key) for key in keys]
|
Get the values associated with the given keys.
Args:
keys (Sequence[str]): A sequence of keys.
Returns:
A sequence of optional values associated with the keys.
If a key is not found, the corresponding value will be None.
|
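A self-contained sketch of the mget contract documented above, using a plain dict-backed store (DictStore is a hypothetical name): missing keys yield None rather than raising.

from typing import Dict, List, Optional, Sequence

class DictStore:
    def __init__(self) -> None:
        self.store: Dict[str, str] = {}

    def mget(self, keys: Sequence[str]) -> List[Optional[str]]:
        # dict.get returns None for absent keys, matching the contract above.
        return [self.store.get(key) for key in keys]

store = DictStore()
store.store['a'] = '1'
assert store.mget(['a', 'missing']) == ['1', None]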
_import_powerbi
|
from langchain_community.utilities.powerbi import PowerBIDataset
return PowerBIDataset
|
def _import_powerbi() ->Any:
from langchain_community.utilities.powerbi import PowerBIDataset
return PowerBIDataset
| null |
test_react_chain
|
"""Test react chain."""
responses = ["""I should probably search
Action: Search[langchain]""",
"""I should probably lookup
Action: Lookup[made]""",
"""Ah okay now I know the answer
Action: Finish[2022]"""]
fake_llm = FakeListLLM(responses=responses)
react_chain = ReActChain(llm=fake_llm, docstore=FakeDocstore())
output = react_chain.run('when was langchain made')
assert output == '2022'
|
def test_react_chain() ->None:
"""Test react chain."""
responses = ['I should probably search\nAction: Search[langchain]',
"""I should probably lookup
Action: Lookup[made]""",
"""Ah okay now I know the answer
Action: Finish[2022]"""]
fake_llm = FakeListLLM(responses=responses)
react_chain = ReActChain(llm=fake_llm, docstore=FakeDocstore())
output = react_chain.run('when was langchain made')
assert output == '2022'
|
Test react chain.
|
image_summarize
|
"""
Make image summary
:param img_base64: Base64 encoded string for image
:param prompt: Text prompt for summarization
:return: Image summary
"""
chat = ChatOllama(model='bakllava', temperature=0)
msg = chat.invoke([HumanMessage(content=[{'type': 'text', 'text': prompt},
{'type': 'image_url', 'image_url':
f'data:image/jpeg;base64,{img_base64}'}])])
return msg.content
|
def image_summarize(img_base64, prompt):
"""
Make image summary
:param img_base64: Base64 encoded string for image
:param prompt: Text prompt for summarization
:return: Image summary
"""
chat = ChatOllama(model='bakllava', temperature=0)
msg = chat.invoke([HumanMessage(content=[{'type': 'text', 'text':
prompt}, {'type': 'image_url', 'image_url':
f'data:image/jpeg;base64,{img_base64}'}])])
return msg.content
|
Make image summary
:param img_base64: Base64 encoded string for image
:param prompt: Text prompt for summarization
:return: Image summary
|
_convert_dict_to_message
|
role = _dict['role']
if role == 'user':
return HumanMessage(content=_dict['content'])
elif role == 'assistant':
content = _dict['content'] or ''
return AIMessage(content=content)
elif role == 'system':
return SystemMessage(content=_dict['content'])
else:
return ChatMessage(content=_dict['content'], role=role)
|
def _convert_dict_to_message(_dict: Mapping[str, Any]) ->BaseMessage:
role = _dict['role']
if role == 'user':
return HumanMessage(content=_dict['content'])
elif role == 'assistant':
content = _dict['content'] or ''
return AIMessage(content=content)
elif role == 'system':
return SystemMessage(content=_dict['content'])
else:
return ChatMessage(content=_dict['content'], role=role)
| null |
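A usage sketch for the role dispatch above, assuming _convert_dict_to_message and the langchain_core message classes are importable; roles without a dedicated class fall through to a generic ChatMessage.

from langchain_core.messages import ChatMessage, HumanMessage

msg = _convert_dict_to_message({'role': 'user', 'content': 'hi'})
assert isinstance(msg, HumanMessage)

msg = _convert_dict_to_message({'role': 'critic', 'content': 'ok'})
# The unknown role is preserved on the generic message.
assert isinstance(msg, ChatMessage) and msg.role == 'critic'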
path_params
|
return [property.name for property in self.properties if property.location ==
APIPropertyLocation.PATH]
|
@property
def path_params(self) ->List[str]:
return [property.name for property in self.properties if property.
location == APIPropertyLocation.PATH]
| null |
_run
|
"""Use the tool."""
values = query.split(',')
person = values[0]
if len(values) > 1:
num_results = int(values[1])
else:
num_results = 2
return self._search(person, num_results)
|
def _run(self, query: str, run_manager: Optional[CallbackManagerForToolRun]
=None) ->str:
"""Use the tool."""
values = query.split(',')
person = values[0]
if len(values) > 1:
num_results = int(values[1])
else:
num_results = 2
return self._search(person, num_results)
|
Use the tool.
|
trace_as_chain_group
|
"""Get a callback manager for a chain group in a context manager.
Useful for grouping different calls together as a single run even if
they aren't composed in a single chain.
Args:
group_name (str): The name of the chain group.
callback_manager (CallbackManager, optional): The callback manager to use.
inputs (Dict[str, Any], optional): The inputs to the chain group.
project_name (str, optional): The name of the project.
Defaults to None.
example_id (str or UUID, optional): The ID of the example.
Defaults to None.
run_id (UUID, optional): The ID of the run.
tags (List[str], optional): The inheritable tags to apply to all runs.
Defaults to None.
Note: must have LANGCHAIN_TRACING_V2 env var set to true to see the trace in LangSmith.
Returns:
CallbackManagerForChainGroup: The callback manager for the chain group.
Example:
.. code-block:: python
llm_input = "Foo"
with trace_as_chain_group("group_name", inputs={"input": llm_input}) as manager:
# Use the callback manager for the chain group
res = llm.predict(llm_input, callbacks=manager)
manager.on_chain_end({"output": res})
"""
from langchain_core.tracers.context import _get_trace_callbacks
cb = _get_trace_callbacks(project_name, example_id, callback_manager=
callback_manager)
cm = CallbackManager.configure(inheritable_callbacks=cb, inheritable_tags=tags)
run_manager = cm.on_chain_start({'name': group_name}, inputs or {}, run_id=
run_id)
child_cm = run_manager.get_child()
group_cm = CallbackManagerForChainGroup(child_cm.handlers, child_cm.
inheritable_handlers, child_cm.parent_run_id, parent_run_manager=
run_manager, tags=child_cm.tags, inheritable_tags=child_cm.
inheritable_tags, metadata=child_cm.metadata, inheritable_metadata=
child_cm.inheritable_metadata)
try:
yield group_cm
except Exception as e:
if not group_cm.ended:
run_manager.on_chain_error(e)
raise e
else:
if not group_cm.ended:
run_manager.on_chain_end({})
|
@contextmanager
def trace_as_chain_group(group_name: str, callback_manager: Optional[
CallbackManager]=None, *, inputs: Optional[Dict[str, Any]]=None,
project_name: Optional[str]=None, example_id: Optional[Union[str, UUID]
]=None, run_id: Optional[UUID]=None, tags: Optional[List[str]]=None
) ->Generator[CallbackManagerForChainGroup, None, None]:
"""Get a callback manager for a chain group in a context manager.
Useful for grouping different calls together as a single run even if
they aren't composed in a single chain.
Args:
group_name (str): The name of the chain group.
callback_manager (CallbackManager, optional): The callback manager to use.
inputs (Dict[str, Any], optional): The inputs to the chain group.
project_name (str, optional): The name of the project.
Defaults to None.
example_id (str or UUID, optional): The ID of the example.
Defaults to None.
run_id (UUID, optional): The ID of the run.
tags (List[str], optional): The inheritable tags to apply to all runs.
Defaults to None.
Note: must have LANGCHAIN_TRACING_V2 env var set to true to see the trace in LangSmith.
Returns:
CallbackManagerForChainGroup: The callback manager for the chain group.
Example:
.. code-block:: python
llm_input = "Foo"
with trace_as_chain_group("group_name", inputs={"input": llm_input}) as manager:
# Use the callback manager for the chain group
res = llm.predict(llm_input, callbacks=manager)
manager.on_chain_end({"output": res})
"""
from langchain_core.tracers.context import _get_trace_callbacks
cb = _get_trace_callbacks(project_name, example_id, callback_manager=
callback_manager)
cm = CallbackManager.configure(inheritable_callbacks=cb,
inheritable_tags=tags)
run_manager = cm.on_chain_start({'name': group_name}, inputs or {},
run_id=run_id)
child_cm = run_manager.get_child()
group_cm = CallbackManagerForChainGroup(child_cm.handlers, child_cm.
inheritable_handlers, child_cm.parent_run_id, parent_run_manager=
run_manager, tags=child_cm.tags, inheritable_tags=child_cm.
inheritable_tags, metadata=child_cm.metadata, inheritable_metadata=
child_cm.inheritable_metadata)
try:
yield group_cm
except Exception as e:
if not group_cm.ended:
run_manager.on_chain_error(e)
raise e
else:
if not group_cm.ended:
run_manager.on_chain_end({})
|
Get a callback manager for a chain group in a context manager.
Useful for grouping different calls together as a single run even if
they aren't composed in a single chain.
Args:
group_name (str): The name of the chain group.
callback_manager (CallbackManager, optional): The callback manager to use.
inputs (Dict[str, Any], optional): The inputs to the chain group.
project_name (str, optional): The name of the project.
Defaults to None.
example_id (str or UUID, optional): The ID of the example.
Defaults to None.
run_id (UUID, optional): The ID of the run.
tags (List[str], optional): The inheritable tags to apply to all runs.
Defaults to None.
Note: must have LANGCHAIN_TRACING_V2 env var set to true to see the trace in LangSmith.
Returns:
CallbackManagerForChainGroup: The callback manager for the chain group.
Example:
.. code-block:: python
llm_input = "Foo"
with trace_as_chain_group("group_name", inputs={"input": llm_input}) as manager:
# Use the callback manager for the chain group
res = llm.predict(llm_input, callbacks=manager)
manager.on_chain_end({"output": res})
|
_parse_json
|
if isinstance(node, str):
return parse_json_markdown(node)
elif hasattr(node, 'schema') and callable(getattr(node, 'schema')):
return getattr(node, 'schema')()
return node
|
def _parse_json(self, node: Any) ->Union[dict, list, None, float, bool, int,
str]:
if isinstance(node, str):
return parse_json_markdown(node)
elif hasattr(node, 'schema') and callable(getattr(node, 'schema')):
return getattr(node, 'schema')()
return node
| null |
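The string branch above delegates to parse_json_markdown, which accepts raw JSON or JSON wrapped in a Markdown code fence. A quick check, assuming the helper is importable from langchain_core.utils.json as in recent releases:

from langchain_core.utils.json import parse_json_markdown

assert parse_json_markdown('{"a": 1}') == {'a': 1}
assert parse_json_markdown('```json\n{"a": 1}\n```') == {'a': 1}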
test_instruct_prompt
|
"""Test instruct prompt."""
llm = MosaicML(inject_instruction_format=True, model_kwargs={
'max_new_tokens': 10})
instruction = 'Repeat the word foo'
prompt = llm._transform_prompt(instruction)
expected_prompt = PROMPT_FOR_GENERATION_FORMAT.format(instruction=instruction)
assert prompt == expected_prompt
output = llm(prompt)
assert isinstance(output, str)
|
def test_instruct_prompt() ->None:
"""Test instruct prompt."""
llm = MosaicML(inject_instruction_format=True, model_kwargs={
'max_new_tokens': 10})
instruction = 'Repeat the word foo'
prompt = llm._transform_prompt(instruction)
expected_prompt = PROMPT_FOR_GENERATION_FORMAT.format(instruction=
instruction)
assert prompt == expected_prompt
output = llm(prompt)
assert isinstance(output, str)
|
Test instruct prompt.
|
_get_filled_chunk
|
"""Fill the generation chunk."""
return ChatGenerationChunk(message=ChatMessageChunk(content=text, role=role))
|
def _get_filled_chunk(self, text: str, role: Optional[str]='assistant'
) ->ChatGenerationChunk:
"""Fill the generation chunk."""
return ChatGenerationChunk(message=ChatMessageChunk(content=text, role=
role))
|
Fill the generation chunk.
|
validate_environment
|
"""Validate that python package exists in environment."""
try:
import ads
except ImportError as ex:
raise ImportError(
'Could not import ads python package. Please install it with `pip install oracle_ads`.'
) from ex
if not values.get('auth', None):
values['auth'] = ads.common.auth.default_signer()
values['endpoint'] = get_from_dict_or_env(values, 'endpoint',
'OCI_LLM_ENDPOINT')
return values
|
@root_validator()
def validate_environment(cls, values: Dict) ->Dict:
"""Validate that python package exists in environment."""
try:
import ads
except ImportError as ex:
raise ImportError(
'Could not import ads python package. Please install it with `pip install oracle_ads`.'
) from ex
if not values.get('auth', None):
values['auth'] = ads.common.auth.default_signer()
values['endpoint'] = get_from_dict_or_env(values, 'endpoint',
'OCI_LLM_ENDPOINT')
return values
|
Validate that python package exists in environment.
|
_load_chain_from_file
|
"""Load chain from file."""
if isinstance(file, str):
file_path = Path(file)
else:
file_path = file
if file_path.suffix == '.json':
with open(file_path) as f:
config = json.load(f)
elif file_path.suffix == '.yaml':
with open(file_path, 'r') as f:
config = yaml.safe_load(f)
else:
raise ValueError('File type must be json or yaml')
if 'verbose' in kwargs:
config['verbose'] = kwargs.pop('verbose')
if 'memory' in kwargs:
config['memory'] = kwargs.pop('memory')
return load_chain_from_config(config, **kwargs)
|
def _load_chain_from_file(file: Union[str, Path], **kwargs: Any) ->Chain:
"""Load chain from file."""
if isinstance(file, str):
file_path = Path(file)
else:
file_path = file
if file_path.suffix == '.json':
with open(file_path) as f:
config = json.load(f)
elif file_path.suffix == '.yaml':
with open(file_path, 'r') as f:
config = yaml.safe_load(f)
else:
raise ValueError('File type must be json or yaml')
if 'verbose' in kwargs:
config['verbose'] = kwargs.pop('verbose')
if 'memory' in kwargs:
config['memory'] = kwargs.pop('memory')
return load_chain_from_config(config, **kwargs)
|
Load chain from file.
|
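A self-contained sketch of the suffix dispatch used above (load_config is a hypothetical name; requires PyYAML):

import json
from pathlib import Path

import yaml

def load_config(file_path: Path) -> dict:
    # Mirror the loader above: .json and .yaml are accepted, all else rejected.
    if file_path.suffix == '.json':
        return json.loads(file_path.read_text())
    elif file_path.suffix == '.yaml':
        return yaml.safe_load(file_path.read_text())
    raise ValueError('File type must be json or yaml')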
delete
|
"""Delete by vector ID or other criteria.
Args:
ids: List of ids to delete.
**kwargs: Other keyword arguments that subclasses might use.
Returns:
Optional[bool]: True if deletion is successful,
False otherwise, None if not implemented.
"""
payload = {'ids': ids}
response = requests.delete(SemaDB.BASE_URL +
f'/collections/{self.collection_name}/points', json=payload, headers=
self.headers)
return response.status_code == 200 and len(response.json()['failedPoints']
) == 0
|
def delete(self, ids: Optional[List[str]]=None, **kwargs: Any) ->Optional[bool
]:
"""Delete by vector ID or other criteria.
Args:
ids: List of ids to delete.
**kwargs: Other keyword arguments that subclasses might use.
Returns:
Optional[bool]: True if deletion is successful,
False otherwise, None if not implemented.
"""
payload = {'ids': ids}
response = requests.delete(SemaDB.BASE_URL +
f'/collections/{self.collection_name}/points', json=payload,
headers=self.headers)
return response.status_code == 200 and len(response.json()['failedPoints']
) == 0
|
Delete by vector ID or other criteria.
Args:
ids: List of ids to delete.
**kwargs: Other keyword arguments that subclasses might use.
Returns:
Optional[bool]: True if deletion is successful,
False otherwise, None if not implemented.
|
_import_bedrock
|
from langchain_community.llms.bedrock import Bedrock
return Bedrock
|
def _import_bedrock() ->Any:
from langchain_community.llms.bedrock import Bedrock
return Bedrock
| null |
_import_pipelineai
|
from langchain_community.llms.pipelineai import PipelineAI
return PipelineAI
|
def _import_pipelineai() ->Any:
from langchain_community.llms.pipelineai import PipelineAI
return PipelineAI
| null |
test_awa_embedding_query
|
"""Test Awa embeddings for query."""
document = 'foo bar'
embedding = AwaEmbeddings()
output = embedding.embed_query(document)
assert len(output) == 768
|
def test_awa_embedding_query() ->None:
"""Test Awa embeddings for query."""
document = 'foo bar'
embedding = AwaEmbeddings()
output = embedding.embed_query(document)
assert len(output) == 768
|
Test Awa embeddings for query.
|
test_no_backticks
|
"""Test if there are no backticks, so the original text should be returned."""
query = 'MATCH (n) RETURN n'
output = extract_cypher(query)
assert output == query
|
def test_no_backticks() ->None:
"""Test if there are no backticks, so the original text should be returned."""
query = 'MATCH (n) RETURN n'
output = extract_cypher(query)
assert output == query
|
Test if there are no backticks, so the original text should be returned.
|
_handle_callbacks
|
if callback_manager is not None:
warnings.warn(
'callback_manager is deprecated. Please use callbacks instead.',
DeprecationWarning)
if callbacks is not None:
raise ValueError(
'Cannot specify both callback_manager and callbacks arguments.')
return callback_manager
return callbacks
|
def _handle_callbacks(callback_manager: Optional[BaseCallbackManager],
callbacks: Callbacks) ->Callbacks:
if callback_manager is not None:
warnings.warn(
'callback_manager is deprecated. Please use callbacks instead.',
DeprecationWarning)
if callbacks is not None:
raise ValueError(
'Cannot specify both callback_manager and callbacks arguments.'
)
return callback_manager
return callbacks
| null |
visit_comparison
|
if comparison.comparator in (Comparator.IN, Comparator.NIN) and not isinstance(
comparison.value, list):
comparison.value = [comparison.value]
return {comparison.attribute: {self._format_func(comparison.comparator):
comparison.value}}
|
def visit_comparison(self, comparison: Comparison) ->Dict:
if comparison.comparator in (Comparator.IN, Comparator.NIN
) and not isinstance(comparison.value, list):
comparison.value = [comparison.value]
return {comparison.attribute: {self._format_func(comparison.comparator):
comparison.value}}
| null |
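A minimal illustration of the scalar-to-list coercion above, with plain strings standing in for the Comparator enum values:

def coerce_in_value(comparator: str, value):
    # IN/NIN expect list operands, so a bare scalar is wrapped before use.
    if comparator in ('in', 'nin') and not isinstance(value, list):
        value = [value]
    return value

assert coerce_in_value('in', 'red') == ['red']
assert coerce_in_value('eq', 'red') == 'red'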
__exit__
|
user_ctx.set(None)
user_props_ctx.set(None)
|
def __exit__(self, exc_type: Any, exc_value: Any, exc_tb: Any) ->Any:
user_ctx.set(None)
user_props_ctx.set(None)
| null |
test_chat_fireworks_llm_output_contains_model_id
|
"""Test llm_output contains model_id."""
message = HumanMessage(content='Hello')
llm_result = chat.generate([[message]])
assert llm_result.llm_output is not None
assert llm_result.llm_output['model'] == chat.model
|
@pytest.mark.scheduled
def test_chat_fireworks_llm_output_contains_model_id(chat: ChatFireworks
) ->None:
"""Test llm_output contains model_id."""
message = HumanMessage(content='Hello')
llm_result = chat.generate([[message]])
assert llm_result.llm_output is not None
assert llm_result.llm_output['model'] == chat.model
|
Test llm_output contains model_id.
|
_default_params
|
"""Get the default parameters for calling Clarifai API."""
return {}
|
@property
def _default_params(self) ->Dict[str, Any]:
"""Get the default parameters for calling Clarifai API."""
return {}
|
Get the default parameters for calling Clarifai API.
|
_get_pubmed
|
return PubmedQueryRun(api_wrapper=PubMedAPIWrapper(**kwargs))
|
def _get_pubmed(**kwargs: Any) ->BaseTool:
return PubmedQueryRun(api_wrapper=PubMedAPIWrapper(**kwargs))
| null |
from_documents
|
"""Return VectorStore initialized from documents."""
texts = [document.page_content for document in documents]
metadatas = [document.metadata for document in documents]
return cls.from_texts(texts, embedding, metadatas, db_url, collection_name,
**kwargs)
|
@classmethod
def from_documents(cls, documents: List[Document], embedding: Embeddings,
db_url: str='', collection_name: str=str(uuid.uuid4().hex), **kwargs: Any
) ->PGVecto_rs:
"""Return VectorStore initialized from documents."""
texts = [document.page_content for document in documents]
metadatas = [document.metadata for document in documents]
return cls.from_texts(texts, embedding, metadatas, db_url,
collection_name, **kwargs)
|
Return VectorStore initialized from documents.
|
test_google_palm_embedding_documents_multiple
|
"""Test Google PaLM embeddings."""
documents = ['foo bar', 'bar foo', 'foo']
embedding = GooglePalmEmbeddings()
output = embedding.embed_documents(documents)
assert len(output) == 3
assert len(output[0]) == 768
assert len(output[1]) == 768
assert len(output[2]) == 768
|
def test_google_palm_embedding_documents_multiple() ->None:
"""Test Google PaLM embeddings."""
documents = ['foo bar', 'bar foo', 'foo']
embedding = GooglePalmEmbeddings()
output = embedding.embed_documents(documents)
assert len(output) == 3
assert len(output[0]) == 768
assert len(output[1]) == 768
assert len(output[2]) == 768
|
Test Google PaLM embeddings.
|
test_character_text_splitting_args
|
"""Test invalid arguments."""
with pytest.raises(ValueError):
CharacterTextSplitter(chunk_size=2, chunk_overlap=4)
|
def test_character_text_splitting_args() ->None:
"""Test invalid arguments."""
with pytest.raises(ValueError):
CharacterTextSplitter(chunk_size=2, chunk_overlap=4)
|
Test invalid arguments.
|
_get_relevant_documents
|
url, json, headers = self._create_request(query)
response = requests.post(url, json=json, headers=headers)
results = response.json()['results'][0]['results']
docs = []
for d in results:
content = d.pop('text')
metadata = d.pop('metadata', d)
if metadata.get('source_id'):
metadata['source'] = metadata.pop('source_id')
docs.append(Document(page_content=content, metadata=metadata))
return docs
|
def _get_relevant_documents(self, query: str, *, run_manager:
CallbackManagerForRetrieverRun) ->List[Document]:
url, json, headers = self._create_request(query)
response = requests.post(url, json=json, headers=headers)
results = response.json()['results'][0]['results']
docs = []
for d in results:
content = d.pop('text')
metadata = d.pop('metadata', d)
if metadata.get('source_id'):
metadata['source'] = metadata.pop('source_id')
docs.append(Document(page_content=content, metadata=metadata))
return docs
| null |
test_meilisearch_with_metadatas_with_scores_using_vector
|
"""Test end to end construction and scored search, using embedding vector."""
texts = ['foo', 'bar', 'baz']
metadatas = [{'page': str(i)} for i in range(len(texts))]
embeddings = FakeEmbeddings()
docsearch = Meilisearch.from_texts(texts=texts, embedding=FakeEmbeddings(),
url=TEST_MEILI_HTTP_ADDR, api_key=TEST_MEILI_MASTER_KEY, index_name=
INDEX_NAME, metadatas=metadatas)
embedded_query = embeddings.embed_query('foo')
self._wait_last_task()
output = docsearch.similarity_search_by_vector_with_scores(embedding=
embedded_query, k=1)
assert output == [(Document(page_content='foo', metadata={'page': '0'}), 9.0)]
|
def test_meilisearch_with_metadatas_with_scores_using_vector(self) ->None:
"""Test end to end construction and scored search, using embedding vector."""
texts = ['foo', 'bar', 'baz']
metadatas = [{'page': str(i)} for i in range(len(texts))]
embeddings = FakeEmbeddings()
docsearch = Meilisearch.from_texts(texts=texts, embedding=
FakeEmbeddings(), url=TEST_MEILI_HTTP_ADDR, api_key=
TEST_MEILI_MASTER_KEY, index_name=INDEX_NAME, metadatas=metadatas)
embedded_query = embeddings.embed_query('foo')
self._wait_last_task()
output = docsearch.similarity_search_by_vector_with_scores(embedding=
embedded_query, k=1)
assert output == [(Document(page_content='foo', metadata={'page': '0'}),
9.0)]
|
Test end to end construction and scored search, using embedding vector.
|
test_fireworks_multiple_prompts
|
"""Test completion with multiple prompts."""
output = llm.generate(['How is the weather in New York today?',
"I'm pickle rick"])
assert isinstance(output, LLMResult)
assert isinstance(output.generations, list)
assert len(output.generations) == 2
|
@pytest.mark.scheduled
def test_fireworks_multiple_prompts(llm: Fireworks) ->None:
"""Test completion with multiple prompts."""
output = llm.generate(['How is the weather in New York today?',
"I'm pickle rick"])
assert isinstance(output, LLMResult)
assert isinstance(output.generations, list)
assert len(output.generations) == 2
|
Test completion with multiple prompts.
|
dependable_faiss_import
|
"""
Import faiss if available, otherwise raise error.
If FAISS_NO_AVX2 environment variable is set, it will be considered
to load FAISS with no AVX2 optimization.
Args:
no_avx2: Load FAISS strictly with no AVX2 optimization
so that the vectorstore is portable and compatible with other devices.
"""
if no_avx2 is None and 'FAISS_NO_AVX2' in os.environ:
no_avx2 = bool(os.getenv('FAISS_NO_AVX2'))
try:
if no_avx2:
from faiss import swigfaiss as faiss
else:
import faiss
except ImportError:
raise ImportError(
'Could not import faiss python package. Please install it with `pip install faiss-gpu` (for CUDA supported GPU) or `pip install faiss-cpu` (depending on Python version).'
)
return faiss
|
def dependable_faiss_import(no_avx2: Optional[bool]=None) ->Any:
"""
Import faiss if available, otherwise raise error.
If FAISS_NO_AVX2 environment variable is set, it will be considered
to load FAISS with no AVX2 optimization.
Args:
no_avx2: Load FAISS strictly with no AVX2 optimization
so that the vectorstore is portable and compatible with other devices.
"""
if no_avx2 is None and 'FAISS_NO_AVX2' in os.environ:
no_avx2 = bool(os.getenv('FAISS_NO_AVX2'))
try:
if no_avx2:
from faiss import swigfaiss as faiss
else:
import faiss
except ImportError:
raise ImportError(
'Could not import faiss python package. Please install it with `pip install faiss-gpu` (for CUDA supported GPU) or `pip install faiss-cpu` (depending on Python version).'
)
return faiss
|
Import faiss if available, otherwise raise error.
If FAISS_NO_AVX2 environment variable is set, it will be considered
to load FAISS with no AVX2 optimization.
Args:
no_avx2: Load FAISS strictly with no AVX2 optimization
so that the vectorstore is portable and compatible with other devices.
|
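One subtlety in the environment check above is that bool() is applied to the raw string, so any non-empty value, including '0', enables the no-AVX2 path:

import os

os.environ['FAISS_NO_AVX2'] = '0'
no_avx2 = bool(os.getenv('FAISS_NO_AVX2'))
assert no_avx2 is True  # '0' is a non-empty string, so it still opts out of AVX2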
transform
|
yield from self._transform_stream_with_config(input, self._transform,
patch_config(config, run_name=(config or {}).get('run_name') or self.
name), **kwargs)
|
def transform(self, input: Iterator[Input], config: Optional[RunnableConfig
]=None, **kwargs: Optional[Any]) ->Iterator[Output]:
yield from self._transform_stream_with_config(input, self._transform,
patch_config(config, run_name=(config or {}).get('run_name') or
self.name), **kwargs)
| null |
_evaluate_strings
|
"""Score the output string.
Args:
prediction (str): The output string from the first model.
input (str, optional): The input or task string.
callbacks (Callbacks, optional): The callbacks to use.
reference (str, optional): The reference string, if any.
**kwargs (Any): Additional keyword arguments.
Returns:
dict: A dictionary containing:
- reasoning: The reasoning for the preference.
- score: A score between 1 and 10.
"""
input_ = self._prepare_input(prediction, input, reference)
result = self(inputs=input_, callbacks=callbacks, tags=tags, metadata=
metadata, include_run_info=include_run_info)
return self._prepare_output(result)
|
def _evaluate_strings(self, *, prediction: str, input: Optional[str]=None,
reference: Optional[str]=None, callbacks: Callbacks=None, tags:
Optional[List[str]]=None, metadata: Optional[Dict[str, Any]]=None,
include_run_info: bool=False, **kwargs: Any) ->dict:
"""Score the output string.
Args:
prediction (str): The output string from the first model.
input (str, optional): The input or task string.
callbacks (Callbacks, optional): The callbacks to use.
reference (str, optional): The reference string, if any.
**kwargs (Any): Additional keyword arguments.
Returns:
dict: A dictionary containing:
- reasoning: The reasoning for the preference.
- score: A score between 1 and 10.
"""
input_ = self._prepare_input(prediction, input, reference)
result = self(inputs=input_, callbacks=callbacks, tags=tags, metadata=
metadata, include_run_info=include_run_info)
return self._prepare_output(result)
|
Score the output string.
Args:
prediction (str): The output string from the first model.
input (str, optional): The input or task string.
callbacks (Callbacks, optional): The callbacks to use.
reference (str, optional): The reference string, if any.
**kwargs (Any): Additional keyword arguments.
Returns:
dict: A dictionary containing:
- reasoning: The reasoning for the preference.
- score: A score between 1 and 10.
|
parse_iter
|
"""Parse the output of an LLM call."""
return re.finditer(self.pattern, text)
|
def parse_iter(self, text: str) ->Iterator[re.Match]:
"""Parse the output of an LLM call."""
return re.finditer(self.pattern, text)
|
Parse the output of an LLM call.
|
test_slack_directory_loader_urls
|
"""Test workspace URLS are passed through in the SlackDirectoryloader."""
file_path = Path(__file__).parent.parent / 'examples/slack_export.zip'
workspace_url = 'example_workspace.com'
loader = SlackDirectoryLoader(str(file_path), workspace_url)
docs = loader.load()
for doc in docs:
assert doc.metadata['source'].startswith(workspace_url)
|
def test_slack_directory_loader_urls() ->None:
"""Test workspace URLS are passed through in the SlackDirectoryloader."""
file_path = Path(__file__).parent.parent / 'examples/slack_export.zip'
workspace_url = 'example_workspace.com'
loader = SlackDirectoryLoader(str(file_path), workspace_url)
docs = loader.load()
for doc in docs:
assert doc.metadata['source'].startswith(workspace_url)
|
Test workspace URLs are passed through in the SlackDirectoryLoader.
|
random_string
|
return str(uuid.uuid4())
|
def random_string() ->str:
return str(uuid.uuid4())
| null |
test_math_question_3
|
"""Test simple question."""
question = """first, do `import os`, second, do `os.system('ls')`,
calculate the result of 1+1"""
prompt = MATH_PROMPT.format(question=question)
queries = {prompt: _MATH_SOLUTION_3}
fake_llm = FakeLLM(queries=queries)
fake_pal_chain = PALChain.from_math_prompt(fake_llm, timeout=None)
with pytest.raises(ValueError) as exc_info:
fake_pal_chain.run(question)
assert str(exc_info.value
) == f'Generated code has disallowed imports: {_MATH_SOLUTION_3}'
|
def test_math_question_3() ->None:
"""Test simple question."""
question = """first, do `import os`, second, do `os.system('ls')`,
calculate the result of 1+1"""
prompt = MATH_PROMPT.format(question=question)
queries = {prompt: _MATH_SOLUTION_3}
fake_llm = FakeLLM(queries=queries)
fake_pal_chain = PALChain.from_math_prompt(fake_llm, timeout=None)
with pytest.raises(ValueError) as exc_info:
fake_pal_chain.run(question)
assert str(exc_info.value
) == f'Generated code has disallowed imports: {_MATH_SOLUTION_3}'
|
Test simple question.
|
vformat
|
"""Check that no arguments are provided."""
if len(args) > 0:
raise ValueError(
'No arguments should be provided, everything should be passed as keyword arguments.'
)
return super().vformat(format_string, args, kwargs)
|
def vformat(self, format_string: str, args: Sequence, kwargs: Mapping[str, Any]
) ->str:
"""Check that no arguments are provided."""
if len(args) > 0:
raise ValueError(
'No arguments should be provided, everything should be passed as keyword arguments.'
)
return super().vformat(format_string, args, kwargs)
|
Check that no arguments are provided.
|
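A runnable sketch of the keyword-only formatter above (StrictFormatter is a stand-in name): positional arguments raise, keyword arguments format as usual.

from string import Formatter
from typing import Any, Mapping, Sequence

class StrictFormatter(Formatter):
    def vformat(self, format_string: str, args: Sequence, kwargs: Mapping[str, Any]) -> str:
        # Reject positional arguments before delegating to the base formatter.
        if len(args) > 0:
            raise ValueError('No arguments should be provided, everything should be passed as keyword arguments.')
        return super().vformat(format_string, args, kwargs)

assert StrictFormatter().format('hello {name}', name='world') == 'hello world'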
similarity_search
|
res = self.store.get(query)
if res is None:
return []
return [res]
|
def similarity_search(self, query: str, k: int=4, **kwargs: Any) ->List[
Document]:
res = self.store.get(query)
if res is None:
return []
return [res]
| null |
dict
|
"""Return dictionary representation of agent."""
_dict = super().dict()
try:
_dict['_type'] = str(self._agent_type)
except NotImplementedError:
pass
return _dict
|
def dict(self, **kwargs: Any) ->Dict:
"""Return dictionary representation of agent."""
_dict = super().dict()
try:
_dict['_type'] = str(self._agent_type)
except NotImplementedError:
pass
return _dict
|
Return dictionary representation of agent.
|
test_clarifai_with_from_documents
|
"""Test end to end construction and search."""
initial_content = 'foo'
original_doc = Document(page_content=initial_content, metadata={'page': '0'})
USER_ID = 'minhajul'
APP_ID = 'test-lang-2'
NUMBER_OF_DOCS = 1
docsearch = Clarifai.from_documents(user_id=USER_ID, app_id=APP_ID,
documents=[original_doc], pat=None, number_of_docs=NUMBER_OF_DOCS)
time.sleep(2.5)
output = docsearch.similarity_search('foo')
assert output == [Document(page_content=initial_content, metadata={'page':
'0'})]
|
def test_clarifai_with_from_documents() ->None:
"""Test end to end construction and search."""
initial_content = 'foo'
original_doc = Document(page_content=initial_content, metadata={'page':
'0'})
USER_ID = 'minhajul'
APP_ID = 'test-lang-2'
NUMBER_OF_DOCS = 1
docsearch = Clarifai.from_documents(user_id=USER_ID, app_id=APP_ID,
documents=[original_doc], pat=None, number_of_docs=NUMBER_OF_DOCS)
time.sleep(2.5)
output = docsearch.similarity_search('foo')
assert output == [Document(page_content=initial_content, metadata={
'page': '0'})]
|
Test end to end construction and search.
|
validate_environment
|
"""Validates the environment."""
try:
from google.cloud.contentwarehouse_v1 import DocumentServiceClient
except ImportError as exc:
raise ImportError(
'google.cloud.contentwarehouse is not installed. Please install it with pip install google-cloud-contentwarehouse'
) from exc
values['project_number'] = get_from_dict_or_env(values, 'project_number',
'PROJECT_NUMBER')
values['client'] = DocumentServiceClient(client_info=get_client_info(module
='document-ai-warehouse'))
return values
|
@root_validator()
def validate_environment(cls, values: Dict) ->Dict:
"""Validates the environment."""
try:
from google.cloud.contentwarehouse_v1 import DocumentServiceClient
except ImportError as exc:
raise ImportError(
'google.cloud.contentwarehouse is not installed. Please install it with pip install google-cloud-contentwarehouse'
) from exc
values['project_number'] = get_from_dict_or_env(values,
'project_number', 'PROJECT_NUMBER')
values['client'] = DocumentServiceClient(client_info=get_client_info(
module='document-ai-warehouse'))
return values
|
Validates the environment.
|
get_params
|
"""Get parameters for SerpAPI."""
_params = {'api_key': self.serpapi_api_key, 'q': query}
params = {**self.params, **_params}
return params
|
def get_params(self, query: str) ->Dict[str, str]:
"""Get parameters for SerpAPI."""
_params = {'api_key': self.serpapi_api_key, 'q': query}
params = {**self.params, **_params}
return params
|
Get parameters for SerpAPI.
|
float
|
return float(item)
|
def float(self, item: Any) ->float:
return float(item)
| null |
test_fireworks_streaming
|
"""Test streaming tokens from Fireworks."""
for token in chat.stream("I'm Pickle Rick"):
assert isinstance(token.content, str)
|
@pytest.mark.scheduled
def test_fireworks_streaming(chat: ChatFireworks) ->None:
"""Test streaming tokens from Fireworks."""
for token in chat.stream("I'm Pickle Rick"):
assert isinstance(token.content, str)
|
Test streaming tokens from Fireworks.
|
test_parse_without_language
|
llm_output = """I can use the `foo` tool to achieve the goal.
Action:
```
{
"action": "foo",
"action_input": "bar"
}
```
"""
action, action_input = get_action_and_input(llm_output)
assert action == 'foo'
assert action_input == 'bar'
|
def test_parse_without_language() ->None:
llm_output = """I can use the `foo` tool to achieve the goal.
Action:
```
{
"action": "foo",
"action_input": "bar"
}
```
"""
action, action_input = get_action_and_input(llm_output)
assert action == 'foo'
assert action_input == 'bar'
| null |
get_allowed_tools
|
return self.allowed_tools
|
def get_allowed_tools(self) ->Optional[List[str]]:
return self.allowed_tools
| null |
_import_stackexchange_tool
|
from langchain_community.tools.stackexchange.tool import StackExchangeTool
return StackExchangeTool
|
def _import_stackexchange_tool() ->Any:
from langchain_community.tools.stackexchange.tool import StackExchangeTool
return StackExchangeTool
| null |
upserting_vector_store
|
"""Vector store fixture."""
return InMemoryVectorStore(permit_upserts=True)
|
@pytest.fixture
def upserting_vector_store() ->InMemoryVectorStore:
"""Vector store fixture."""
return InMemoryVectorStore(permit_upserts=True)
|
Vector store fixture.
|
__init__
|
"""Initialize with file path.
Args:
extract_images: Whether to extract images from PDF.
concatenate_pages: If True, concatenate all PDF pages into a single
document. Otherwise, return one document per page.
"""
try:
from pdfminer.high_level import extract_text
except ImportError:
raise ImportError(
'`pdfminer` package not found, please install it with `pip install pdfminer.six`'
)
super().__init__(file_path, headers=headers)
self.parser = PDFMinerParser(extract_images=extract_images,
concatenate_pages=concatenate_pages)
|
def __init__(self, file_path: str, *, headers: Optional[Dict]=None,
extract_images: bool=False, concatenate_pages: bool=True) ->None:
"""Initialize with file path.
Args:
extract_images: Whether to extract images from PDF.
concatenate_pages: If True, concatenate all PDF pages into a single
document. Otherwise, return one document per page.
"""
try:
from pdfminer.high_level import extract_text
except ImportError:
raise ImportError(
'`pdfminer` package not found, please install it with `pip install pdfminer.six`'
)
super().__init__(file_path, headers=headers)
self.parser = PDFMinerParser(extract_images=extract_images,
concatenate_pages=concatenate_pages)
|
Initialize with file path.
Args:
extract_images: Whether to extract images from PDF.
concatenate_pages: If True, concatenate all PDF pages into a single
document. Otherwise, return one document per page.
|
_reduce_tokens_below_limit
|
num_docs = len(docs)
if self.max_tokens_limit and isinstance(self.combine_docs_chain,
StuffDocumentsChain):
tokens = [self.combine_docs_chain.llm_chain._get_num_tokens(doc.
page_content) for doc in docs]
token_count = sum(tokens[:num_docs])
while token_count > self.max_tokens_limit:
num_docs -= 1
token_count -= tokens[num_docs]
return docs[:num_docs]
|
def _reduce_tokens_below_limit(self, docs: List[Document]) ->List[Document]:
num_docs = len(docs)
if self.max_tokens_limit and isinstance(self.combine_docs_chain,
StuffDocumentsChain):
tokens = [self.combine_docs_chain.llm_chain._get_num_tokens(doc.
page_content) for doc in docs]
token_count = sum(tokens[:num_docs])
while token_count > self.max_tokens_limit:
num_docs -= 1
token_count -= tokens[num_docs]
return docs[:num_docs]
| null |
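A standalone sketch of the trimming loop above, with per-document token counts supplied directly instead of computed by an LLM chain:

from typing import List

def reduce_below_limit(tokens: List[int], max_tokens: int) -> int:
    # Drop documents from the end until the running total fits the budget.
    num_docs = len(tokens)
    token_count = sum(tokens[:num_docs])
    while token_count > max_tokens and num_docs > 0:
        num_docs -= 1
        token_count -= tokens[num_docs]
    return num_docs

assert reduce_below_limit([100, 200, 300], 350) == 2  # keeps the first two documents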
_schemas_strict
|
"""Get the dictionary of schemas or err."""
schemas = self._components_strict.schemas
if schemas is None:
raise ValueError('No schemas found in spec. ')
return schemas
|
@property
def _schemas_strict(self) ->Dict[str, Schema]:
"""Get the dictionary of schemas or err."""
schemas = self._components_strict.schemas
if schemas is None:
raise ValueError('No schemas found in spec. ')
return schemas
|
Get the dictionary of schemas or err.
|
is_lc_serializable
|
return False
|
@classmethod
def is_lc_serializable(cls) ->bool:
return False
| null |
validate_dependencies
|
"""
Validate that the rapidfuzz library is installed.
Args:
values (Dict[str, Any]): The input values.
Returns:
Dict[str, Any]: The validated values.
"""
_load_rapidfuzz()
return values
|
@root_validator
def validate_dependencies(cls, values: Dict[str, Any]) ->Dict[str, Any]:
"""
Validate that the rapidfuzz library is installed.
Args:
values (Dict[str, Any]): The input values.
Returns:
Dict[str, Any]: The validated values.
"""
_load_rapidfuzz()
return values
|
Validate that the rapidfuzz library is installed.
Args:
values (Dict[str, Any]): The input values.
Returns:
Dict[str, Any]: The validated values.
|
add_texts
|
"""Run more texts through the embeddings and add to the vectorstore.
Args:
texts: Iterable of strings to add to the vectorstore.
metadatas: Optional list of metadatas associated with the texts.
kwargs: vectorstore specific parameters
Returns:
List of ids from adding the texts into the vectorstore.
"""
embeddings = self.embedding_function.embed_documents(list(texts))
return self.add_embeddings(texts=texts, embeddings=embeddings, metadatas=
metadatas, ids=ids, **kwargs)
|
def add_texts(self, texts: Iterable[str], metadatas: Optional[List[dict]]=
None, ids: Optional[List[str]]=None, **kwargs: Any) ->List[str]:
"""Run more texts through the embeddings and add to the vectorstore.
Args:
texts: Iterable of strings to add to the vectorstore.
metadatas: Optional list of metadatas associated with the texts.
kwargs: vectorstore specific parameters
Returns:
List of ids from adding the texts into the vectorstore.
"""
embeddings = self.embedding_function.embed_documents(list(texts))
return self.add_embeddings(texts=texts, embeddings=embeddings,
metadatas=metadatas, ids=ids, **kwargs)
|
Run more texts through the embeddings and add to the vectorstore.
Args:
texts: Iterable of strings to add to the vectorstore.
metadatas: Optional list of metadatas associated with the texts.
kwargs: vectorstore specific parameters
Returns:
List of ids from adding the texts into the vectorstore.
|
parse
|
if '</tool>' in text:
tool, tool_input = text.split('</tool>')
_tool = tool.split('<tool>')[1]
_tool_input = tool_input.split('<tool_input>')[1]
if '</tool_input>' in _tool_input:
_tool_input = _tool_input.split('</tool_input>')[0]
return AgentAction(tool=_tool, tool_input=_tool_input, log=text)
elif '<final_answer>' in text:
_, answer = text.split('<final_answer>')
if '</final_answer>' in answer:
answer = answer.split('</final_answer>')[0]
return AgentFinish(return_values={'output': answer}, log=text)
else:
raise ValueError
|
def parse(self, text: str) ->Union[AgentAction, AgentFinish]:
if '</tool>' in text:
tool, tool_input = text.split('</tool>')
_tool = tool.split('<tool>')[1]
_tool_input = tool_input.split('<tool_input>')[1]
if '</tool_input>' in _tool_input:
_tool_input = _tool_input.split('</tool_input>')[0]
return AgentAction(tool=_tool, tool_input=_tool_input, log=text)
elif '<final_answer>' in text:
_, answer = text.split('<final_answer>')
if '</final_answer>' in answer:
answer = answer.split('</final_answer>')[0]
return AgentFinish(return_values={'output': answer}, log=text)
else:
raise ValueError
| null |
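A self-contained sketch of the tag extraction above, returning plain tuples in place of AgentAction and AgentFinish:

def parse_xml_action(text: str):
    if '</tool>' in text:
        tool = text.split('</tool>')[0].split('<tool>')[1]
        # A missing closing tag is tolerated, as in the parser above.
        tool_input = text.split('<tool_input>')[1].split('</tool_input>')[0]
        return 'action', tool, tool_input
    elif '<final_answer>' in text:
        answer = text.split('<final_answer>')[1].split('</final_answer>')[0]
        return 'finish', answer
    raise ValueError(f'Could not parse: {text!r}')

assert parse_xml_action('<tool>search</tool><tool_input>weather</tool_input>') == ('action', 'search', 'weather')
assert parse_xml_action('<final_answer>42</final_answer>') == ('finish', '42')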
X
|
return [[1.0, 2.0, 3.0], [0.0, 1.0, 0.0], [1.0, 2.0, 0.0]]
|
@pytest.fixture
def X() ->List[List[float]]:
return [[1.0, 2.0, 3.0], [0.0, 1.0, 0.0], [1.0, 2.0, 0.0]]
| null |
_run
|
return f'{tool_input}'
|
def _run(self, tool_input: str) ->str:
return f'{tool_input}'
| null |
test_nested_html_structure
|
loader = ReadTheDocsLoader(PARENT_DIR / 'nested_html_structure')
documents = loader.load()
assert documents[0].page_content == 'Hello World!'
|
@pytest.mark.requires('bs4')
def test_nested_html_structure() ->None:
loader = ReadTheDocsLoader(PARENT_DIR / 'nested_html_structure')
documents = loader.load()
assert documents[0].page_content == 'Hello World!'
| null |
test_dashvector_search_with_scores
|
dashvector = DashVector.from_texts(texts=texts, embedding=FakeEmbeddings(),
ids=ids)
sleep(0.5)
output = dashvector.similarity_search_with_relevance_scores('foo')
docs, scores = zip(*output)
assert scores[0] < scores[1] < scores[2]
assert list(docs) == [Document(page_content='foo'), Document(page_content=
'bar'), Document(page_content='baz')]
|
def test_dashvector_search_with_scores() ->None:
dashvector = DashVector.from_texts(texts=texts, embedding=
FakeEmbeddings(), ids=ids)
sleep(0.5)
output = dashvector.similarity_search_with_relevance_scores('foo')
docs, scores = zip(*output)
assert scores[0] < scores[1] < scores[2]
assert list(docs) == [Document(page_content='foo'), Document(
page_content='bar'), Document(page_content='baz')]
| null |
test_runnable_context_seq_key_order
|
seq: Runnable = {'bar': Context.getter('foo')} | Context.setter('foo')
with pytest.raises(ValueError):
seq.invoke('foo')
|
def test_runnable_context_seq_key_order() ->None:
seq: Runnable = {'bar': Context.getter('foo')} | Context.setter('foo')
with pytest.raises(ValueError):
seq.invoke('foo')
| null |
resize_base64_image
|
"""
Resize an image encoded as a Base64 string.
:param base64_string: A Base64 encoded string of the image to be resized.
:param size: A tuple representing the new size (width, height) for the image.
:return: A Base64 encoded string of the resized image.
"""
img_data = base64.b64decode(base64_string)
img = Image.open(io.BytesIO(img_data))
resized_img = img.resize(size, Image.LANCZOS)
buffered = io.BytesIO()
resized_img.save(buffered, format=img.format)
return base64.b64encode(buffered.getvalue()).decode('utf-8')
|
def resize_base64_image(base64_string, size=(128, 128)):
"""
Resize an image encoded as a Base64 string.
:param base64_string: A Base64 encoded string of the image to be resized.
:param size: A tuple representing the new size (width, height) for the image.
:return: A Base64 encoded string of the resized image.
"""
img_data = base64.b64decode(base64_string)
img = Image.open(io.BytesIO(img_data))
resized_img = img.resize(size, Image.LANCZOS)
buffered = io.BytesIO()
resized_img.save(buffered, format=img.format)
return base64.b64encode(buffered.getvalue()).decode('utf-8')
|
Resize an image encoded as a Base64 string.
:param base64_string: A Base64 encoded string of the image to be resized.
:param size: A tuple representing the new size (width, height) for the image.
:return: A Base64 encoded string of the resized image.
|
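A usage sketch (requires Pillow, and assumes resize_base64_image above is in scope): encode a generated image, resize it, and verify the decoded result.

import base64
import io

from PIL import Image

img = Image.new('RGB', (640, 480), color='blue')
buf = io.BytesIO()
img.save(buf, format='JPEG')
b64 = base64.b64encode(buf.getvalue()).decode('utf-8')
small = resize_base64_image(b64, size=(128, 128))
assert Image.open(io.BytesIO(base64.b64decode(small))).size == (128, 128)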
_import_google_lens
|
from langchain_community.utilities.google_lens import GoogleLensAPIWrapper
return GoogleLensAPIWrapper
|
def _import_google_lens() ->Any:
from langchain_community.utilities.google_lens import GoogleLensAPIWrapper
return GoogleLensAPIWrapper
| null |
test_sim_search_with_score
|
"""Test end to end construction and similarity search with score."""
in_memory_vec_store = DocArrayInMemorySearch.from_texts(texts=texts,
embedding=FakeEmbeddings(), metric=metric)
output = in_memory_vec_store.similarity_search_with_score('foo', k=1)
out_doc, out_score = output[0]
assert out_doc == Document(page_content='foo')
expected_score = 0.0 if 'dist' in metric else 1.0
assert np.isclose(out_score, expected_score, atol=1e-06)
|
@pytest.mark.parametrize('metric', ['cosine_sim', 'euclidean_dist',
'sqeuclidean_dist'])
def test_sim_search_with_score(metric: str, texts: List[str]) ->None:
"""Test end to end construction and similarity search with score."""
in_memory_vec_store = DocArrayInMemorySearch.from_texts(texts=texts,
embedding=FakeEmbeddings(), metric=metric)
output = in_memory_vec_store.similarity_search_with_score('foo', k=1)
out_doc, out_score = output[0]
assert out_doc == Document(page_content='foo')
expected_score = 0.0 if 'dist' in metric else 1.0
assert np.isclose(out_score, expected_score, atol=1e-06)
|
Test end to end construction and similarity search with score.
|
_get_llm
|
return AzureChatOpenAI(deployment_name=DEPLOYMENT_NAME, openai_api_version=
OPENAI_API_VERSION, azure_endpoint=OPENAI_API_BASE, openai_api_key=
OPENAI_API_KEY, **kwargs)
|
def _get_llm(**kwargs: Any) ->AzureChatOpenAI:
return AzureChatOpenAI(deployment_name=DEPLOYMENT_NAME,
openai_api_version=OPENAI_API_VERSION, azure_endpoint=
OPENAI_API_BASE, openai_api_key=OPENAI_API_KEY, **kwargs)
| null |
test_delete
|
"""Test deleting a new document"""
docsearch = Redis.from_texts(texts, FakeEmbeddings(), redis_url=TEST_REDIS_URL)
ids = docsearch.add_texts(['foo'])
got = docsearch.delete(ids=ids, redis_url=TEST_REDIS_URL)
assert got
assert drop(docsearch.index_name)
|
def test_delete(texts: List[str]) ->None:
"""Test deleting a new document"""
docsearch = Redis.from_texts(texts, FakeEmbeddings(), redis_url=
TEST_REDIS_URL)
ids = docsearch.add_texts(['foo'])
got = docsearch.delete(ids=ids, redis_url=TEST_REDIS_URL)
assert got
assert drop(docsearch.index_name)
|
Test deleting a new document.
|
test_agent_iterator_stopped_early
|
"""
Test react chain iterator when max iterations or
max execution time is exceeded.
"""
agent = _get_agent(max_iterations=1)
agent_iter = agent.iter(inputs='when was langchain made')
outputs = []
for step in agent_iter:
outputs.append(step)
assert isinstance(outputs[-1], dict)
assert outputs[-1]['output'
] == 'Agent stopped due to iteration limit or time limit.'
agent = _get_agent(max_execution_time=1e-05)
agent_iter = agent.iter(inputs='when was langchain made')
outputs = []
for step in agent_iter:
outputs.append(step)
assert isinstance(outputs[-1], dict)
assert outputs[-1]['output'
] == 'Agent stopped due to iteration limit or time limit.'
|
def test_agent_iterator_stopped_early() ->None:
"""
Test react chain iterator when max iterations or
max execution time is exceeded.
"""
agent = _get_agent(max_iterations=1)
agent_iter = agent.iter(inputs='when was langchain made')
outputs = []
for step in agent_iter:
outputs.append(step)
assert isinstance(outputs[-1], dict)
assert outputs[-1]['output'
] == 'Agent stopped due to iteration limit or time limit.'
agent = _get_agent(max_execution_time=1e-05)
agent_iter = agent.iter(inputs='when was langchain made')
outputs = []
for step in agent_iter:
outputs.append(step)
assert isinstance(outputs[-1], dict)
assert outputs[-1]['output'
] == 'Agent stopped due to iteration limit or time limit.'
|
Test react chain iterator when max iterations or
max execution time is exceeded.
|
test_xinference_embedding_query
|
"""Test xinference embeddings for query."""
from xinference.client import RESTfulClient
endpoint, _ = setup
client = RESTfulClient(endpoint)
model_uid = client.launch_model(model_name='vicuna-v1.3',
model_size_in_billions=7, quantization='q4_0')
xinference = XinferenceEmbeddings(server_url=endpoint, model_uid=model_uid)
document = 'foo bar'
output = xinference.embed_query(document)
assert len(output) == 4096
|
def test_xinference_embedding_query(setup: Tuple[str, str]) ->None:
"""Test xinference embeddings for query."""
from xinference.client import RESTfulClient
endpoint, _ = setup
client = RESTfulClient(endpoint)
model_uid = client.launch_model(model_name='vicuna-v1.3',
model_size_in_billions=7, quantization='q4_0')
xinference = XinferenceEmbeddings(server_url=endpoint, model_uid=model_uid)
document = 'foo bar'
output = xinference.embed_query(document)
assert len(output) == 4096
|
Test xinference embeddings for query.
|
_withitem
|
self.dispatch(t.context_expr)
if t.optional_vars:
self.write(' as ')
self.dispatch(t.optional_vars)
|
def _withitem(self, t):
self.dispatch(t.context_expr)
if t.optional_vars:
self.write(' as ')
self.dispatch(t.optional_vars)
| null |
test_table_info
|
"""Test that table info is constructed properly."""
engine = create_engine('sqlite:///:memory:')
metadata_obj.create_all(engine)
db = SQLDatabase(engine)
output = db.table_info
expected_output = """
CREATE TABLE user (
user_id INTEGER NOT NULL,
user_name VARCHAR(16) NOT NULL,
user_bio TEXT,
PRIMARY KEY (user_id)
)
/*
3 rows from user table:
user_id user_name user_bio
*/
CREATE TABLE company (
company_id INTEGER NOT NULL,
company_location VARCHAR NOT NULL,
PRIMARY KEY (company_id)
)
/*
3 rows from company table:
company_id company_location
*/
"""
assert sorted(' '.join(output.split())) == sorted(' '.join(expected_output.
split()))
|
def test_table_info() ->None:
"""Test that table info is constructed properly."""
engine = create_engine('sqlite:///:memory:')
metadata_obj.create_all(engine)
db = SQLDatabase(engine)
output = db.table_info
expected_output = """
CREATE TABLE user (
user_id INTEGER NOT NULL,
user_name VARCHAR(16) NOT NULL,
user_bio TEXT,
PRIMARY KEY (user_id)
)
/*
3 rows from user table:
user_id user_name user_bio
*/
CREATE TABLE company (
company_id INTEGER NOT NULL,
company_location VARCHAR NOT NULL,
PRIMARY KEY (company_id)
)
/*
3 rows from company table:
company_id company_location
*/
"""
assert sorted(' '.join(output.split())) == sorted(' '.join(
expected_output.split()))
|
Test that table info is constructed properly.
|
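For context, a minimal sketch of the metadata_obj the test above assumes; the column definitions are illustrative, chosen only to match the expected CREATE TABLE output:
from sqlalchemy import Column, Integer, MetaData, String, Table, Text

metadata_obj = MetaData()
# user_id INTEGER NOT NULL + PRIMARY KEY follows from primary_key=True.
user = Table('user', metadata_obj,
    Column('user_id', Integer, primary_key=True),
    Column('user_name', String(16), nullable=False),
    Column('user_bio', Text))
company = Table('company', metadata_obj,
    Column('company_id', Integer, primary_key=True),
    Column('company_location', String, nullable=False))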
test_file_search_errs_outside_root_dir
|
"""Test the FileSearch tool when a root dir is specified."""
with TemporaryDirectory() as temp_dir:
tool = FileSearchTool(root_dir=temp_dir)
result = tool.run({'dir_path': '..', 'pattern': '*.txt'})
assert result == INVALID_PATH_TEMPLATE.format(arg_name='dir_path',
value='..')
|
def test_file_search_errs_outside_root_dir() ->None:
"""Test the FileSearch tool when a root dir is specified."""
with TemporaryDirectory() as temp_dir:
tool = FileSearchTool(root_dir=temp_dir)
result = tool.run({'dir_path': '..', 'pattern': '*.txt'})
assert result == INVALID_PATH_TEMPLATE.format(arg_name='dir_path',
value='..')
|
Test the FileSearch tool when a root dir is specified.
|
delete_by_document_id
|
return self.table.delete(document_id)
|
def delete_by_document_id(self, document_id: str) ->None:
return self.table.delete(document_id)
| null |
mdelete
|
"""Delete the given keys."""
self.underlying_store.mdelete(keys)
|
def mdelete(self, keys: Sequence[str]) ->None:
"""Delete the given keys."""
self.underlying_store.mdelete(keys)
|
Delete the given keys.
|
test_mosaicml_embedding_query_instruction
|
"""Test MosaicML embeddings with a different query instruction."""
document = 'foo bar'
embedding = MosaicMLInstructorEmbeddings(query_instruction='Embed this query:')
output = embedding.embed_query(document)
assert len(output) == 768
|
def test_mosaicml_embedding_query_instruction() ->None:
"""Test MosaicML embeddings with a different query instruction."""
document = 'foo bar'
embedding = MosaicMLInstructorEmbeddings(query_instruction=
'Embed this query:')
output = embedding.embed_query(document)
assert len(output) == 768
|
Test MosaicML embeddings with a different query instruction.
|
parse_with_prompt
|
"""Parse the output of an LLM call using a wrapped parser.
Args:
completion: The chain completion to parse.
prompt_value: The prompt to use to parse the completion.
Returns:
The parsed completion.
"""
retries = 0
while retries <= self.max_retries:
try:
        return self.parser.parse(completion)
except OutputParserException as e:
if retries == self.max_retries:
raise e
else:
retries += 1
completion = self.retry_chain.run(prompt=prompt_value.to_string
(), completion=completion)
raise OutputParserException('Failed to parse')
|
def parse_with_prompt(self, completion: str, prompt_value: PromptValue) ->T:
"""Parse the output of an LLM call using a wrapped parser.
Args:
completion: The chain completion to parse.
prompt_value: The prompt to use to parse the completion.
Returns:
The parsed completion.
"""
retries = 0
while retries <= self.max_retries:
try:
            return self.parser.parse(completion)
except OutputParserException as e:
if retries == self.max_retries:
raise e
else:
retries += 1
completion = self.retry_chain.run(prompt=prompt_value.
to_string(), completion=completion)
raise OutputParserException('Failed to parse')
|
Parse the output of an LLM call using a wrapped parser.
Args:
completion: The chain completion to parse.
prompt_value: The prompt to use to parse the completion.
Returns:
The parsed completion.
|
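A minimal sketch of the same retry loop, with plain callables standing in for self.parser and self.retry_chain (both hypothetical stand-ins):
def parse_with_retries(parse, repair, completion, max_retries=1):
    retries = 0
    while retries <= max_retries:
        try:
            return parse(completion)  # stands in for self.parser.parse
        except ValueError:
            if retries == max_retries:
                raise
            retries += 1
            # stands in for self.retry_chain.run(prompt=..., completion=...)
            completion = repair(completion)
    raise ValueError('Failed to parse')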
test_convert_to_message_is_strict
|
"""Verify that _convert_to_message is strict."""
with pytest.raises(ValueError):
_convert_to_message(('meow', 'question'))
|
def test_convert_to_message_is_strict() ->None:
"""Verify that _convert_to_message is strict."""
with pytest.raises(ValueError):
_convert_to_message(('meow', 'question'))
|
Verify that _convert_to_message is strict.
|
_parse_message
|
return {'sender_type': msg_type, 'text': text}
|
def _parse_message(msg_type: str, text: str) ->Dict:
return {'sender_type': msg_type, 'text': text}
| null |
test_zero_distance
|
eval_chain = StringDistanceEvalChain(distance=distance)
string = '三人行则必有我师'
result = eval_chain.evaluate_strings(prediction=string, reference=string)
assert 'score' in result
assert result['score'] == 0
|
@pytest.mark.requires('rapidfuzz')
@pytest.mark.parametrize('distance', list(StringDistance))
def test_zero_distance(distance: StringDistance) ->None:
eval_chain = StringDistanceEvalChain(distance=distance)
string = '三人行则必有我师'
result = eval_chain.evaluate_strings(prediction=string, reference=string)
assert 'score' in result
assert result['score'] == 0
| null |
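The zero score follows from rapidfuzz returning a normalized distance of 0.0 for identical strings; a minimal standalone check (Levenshtein shown, the other metrics behave the same on exact matches):
from rapidfuzz.distance import Levenshtein

string = '三人行则必有我师'
assert Levenshtein.normalized_distance(string, string) == 0.0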
test_konko_streaming_param_validation_test
|
"""Ensure correct token callback during streaming."""
with pytest.raises(ValueError):
ChatKonko(max_tokens=10, streaming=True, temperature=0, n=5)
|
def test_konko_streaming_param_validation_test() ->None:
"""Ensure correct token callback during streaming."""
with pytest.raises(ValueError):
ChatKonko(max_tokens=10, streaming=True, temperature=0, n=5)
|
Ensure a ValueError is raised when streaming is enabled with n > 1.
|
on_tool_start_common
|
self.tool_starts += 1
self.starts += 1
|
def on_tool_start_common(self) ->None:
self.tool_starts += 1
self.starts += 1
| null |
_get_schema_type_for_enum
|
"""Get the schema type when the parameter is an enum."""
param_name = f'{parameter.name}Enum'
return Enum(param_name, {str(v): v for v in schema.enum})
|
@staticmethod
def _get_schema_type_for_enum(parameter: Parameter, schema: Schema) ->Enum:
"""Get the schema type when the parameter is an enum."""
param_name = f'{parameter.name}Enum'
return Enum(param_name, {str(v): v for v in schema.enum})
|
Get the schema type when the parameter is an enum.
|
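A minimal sketch of the functional Enum API used above, with hypothetical stand-ins for the parameter name and schema.enum values:
from enum import Enum

param_name = 'statusEnum'  # hypothetical parameter name
enum_values = ['active', 'inactive']  # hypothetical schema.enum values
# Enum's functional API builds a class at runtime from a name -> value map.
StatusEnum = Enum(param_name, {str(v): v for v in enum_values})
assert StatusEnum['active'].value == 'active'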
_import_fireworks
|
from langchain_community.llms.fireworks import Fireworks
return Fireworks
|
def _import_fireworks() ->Any:
from langchain_community.llms.fireworks import Fireworks
return Fireworks
| null |
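Helpers like this typically back a module-level PEP 562 __getattr__ so the heavy import runs only on first attribute access; a hedged sketch of that pattern:
def __getattr__(name: str):
    if name == 'Fireworks':
        return _import_fireworks()  # defined above
    raise AttributeError(f'module has no attribute {name!r}')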
check_repeated_memory_variable
|
all_variables: Set[str] = set()
for val in value:
overlap = all_variables.intersection(val.memory_variables)
if overlap:
raise ValueError(
            f'The same variables {overlap} are found in multiple memory objects, which is not allowed by CombinedMemory.'
)
all_variables |= set(val.memory_variables)
return value
|
@validator('memories')
def check_repeated_memory_variable(cls, value: List[BaseMemory]) ->List[
BaseMemory]:
all_variables: Set[str] = set()
for val in value:
overlap = all_variables.intersection(val.memory_variables)
if overlap:
raise ValueError(
                f'The same variables {overlap} are found in multiple memory objects, which is not allowed by CombinedMemory.'
)
all_variables |= set(val.memory_variables)
return value
| null |
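A minimal sketch of the overlap check with plain sets standing in for BaseMemory.memory_variables (the sample data is hypothetical; the duplicate 'history' triggers the error):
all_variables = set()
for memory_variables in [{'history'}, {'entities'}, {'history'}]:
    overlap = all_variables & memory_variables
    if overlap:  # raised on the third iteration for this sample
        raise ValueError(f'The same variables {overlap} are found in multiple '
            'memory objects, which is not allowed by CombinedMemory.')
    all_variables |= memory_variables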
add_step
|
self.steps.append((step, step_response))
|
def add_step(self, step: Step, step_response: StepResponse) ->None:
self.steps.append((step, step_response))
| null |
load_questions_and_answers
|
"""Load a list of questions and answers.
Args:
url_override: A URL to override the default URL.
Returns: List[Document]
"""
loader = WebBaseLoader(self.web_path if url_override is None else url_override)
soup = loader.scrape()
output = []
title = soup.find('h1', 'post-title').text
output.append('# ' + title)
output.append(soup.select_one('.post-content .post-text').text.strip())
answersHeader = soup.find('div', 'post-answers-header')
if answersHeader:
output.append('\n## ' + answersHeader.text.strip())
for answer in soup.select('.js-answers-list .post.post-answer'):
if answer.has_attr('itemprop') and 'acceptedAnswer' in answer['itemprop']:
output.append('\n### Accepted Answer')
elif 'post-helpful' in answer['class']:
output.append('\n### Most Helpful Answer')
else:
output.append('\n### Other Answer')
output += [a.text.strip() for a in answer.select(
'.post-content .post-text')]
output.append('\n')
text = '\n'.join(output).strip()
metadata = {'source': self.web_path, 'title': title}
return [Document(page_content=text, metadata=metadata)]
|
def load_questions_and_answers(self, url_override: Optional[str]=None) ->List[
Document]:
"""Load a list of questions and answers.
Args:
url_override: A URL to override the default URL.
Returns: List[Document]
"""
loader = WebBaseLoader(self.web_path if url_override is None else
url_override)
soup = loader.scrape()
output = []
title = soup.find('h1', 'post-title').text
output.append('# ' + title)
output.append(soup.select_one('.post-content .post-text').text.strip())
answersHeader = soup.find('div', 'post-answers-header')
if answersHeader:
output.append('\n## ' + answersHeader.text.strip())
for answer in soup.select('.js-answers-list .post.post-answer'):
if answer.has_attr('itemprop') and 'acceptedAnswer' in answer[
'itemprop']:
output.append('\n### Accepted Answer')
elif 'post-helpful' in answer['class']:
output.append('\n### Most Helpful Answer')
else:
output.append('\n### Other Answer')
output += [a.text.strip() for a in answer.select(
'.post-content .post-text')]
output.append('\n')
text = '\n'.join(output).strip()
metadata = {'source': self.web_path, 'title': title}
return [Document(page_content=text, metadata=metadata)]
|
Load a list of questions and answers.
Args:
url_override: A URL to override the default URL.
Returns: List[Document]
|
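A minimal sketch of the answer-classification branch on a standalone BeautifulSoup fragment (the HTML is hypothetical):
from bs4 import BeautifulSoup

html = '<div class="post post-answer" itemprop="acceptedAnswer"></div>'
answer = BeautifulSoup(html, 'html.parser').div
if answer.has_attr('itemprop') and 'acceptedAnswer' in answer['itemprop']:
    print('### Accepted Answer')  # fires for this sample fragment
elif 'post-helpful' in answer['class']:
    print('### Most Helpful Answer')
else:
    print('### Other Answer')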
Y
|
return [[0.5, 1.0, 1.5], [1.0, 0.0, 0.0], [2.0, 5.0, 2.0], [0.0, 0.0, 0.0]]
|
@pytest.fixture
def Y() ->List[List[float]]:
return [[0.5, 1.0, 1.5], [1.0, 0.0, 0.0], [2.0, 5.0, 2.0], [0.0, 0.0, 0.0]]
| null |
_create_request
|
url = f'{self.url}/query'
json = {'queries': [{'query': query, 'filter': self.filter, 'top_k': self.
top_k}]}
headers = {'Content-Type': 'application/json', 'Authorization':
f'Bearer {self.bearer_token}'}
return url, json, headers
|
def _create_request(self, query: str) ->tuple[str, dict, dict]:
url = f'{self.url}/query'
json = {'queries': [{'query': query, 'filter': self.filter, 'top_k':
self.top_k}]}
headers = {'Content-Type': 'application/json', 'Authorization':
f'Bearer {self.bearer_token}'}
return url, json, headers
| null |
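A hedged sketch of how the returned (url, json, headers) triple would be consumed; the endpoint and token are placeholders:
import requests

url = 'https://example.com/query'  # placeholder endpoint
json_body = {'queries': [{'query': 'foo', 'filter': None, 'top_k': 3}]}
headers = {'Content-Type': 'application/json',
    'Authorization': 'Bearer <token>'}  # placeholder bearer token
response = requests.post(url, json=json_body, headers=headers)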
_run
|
mailbox = self.account.mailbox()
message = mailbox.new_message()
message.body = body
message.subject = subject
message.to.add(to)
if cc is not None:
message.cc.add(cc)
if bcc is not None:
message.bcc.add(bcc)
message.save_draft()
output = 'Draft created: ' + str(message)
return output
|
def _run(self, body: str, to: List[str], subject: str, cc: Optional[List[
str]]=None, bcc: Optional[List[str]]=None, run_manager: Optional[
CallbackManagerForToolRun]=None) ->str:
mailbox = self.account.mailbox()
message = mailbox.new_message()
message.body = body
message.subject = subject
message.to.add(to)
if cc is not None:
message.cc.add(cc)
if bcc is not None:
message.bcc.add(bcc)
message.save_draft()
output = 'Draft created: ' + str(message)
return output
| null |