method_name
stringlengths 1
78
| method_body
stringlengths 3
9.66k
| full_code
stringlengths 31
10.7k
| docstring
stringlengths 4
4.74k
⌀ |
|---|---|---|---|
_construct_doc
|
"""Construct the contents of the reference.rst file for the given package.
Args:
package_namespace: The package top level namespace
members_by_namespace: The members of the package, dict organized by top level
module contains a list of classes and functions
inside of the top level namespace.
Returns:
The contents of the reference.rst file.
"""
full_doc = f"""=======================
``{package_namespace}`` {package_version}
=======================
"""
namespaces = sorted(members_by_namespace)
for module in namespaces:
_members = members_by_namespace[module]
classes = _members['classes_']
functions = _members['functions']
if not (classes or functions):
continue
section = f':mod:`{package_namespace}.{module}`'
underline = '=' * (len(section) + 1)
full_doc += f"""{section}
{underline}
.. automodule:: {package_namespace}.{module}
:no-members:
:no-inherited-members:
"""
if classes:
full_doc += f"""Classes
--------------
.. currentmodule:: {package_namespace}
.. autosummary::
:toctree: {module}
"""
for class_ in sorted(classes, key=lambda c: c['qualified_name']):
if not class_['is_public']:
continue
if class_['kind'] == 'TypedDict':
template = 'typeddict.rst'
elif class_['kind'] == 'enum':
template = 'enum.rst'
elif class_['kind'] == 'Pydantic':
template = 'pydantic.rst'
else:
template = 'class.rst'
full_doc += (
f" :template: {template}\n \n {class_['qualified_name']}\n \n"
)
if functions:
_functions = [f['qualified_name'] for f in functions if f['is_public']]
fstring = '\n '.join(sorted(_functions))
full_doc += f"""Functions
--------------
.. currentmodule:: {package_namespace}
.. autosummary::
:toctree: {module}
:template: function.rst
{fstring}
"""
return full_doc
|
def _construct_doc(package_namespace: str, members_by_namespace: Dict[str,
    ModuleMembers], package_version: str) ->str:
    """Construct the contents of the reference.rst file for the given package.

    Args:
        package_namespace: The package top level namespace
        members_by_namespace: The members of the package, dict organized by top level
            module contains a list of classes and functions
            inside of the top level namespace.
        package_version: Version string rendered in the page title.

    Returns:
        The contents of the reference.rst file.
    """
    # Page title with RST '=' over/underlines. NOTE(review): the underline
    # width is fixed at 23 characters while the title length varies with
    # namespace/version -- Sphinx warns (but does not fail) on a mismatch.
    full_doc = f"""=======================
``{package_namespace}`` {package_version}
=======================
"""
    # Sort module names so the generated file is deterministic across runs.
    namespaces = sorted(members_by_namespace)
    for module in namespaces:
        _members = members_by_namespace[module]
        classes = _members['classes_']
        functions = _members['functions']
        # Skip modules that would render an empty section.
        if not (classes or functions):
            continue
        section = f':mod:`{package_namespace}.{module}`'
        # '=' underline sized to the section heading (+1 for safety).
        underline = '=' * (len(section) + 1)
        full_doc += f"""{section}
{underline}
.. automodule:: {package_namespace}.{module}
    :no-members:
    :no-inherited-members:
"""
        if classes:
            full_doc += f"""Classes
--------------
.. currentmodule:: {package_namespace}
.. autosummary::
    :toctree: {module}
"""
            for class_ in sorted(classes, key=lambda c: c['qualified_name']):
                if not class_['is_public']:
                    continue
                # Choose the autosummary template matching the class kind.
                if class_['kind'] == 'TypedDict':
                    template = 'typeddict.rst'
                elif class_['kind'] == 'enum':
                    template = 'enum.rst'
                elif class_['kind'] == 'Pydantic':
                    template = 'pydantic.rst'
                else:
                    template = 'class.rst'
                full_doc += f"""    :template: {template}
    
    {class_['qualified_name']}
    
"""
        if functions:
            _functions = [f['qualified_name'] for f in functions if f[
                'is_public']]
            # Indent each function entry so it nests under the autosummary
            # directive in the emitted RST.
            fstring = '\n    '.join(sorted(_functions))
            full_doc += f"""Functions
--------------
.. currentmodule:: {package_namespace}
.. autosummary::
    :toctree: {module}
    :template: function.rst
    {fstring}
"""
    return full_doc
|
Construct the contents of the reference.rst file for the given package.
Args:
package_namespace: The package top level namespace
members_by_namespace: The members of the package, dict organized by top level
module contains a list of classes and functions
inside of the top level namespace.
Returns:
The contents of the reference.rst file.
|
test_chat_hunyuan_with_temperature
|
chat = ChatHunyuan(temperature=0.6)
message = HumanMessage(content='Hello')
response = chat([message])
assert isinstance(response, AIMessage)
assert isinstance(response.content, str)
|
def test_chat_hunyuan_with_temperature() ->None:
    """Smoke-test ChatHunyuan configured with a non-default temperature."""
    model = ChatHunyuan(temperature=0.6)
    reply = model([HumanMessage(content='Hello')])
    assert isinstance(reply, AIMessage)
    assert isinstance(reply.content, str)
| null |
__add__
|
if isinstance(other, FunctionMessageChunk):
if self.name != other.name:
raise ValueError(
'Cannot concatenate FunctionMessageChunks with different names.')
return self.__class__(name=self.name, content=merge_content(self.
content, other.content), additional_kwargs=self._merge_kwargs_dict(
self.additional_kwargs, other.additional_kwargs))
return super().__add__(other)
|
def __add__(self, other: Any) ->BaseMessageChunk:
    """Concatenate with another chunk; FunctionMessageChunks merge field-wise."""
    if not isinstance(other, FunctionMessageChunk):
        # Anything else is handled by the generic chunk concatenation.
        return super().__add__(other)
    if self.name != other.name:
        raise ValueError(
            'Cannot concatenate FunctionMessageChunks with different names.')
    merged_content = merge_content(self.content, other.content)
    merged_kwargs = self._merge_kwargs_dict(self.additional_kwargs,
        other.additional_kwargs)
    return self.__class__(name=self.name, content=merged_content,
        additional_kwargs=merged_kwargs)
| null |
_get_embedding
|
return list(np.random.normal(size=self.size))
|
def _get_embedding(self) ->List[float]:
return list(np.random.normal(size=self.size))
| null |
compress_documents
|
"""Transform a list of documents."""
for _transformer in self.transformers:
if isinstance(_transformer, BaseDocumentCompressor):
accepts_callbacks = signature(_transformer.compress_documents
).parameters.get('callbacks') is not None
if accepts_callbacks:
documents = _transformer.compress_documents(documents, query,
callbacks=callbacks)
else:
documents = _transformer.compress_documents(documents, query)
elif isinstance(_transformer, BaseDocumentTransformer):
documents = _transformer.transform_documents(documents)
else:
raise ValueError(f'Got unexpected transformer type: {_transformer}')
return documents
|
def compress_documents(self, documents: Sequence[Document], query: str,
    callbacks: Optional[Callbacks]=None) ->Sequence[Document]:
    """Transform a list of documents."""
    for _transformer in self.transformers:
        if isinstance(_transformer, BaseDocumentCompressor):
            # Forward ``callbacks`` only when the compressor's signature
            # declares such a parameter.
            params = signature(_transformer.compress_documents).parameters
            extra = {'callbacks': callbacks} if params.get('callbacks'
                ) is not None else {}
            documents = _transformer.compress_documents(documents, query,
                **extra)
        elif isinstance(_transformer, BaseDocumentTransformer):
            documents = _transformer.transform_documents(documents)
        else:
            raise ValueError(f'Got unexpected transformer type: {_transformer}')
    return documents
|
Transform a list of documents.
|
_import_bing_search_tool_BingSearchRun
|
from langchain_community.tools.bing_search.tool import BingSearchRun
return BingSearchRun
|
def _import_bing_search_tool_BingSearchRun() ->Any:
    """Deferred-import hook returning the ``BingSearchRun`` tool class."""
    from langchain_community.tools.bing_search.tool import (
        BingSearchRun,
    )
    return BingSearchRun
| null |
test_mget
|
"""Test mget method."""
store = RedisStore(client=redis_client, ttl=None)
keys = ['key1', 'key2']
redis_client.mset({'key1': b'value1', 'key2': b'value2'})
result = store.mget(keys)
assert result == [b'value1', b'value2']
|
def test_mget(redis_client: Redis) ->None:
    """Test mget method."""
    store = RedisStore(client=redis_client, ttl=None)
    # Seed the backing client directly, then read through the store.
    redis_client.mset({'key1': b'value1', 'key2': b'value2'})
    fetched = store.mget(['key1', 'key2'])
    assert fetched == [b'value1', b'value2']
|
Test mget method.
|
run
|
"""Run query through Google Trends with Serpapi"""
serpapi_api_key = cast(SecretStr, self.serp_api_key)
params = {'engine': 'google_trends', 'api_key': serpapi_api_key.
get_secret_value(), 'q': query}
total_results = []
client = self.serp_search_engine(params)
total_results = client.get_dict()['interest_over_time']['timeline_data']
if not total_results:
return 'No good Trend Result was found'
start_date = total_results[0]['date'].split()
end_date = total_results[-1]['date'].split()
values = [results.get('values')[0].get('extracted_value') for results in
total_results]
min_value = min(values)
max_value = max(values)
avg_value = sum(values) / len(values)
percentage_change = (values[-1] - values[0]) / (values[0] if values[0] != 0
else 1) * (100 if values[0] != 0 else 1)
params = {'engine': 'google_trends', 'api_key': serpapi_api_key.
get_secret_value(), 'data_type': 'RELATED_QUERIES', 'q': query}
total_results2 = {}
client = self.serp_search_engine(params)
total_results2 = client.get_dict().get('related_queries', {})
rising = []
top = []
rising = [results.get('query') for results in total_results2.get('rising', [])]
top = [results.get('query') for results in total_results2.get('top', [])]
doc = [
f"""Query: {query}
Date From: {start_date[0]} {start_date[1]}, {start_date[-1]}
Date To: {end_date[0]} {end_date[3]} {end_date[-1]}
Min Value: {min_value}
Max Value: {max_value}
Average Value: {avg_value}
Percent Change: {str(percentage_change) + '%'}
Trend values: {', '.join([str(x) for x in values])}
Rising Related Queries: {', '.join(rising)}
Top Related Queries: {', '.join(top)}"""
]
return '\n\n'.join(doc)
|
def run(self, query: str) ->str:
    """Run query through Google Trends with Serpapi"""
    # serp_api_key is validated upstream as a SecretStr; cast() only
    # narrows the static type and has no runtime effect.
    serpapi_api_key = cast(SecretStr, self.serp_api_key)
    params = {'engine': 'google_trends', 'api_key': serpapi_api_key.
        get_secret_value(), 'q': query}
    total_results = []
    client = self.serp_search_engine(params)
    # NOTE(review): raises KeyError if SerpApi omits 'interest_over_time'
    # (quota/error responses) -- may deserve a .get() guard.
    total_results = client.get_dict()['interest_over_time']['timeline_data']
    if not total_results:
        return 'No good Trend Result was found'
    # Dates are display strings; the indexing below assumes a format like
    # 'Mon D - D, YYYY' after whitespace-split (end_date[3] would
    # IndexError otherwise) -- TODO confirm against SerpApi output.
    start_date = total_results[0]['date'].split()
    end_date = total_results[-1]['date'].split()
    values = [results.get('values')[0].get('extracted_value') for results in
        total_results]
    min_value = min(values)
    max_value = max(values)
    avg_value = sum(values) / len(values)
    # Guards against division by zero; note that when values[0] == 0 the
    # ratio is deliberately NOT scaled by 100 (multiplier is 1).
    percentage_change = (values[-1] - values[0]) / (values[0] if values[0] !=
        0 else 1) * (100 if values[0] != 0 else 1)
    # Second request: related queries for the same search term.
    params = {'engine': 'google_trends', 'api_key': serpapi_api_key.
        get_secret_value(), 'data_type': 'RELATED_QUERIES', 'q': query}
    total_results2 = {}
    client = self.serp_search_engine(params)
    total_results2 = client.get_dict().get('related_queries', {})
    rising = []
    top = []
    rising = [results.get('query') for results in total_results2.get(
        'rising', [])]
    top = [results.get('query') for results in total_results2.get('top', [])]
    # Assemble a single human-readable summary block for the agent.
    doc = [
        f"""Query: {query}
Date From: {start_date[0]} {start_date[1]}, {start_date[-1]}
Date To: {end_date[0]} {end_date[3]} {end_date[-1]}
Min Value: {min_value}
Max Value: {max_value}
Average Value: {avg_value}
Percent Change: {str(percentage_change) + '%'}
Trend values: {', '.join([str(x) for x in values])}
Rising Related Queries: {', '.join(rising)}
Top Related Queries: {', '.join(top)}"""
        ]
    return '\n\n'.join(doc)
|
Run query through Google Trends with Serpapi
|
parse_result
|
"""Parse a list of candidate model Generations into a specific format.
Args:
result: A list of Generations to be parsed. The Generations are assumed
to be different candidate outputs for a single model input.
Returns:
Structured output.
"""
|
@abstractmethod
def parse_result(self, result: List[Generation], *, partial: bool=False) ->T:
    """Parse a list of candidate model Generations into a specific format.

    Args:
        result: A list of Generations to be parsed. The Generations are assumed
            to be different candidate outputs for a single model input.
        partial: Whether the input may be an incomplete (streaming) result;
            implementations that do not support partial parsing may ignore it.

    Returns:
        Structured output.
    """
|
Parse a list of candidate model Generations into a specific format.
Args:
result: A list of Generations to be parsed. The Generations are assumed
to be different candidate outputs for a single model input.
Returns:
Structured output.
|
date_to_range_filter
|
constructor_args = {key: kwargs[key] for key in ['start_date', 'end_date',
'time_delta', 'start_inclusive', 'end_inclusive'] if key in kwargs}
if not constructor_args or len(constructor_args) == 0:
return None
try:
from timescale_vector import client
except ImportError:
raise ImportError(
'Could not import timescale_vector python package. Please install it with `pip install timescale-vector`.'
)
return client.UUIDTimeRange(**constructor_args)
|
def date_to_range_filter(self, **kwargs: Any) ->Any:
    """Build a ``timescale_vector`` UUID time-range filter from keyword args.

    Args:
        **kwargs: May contain any of ``start_date``, ``end_date``,
            ``time_delta``, ``start_inclusive`` and ``end_inclusive``;
            all other keys are ignored.

    Returns:
        A ``timescale_vector.client.UUIDTimeRange`` when at least one
        relevant key is present, otherwise ``None``.

    Raises:
        ImportError: If the ``timescale-vector`` package is not installed.
    """
    range_keys = ('start_date', 'end_date', 'time_delta',
        'start_inclusive', 'end_inclusive')
    constructor_args = {key: kwargs[key] for key in range_keys if key in
        kwargs}
    # An empty dict is falsy; the original `or len(...) == 0` was redundant.
    if not constructor_args:
        return None
    try:
        from timescale_vector import client
    except ImportError as err:
        # Chain the cause so the underlying import failure stays visible.
        raise ImportError(
            'Could not import timescale_vector python package. Please install it with `pip install timescale-vector`.'
            ) from err
    return client.UUIDTimeRange(**constructor_args)
| null |
ZillizRetreiver
|
"""Deprecated ZillizRetreiver.
Please use ZillizRetriever ('i' before 'e') instead.
Args:
*args:
**kwargs:
Returns:
ZillizRetriever
"""
warnings.warn(
"ZillizRetreiver will be deprecated in the future. Please use ZillizRetriever ('i' before 'e') instead."
, DeprecationWarning)
return ZillizRetriever(*args, **kwargs)
|
def ZillizRetreiver(*args: Any, **kwargs: Any) ->ZillizRetriever:
    """Deprecated ZillizRetreiver.

    Please use ZillizRetriever ('i' before 'e') instead.

    Args:
        *args: Positional arguments forwarded to ``ZillizRetriever``.
        **kwargs: Keyword arguments forwarded to ``ZillizRetriever``.

    Returns:
        ZillizRetriever
    """
    # stacklevel=2 makes the warning point at the caller's line rather
    # than at this shim.
    warnings.warn(
        "ZillizRetreiver will be deprecated in the future. Please use ZillizRetriever ('i' before 'e') instead."
        , DeprecationWarning, stacklevel=2)
    return ZillizRetriever(*args, **kwargs)
|
Deprecated ZillizRetreiver.
Please use ZillizRetriever ('i' before 'e') instead.
Args:
*args:
**kwargs:
Returns:
ZillizRetriever
|
wrap
|
async def wrapped_f(*args: Any, **kwargs: Any) ->Callable:
async for _ in async_retrying:
return await func(*args, **kwargs)
raise AssertionError('this is unreachable')
return wrapped_f
|
def wrap(func: Callable) ->Callable:
    """Decorate ``func`` so each call is retried via the enclosing ``async_retrying``.

    Returns:
        An async wrapper that re-invokes ``func`` per the retry policy and
        preserves the wrapped function's name/docstring via functools.wraps.
    """
    from functools import wraps

    @wraps(func)
    async def wrapped_f(*args: Any, **kwargs: Any) ->Callable:
        # The retry iterator re-enters the body on each attempt and raises
        # once the policy gives up, so the loop never falls through.
        async for _ in async_retrying:
            return await func(*args, **kwargs)
        raise AssertionError('this is unreachable')
    return wrapped_f
| null |
test__convert_dict_to_message_human
|
message_dict = {'role': 'user', 'content': 'foo'}
result = _convert_dict_to_message(message_dict)
expected_output = HumanMessage(content='foo')
assert result == expected_output
|
def test__convert_dict_to_message_human() ->None:
    """A dict with role 'user' converts to an equivalent HumanMessage."""
    converted = _convert_dict_to_message({'role': 'user', 'content': 'foo'})
    assert converted == HumanMessage(content='foo')
| null |
embed_documents
|
"""Embed a list of documents using GPT4All.
Args:
texts: The list of texts to embed.
Returns:
List of embeddings, one for each text.
"""
embeddings = [self.client.embed(text) for text in texts]
return [list(map(float, e)) for e in embeddings]
|
def embed_documents(self, texts: List[str]) ->List[List[float]]:
    """Embed a list of documents using GPT4All.

    Args:
        texts: The list of texts to embed.

    Returns:
        List of embeddings, one for each text.
    """
    # Coerce every component to a plain float so callers never see
    # backend-specific numeric types.
    return [[float(component) for component in self.client.embed(text)]
        for text in texts]
|
Embed a list of documents using GPT4All.
Args:
texts: The list of texts to embed.
Returns:
List of embeddings, one for each text.
|
test_load_valid_bool_content
|
file_path = '/workspaces/langchain/test.json'
expected_docs = [Document(page_content='False', metadata={'source':
file_path, 'seq_num': 1}), Document(page_content='True', metadata={
'source': file_path, 'seq_num': 2})]
mocker.patch('builtins.open', mocker.mock_open())
mocker.patch('pathlib.Path.read_text', return_value=
"""
[
{"flag": false}, {"flag": true}
]
"""
)
loader = JSONLoader(file_path=file_path, jq_schema='.[].flag', text_content
=False)
result = loader.load()
assert result == expected_docs
|
def test_load_valid_bool_content(mocker: MockerFixture) ->None:
    """JSON booleans should load as their string repr when text_content=False."""
    file_path = '/workspaces/langchain/test.json'
    # seq_num is 1-based and assigned in document order.
    expected_docs = [Document(page_content='False', metadata={'source':
        file_path, 'seq_num': 1}), Document(page_content='True', metadata={
        'source': file_path, 'seq_num': 2})]
    # Patch open/read_text so no real file is touched.
    mocker.patch('builtins.open', mocker.mock_open())
    mocker.patch('pathlib.Path.read_text', return_value=
        """
[
    {"flag": false}, {"flag": true}
]
"""
        )
    loader = JSONLoader(file_path=file_path, jq_schema='.[].flag',
        text_content=False)
    result = loader.load()
    assert result == expected_docs
| null |
__gt__
|
"""Create a Numeric greater than filter expression.
Args:
other (Union[int, float]): The value to filter on.
Example:
>>> from langchain_community.vectorstores.redis import RedisNum
>>> filter = RedisNum("age") > 18
"""
self._set_value(other, self.SUPPORTED_VAL_TYPES, RedisFilterOperator.GT)
return RedisFilterExpression(str(self))
|
def __gt__(self, other: Union[int, float]) ->'RedisFilterExpression':
    """Create a Numeric greater than filter expression.

    Args:
        other (Union[int, float]): The value to filter on.

    Example:
        >>> from langchain_community.vectorstores.redis import RedisNum
        >>> filter = RedisNum("age") > 18
    """
    self._set_value(other, self.SUPPORTED_VAL_TYPES, RedisFilterOperator.GT)
    rendered = str(self)
    return RedisFilterExpression(rendered)
|
Create a Numeric greater than filter expression.
Args:
other (Union[int, float]): The value to filter on.
Example:
>>> from langchain_community.vectorstores.redis import RedisNum
>>> filter = RedisNum("age") > 18
|
test_all_imports
|
assert set(__all__) == set(EXPECTED_ALL)
|
def test_all_imports() ->None:
    """The module's ``__all__`` must match the expected export list exactly."""
    assert set(EXPECTED_ALL) == set(__all__)
| null |
_import_openweathermap
|
from langchain_community.utilities.openweathermap import OpenWeatherMapAPIWrapper
return OpenWeatherMapAPIWrapper
|
def _import_openweathermap() ->Any:
    """Deferred-import hook returning ``OpenWeatherMapAPIWrapper``."""
    from langchain_community.utilities.openweathermap import (
        OpenWeatherMapAPIWrapper,
    )
    return OpenWeatherMapAPIWrapper
| null |
_format_func
|
self._validate_func(func)
map_dict = {Operator.AND: 'And', Operator.OR: 'Or', Comparator.EQ: 'Equal',
Comparator.NE: 'NotEqual', Comparator.GTE: 'GreaterThanEqual',
Comparator.LTE: 'LessThanEqual', Comparator.LT: 'LessThan', Comparator.
GT: 'GreaterThan'}
return map_dict[func]
|
def _format_func(self, func: Union[Operator, Comparator]) ->str:
    """Translate a supported operator/comparator into its backend filter name."""
    self._validate_func(func)
    return {
        Operator.AND: 'And',
        Operator.OR: 'Or',
        Comparator.EQ: 'Equal',
        Comparator.NE: 'NotEqual',
        Comparator.GTE: 'GreaterThanEqual',
        Comparator.LTE: 'LessThanEqual',
        Comparator.LT: 'LessThan',
        Comparator.GT: 'GreaterThan',
    }[func]
| null |
from_data
|
members = [Member.from_data(member_data) for member_data in data['members']]
return cls(id=data['id'], name=data['name'], members=members)
|
@classmethod
def from_data(cls, data: Dict) ->'Team':
    """Build a Team from a raw dict, recursively constructing its members."""
    roster = [Member.from_data(raw_member) for raw_member in data['members']]
    return cls(id=data['id'], name=data['name'], members=roster)
| null |
__init__
|
super().__init__(inputs=inputs, selected=selected)
self.to_select_from = to_select_from
self.based_on = based_on
|
def __init__(self, inputs: Dict[str, Any], to_select_from: Dict[str, Any],
    based_on: Dict[str, Any], selected: Optional[PickBestSelected]=None):
    """Store the candidate set and context alongside the base event state.

    Args:
        inputs: Raw chain inputs, passed through to the base event.
        to_select_from: Named candidate actions to pick between.
        based_on: Named context features the selection is conditioned on.
        selected: Optional pre-computed selection result.
    """
    super().__init__(inputs=inputs, selected=selected)
    self.to_select_from = to_select_from
    self.based_on = based_on
| null |
embed_query
|
"""Call out to Jina's embedding endpoint.
Args:
text: The text to embed.
Returns:
Embeddings for the text.
"""
return self._embed([text])[0]
|
def embed_query(self, text: str) ->List[float]:
    """Call out to Jina's embedding endpoint.

    Args:
        text: The text to embed.

    Returns:
        Embeddings for the text.
    """
    # _embed works on batches; wrap the single query and unwrap the result.
    batch_result = self._embed([text])
    return batch_result[0]
|
Call out to Jina's embedding endpoint.
Args:
text: The text to embed.
Returns:
Embeddings for the text.
|
_llm_type
|
return 'giga-chat-model'
|
@property
def _llm_type(self) ->str:
    """Return the type identifier string for this model ('giga-chat-model')."""
    return 'giga-chat-model'
| null |
test_sequential_usage_multiple_inputs
|
"""Test sequential on multiple input chains."""
chain_1 = FakeChain(input_variables=['foo', 'test'], output_variables=['bar'])
chain_2 = FakeChain(input_variables=['bar', 'foo'], output_variables=['baz'])
chain = SequentialChain(chains=[chain_1, chain_2], input_variables=['foo',
'test'])
output = chain({'foo': '123', 'test': '456'})
expected_output = {'baz': '123 456foo 123foo', 'foo': '123', 'test': '456'}
assert output == expected_output
|
def test_sequential_usage_multiple_inputs() ->None:
    """Test sequential on multiple input chains."""
    first = FakeChain(input_variables=['foo', 'test'], output_variables=['bar'])
    second = FakeChain(input_variables=['bar', 'foo'], output_variables=['baz'])
    pipeline = SequentialChain(chains=[first, second],
        input_variables=['foo', 'test'])
    output = pipeline({'foo': '123', 'test': '456'})
    assert output == {'baz': '123 456foo 123foo', 'foo': '123', 'test': '456'}
|
Test sequential on multiple input chains.
|
on_tool_end
|
"""Run when tool ends running."""
self.metrics['step'] += 1
self.metrics['tool_ends'] += 1
self.metrics['ends'] += 1
tool_ends = self.metrics['tool_ends']
resp: Dict[str, Any] = {}
resp.update({'action': 'on_tool_end', 'output': output})
resp.update(self.metrics)
self.mlflg.metrics(self.metrics, step=self.metrics['step'])
self.records['on_tool_end_records'].append(resp)
self.records['action_records'].append(resp)
self.mlflg.jsonf(resp, f'tool_end_{tool_ends}')
|
def on_tool_end(self, output: str, **kwargs: Any) ->None:
    """Run when tool ends running."""
    # Bump the shared counters first so the record carries the
    # post-increment values.
    for counter in ('step', 'tool_ends', 'ends'):
        self.metrics[counter] += 1
    resp: Dict[str, Any] = {'action': 'on_tool_end', 'output': output}
    resp.update(self.metrics)
    self.mlflg.metrics(self.metrics, step=self.metrics['step'])
    self.records['on_tool_end_records'].append(resp)
    self.records['action_records'].append(resp)
    self.mlflg.jsonf(resp, f"tool_end_{self.metrics['tool_ends']}")
|
Run when tool ends running.
|
test_load_returns_full_set_of_metadata
|
"""Test that returns several docs"""
api_client = PubMedAPIWrapper(load_max_docs=1, load_all_available_meta=True)
docs = api_client.load_docs('ChatGPT')
assert len(docs) == 3
for doc in docs:
assert doc.metadata
assert set(doc.metadata).issuperset({'Copyright Information',
'Published', 'Title', 'uid'})
|
def test_load_returns_full_set_of_metadata() ->None:
    """Test that returns several docs"""
    api_client = PubMedAPIWrapper(load_max_docs=1, load_all_available_meta=True)
    docs = api_client.load_docs('ChatGPT')
    assert len(docs) == 3
    required_keys = {'Copyright Information', 'Published', 'Title', 'uid'}
    for doc in docs:
        assert doc.metadata
        assert required_keys.issubset(doc.metadata)
|
Test that returns several docs
|
logging_enabled
|
return bool(self.path)
|
def logging_enabled(self) ->bool:
    """Whether logging is active, i.e. a truthy ``self.path`` is configured."""
    return True if self.path else False
| null |
_type
|
return 'api_responder'
|
@property
def _type(self) ->str:
    """Return the type identifier string for this component ('api_responder')."""
    return 'api_responder'
| null |
test_fireworks_model_param
|
"""Tests model parameters for Fireworks"""
llm = Fireworks(model='foo')
assert llm.model == 'foo'
|
@pytest.mark.scheduled
def test_fireworks_model_param() ->None:
    """Tests model parameters for Fireworks"""
    assert Fireworks(model='foo').model == 'foo'
|
Tests model parameters for Fireworks
|
box
|
"""Create a box on ASCII canvas.
Args:
x0 (int): x coordinate of the box corner.
y0 (int): y coordinate of the box corner.
width (int): box width.
height (int): box height.
"""
assert width > 1
assert height > 1
width -= 1
height -= 1
for x in range(x0, x0 + width):
self.point(x, y0, '-')
self.point(x, y0 + height, '-')
for y in range(y0, y0 + height):
self.point(x0, y, '|')
self.point(x0 + width, y, '|')
self.point(x0, y0, '+')
self.point(x0 + width, y0, '+')
self.point(x0, y0 + height, '+')
self.point(x0 + width, y0 + height, '+')
|
def box(self, x0: int, y0: int, width: int, height: int) ->None:
    """Create a box on ASCII canvas.

    Args:
        x0 (int): x coordinate of the box corner.
        y0 (int): y coordinate of the box corner.
        width (int): box width.
        height (int): box height.
    """
    assert width > 1
    assert height > 1
    # Convert to inclusive extents so the far edge lands on x0+width-1.
    width -= 1
    height -= 1
    x1 = x0 + width
    y1 = y0 + height
    # Horizontal edges.
    for x in range(x0, x1):
        self.point(x, y0, '-')
        self.point(x, y1, '-')
    # Vertical edges.
    for y in range(y0, y1):
        self.point(x0, y, '|')
        self.point(x1, y, '|')
    # Corners drawn last so they overwrite edge characters.
    for corner_x, corner_y in ((x0, y0), (x1, y0), (x0, y1), (x1, y1)):
        self.point(corner_x, corner_y, '+')
|
Create a box on ASCII canvas.
Args:
x0 (int): x coordinate of the box corner.
y0 (int): y coordinate of the box corner.
width (int): box width.
height (int): box height.
|
similarity_search_with_score_by_vector
|
"""Return docs most similar to query.
Args:
embedding: Embedding vector to look up documents similar to.
k: Number of Documents to return. Defaults to 4.
filter (Optional[Dict[str, Any]]): Filter by metadata. Defaults to None.
fetch_k: (Optional[int]) Number of Documents to fetch before filtering.
Defaults to 20.
**kwargs: kwargs to be passed to similarity search. Can include:
score_threshold: Optional, a floating point value between 0 to 1 to
filter the resulting set of retrieved docs
Returns:
List of documents most similar to the query text and L2 distance
in float for each. Lower score represents more similarity.
"""
vector = np.array([embedding], dtype=np.float32)
if self._normalize_L2:
vector = normalize(vector)
indices, scores = self.index.search_batched(vector, k if filter is None else
fetch_k)
docs = []
for j, i in enumerate(indices[0]):
if i == -1:
continue
_id = self.index_to_docstore_id[i]
doc = self.docstore.search(_id)
if not isinstance(doc, Document):
raise ValueError(f'Could not find document for id {_id}, got {doc}')
if filter is not None:
filter = {key: ([value] if not isinstance(value, list) else value) for
key, value in filter.items()}
if all(doc.metadata.get(key) in value for key, value in filter.items()
):
docs.append((doc, scores[0][j]))
else:
docs.append((doc, scores[0][j]))
score_threshold = kwargs.get('score_threshold')
if score_threshold is not None:
cmp = operator.ge if self.distance_strategy in (DistanceStrategy.
MAX_INNER_PRODUCT, DistanceStrategy.JACCARD) else operator.le
docs = [(doc, similarity) for doc, similarity in docs if cmp(similarity,
score_threshold)]
return docs[:k]
|
def similarity_search_with_score_by_vector(self, embedding: List[float], k:
    int=4, filter: Optional[Dict[str, Any]]=None, fetch_k: int=20, **kwargs:
    Any) ->List[Tuple[Document, float]]:
    """Return docs most similar to query.

    Args:
        embedding: Embedding vector to look up documents similar to.
        k: Number of Documents to return. Defaults to 4.
        filter (Optional[Dict[str, Any]]): Filter by metadata. Defaults to None.
        fetch_k: (Optional[int]) Number of Documents to fetch before filtering.
            Defaults to 20.
        **kwargs: kwargs to be passed to similarity search. Can include:
            score_threshold: Optional, a floating point value between 0 to 1 to
                filter the resulting set of retrieved docs
    Returns:
        List of documents most similar to the query text and L2 distance
        in float for each. Lower score represents more similarity.
    """
    vector = np.array([embedding], dtype=np.float32)
    if self._normalize_L2:
        vector = normalize(vector)
    # Over-fetch (fetch_k) when a metadata filter is set so that
    # post-filtering can still return up to k documents.
    indices, scores = self.index.search_batched(vector, k if filter is None
        else fetch_k)
    docs = []
    for j, i in enumerate(indices[0]):
        if i == -1:
            # This happens when not enough docs are returned.
            continue
        _id = self.index_to_docstore_id[i]
        doc = self.docstore.search(_id)
        if not isinstance(doc, Document):
            raise ValueError(f'Could not find document for id {_id}, got {doc}'
                )
        if filter is not None:
            # Normalize scalar filter values to single-element lists so the
            # membership test below is uniform.
            filter = {key: ([value] if not isinstance(value, list) else
                value) for key, value in filter.items()}
            # Keep the doc only if every filter key matches its metadata.
            if all(doc.metadata.get(key) in value for key, value in filter.
                items()):
                docs.append((doc, scores[0][j]))
        else:
            docs.append((doc, scores[0][j]))
    score_threshold = kwargs.get('score_threshold')
    if score_threshold is not None:
        # Similarity-style metrics (inner product / Jaccard) keep scores >=
        # threshold; distance-style metrics keep scores <= threshold.
        cmp = operator.ge if self.distance_strategy in (DistanceStrategy.
            MAX_INNER_PRODUCT, DistanceStrategy.JACCARD) else operator.le
        docs = [(doc, similarity) for doc, similarity in docs if cmp(
            similarity, score_threshold)]
    # Truncate back to k in case over-fetching returned more survivors.
    return docs[:k]
|
Return docs most similar to query.
Args:
embedding: Embedding vector to look up documents similar to.
k: Number of Documents to return. Defaults to 4.
filter (Optional[Dict[str, Any]]): Filter by metadata. Defaults to None.
fetch_k: (Optional[int]) Number of Documents to fetch before filtering.
Defaults to 20.
**kwargs: kwargs to be passed to similarity search. Can include:
score_threshold: Optional, a floating point value between 0 to 1 to
filter the resulting set of retrieved docs
Returns:
List of documents most similar to the query text and L2 distance
in float for each. Lower score represents more similarity.
|
from_texts
|
"""
Args:
skip_strict_exist_check: Deprecated. This is not used basically.
"""
vald = cls(embedding=embedding, host=host, port=port, grpc_options=
grpc_options, grpc_use_secure=grpc_use_secure, grpc_credentials=
grpc_credentials, **kwargs)
vald.add_texts(texts=texts, metadatas=metadatas, grpc_metadata=
grpc_metadata, skip_strict_exist_check=skip_strict_exist_check)
return vald
|
@classmethod
def from_texts(cls: Type[Vald], texts: List[str], embedding: Embeddings,
    metadatas: Optional[List[dict]]=None, host: str='localhost', port: int=
    8080, grpc_options: Tuple=(('grpc.keepalive_time_ms', 1000 * 10), (
    'grpc.keepalive_timeout_ms', 1000 * 10)), grpc_use_secure: bool=False,
    grpc_credentials: Optional[Any]=None, grpc_metadata: Optional[Any]=None,
    skip_strict_exist_check: bool=False, **kwargs: Any) ->Vald:
    """Create a Vald store and index the given texts in one step.

    Args:
        skip_strict_exist_check: Deprecated. This is not used basically.
    """
    store = cls(embedding=embedding, host=host, port=port,
        grpc_options=grpc_options, grpc_use_secure=grpc_use_secure,
        grpc_credentials=grpc_credentials, **kwargs)
    store.add_texts(texts=texts, metadatas=metadatas,
        grpc_metadata=grpc_metadata,
        skip_strict_exist_check=skip_strict_exist_check)
    return store
|
Args:
skip_strict_exist_check: Deprecated. This is not used basically.
|
_run
|
"""Use the tool."""
return str(self.api_wrapper.results(query, self.num_results))
|
def _run(self, query: str, run_manager: Optional[CallbackManagerForToolRun]
=None) ->str:
"""Use the tool."""
return str(self.api_wrapper.results(query, self.num_results))
|
Use the tool.
|
test_draw
|
"""
Test CPAL chain can draw its resulting DAG.
"""
import os
narrative_input = (
'Jan has three times the number of pets as Marcia.Marcia has two more pets than Cindy.If Marcia has ten pets, how many pets does Jan have?'
)
llm = OpenAI(temperature=0, max_tokens=512)
cpal_chain = CPALChain.from_univariate_prompt(llm=llm, verbose=True)
cpal_chain.run(narrative_input)
path = 'graph.svg'
cpal_chain.draw(path=path)
self.assertTrue(os.path.exists(path))
|
@pytest.mark.skip(reason='requires manual install of debian and py packages')
def test_draw(self) ->None:
    """
    Test CPAL chain can draw its resulting DAG.
    """
    import os
    story = (
        'Jan has three times the number of pets as Marcia.Marcia has two more pets than Cindy.If Marcia has ten pets, how many pets does Jan have?'
        )
    llm = OpenAI(temperature=0, max_tokens=512)
    cpal_chain = CPALChain.from_univariate_prompt(llm=llm, verbose=True)
    cpal_chain.run(story)
    svg_path = 'graph.svg'
    cpal_chain.draw(path=svg_path)
    self.assertTrue(os.path.exists(svg_path))
|
Test CPAL chain can draw its resulting DAG.
|
_log_message_for_verbose
|
if self.run_manager:
self.run_manager.on_text(message)
|
def _log_message_for_verbose(self, message: str) ->None:
if self.run_manager:
self.run_manager.on_text(message)
| null |
_identifying_params
|
return {**{'endpoint': self.endpoint, 'model': self.model}, **super().
_identifying_params}
|
@property
def _identifying_params(self) ->Dict[str, Any]:
    """Identifying parameters; inherited params override on key clashes."""
    params: Dict[str, Any] = {'endpoint': self.endpoint, 'model': self.model}
    params.update(super()._identifying_params)
    return params
| null |
get_num_tokens
|
"""Get the number of tokens present in the text.
Useful for checking if an input will fit in a model's context window.
Args:
text: The string input to tokenize.
Returns:
The integer number of tokens in the text.
"""
return len(self.get_token_ids(text))
|
def get_num_tokens(self, text: str) ->int:
    """Get the number of tokens present in the text.
    Useful for checking if an input will fit in a model's context window.
    Args:
        text: The string input to tokenize.
    Returns:
        The integer number of tokens in the text.
    """
    token_ids = self.get_token_ids(text)
    return len(token_ids)
|
Get the number of tokens present in the text.
Useful for checking if an input will fit in a model's context window.
Args:
text: The string input to tokenize.
Returns:
The integer number of tokens in the text.
|
__enter__
|
pass
|
def __enter__(self) ->None:
    """Enter the context manager; no setup is required."""
    pass
| null |
exact_match_string_evaluator
|
"""Create an ExactMatchStringEvaluator with default configuration."""
return ExactMatchStringEvaluator()
|
@pytest.fixture
def exact_match_string_evaluator() ->ExactMatchStringEvaluator:
    """Create an ExactMatchStringEvaluator with default configuration."""
    # Fresh evaluator per test; no state is shared across tests.
    return ExactMatchStringEvaluator()
|
Create an ExactMatchStringEvaluator with default configuration.
|
test_similarity_search_empty_result
|
index = mock_index(index_details)
index.similarity_search.return_value = {'manifest': {'column_count': 3,
'columns': [{'name': DEFAULT_PRIMARY_KEY}, {'name': DEFAULT_TEXT_COLUMN
}, {'name': 'score'}]}, 'result': {'row_count': 0, 'data_array': []},
'next_page_token': ''}
vectorsearch = default_databricks_vector_search(index)
search_result = vectorsearch.similarity_search('foo')
assert len(search_result) == 0
|
@pytest.mark.requires('databricks', 'databricks.vector_search')
@pytest.mark.parametrize('index_details', ALL_INDEXES)
def test_similarity_search_empty_result(index_details: dict) ->None:
    """An index returning zero rows should yield an empty document list."""
    index = mock_index(index_details)
    empty_response = {
        'manifest': {
            'column_count': 3,
            'columns': [
                {'name': DEFAULT_PRIMARY_KEY},
                {'name': DEFAULT_TEXT_COLUMN},
                {'name': 'score'},
            ],
        },
        'result': {'row_count': 0, 'data_array': []},
        'next_page_token': '',
    }
    index.similarity_search.return_value = empty_response
    store = default_databricks_vector_search(index)
    results = store.similarity_search('foo')
    assert len(results) == 0
| null |
_import_llm_rails
|
from langchain_community.vectorstores.llm_rails import LLMRails
return LLMRails
|
def _import_llm_rails() ->Any:
    """Lazily import and return the LLMRails vector store class."""
    from langchain_community.vectorstores.llm_rails import LLMRails
    return LLMRails
| null |
_get_mock_authenticated_user
|
return {'shared_folder_ids': self.MOCK_FOLDER_IDS, 'id': 'Test'}
|
def _get_mock_authenticated_user(self) ->Dict:
return {'shared_folder_ids': self.MOCK_FOLDER_IDS, 'id': 'Test'}
| null |
_signature
|
input_str = secret_key.get_secret_value() + json.dumps(payload) + str(timestamp
)
md5 = hashlib.md5()
md5.update(input_str.encode('utf-8'))
return md5.hexdigest()
|
def _signature(secret_key: SecretStr, payload: Dict[str, Any], timestamp: int
) ->str:
input_str = secret_key.get_secret_value() + json.dumps(payload) + str(
timestamp)
md5 = hashlib.md5()
md5.update(input_str.encode('utf-8'))
return md5.hexdigest()
| null |
get_num_tokens_from_messages
|
"""Calculate num tokens with tiktoken package.
Official documentation: https://github.com/openai/openai-cookbook/blob/
main/examples/How_to_format_inputs_to_ChatGPT_models.ipynb"""
if sys.version_info[1] <= 7:
return super().get_num_tokens_from_messages(messages)
model, encoding = self._get_encoding_model()
tokens_per_message = 3
tokens_per_name = 1
num_tokens = 0
messages_dict = [convert_message_to_dict(m) for m in messages]
for message in messages_dict:
num_tokens += tokens_per_message
for key, value in message.items():
num_tokens += len(encoding.encode(str(value)))
if key == 'name':
num_tokens += tokens_per_name
num_tokens += 3
return num_tokens
|
def get_num_tokens_from_messages(self, messages: list[BaseMessage]) ->int:
    """Calculate num tokens with tiktoken package.
    Official documentation: https://github.com/openai/openai-cookbook/blob/
    main/examples/How_to_format_inputs_to_ChatGPT_models.ipynb"""
    # BUG FIX: the old check `sys.version_info[1] <= 7` inspected only the
    # minor version number, which misfires on any major version other than 3.
    if sys.version_info < (3, 8):
        return super().get_num_tokens_from_messages(messages)
    model, encoding = self._get_encoding_model()
    # Per-message and per-name overheads per the OpenAI cookbook counting.
    tokens_per_message = 3
    tokens_per_name = 1
    num_tokens = 0
    messages_dict = [convert_message_to_dict(m) for m in messages]
    for message in messages_dict:
        num_tokens += tokens_per_message
        for key, value in message.items():
            num_tokens += len(encoding.encode(str(value)))
            if key == 'name':
                num_tokens += tokens_per_name
    # Every reply is primed with <|start|>assistant<|message|> (3 tokens).
    num_tokens += 3
    return num_tokens
|
Calculate num tokens with tiktoken package.
Official documentation: https://github.com/openai/openai-cookbook/blob/
main/examples/How_to_format_inputs_to_ChatGPT_models.ipynb
|
__init__
|
"""Take a raw result from Searx and make it into a dict like object."""
json_data = json.loads(data)
super().__init__(json_data)
self.__dict__ = self
|
def __init__(self, data: str):
    """Take a raw result from Searx and make it into a dict like object."""
    json_data = json.loads(data)
    super().__init__(json_data)
    # Alias the attribute dict to the mapping itself so every JSON key is
    # also readable as an attribute (e.g. ``result.answers``).
    self.__dict__ = self
|
Take a raw result from Searx and make it into a dict like object.
|
get_verbose
|
"""Get the value of the `verbose` global setting."""
try:
import langchain
with warnings.catch_warnings():
warnings.filterwarnings('ignore', message=
'.*Importing verbose from langchain root module is no longer supported'
)
old_verbose = langchain.verbose
except ImportError:
old_verbose = False
global _verbose
return _verbose or old_verbose
|
def get_verbose() ->bool:
    """Get the value of the `verbose` global setting."""
    try:
        # Read the legacy `langchain.verbose` flag when the package is
        # importable, suppressing the deprecation warning that emits.
        import langchain
        with warnings.catch_warnings():
            warnings.filterwarnings('ignore', message=
                '.*Importing verbose from langchain root module is no longer supported'
                )
            old_verbose = langchain.verbose
    except ImportError:
        old_verbose = False
    global _verbose
    # Verbosity is on if either the new global or the legacy flag is set.
    return _verbose or old_verbose
|
Get the value of the `verbose` global setting.
|
get_input_schema
|
return create_model(self.get_name('Input'), __root__=(List[self.bound.
get_input_schema(config)], None), __config__=_SchemaConfig)
|
def get_input_schema(self, config: Optional[RunnableConfig]=None) ->Type[
    BaseModel]:
    """Build the input schema: a list of the bound runnable's input type."""
    # __root__ makes the generated model a bare list rather than an object.
    return create_model(self.get_name('Input'), __root__=(List[self.bound.
        get_input_schema(config)], None), __config__=_SchemaConfig)
| null |
test_anthropic_incorrect_field
|
with pytest.warns(match='not default parameter'):
llm = ChatAnthropic(foo='bar')
assert llm.model_kwargs == {'foo': 'bar'}
|
@pytest.mark.requires('anthropic')
def test_anthropic_incorrect_field() ->None:
    """Unknown constructor fields should warn and land in ``model_kwargs``."""
    with pytest.warns(match='not default parameter'):
        chat_model = ChatAnthropic(foo='bar')
    assert chat_model.model_kwargs == {'foo': 'bar'}
| null |
is_lc_serializable
|
return False
|
@classmethod
def is_lc_serializable(cls) ->bool:
    """Return False: this class does not support LangChain serialization."""
    return False
| null |
_import_sql_database_tool_QuerySQLDataBaseTool
|
from langchain_community.tools.sql_database.tool import QuerySQLDataBaseTool
return QuerySQLDataBaseTool
|
def _import_sql_database_tool_QuerySQLDataBaseTool() ->Any:
    """Lazily import and return the QuerySQLDataBaseTool class."""
    from langchain_community.tools.sql_database.tool import QuerySQLDataBaseTool
    return QuerySQLDataBaseTool
| null |
_run
|
"""Use the tool."""
return self.api_wrapper.run(query)
|
def _run(self, query: str, run_manager: Optional[CallbackManagerForToolRun]
=None) ->str:
"""Use the tool."""
return self.api_wrapper.run(query)
|
Use the tool.
|
test_cosine_similarity_top_k_and_score_threshold
|
expected_idxs = [(0, 0), (2, 2), (1, 2), (0, 2)]
expected_scores = [1.0, 0.93419873, 0.87038828, 0.83743579]
actual_idxs, actual_scores = cosine_similarity_top_k(X, Y, score_threshold=0.8)
assert actual_idxs == expected_idxs
assert np.allclose(expected_scores, actual_scores)
|
def test_cosine_similarity_top_k_and_score_threshold(X: List[List[float]],
    Y: List[List[float]]) ->None:
    """Only pairs scoring above the threshold are returned, best first."""
    expected = [((0, 0), 1.0), ((2, 2), 0.93419873), ((1, 2), 0.87038828),
        ((0, 2), 0.83743579)]
    actual_idxs, actual_scores = cosine_similarity_top_k(X, Y,
        score_threshold=0.8)
    assert actual_idxs == [idx for idx, _ in expected]
    assert np.allclose([score for _, score in expected], actual_scores)
| null |
get_models
|
"""List available models"""
backend = AviaryBackend.from_env()
request_url = backend.backend_url + '-/routes'
response = requests.get(request_url, headers=backend.header, timeout=TIMEOUT)
try:
result = response.json()
except requests.JSONDecodeError as e:
raise RuntimeError(
f'Error decoding JSON from {request_url}. Text response: {response.text}'
) from e
result = sorted([k.lstrip('/').replace('--', '/') for k in result.keys() if
'--' in k])
return result
|
def get_models() ->List[str]:
    """List available models"""
    backend = AviaryBackend.from_env()
    # The '-/routes' endpoint enumerates every deployed model route.
    request_url = backend.backend_url + '-/routes'
    response = requests.get(request_url, headers=backend.header, timeout=
        TIMEOUT)
    try:
        result = response.json()
    except requests.JSONDecodeError as e:
        raise RuntimeError(
            f'Error decoding JSON from {request_url}. Text response: {response.text}'
            ) from e
    # Route keys look like '/org--model'; convert back to 'org/model'.
    result = sorted([k.lstrip('/').replace('--', '/') for k in result.keys(
        ) if '--' in k])
    return result
|
List available models
|
test_from_document
|
"""Test from document class method."""
document = Document(page_content='Lorem ipsum dolor sit amet', metadata={
'key': 'value'})
hashed_document = _HashedDocument.from_document(document)
assert hashed_document.hash_ == 'fd1dc827-051b-537d-a1fe-1fa043e8b276'
assert hashed_document.uid == hashed_document.hash_
|
def test_from_document() ->None:
    """Test from document class method."""
    source = Document(page_content='Lorem ipsum dolor sit amet', metadata={
        'key': 'value'})
    hashed = _HashedDocument.from_document(source)
    # The UID defaults to the deterministic content/metadata hash.
    assert hashed.hash_ == 'fd1dc827-051b-537d-a1fe-1fa043e8b276'
    assert hashed.uid == hashed.hash_
|
Test from document class method.
|
test_qdrant_from_texts_stores_duplicated_texts
|
"""Test end to end Qdrant.from_texts stores duplicated texts separately."""
from qdrant_client import QdrantClient
collection_name = uuid.uuid4().hex
with tempfile.TemporaryDirectory() as tmpdir:
vec_store = Qdrant.from_texts(['abc', 'abc'], ConsistentFakeEmbeddings(
), collection_name=collection_name, path=str(tmpdir))
del vec_store
client = QdrantClient(path=str(tmpdir))
assert 2 == client.count(collection_name).count
|
def test_qdrant_from_texts_stores_duplicated_texts() ->None:
    """Test end to end Qdrant.from_texts stores duplicated texts separately."""
    from qdrant_client import QdrantClient
    collection = uuid.uuid4().hex
    with tempfile.TemporaryDirectory() as tmpdir:
        store = Qdrant.from_texts(['abc', 'abc'],
            ConsistentFakeEmbeddings(), collection_name=collection,
            path=str(tmpdir))
        # Drop the store so the on-disk collection can be reopened.
        del store
        client = QdrantClient(path=str(tmpdir))
        assert client.count(collection).count == 2
|
Test end to end Qdrant.from_texts stores duplicated texts separately.
|
test_read_schema_dict_input
|
"""Test read_schema with dict input."""
index_schema = {'text': [{'name': 'content'}], 'tag': [{'name': 'tag'}],
'vector': [{'name': 'content_vector', 'dims': 100, 'algorithm': 'FLAT'}]}
output = read_schema(index_schema=index_schema)
assert output == index_schema
|
def test_read_schema_dict_input() ->None:
    """Test read_schema with dict input."""
    schema = {
        'text': [{'name': 'content'}],
        'tag': [{'name': 'tag'}],
        'vector': [{'name': 'content_vector', 'dims': 100, 'algorithm':
            'FLAT'}],
    }
    # A dict schema should be passed through unchanged.
    assert read_schema(index_schema=schema) == schema
|
Test read_schema with dict input.
|
test_load
|
mocker.patch('assemblyai.Transcriber.transcribe', return_value=mocker.
MagicMock(text='Test transcription text', json_response={'id': '1'},
error=None))
loader = AssemblyAIAudioTranscriptLoader(file_path='./testfile.mp3',
api_key='api_key')
docs = loader.load()
assert len(docs) == 1
assert docs[0].page_content == 'Test transcription text'
assert docs[0].metadata == {'id': '1'}
|
@pytest.mark.requires('assemblyai')
def test_load(mocker: MockerFixture) ->None:
    """Loader should wrap a transcript into a single Document."""
    # Stub out the AssemblyAI SDK so no network call is made.
    mocker.patch('assemblyai.Transcriber.transcribe', return_value=mocker.
        MagicMock(text='Test transcription text', json_response={'id': '1'},
        error=None))
    loader = AssemblyAIAudioTranscriptLoader(file_path='./testfile.mp3',
        api_key='api_key')
    docs = loader.load()
    assert len(docs) == 1
    assert docs[0].page_content == 'Test transcription text'
    assert docs[0].metadata == {'id': '1'}
| null |
similarity_search_by_vector
|
"""Return docs most similar to embedding vector.
Args:
embedding: Embedding to look up documents similar to.
k: Number of Documents to return. Defaults to 4.
Returns:
List of Documents most similar to the query vector.
"""
query_doc = self.doc_cls(embedding=embedding)
docs = self.doc_index.find(query_doc, search_field='embedding', limit=k
).documents
result = [Document(page_content=doc.text, metadata=doc.metadata) for doc in
docs]
return result
|
def similarity_search_by_vector(self, embedding: List[float], k: int=4, **
    kwargs: Any) ->List[Document]:
    """Return docs most similar to embedding vector.
    Args:
        embedding: Embedding to look up documents similar to.
        k: Number of Documents to return. Defaults to 4.
    Returns:
        List of Documents most similar to the query vector.
    """
    query = self.doc_cls(embedding=embedding)
    matches = self.doc_index.find(query, search_field='embedding', limit=k
        ).documents
    return [Document(page_content=match.text, metadata=match.metadata) for
        match in matches]
|
Return docs most similar to embedding vector.
Args:
embedding: Embedding to look up documents similar to.
k: Number of Documents to return. Defaults to 4.
Returns:
List of Documents most similar to the query vector.
|
_set_initial_conditions
|
for entity_setting in self.intervention.entity_settings:
for entity in self.causal_operations.entities:
if entity.name == entity_setting.name:
entity.value = entity_setting.value
|
def _set_initial_conditions(self) ->None:
for entity_setting in self.intervention.entity_settings:
for entity in self.causal_operations.entities:
if entity.name == entity_setting.name:
entity.value = entity_setting.value
| null |
llm
|
return _get_llm(max_tokens=10)
|
@pytest.mark.scheduled
@pytest.fixture
def llm() ->AzureChatOpenAI:
    """Provide an AzureChatOpenAI model capped at 10 output tokens."""
    return _get_llm(max_tokens=10)
| null |
validate_environment
|
"""Validate that api key and python package exists in environment."""
google_api_key = get_from_dict_or_env(values, 'google_api_key',
'GOOGLE_API_KEY')
values['google_api_key'] = google_api_key
google_cse_id = get_from_dict_or_env(values, 'google_cse_id', 'GOOGLE_CSE_ID')
values['google_cse_id'] = google_cse_id
try:
from googleapiclient.discovery import build
except ImportError:
raise ImportError(
'google-api-python-client is not installed. Please install it with `pip install google-api-python-client>=2.100.0`'
)
service = build('customsearch', 'v1', developerKey=google_api_key)
values['search_engine'] = service
return values
|
@root_validator()
def validate_environment(cls, values: Dict) ->Dict:
    """Validate that api key and python package exists in environment."""
    # Resolve credentials from explicit values or environment variables.
    google_api_key = get_from_dict_or_env(values, 'google_api_key',
        'GOOGLE_API_KEY')
    values['google_api_key'] = google_api_key
    google_cse_id = get_from_dict_or_env(values, 'google_cse_id',
        'GOOGLE_CSE_ID')
    values['google_cse_id'] = google_cse_id
    try:
        from googleapiclient.discovery import build
    except ImportError:
        raise ImportError(
            'google-api-python-client is not installed. Please install it with `pip install google-api-python-client>=2.100.0`'
            )
    # Cache a ready-to-use Custom Search service client on the model.
    service = build('customsearch', 'v1', developerKey=google_api_key)
    values['search_engine'] = service
    return values
|
Validate that api key and python package exists in environment.
|
_generate
|
res = self._chat(messages, **kwargs)
if res.status_code != 200:
raise ValueError(f'Error code: {res.status_code}, reason: {res.reason}')
response = res.json()
return self._create_chat_result(response)
|
def _generate(self, messages: List[BaseMessage], stop: Optional[List[str]]=
None, run_manager: Optional[CallbackManagerForLLMRun]=None, **kwargs: Any
) ->ChatResult:
res = self._chat(messages, **kwargs)
if res.status_code != 200:
raise ValueError(f'Error code: {res.status_code}, reason: {res.reason}'
)
response = res.json()
return self._create_chat_result(response)
| null |
load
|
"""Get important HN webpage information.
HN webpage components are:
- title
- content
- source url,
- time of post
- author of the post
- number of comments
- rank of the post
"""
soup_info = self.scrape()
if 'item' in self.web_path:
return self.load_comments(soup_info)
else:
return self.load_results(soup_info)
|
def load(self) ->List[Document]:
    """Get important HN webpage information.
    HN webpage components are:
    - title
    - content
    - source url,
    - time of post
    - author of the post
    - number of comments
    - rank of the post
    """
    soup_info = self.scrape()
    # Item pages hold a comment thread; everything else is a listing page.
    if 'item' in self.web_path:
        return self.load_comments(soup_info)
    return self.load_results(soup_info)
|
Get important HN webpage information.
HN webpage components are:
- title
- content
- source url,
- time of post
- author of the post
- number of comments
- rank of the post
|
test_missing_normalize_score_fn
|
"""Test doesn't perform similarity search without a valid distance strategy."""
texts = ['foo', 'bar', 'baz']
faiss_instance = FAISS.from_texts(texts, FakeEmbeddings(),
distance_strategy='fake')
with pytest.raises(ValueError):
faiss_instance.similarity_search_with_relevance_scores('foo', k=2)
|
@pytest.mark.requires('faiss')
def test_missing_normalize_score_fn() ->None:
    """Test doesn't perform similarity search without a valid distance strategy."""
    store = FAISS.from_texts(['foo', 'bar', 'baz'], FakeEmbeddings(),
        distance_strategy='fake')
    with pytest.raises(ValueError):
        store.similarity_search_with_relevance_scores('foo', k=2)
|
Test doesn't perform similarity search without a valid distance strategy.
|
_flatten_dict
|
"""
Generator that yields flattened items from a nested dictionary for a flat dict.
Parameters:
nested_dict (dict): The nested dictionary to flatten.
parent_key (str): The prefix to prepend to the keys of the flattened dict.
sep (str): The separator to use between the parent key and the key of the
flattened dictionary.
Yields:
(str, any): A key-value pair from the flattened dictionary.
"""
for key, value in nested_dict.items():
new_key = parent_key + sep + key if parent_key else key
if isinstance(value, dict):
yield from _flatten_dict(value, new_key, sep)
else:
yield new_key, value
|
def _flatten_dict(nested_dict: Dict[str, Any], parent_key: str='', sep: str='_'
) ->Iterable[Tuple[str, Any]]:
"""
Generator that yields flattened items from a nested dictionary for a flat dict.
Parameters:
nested_dict (dict): The nested dictionary to flatten.
parent_key (str): The prefix to prepend to the keys of the flattened dict.
sep (str): The separator to use between the parent key and the key of the
flattened dictionary.
Yields:
(str, any): A key-value pair from the flattened dictionary.
"""
for key, value in nested_dict.items():
new_key = parent_key + sep + key if parent_key else key
if isinstance(value, dict):
yield from _flatten_dict(value, new_key, sep)
else:
yield new_key, value
|
Generator that yields flattened items from a nested dictionary for a flat dict.
Parameters:
nested_dict (dict): The nested dictionary to flatten.
parent_key (str): The prefix to prepend to the keys of the flattened dict.
sep (str): The separator to use between the parent key and the key of the
flattened dictionary.
Yields:
(str, any): A key-value pair from the flattened dictionary.
|
from_open_api_endpoint_chain
|
"""Convert an endpoint chain to an API endpoint tool."""
expanded_name = (
f"{api_title.replace(' ', '_')}.{chain.api_operation.operation_id}")
description = (
f"I'm an AI from {api_title}. Instruct what you want, and I'll assist via an API with description: {chain.api_operation.description}"
)
return cls(name=expanded_name, func=chain.run, description=description)
|
@classmethod
def from_open_api_endpoint_chain(cls, chain: OpenAPIEndpointChain,
    api_title: str) ->'NLATool':
    """Convert an endpoint chain to an API endpoint tool."""
    operation_id = chain.api_operation.operation_id
    expanded_name = f"{api_title.replace(' ', '_')}.{operation_id}"
    description = (
        f"I'm an AI from {api_title}. Instruct what you want, and I'll assist via an API with description: {chain.api_operation.description}"
        )
    return cls(name=expanded_name, func=chain.run, description=description)
|
Convert an endpoint chain to an API endpoint tool.
|
_require_arg
|
"""Raise ValueError if the required arg with name `arg_name` is None."""
if not arg:
raise ValueError(f'`{arg_name}` is required for this index.')
|
@staticmethod
def _require_arg(arg: Any, arg_name: str) ->None:
    """Raise ValueError if the required arg with name `arg_name` is None.

    NOTE(review): the check rejects ANY falsy value (None, '', 0, empty
    collection), not only None — confirm that is intended.
    """
    if not arg:
        raise ValueError(f'`{arg_name}` is required for this index.')
|
Raise ValueError if the required arg with name `arg_name` is None.
|
_cosine_distance
|
"""Compute the cosine distance between two vectors.
Args:
a (np.ndarray): The first vector.
b (np.ndarray): The second vector.
Returns:
np.ndarray: The cosine distance.
"""
return 1.0 - cosine_similarity(a, b)
|
@staticmethod
def _cosine_distance(a: np.ndarray, b: np.ndarray) ->np.ndarray:
    """Compute the cosine distance between two vectors.
    Args:
        a (np.ndarray): The first vector.
        b (np.ndarray): The second vector.
    Returns:
        np.ndarray: The cosine distance.
    """
    # Distance is the complement of similarity: identical direction -> 0.
    return 1.0 - cosine_similarity(a, b)
|
Compute the cosine distance between two vectors.
Args:
a (np.ndarray): The first vector.
b (np.ndarray): The second vector.
Returns:
np.ndarray: The cosine distance.
|
run
|
"""Run command with own globals/locals and returns anything printed.
Timeout after the specified number of seconds."""
warn_once()
queue: multiprocessing.Queue = multiprocessing.Queue()
if timeout is not None:
p = multiprocessing.Process(target=self.worker, args=(command, self.
globals, self.locals, queue))
p.start()
p.join(timeout)
if p.is_alive():
p.terminate()
return 'Execution timed out'
else:
self.worker(command, self.globals, self.locals, queue)
return queue.get()
|
def run(self, command: str, timeout: Optional[int]=None) ->str:
    """Run command with own globals/locals and returns anything printed.
    Timeout after the specified number of seconds."""
    warn_once()
    queue: multiprocessing.Queue = multiprocessing.Queue()
    if timeout is not None:
        # Execute in a child process so a runaway command can be killed.
        p = multiprocessing.Process(target=self.worker, args=(command, self
            .globals, self.locals, queue))
        p.start()
        p.join(timeout)
        if p.is_alive():
            p.terminate()
            return 'Execution timed out'
    else:
        # No timeout requested: run synchronously in this process.
        self.worker(command, self.globals, self.locals, queue)
    # The worker puts captured output (or an error string) on the queue.
    return queue.get()
|
Run command with own globals/locals and returns anything printed.
Timeout after the specified number of seconds.
|
__init__
|
"""
Initialize the graph transformer with various options.
Args:
diffbot_api_key (str):
The API key for Diffbot's NLP services.
fact_confidence_threshold (float):
Minimum confidence level for facts to be included.
include_qualifiers (bool):
Whether to include qualifiers in the relationships.
include_evidence (bool):
Whether to include evidence for the relationships.
simplified_schema (bool):
Whether to use a simplified schema for relationships.
"""
self.diffbot_api_key = diffbot_api_key or get_from_env('diffbot_api_key',
'DIFFBOT_API_KEY')
self.fact_threshold_confidence = fact_confidence_threshold
self.include_qualifiers = include_qualifiers
self.include_evidence = include_evidence
self.simplified_schema = None
if simplified_schema:
self.simplified_schema = SimplifiedSchema()
|
def __init__(self, diffbot_api_key: Optional[str]=None,
    fact_confidence_threshold: float=0.7, include_qualifiers: bool=True,
    include_evidence: bool=True, simplified_schema: bool=True) ->None:
    """
    Initialize the graph transformer with various options.
    Args:
        diffbot_api_key (str):
            The API key for Diffbot's NLP services.
        fact_confidence_threshold (float):
            Minimum confidence level for facts to be included.
        include_qualifiers (bool):
            Whether to include qualifiers in the relationships.
        include_evidence (bool):
            Whether to include evidence for the relationships.
        simplified_schema (bool):
            Whether to use a simplified schema for relationships.
    """
    # Fall back to the DIFFBOT_API_KEY environment variable when no key
    # is passed explicitly.
    self.diffbot_api_key = diffbot_api_key or get_from_env('diffbot_api_key',
        'DIFFBOT_API_KEY')
    self.fact_threshold_confidence = fact_confidence_threshold
    self.include_qualifiers = include_qualifiers
    self.include_evidence = include_evidence
    self.simplified_schema = SimplifiedSchema() if simplified_schema else None
|
Initialize the graph transformer with various options.
Args:
diffbot_api_key (str):
The API key for Diffbot's NLP services.
fact_confidence_threshold (float):
Minimum confidence level for facts to be included.
include_qualifiers (bool):
Whether to include qualifiers in the relationships.
include_evidence (bool):
Whether to include evidence for the relationships.
simplified_schema (bool):
Whether to use a simplified schema for relationships.
|
validate_search_type
|
"""Validate search type."""
search_type = values['search_type']
if search_type not in cls.allowed_search_types:
raise ValueError(
f'search_type of {search_type} not allowed. Valid values are: {cls.allowed_search_types}'
)
if search_type == 'similarity_score_threshold':
score_threshold = values['search_kwargs'].get('score_threshold')
if score_threshold is None or not isinstance(score_threshold, float):
raise ValueError(
'`score_threshold` is not specified with a float value(0~1) in `search_kwargs`.'
)
return values
|
@root_validator()
def validate_search_type(cls, values: Dict) ->Dict:
    """Validate search type."""
    search_type = values['search_type']
    if search_type not in cls.allowed_search_types:
        raise ValueError(
            f'search_type of {search_type} not allowed. Valid values are: {cls.allowed_search_types}'
            )
    if search_type == 'similarity_score_threshold':
        # Threshold searches additionally require a float `score_threshold`
        # inside `search_kwargs`.
        score_threshold = values['search_kwargs'].get('score_threshold')
        if score_threshold is None or not isinstance(score_threshold, float):
            raise ValueError(
                '`score_threshold` is not specified with a float value(0~1) in `search_kwargs`.'
                )
    return values
|
Validate search type.
|
get_issue
|
"""
Fetches a specific issue and its first 10 comments
Parameters:
issue_number(int): The number for the gitlab issue
Returns:
dict: A dictionary containing the issue's title,
body, and comments as a string
"""
issue = self.gitlab_repo_instance.issues.get(issue_number)
page = 0
comments: List[dict] = []
while len(comments) <= 10:
comments_page = issue.notes.list(page=page)
if len(comments_page) == 0:
break
for comment in comments_page:
comment = issue.notes.get(comment.id)
comments.append({'body': comment.body, 'user': comment.author[
'username']})
page += 1
return {'title': issue.title, 'body': issue.description, 'comments': str(
comments)}
|
def get_issue(self, issue_number: int) ->Dict[str, Any]:
    """
    Fetches a specific issue and its first 10 comments
    Parameters:
        issue_number(int): The number for the gitlab issue
    Returns:
        dict: A dictionary containing the issue's title,
        body, and comments as a string
    """
    issue = self.gitlab_repo_instance.issues.get(issue_number)
    # BUG FIX: the old loop condition `len(comments) <= 10` could append a
    # whole extra page, returning up to 10 + page-size comments instead of
    # the documented cap of 10.
    max_comments = 10
    page = 0
    comments: List[dict] = []
    while len(comments) < max_comments:
        comments_page = issue.notes.list(page=page)
        if len(comments_page) == 0:
            break
        for comment in comments_page:
            if len(comments) >= max_comments:
                break
            comment = issue.notes.get(comment.id)
            comments.append({'body': comment.body, 'user': comment.author[
                'username']})
        page += 1
    return {'title': issue.title, 'body': issue.description, 'comments':
        str(comments)}
|
Fetches a specific issue and its first 10 comments
Parameters:
issue_number(int): The number for the gitlab issue
Returns:
dict: A dictionary containing the issue's title,
body, and comments as a string
|
input_keys
|
"""Input keys for the chain."""
return ['user_input']
|
@property
def input_keys(self) ->List[str]:
    """Input keys for the chain."""
    # The chain consumes a single key: the raw user input.
    return ['user_input']
|
Input keys for the chain.
|
test_cypher_save_load
|
"""Test saving and loading."""
FILE_PATH = 'cypher.yaml'
url = os.environ.get('NEO4J_URI')
username = os.environ.get('NEO4J_USERNAME')
password = os.environ.get('NEO4J_PASSWORD')
assert url is not None
assert username is not None
assert password is not None
graph = Neo4jGraph(url=url, username=username, password=password)
chain = GraphCypherQAChain.from_llm(OpenAI(temperature=0), graph=graph,
return_direct=True)
chain.save(file_path=FILE_PATH)
qa_loaded = load_chain(FILE_PATH, graph=graph)
assert qa_loaded == chain
|
def test_cypher_save_load() ->None:
    """Test saving and loading.

    BUG FIX: the old version left 'cypher.yaml' on disk after the test
    (and on assertion failure); clean it up in a finally block.
    """
    file_path = 'cypher.yaml'
    url = os.environ.get('NEO4J_URI')
    username = os.environ.get('NEO4J_USERNAME')
    password = os.environ.get('NEO4J_PASSWORD')
    assert url is not None
    assert username is not None
    assert password is not None
    graph = Neo4jGraph(url=url, username=username, password=password)
    chain = GraphCypherQAChain.from_llm(OpenAI(temperature=0), graph=graph,
        return_direct=True)
    chain.save(file_path=file_path)
    try:
        qa_loaded = load_chain(file_path, graph=graph)
        assert qa_loaded == chain
    finally:
        if os.path.exists(file_path):
            os.remove(file_path)
|
Test saving and loading.
|
_get_clean_text
|
"""Returns cleaned text with newlines preserved and irrelevant elements removed."""
elements_to_skip = ['script', 'noscript', 'canvas', 'meta', 'svg', 'map',
'area', 'audio', 'source', 'track', 'video', 'embed', 'object', 'param',
'picture', 'iframe', 'frame', 'frameset', 'noframes', 'applet', 'form',
'button', 'select', 'base', 'style', 'img']
newline_elements = ['p', 'div', 'ul', 'ol', 'li', 'h1', 'h2', 'h3', 'h4',
'h5', 'h6', 'pre', 'table', 'tr']
text = _process_element(element, elements_to_skip, newline_elements)
return text.strip()
|
def _get_clean_text(element: Tag) ->str:
    """Returns cleaned text with newlines preserved and irrelevant elements removed."""
    # Tags whose content carries no extractable text.
    elements_to_skip = ['script', 'noscript', 'canvas', 'meta', 'svg', 'map',
        'area', 'audio', 'source', 'track', 'video', 'embed', 'object',
        'param', 'picture', 'iframe', 'frame', 'frameset', 'noframes',
        'applet', 'form', 'button', 'select', 'base', 'style', 'img']
    # Block-level tags that should be followed by a newline in the output.
    newline_elements = ['p', 'div', 'ul', 'ol', 'li', 'h1', 'h2', 'h3',
        'h4', 'h5', 'h6', 'pre', 'table', 'tr']
    cleaned = _process_element(element, elements_to_skip, newline_elements)
    return cleaned.strip()
|
Returns cleaned text with newlines preserved and irrelevant elements removed.
|
_get_node_properties
|
node_properties_query = """
MATCH (a:`{n_label}`)
RETURN properties(a) AS props
LIMIT 100
"""
node_properties = []
for label in n_labels:
q = node_properties_query.format(n_label=label)
data = {'label': label, 'properties': self.query(q)['results']}
s = set({})
for p in data['properties']:
for k, v in p['props'].items():
s.add((k, types[type(v).__name__]))
np = {'properties': [{'property': k, 'type': v} for k, v in s],
'labels': label}
node_properties.append(np)
return node_properties
|
def _get_node_properties(self, n_labels: List[str], types: Dict) ->List:
    """Sample up to 100 nodes per label and collect their property names/types.

    Args:
        n_labels: Node labels to inspect.
        types: Mapping from Python type names to graph type names.

    Returns:
        A list of {'properties': [...], 'labels': label} dicts, one per label.
    """
    node_properties_query = """
    MATCH (a:`{n_label}`)
    RETURN properties(a) AS props
    LIMIT 100
    """
    node_properties = []
    for label in n_labels:
        q = node_properties_query.format(n_label=label)
        data = {'label': label, 'properties': self.query(q)['results']}
        # De-duplicate (name, type) pairs across the sampled nodes.
        s = set({})
        for p in data['properties']:
            for k, v in p['props'].items():
                s.add((k, types[type(v).__name__]))
        np = {'properties': [{'property': k, 'type': v} for k, v in s],
            'labels': label}
        node_properties.append(np)
    return node_properties
| null |
_wrap_prompt
|
if not self.HUMAN_PROMPT or not self.AI_PROMPT:
raise NameError('Please ensure the anthropic package is loaded')
if prompt.startswith(self.HUMAN_PROMPT):
return prompt
corrected_prompt, n_subs = re.subn('^\\n*Human:', self.HUMAN_PROMPT, prompt)
if n_subs == 1:
return corrected_prompt
return f'{self.HUMAN_PROMPT} {prompt}{self.AI_PROMPT} Sure, here you go:\n'
|
def _wrap_prompt(self, prompt: str) ->str:
if not self.HUMAN_PROMPT or not self.AI_PROMPT:
raise NameError('Please ensure the anthropic package is loaded')
if prompt.startswith(self.HUMAN_PROMPT):
return prompt
corrected_prompt, n_subs = re.subn('^\\n*Human:', self.HUMAN_PROMPT, prompt
)
if n_subs == 1:
return corrected_prompt
return f'{self.HUMAN_PROMPT} {prompt}{self.AI_PROMPT} Sure, here you go:\n'
| null |
_send_pipeline_to_device
|
"""Send a pipeline to a device on the cluster."""
if isinstance(pipeline, str):
with open(pipeline, 'rb') as f:
pipeline = pickle.load(f)
if importlib.util.find_spec('torch') is not None:
import torch
cuda_device_count = torch.cuda.device_count()
if device < -1 or device >= cuda_device_count:
raise ValueError(
f'Got device=={device}, device is required to be within [-1, {cuda_device_count})'
)
if device < 0 and cuda_device_count > 0:
logger.warning(
'Device has %d GPUs available. Provide device={deviceId} to `from_model_id` to use availableGPUs for execution. deviceId is -1 for CPU and can be a positive integer associated with CUDA device id.'
, cuda_device_count)
pipeline.device = torch.device(device)
pipeline.model = pipeline.model.to(pipeline.device)
return pipeline
|
def _send_pipeline_to_device(pipeline: Any, device: int) ->Any:
    """Send a pipeline to a device on the cluster.

    Args:
        pipeline: A pipeline object (presumably a transformers pipeline --
            confirm against callers), or a filesystem path to a pickled
            pipeline (see the security note below).
        device: -1 for CPU, otherwise a CUDA device index.

    Returns:
        The pipeline moved to the requested torch device when torch is
        installed; returned unchanged otherwise.
    """
    if isinstance(pipeline, str):
        # SECURITY: pickle.load executes arbitrary code embedded in the
        # file -- only pass paths to pipelines you created and trust.
        with open(pipeline, 'rb') as f:
            pipeline = pickle.load(f)
    if importlib.util.find_spec('torch') is not None:
        import torch
        cuda_device_count = torch.cuda.device_count()
        # Valid range is [-1, cuda_device_count): -1 means CPU.
        if device < -1 or device >= cuda_device_count:
            raise ValueError(
                f'Got device=={device}, device is required to be within [-1, {cuda_device_count})'
                )
        if device < 0 and cuda_device_count > 0:
            # GPUs exist but CPU was requested -- tell the caller how to
            # opt into GPU execution instead of silently switching.
            logger.warning(
                'Device has %d GPUs available. Provide device={deviceId} to `from_model_id` to use availableGPUs for execution. deviceId is -1 for CPU and can be a positive integer associated with CUDA device id.'
                , cuda_device_count)
        pipeline.device = torch.device(device)
        pipeline.model = pipeline.model.to(pipeline.device)
    return pipeline
|
Send a pipeline to a device on the cluster.
|
test_json_schema_evaluator_invalid_prediction
|
prediction = '{"name": "John", "age": "30"}'
reference = {'type': 'object', 'properties': {'name': {'type': 'string'},
'age': {'type': 'integer'}}}
result = json_schema_evaluator._evaluate_strings(prediction=prediction,
reference=reference)
assert result['score'] is False
assert 'reasoning' in result
|
@pytest.mark.requires('jsonschema')
def test_json_schema_evaluator_invalid_prediction(json_schema_evaluator:
    JsonSchemaEvaluator) ->None:
    """A schema violation (age given as a string) must fail with reasoning."""
    schema = {'type': 'object', 'properties': {'name': {'type': 'string'},
        'age': {'type': 'integer'}}}
    outcome = json_schema_evaluator._evaluate_strings(prediction=
        '{"name": "John", "age": "30"}', reference=schema)
    assert outcome['score'] is False
    assert 'reasoning' in outcome
| null |
test_tracing_sequential
|
from langchain.agents import AgentType, initialize_agent, load_tools
os.environ['LANGCHAIN_WANDB_TRACING'] = 'true'
os.environ['WANDB_PROJECT'] = 'langchain-tracing'
for q in questions[:3]:
llm = OpenAI(temperature=0)
tools = load_tools(['llm-math', 'serpapi'], llm=llm)
agent = initialize_agent(tools, llm, agent=AgentType.
ZERO_SHOT_REACT_DESCRIPTION, verbose=True)
agent.run(q)
|
def test_tracing_sequential() ->None:
    """Run a few agent questions with W&B tracing enabled via env vars."""
    from langchain.agents import AgentType, initialize_agent, load_tools
    os.environ['LANGCHAIN_WANDB_TRACING'] = 'true'
    os.environ['WANDB_PROJECT'] = 'langchain-tracing'
    for question in questions[:3]:
        model = OpenAI(temperature=0)
        toolkit = load_tools(['llm-math', 'serpapi'], llm=model)
        executor = initialize_agent(toolkit, model, agent=AgentType.
            ZERO_SHOT_REACT_DESCRIPTION, verbose=True)
        executor.run(question)
| null |
_call
|
"""
Displays the prompt to the user and returns their input as a response.
Args:
prompt (str): The prompt to be displayed to the user.
stop (Optional[List[str]]): A list of stop strings.
run_manager (Optional[CallbackManagerForLLMRun]): Currently not used.
Returns:
str: The user's input as a response.
"""
self.prompt_func(prompt, **self.prompt_kwargs)
user_input = self.input_func(separator=self.separator, stop=stop, **self.
input_kwargs)
if stop is not None:
user_input = enforce_stop_tokens(user_input, stop)
return user_input
|
def _call(self, prompt: str, stop: Optional[List[str]]=None, run_manager:
Optional[CallbackManagerForLLMRun]=None, **kwargs: Any) ->str:
"""
Displays the prompt to the user and returns their input as a response.
Args:
prompt (str): The prompt to be displayed to the user.
stop (Optional[List[str]]): A list of stop strings.
run_manager (Optional[CallbackManagerForLLMRun]): Currently not used.
Returns:
str: The user's input as a response.
"""
self.prompt_func(prompt, **self.prompt_kwargs)
user_input = self.input_func(separator=self.separator, stop=stop, **
self.input_kwargs)
if stop is not None:
user_input = enforce_stop_tokens(user_input, stop)
return user_input
|
Displays the prompt to the user and returns their input as a response.
Args:
prompt (str): The prompt to be displayed to the user.
stop (Optional[List[str]]): A list of stop strings.
run_manager (Optional[CallbackManagerForLLMRun]): Currently not used.
Returns:
str: The user's input as a response.
|
get_lc_namespace
|
"""Get the namespace of the langchain object."""
return ['langchain', 'chat_models', 'mistralai']
|
@classmethod
def get_lc_namespace(cls) ->List[str]:
    """Return the serialization namespace path for this langchain object."""
    return 'langchain chat_models mistralai'.split()
|
Get the namespace of the langchain object.
|
get_images_from_pdf
|
"""
Extract images from each page of a PDF document and save as JPEG files.
:param pdf_path: A string representing the path to the PDF file.
:param img_dump_path: A string representing the path to dummp images.
"""
pdf = pdfium.PdfDocument(pdf_path)
n_pages = len(pdf)
for page_number in range(n_pages):
page = pdf.get_page(page_number)
bitmap = page.render(scale=1, rotation=0, crop=(0, 0, 0, 0))
pil_image = bitmap.to_pil()
pil_image.save(f'{img_dump_path}/img_{page_number + 1}.jpg', format='JPEG')
|
def get_images_from_pdf(pdf_path, img_dump_path):
    """
    Render every page of a PDF document and save it as a JPEG file.
    :param pdf_path: Path of the PDF file to render.
    :param img_dump_path: Directory where the page images are written.
    """
    document = pdfium.PdfDocument(pdf_path)
    for index in range(len(document)):
        # Render at native scale, unrotated and uncropped, then save as
        # img_<page number>.jpg (page numbers are 1-based).
        rendered = document.get_page(index).render(scale=1, rotation=0,
            crop=(0, 0, 0, 0))
        rendered.to_pil().save(f'{img_dump_path}/img_{index + 1}.jpg',
            format='JPEG')
|
Extract images from each page of a PDF document and save as JPEG files.
:param pdf_path: A string representing the path to the PDF file.
:param img_dump_path: A string representing the path to dummp images.
|
test_url
|
os.environ['MS_GRAPH_CLIENT_ID'] = 'CLIENT_ID'
os.environ['MS_GRAPH_CLIENT_SECRET'] = 'CLIENT_SECRET'
loader = OneNoteLoader(notebook_name='test_notebook', section_name=
'test_section', page_title='test_title', access_token='access_token',
onenote_api_base_url='https://graph.microsoft.com/v1.0/me/onenote')
assert loader._url == "https://graph.microsoft.com/v1.0/me/onenote/pages?$select=id&$expand=parentNotebook,parentSection&$filter=parentNotebook/displayName%20eq%20'test_notebook'%20and%20parentSection/displayName%20eq%20'test_section'%20and%20title%20eq%20'test_title'"
loader = OneNoteLoader(notebook_name='test_notebook', section_name=
'test_section', access_token='access_token', onenote_api_base_url=
'https://graph.microsoft.com/v1.0/me/onenote')
assert loader._url == "https://graph.microsoft.com/v1.0/me/onenote/pages?$select=id&$expand=parentNotebook,parentSection&$filter=parentNotebook/displayName%20eq%20'test_notebook'%20and%20parentSection/displayName%20eq%20'test_section'"
loader = OneNoteLoader(notebook_name='test_notebook', access_token=
'access_token', onenote_api_base_url=
'https://graph.microsoft.com/v1.0/me/onenote')
assert loader._url == "https://graph.microsoft.com/v1.0/me/onenote/pages?$select=id&$expand=parentNotebook&$filter=parentNotebook/displayName%20eq%20'test_notebook'"
loader = OneNoteLoader(section_name='test_section', access_token=
'access_token', onenote_api_base_url=
'https://graph.microsoft.com/v1.0/me/onenote')
assert loader._url == "https://graph.microsoft.com/v1.0/me/onenote/pages?$select=id&$expand=parentSection&$filter=parentSection/displayName%20eq%20'test_section'"
loader = OneNoteLoader(section_name='test_section', page_title='test_title',
access_token='access_token', onenote_api_base_url=
'https://graph.microsoft.com/v1.0/me/onenote')
assert loader._url == "https://graph.microsoft.com/v1.0/me/onenote/pages?$select=id&$expand=parentSection&$filter=parentSection/displayName%20eq%20'test_section'%20and%20title%20eq%20'test_title'"
loader = OneNoteLoader(page_title='test_title', access_token='access_token',
onenote_api_base_url='https://graph.microsoft.com/v1.0/me/onenote')
assert loader._url == "https://graph.microsoft.com/v1.0/me/onenote/pages?$select=id&$filter=title%20eq%20'test_title'"
|
def test_url() ->None:
    """Check the OneNote pages URL built for each filter combination."""
    os.environ['MS_GRAPH_CLIENT_ID'] = 'CLIENT_ID'
    os.environ['MS_GRAPH_CLIENT_SECRET'] = 'CLIENT_SECRET'
    base = 'https://graph.microsoft.com/v1.0/me/onenote'

    def make_loader(**filters):
        # Shared boilerplate for every loader under test.
        return OneNoteLoader(access_token='access_token',
            onenote_api_base_url=base, **filters)
    loader = make_loader(notebook_name='test_notebook', section_name=
        'test_section', page_title='test_title')
    assert loader._url == "https://graph.microsoft.com/v1.0/me/onenote/pages?$select=id&$expand=parentNotebook,parentSection&$filter=parentNotebook/displayName%20eq%20'test_notebook'%20and%20parentSection/displayName%20eq%20'test_section'%20and%20title%20eq%20'test_title'"
    loader = make_loader(notebook_name='test_notebook', section_name=
        'test_section')
    assert loader._url == "https://graph.microsoft.com/v1.0/me/onenote/pages?$select=id&$expand=parentNotebook,parentSection&$filter=parentNotebook/displayName%20eq%20'test_notebook'%20and%20parentSection/displayName%20eq%20'test_section'"
    loader = make_loader(notebook_name='test_notebook')
    assert loader._url == "https://graph.microsoft.com/v1.0/me/onenote/pages?$select=id&$expand=parentNotebook&$filter=parentNotebook/displayName%20eq%20'test_notebook'"
    loader = make_loader(section_name='test_section')
    assert loader._url == "https://graph.microsoft.com/v1.0/me/onenote/pages?$select=id&$expand=parentSection&$filter=parentSection/displayName%20eq%20'test_section'"
    loader = make_loader(section_name='test_section', page_title='test_title')
    assert loader._url == "https://graph.microsoft.com/v1.0/me/onenote/pages?$select=id&$expand=parentSection&$filter=parentSection/displayName%20eq%20'test_section'%20and%20title%20eq%20'test_title'"
    loader = make_loader(page_title='test_title')
    assert loader._url == "https://graph.microsoft.com/v1.0/me/onenote/pages?$select=id&$filter=title%20eq%20'test_title'"
| null |
__init__
|
"""Initialize the parser.
Args:
device: device to use.
lang_model: whisper model to use, for example "openai/whisper-medium".
Defaults to None.
forced_decoder_ids: id states for decoder in a multilanguage model.
Defaults to None.
"""
try:
from transformers import pipeline
except ImportError:
raise ImportError(
'transformers package not found, please install it with `pip install transformers`'
)
try:
import torch
except ImportError:
raise ImportError(
'torch package not found, please install it with `pip install torch`')
if device == 'cpu':
self.device = 'cpu'
if lang_model is not None:
self.lang_model = lang_model
print('WARNING! Model override. Using model: ', self.lang_model)
else:
self.lang_model = 'openai/whisper-base'
elif torch.cuda.is_available():
self.device = 'cuda:0'
mem = torch.cuda.get_device_properties(self.device
).total_memory / 1024 ** 2
if mem < 5000:
rec_model = 'openai/whisper-base'
elif mem < 7000:
rec_model = 'openai/whisper-small'
elif mem < 12000:
rec_model = 'openai/whisper-medium'
else:
rec_model = 'openai/whisper-large'
if lang_model is not None:
self.lang_model = lang_model
print('WARNING! Model override. Might not fit in your GPU')
else:
self.lang_model = rec_model
else:
"""cpu"""
print('Using the following model: ', self.lang_model)
self.pipe = pipeline('automatic-speech-recognition', model=self.lang_model,
chunk_length_s=30, device=self.device)
if forced_decoder_ids is not None:
try:
self.pipe.model.config.forced_decoder_ids = forced_decoder_ids
except Exception as exception_text:
logger.info(
f'Unable to set forced_decoder_ids parameter for whisper modelText of exception: {exception_text}Therefore whisper model will use default mode for decoder'
)
|
def __init__(self, device: str='0', lang_model: Optional[str]=None,
    forced_decoder_ids: Optional[Tuple[Dict]]=None):
    """Initialize the parser.
    Args:
        device: device to use. "cpu" forces CPU; any other value selects
            the first CUDA device when available and otherwise falls back
            to CPU.
        lang_model: whisper model to use, for example "openai/whisper-medium".
            Defaults to None.
        forced_decoder_ids: id states for decoder in a multilanguage model.
            Defaults to None.
    """
    try:
        from transformers import pipeline
    except ImportError:
        raise ImportError(
            'transformers package not found, please install it with `pip install transformers`'
            )
    try:
        import torch
    except ImportError:
        raise ImportError(
            'torch package not found, please install it with `pip install torch`'
            )
    if device != 'cpu' and torch.cuda.is_available():
        self.device = 'cuda:0'
        # Pick the largest whisper checkpoint that fits the GPU's memory
        # (thresholds in MiB).
        mem = torch.cuda.get_device_properties(self.device
            ).total_memory / 1024 ** 2
        if mem < 5000:
            rec_model = 'openai/whisper-base'
        elif mem < 7000:
            rec_model = 'openai/whisper-small'
        elif mem < 12000:
            rec_model = 'openai/whisper-medium'
        else:
            rec_model = 'openai/whisper-large'
        if lang_model is not None:
            self.lang_model = lang_model
            print('WARNING! Model override. Might not fit in your GPU')
        else:
            self.lang_model = rec_model
    else:
        # BUG FIX: the original code's final else branch was a bare string
        # literal ("""cpu""") that set nothing, so requesting a GPU on a
        # machine without CUDA left self.device/self.lang_model undefined
        # and crashed below. Fall back to CPU instead.
        self.device = 'cpu'
        if lang_model is not None:
            self.lang_model = lang_model
            print('WARNING! Model override. Using model: ', self.lang_model)
        else:
            self.lang_model = 'openai/whisper-base'
    print('Using the following model: ', self.lang_model)
    self.pipe = pipeline('automatic-speech-recognition', model=self.
        lang_model, chunk_length_s=30, device=self.device)
    if forced_decoder_ids is not None:
        try:
            self.pipe.model.config.forced_decoder_ids = forced_decoder_ids
        except Exception as exception_text:
            logger.info(
                f'Unable to set forced_decoder_ids parameter for whisper modelText of exception: {exception_text}Therefore whisper model will use default mode for decoder'
                )
|
Initialize the parser.
Args:
device: device to use.
lang_model: whisper model to use, for example "openai/whisper-medium".
Defaults to None.
forced_decoder_ids: id states for decoder in a multilanguage model.
Defaults to None.
|
validate_inputs
|
"""Validate that either folder_id or document_ids is set, but not both."""
if values.get('folder_id') and (values.get('document_ids') or values.get(
'file_ids')):
raise ValueError(
'Cannot specify both folder_id and document_ids nor folder_id and file_ids'
)
if not values.get('folder_id') and not values.get('document_ids'
) and not values.get('file_ids'):
raise ValueError('Must specify either folder_id, document_ids, or file_ids'
)
file_types = values.get('file_types')
if file_types:
if values.get('document_ids') or values.get('file_ids'):
raise ValueError(
'file_types can only be given when folder_id is given, (not when document_ids or file_ids are given).'
)
type_mapping = {'document': 'application/vnd.google-apps.document',
'sheet': 'application/vnd.google-apps.spreadsheet', 'pdf':
'application/pdf'}
allowed_types = list(type_mapping.keys()) + list(type_mapping.values())
short_names = ', '.join([f"'{x}'" for x in type_mapping.keys()])
full_names = ', '.join([f"'{x}'" for x in type_mapping.values()])
for file_type in file_types:
if file_type not in allowed_types:
raise ValueError(
f'Given file type {file_type} is not supported. Supported values are: {short_names}; and their full-form names: {full_names}'
)
def full_form(x: str) ->str:
return type_mapping[x] if x in type_mapping else x
values['file_types'] = [full_form(file_type) for file_type in file_types]
return values
|
@root_validator
def validate_inputs(cls, values: Dict[str, Any]) ->Dict[str, Any]:
    """Validate that either folder_id or document_ids is set, but not both."""
    folder = values.get('folder_id')
    docs = values.get('document_ids')
    files = values.get('file_ids')
    # folder_id is mutually exclusive with the explicit id lists, and at
    # least one of the three selectors must be present.
    if folder and (docs or files):
        raise ValueError(
            'Cannot specify both folder_id and document_ids nor folder_id and file_ids'
            )
    if not folder and not docs and not files:
        raise ValueError(
            'Must specify either folder_id, document_ids, or file_ids')
    file_types = values.get('file_types')
    if file_types:
        if docs or files:
            raise ValueError(
                'file_types can only be given when folder_id is given, (not when document_ids or file_ids are given).'
                )
        # Short names users may write, mapped to full MIME types.
        type_mapping = {'document': 'application/vnd.google-apps.document',
            'sheet': 'application/vnd.google-apps.spreadsheet', 'pdf':
            'application/pdf'}
        allowed_types = list(type_mapping) + list(type_mapping.values())
        short_names = ', '.join(f"'{name}'" for name in type_mapping)
        full_names = ', '.join(f"'{name}'" for name in type_mapping.values())
        for file_type in file_types:
            if file_type not in allowed_types:
                raise ValueError(
                    f'Given file type {file_type} is not supported. Supported values are: {short_names}; and their full-form names: {full_names}'
                    )
        # Normalize short names to their full MIME form.
        values['file_types'] = [type_mapping.get(ft, ft) for ft in file_types]
    return values
|
Validate that either folder_id or document_ids is set, but not both.
|
to_json_not_implemented
|
return to_json_not_implemented(self)
|
def to_json_not_implemented(self) ->SerializedNotImplemented:
    """Serialize this object as a "not implemented" placeholder."""
    # NOTE: this calls the *module-level* helper of the same name, not the
    # method itself -- inside the body the bare name resolves in module
    # scope, so there is no recursion here.
    return to_json_not_implemented(self)
| null |
validate_environment
|
values['ernie_api_base'] = get_from_dict_or_env(values, 'ernie_api_base',
'ERNIE_API_BASE', 'https://aip.baidubce.com')
values['ernie_client_id'] = get_from_dict_or_env(values, 'ernie_client_id',
'ERNIE_CLIENT_ID')
values['ernie_client_secret'] = get_from_dict_or_env(values,
'ernie_client_secret', 'ERNIE_CLIENT_SECRET')
return values
|
@root_validator()
def validate_environment(cls, values: Dict) ->Dict:
    """Resolve ERNIE connection settings from ``values`` or the environment."""
    # (field key, environment variable, optional default) triples; only the
    # API base has a fallback default.
    for field, env_var, *default in (('ernie_api_base', 'ERNIE_API_BASE',
        'https://aip.baidubce.com'), ('ernie_client_id',
        'ERNIE_CLIENT_ID'), ('ernie_client_secret', 'ERNIE_CLIENT_SECRET')):
        values[field] = get_from_dict_or_env(values, field, env_var, *default)
    return values
| null |
stringify_value
|
"""Stringify a value.
Args:
val: The value to stringify.
Returns:
str: The stringified value.
"""
if isinstance(val, str):
return val
elif isinstance(val, dict):
return '\n' + stringify_dict(val)
elif isinstance(val, list):
return '\n'.join(stringify_value(v) for v in val)
else:
return str(val)
|
def stringify_value(val: Any) ->str:
    """Stringify a value.
    Args:
        val: The value to stringify.
    Returns:
        str: The stringified value.
    """
    if isinstance(val, str):
        return val
    if isinstance(val, dict):
        # Nested mappings render on their own lines via stringify_dict.
        return '\n' + stringify_dict(val)
    if isinstance(val, list):
        parts = [stringify_value(item) for item in val]
        return '\n'.join(parts)
    return str(val)
|
Stringify a value.
Args:
val: The value to stringify.
Returns:
str: The stringified value.
|
_on_run_create
|
"""Process a run upon creation."""
|
def _on_run_create(self, run: Run) ->None:
    """Process a run upon creation.

    No-op hook by default; subclasses override it to react to new runs.
    """
|
Process a run upon creation.
|
reset_callback_meta
|
"""Reset the callback metadata."""
self.step = 0
self.starts = 0
self.ends = 0
self.errors = 0
self.text_ctr = 0
self.ignore_llm_ = False
self.ignore_chain_ = False
self.ignore_agent_ = False
self.always_verbose_ = False
self.chain_starts = 0
self.chain_ends = 0
self.llm_starts = 0
self.llm_ends = 0
self.llm_streams = 0
self.tool_starts = 0
self.tool_ends = 0
self.agent_ends = 0
return None
|
def reset_callback_meta(self) ->None:
    """Reset the callback metadata."""
    # All event counters go back to zero...
    for counter in ('step', 'starts', 'ends', 'errors', 'text_ctr',
        'chain_starts', 'chain_ends', 'llm_starts', 'llm_ends',
        'llm_streams', 'tool_starts', 'tool_ends', 'agent_ends'):
        setattr(self, counter, 0)
    # ...and all behavior flags back to their defaults.
    for flag in ('ignore_llm_', 'ignore_chain_', 'ignore_agent_',
        'always_verbose_'):
        setattr(self, flag, False)
    return None
|
Reset the callback metadata.
|
parse
|
"""Return AutoGPTAction"""
|
@abstractmethod
def parse(self, text: str) ->AutoGPTAction:
    """Parse raw model output ``text`` into an AutoGPTAction.

    Abstract: concrete output parsers must implement this.
    """
|
Return AutoGPTAction
|
test_sitemap_block_size_to_small
|
"""Test sitemap loader."""
with pytest.raises(ValueError, match='Sitemap blocksize should be at least 1'):
SitemapLoader('https://api.python.langchain.com/sitemap.xml', blocksize=0)
|
def test_sitemap_block_size_to_small() ->None:
    """A blocksize of 0 must be rejected by the sitemap loader."""
    expected_message = 'Sitemap blocksize should be at least 1'
    with pytest.raises(ValueError, match=expected_message):
        SitemapLoader('https://api.python.langchain.com/sitemap.xml',
            blocksize=0)
|
Test sitemap loader.
|
get_relevant_documents_with_filter
|
body = self.body.copy()
_filter = f' and {_filter}' if _filter else ''
body['yql'] = body['yql'] + _filter
body['query'] = query
return self._query(body)
|
def get_relevant_documents_with_filter(self, query: str, *, _filter:
    Optional[str]=None) ->List[Document]:
    """Run ``query``, optionally AND-ing ``_filter`` onto the YQL clause."""
    request = self.body.copy()
    if _filter:
        # Append the extra condition without mutating the template body.
        request['yql'] += f' and {_filter}'
    request['query'] = query
    return self._query(request)
| null |
test_get_final_answer_new_line
|
"""Test getting final answer."""
llm_output = """Thought: I can now answer the question
Final Answer:
1994"""
action, action_input = get_action_and_input(llm_output)
assert action == 'Final Answer'
assert action_input == '1994'
|
def test_get_final_answer_new_line() ->None:
    """A final answer separated from its marker by a newline is extracted."""
    output = 'Thought: I can now answer the question\nFinal Answer:\n1994'
    action, action_input = get_action_and_input(output)
    assert (action, action_input) == ('Final Answer', '1994')
|
Test getting final answer.
|
_create_function_message
|
"""Convert agent action and observation into a function message.
Args:
agent_action: the tool invocation request from the agent
observation: the result of the tool invocation
Returns:
FunctionMessage that corresponds to the original tool invocation
"""
if not isinstance(observation, str):
try:
content = json.dumps(observation, ensure_ascii=False)
except Exception:
content = str(observation)
else:
content = observation
return FunctionMessage(name=agent_action.tool, content=content)
|
def _create_function_message(agent_action: AgentAction, observation: Any
    ) ->FunctionMessage:
    """Convert agent action and observation into a function message.
    Args:
        agent_action: the tool invocation request from the agent
        observation: the result of the tool invocation
    Returns:
        FunctionMessage that corresponds to the original tool invocation
    """
    if isinstance(observation, str):
        content = observation
    else:
        # Prefer JSON so structured tool output stays machine-readable;
        # fall back to str() for values json cannot serialize.
        try:
            content = json.dumps(observation, ensure_ascii=False)
        except Exception:
            content = str(observation)
    return FunctionMessage(name=agent_action.tool, content=content)
|
Convert agent action and observation into a function message.
Args:
agent_action: the tool invocation request from the agent
observation: the result of the tool invocation
Returns:
FunctionMessage that corresponds to the original tool invocation
|
_get_messages_from_run_dict
|
if not messages:
return []
first_message = messages[0]
if 'lc' in first_message:
return [load(dumpd(message)) for message in messages]
else:
return messages_from_dict(messages)
|
def _get_messages_from_run_dict(messages: List[dict]) ->List[BaseMessage]:
if not messages:
return []
first_message = messages[0]
if 'lc' in first_message:
return [load(dumpd(message)) for message in messages]
else:
return messages_from_dict(messages)
| null |
validate_environment
|
"""Validate that api key exists in environment."""
scenex_api_key = get_from_dict_or_env(values, 'scenex_api_key',
'SCENEX_API_KEY')
values['scenex_api_key'] = scenex_api_key
return values
|
@root_validator(pre=True)
def validate_environment(cls, values: Dict) ->Dict:
    """Validate that api key exists in environment."""
    # Resolve from the provided values first, then the environment.
    values['scenex_api_key'] = get_from_dict_or_env(values,
        'scenex_api_key', 'SCENEX_API_KEY')
    return values
|
Validate that api key exists in environment.
|
test_deanonymize
|
"""Test deanonymizing a name in a simple sentence"""
from langchain_experimental.data_anonymizer import PresidioReversibleAnonymizer
text = 'Hello, my name is John Doe.'
anonymizer = PresidioReversibleAnonymizer(analyzed_fields=['PERSON'])
anonymized_text = anonymizer.anonymize(text)
deanonymized_text = anonymizer.deanonymize(anonymized_text)
assert deanonymized_text == text
|
@pytest.mark.requires('presidio_analyzer', 'presidio_anonymizer', 'faker')
def test_deanonymize() ->None:
    """Round-tripping anonymize/deanonymize must restore the original text."""
    from langchain_experimental.data_anonymizer import PresidioReversibleAnonymizer
    original = 'Hello, my name is John Doe.'
    anonymizer = PresidioReversibleAnonymizer(analyzed_fields=['PERSON'])
    restored = anonymizer.deanonymize(anonymizer.anonymize(original))
    assert restored == original
|
Test deanonymizing a name in a simple sentence
|
from_documents
|
"""
Create a BM25Retriever from a list of Documents.
Args:
documents: A list of Documents to vectorize.
bm25_params: Parameters to pass to the BM25 vectorizer.
preprocess_func: A function to preprocess each text before vectorization.
**kwargs: Any other arguments to pass to the retriever.
Returns:
A BM25Retriever instance.
"""
texts, metadatas = zip(*((d.page_content, d.metadata) for d in documents))
return cls.from_texts(texts=texts, bm25_params=bm25_params, metadatas=
metadatas, preprocess_func=preprocess_func, **kwargs)
|
@classmethod
def from_documents(cls, documents: Iterable[Document], *, bm25_params:
    Optional[Dict[str, Any]]=None, preprocess_func: Callable[[str], List[
    str]]=default_preprocessing_func, **kwargs: Any) ->BM25Retriever:
    """
    Create a BM25Retriever from a list of Documents.
    Args:
        documents: A list of Documents to vectorize.
        bm25_params: Parameters to pass to the BM25 vectorizer.
        preprocess_func: A function to preprocess each text before vectorization.
        **kwargs: Any other arguments to pass to the retriever.
    Returns:
        A BM25Retriever instance.
    """
    # BUG FIX: the previous `texts, metadatas = zip(*(...))` raised
    # "not enough values to unpack" when `documents` was empty; an explicit
    # accumulation loop handles the empty iterable gracefully.
    texts = []
    metadatas = []
    for doc in documents:
        texts.append(doc.page_content)
        metadatas.append(doc.metadata)
    return cls.from_texts(texts=texts, bm25_params=bm25_params, metadatas=
        metadatas, preprocess_func=preprocess_func, **kwargs)
|
Create a BM25Retriever from a list of Documents.
Args:
documents: A list of Documents to vectorize.
bm25_params: Parameters to pass to the BM25 vectorizer.
preprocess_func: A function to preprocess each text before vectorization.
**kwargs: Any other arguments to pass to the retriever.
Returns:
A BM25Retriever instance.
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.