method_name
stringlengths 1
78
| method_body
stringlengths 3
9.66k
| full_code
stringlengths 31
10.7k
| docstring
stringlengths 4
4.74k
⌀ |
|---|---|---|---|
format_request_payload
|
ContentFormatterBase.escape_special_characters(prompt)
request_payload = json.dumps({'inputs': [f'"{prompt}"'], 'parameters':
model_kwargs})
return str.encode(request_payload)
|
def format_request_payload(self, prompt: str, model_kwargs: Dict) ->bytes:
    """Format the prompt and model kwargs into the JSON request body.

    Args:
        prompt: The raw prompt text; special characters are escaped first.
        model_kwargs: Extra model parameters forwarded under 'parameters'.

    Returns:
        The UTF-8 encoded JSON payload.
    """
    # Bug fix: the escaped prompt was previously discarded, so the raw
    # (unescaped) prompt was sent to the endpoint.
    prompt = ContentFormatterBase.escape_special_characters(prompt)
    request_payload = json.dumps({'inputs': [f'"{prompt}"'], 'parameters':
        model_kwargs})
    return str.encode(request_payload)
| null |
test_llm_caching
|
prompt = 'How are you?'
response = 'Test response'
cached_response = 'Cached test response'
llm = FakeListLLM(responses=[response])
if get_llm_cache():
get_llm_cache().update(prompt=prompt, llm_string=create_llm_string(llm),
return_val=[Generation(text=cached_response)])
assert llm(prompt) == cached_response
else:
raise ValueError(
'The cache not set. This should never happen, as the pytest fixture `set_cache_and_teardown` always sets the cache.'
)
|
def test_llm_caching() ->None:
    """Verify that an LLM returns the cached response for a seeded prompt."""
    prompt = 'How are you?'
    response = 'Test response'
    cached_response = 'Cached test response'
    llm = FakeListLLM(responses=[response])
    if get_llm_cache():
        # Seed the cache, then expect the cached text (not the fake LLM's
        # own canned response) to come back for the same prompt.
        get_llm_cache().update(prompt=prompt, llm_string=create_llm_string(
            llm), return_val=[Generation(text=cached_response)])
        assert llm(prompt) == cached_response
    else:
        raise ValueError(
            'The cache not set. This should never happen, as the pytest fixture `set_cache_and_teardown` always sets the cache.'
            )
| null |
_check_evaluation_args
|
"""Check if the evaluation arguments are valid.
Args:
reference (Optional[str], optional): The reference label.
input (Optional[str], optional): The input string.
Raises:
ValueError: If the evaluator requires an input string but none is provided,
or if the evaluator requires a reference label but none is provided.
"""
if self.requires_input and input is None:
raise ValueError(f'{self.__class__.__name__} requires an input string.')
elif input is not None and not self.requires_input:
warn(self._skip_input_warning)
if self.requires_reference and reference is None:
raise ValueError(f'{self.__class__.__name__} requires a reference string.')
elif reference is not None and not self.requires_reference:
warn(self._skip_reference_warning)
|
def _check_evaluation_args(self, reference: Optional[str]=None, input:
    Optional[str]=None) ->None:
    """Check if the evaluation arguments are valid.
    Args:
        reference (Optional[str], optional): The reference label.
        input (Optional[str], optional): The input string.
    Raises:
        ValueError: If the evaluator requires an input string but none is provided,
        or if the evaluator requires a reference label but none is provided.
    """
    # A missing required input is an error; a superfluous one only warns.
    if self.requires_input and input is None:
        raise ValueError(f'{self.__class__.__name__} requires an input string.'
            )
    elif input is not None and not self.requires_input:
        warn(self._skip_input_warning)
    # Same policy for the reference label.
    if self.requires_reference and reference is None:
        raise ValueError(
            f'{self.__class__.__name__} requires a reference string.')
    elif reference is not None and not self.requires_reference:
        warn(self._skip_reference_warning)
|
Check if the evaluation arguments are valid.
Args:
reference (Optional[str], optional): The reference label.
input (Optional[str], optional): The input string.
Raises:
ValueError: If the evaluator requires an input string but none is provided,
or if the evaluator requires a reference label but none is provided.
|
__init__
|
"""Unparser(tree, file=sys.stdout) -> None.
Print the source for tree to file."""
self.f = file
self._indent = 0
self.dispatch(tree)
self.f.flush()
|
def __init__(self, tree, file=sys.stdout):
    """Unparser(tree, file=sys.stdout) -> None.
    Print the source for tree to file."""
    self.f = file
    self._indent = 0
    # Unparsing happens eagerly: constructing the object walks the AST,
    # writes the source to `file`, and flushes it.
    self.dispatch(tree)
    self.f.flush()
|
Unparser(tree, file=sys.stdout) -> None.
Print the source for tree to file.
|
test_chat_openai_multiple_completions
|
"""Test ChatOpenAI wrapper with multiple completions."""
chat = ChatOpenAI(max_tokens=10, n=5)
message = HumanMessage(content='Hello')
response = chat._generate([message])
assert isinstance(response, ChatResult)
assert len(response.generations) == 5
for generation in response.generations:
assert isinstance(generation.message, BaseMessage)
assert isinstance(generation.message.content, str)
|
@pytest.mark.scheduled
def test_chat_openai_multiple_completions() ->None:
    """Test ChatOpenAI wrapper with multiple completions."""
    chat = ChatOpenAI(max_tokens=10, n=5)
    message = HumanMessage(content='Hello')
    response = chat._generate([message])
    assert isinstance(response, ChatResult)
    # n=5 must yield exactly five generations, each wrapping a real message.
    assert len(response.generations) == 5
    for generation in response.generations:
        assert isinstance(generation.message, BaseMessage)
        assert isinstance(generation.message.content, str)
|
Test ChatOpenAI wrapper with multiple completions.
|
test_deepinfra_call
|
"""Test valid call to DeepInfra."""
llm = DeepInfra(model_id='meta-llama/Llama-2-7b-chat-hf')
output = llm('What is 2 + 2?')
assert isinstance(output, str)
|
def test_deepinfra_call() ->None:
    """Test valid call to DeepInfra."""
    # NOTE(review): live network call; presumably requires DeepInfra
    # credentials in the environment — confirm before running locally.
    llm = DeepInfra(model_id='meta-llama/Llama-2-7b-chat-hf')
    output = llm('What is 2 + 2?')
    assert isinstance(output, str)
|
Test valid call to DeepInfra.
|
_ddgs_news
|
"""Run query through DuckDuckGo news search and return results."""
from duckduckgo_search import DDGS
with DDGS() as ddgs:
ddgs_gen = ddgs.news(query, region=self.region, safesearch=self.
safesearch, timelimit=self.time, max_results=max_results or self.
max_results)
if ddgs_gen:
return [r for r in ddgs_gen]
return []
|
def _ddgs_news(self, query: str, max_results: Optional[int]=None) ->List[Dict
    [str, str]]:
    """Run query through DuckDuckGo news search and return results.

    Args:
        query: The search query string.
        max_results: Optional cap on results; falls back to self.max_results.

    Returns:
        A list of raw result dicts from duckduckgo_search, or [] if the
        search produced nothing.
    """
    # Imported lazily so the module loads without duckduckgo_search.
    from duckduckgo_search import DDGS
    with DDGS() as ddgs:
        ddgs_gen = ddgs.news(query, region=self.region, safesearch=self.
            safesearch, timelimit=self.time, max_results=max_results or
            self.max_results)
        if ddgs_gen:
            return [r for r in ddgs_gen]
    return []
|
Run query through DuckDuckGo news search and return results.
|
transform
|
if self.func is None:
for chunk in self._transform_stream_with_config(input, identity, config):
yield chunk
else:
final = None
for chunk in self._transform_stream_with_config(input, identity, config):
yield chunk
if final is None:
final = chunk
else:
final = final + chunk
if final is not None:
call_func_with_variable_args(self.func, final, ensure_config(config
), **kwargs)
|
def transform(self, input: Iterator[Other], config: Optional[RunnableConfig
    ]=None, **kwargs: Any) ->Iterator[Other]:
    """Pass the input stream through unchanged; if a listener function is
    configured, also fold the chunks together and invoke it once at the end.
    """
    if self.func is None:
        # No listener: stream chunks straight through.
        for chunk in self._transform_stream_with_config(input, identity, config
            ):
            yield chunk
    else:
        # Stream chunks through while accumulating them with `+`.
        final = None
        for chunk in self._transform_stream_with_config(input, identity, config
            ):
            yield chunk
            if final is None:
                final = chunk
            else:
                final = final + chunk
        # Only call the listener if at least one chunk was produced.
        if final is not None:
            call_func_with_variable_args(self.func, final, ensure_config(
                config), **kwargs)
| null |
raw_spec
|
"""Return a raw OpenAPI spec."""
from openapi_pydantic import Info
return OpenAPISpec(info=Info(title='Test API', version='1.0.0'))
|
@pytest.mark.requires('openapi_pydantic')
@pytest.fixture
def raw_spec() ->OpenAPISpec:
    """Return a raw OpenAPI spec."""
    # Imported lazily so collection works without openapi_pydantic installed.
    from openapi_pydantic import Info
    return OpenAPISpec(info=Info(title='Test API', version='1.0.0'))
|
Return a raw OpenAPI spec.
|
test_api_request_body_property_from_schema
|
from openapi_pydantic import Components, Reference, Schema
raw_spec.components = Components(schemas={'Bar': Schema(type='number')})
schema = Schema(type='object', properties={'foo': Schema(type='string'),
'bar': Reference(ref='#/components/schemas/Bar')}, required=['bar'])
api_request_body_property = APIRequestBodyProperty.from_schema(schema,
'test', required=True, spec=raw_spec)
expected_sub_properties = [APIRequestBodyProperty(name='foo', required=
False, type='string', default=None, description=None, properties=[],
references_used=[]), APIRequestBodyProperty(name='bar', required=True,
type='number', default=None, description=None, properties=[],
references_used=['Bar'])]
assert api_request_body_property.properties[0] == expected_sub_properties[0]
assert api_request_body_property.properties[1] == expected_sub_properties[1]
assert api_request_body_property.type == 'object'
assert api_request_body_property.properties[1].references_used == ['Bar']
|
@pytest.mark.requires('openapi_pydantic')
def test_api_request_body_property_from_schema(raw_spec: OpenAPISpec) ->None:
    """$ref sub-schemas are resolved and recorded in references_used."""
    from openapi_pydantic import Components, Reference, Schema
    # Register the referenced 'Bar' schema on the spec so the $ref resolves.
    raw_spec.components = Components(schemas={'Bar': Schema(type='number')})
    schema = Schema(type='object', properties={'foo': Schema(type='string'),
        'bar': Reference(ref='#/components/schemas/Bar')}, required=['bar'])
    api_request_body_property = APIRequestBodyProperty.from_schema(schema,
        'test', required=True, spec=raw_spec)
    expected_sub_properties = [APIRequestBodyProperty(name='foo', required=
        False, type='string', default=None, description=None, properties=[],
        references_used=[]), APIRequestBodyProperty(name='bar', required=
        True, type='number', default=None, description=None, properties=[],
        references_used=['Bar'])]
    assert api_request_body_property.properties[0] == expected_sub_properties[0
        ]
    assert api_request_body_property.properties[1] == expected_sub_properties[1
        ]
    assert api_request_body_property.type == 'object'
    assert api_request_body_property.properties[1].references_used == ['Bar']
| null |
construct_html_from_prompt_and_generation
|
"""Construct an html element from a prompt and a generation.
Parameters:
prompt (str): The prompt.
generation (str): The generation.
Returns:
(str): The html string."""
formatted_prompt = prompt.replace('\n', '<br>')
formatted_generation = generation.replace('\n', '<br>')
return f"""
<p style="color:black;">{formatted_prompt}:</p>
<blockquote>
<p style="color:green;">
{formatted_generation}
</p>
</blockquote>
"""
|
def construct_html_from_prompt_and_generation(prompt: str, generation: str
    ) ->Any:
    """Render a prompt/generation pair as a small HTML fragment.

    Parameters:
        prompt (str): The prompt text; newlines become <br> tags.
        generation (str): The generated text; newlines become <br> tags.

    Returns:
        (str): The HTML string."""
    prompt_html = prompt.replace('\n', '<br>')
    generation_html = generation.replace('\n', '<br>')
    return f"""
<p style="color:black;">{prompt_html}:</p>
<blockquote>
<p style="color:green;">
{generation_html}
</p>
</blockquote>
"""
|
Construct an html element from a prompt and a generation.
Parameters:
prompt (str): The prompt.
generation (str): The generation.
Returns:
(str): The html string.
|
simplify_code
|
raise NotImplementedError()
|
@abstractmethod
def simplify_code(self) ->str:
    """Return a simplified version of the code; subclasses must implement."""
    raise NotImplementedError()
| null |
on_agent_action
|
self.on_agent_action_common()
|
def on_agent_action(self, *args: Any, **kwargs: Any) ->Any:
    """Forward agent-action callbacks to the shared common handler."""
    # Arguments are accepted for callback-interface compatibility but are
    # intentionally ignored.
    self.on_agent_action_common()
| null |
_get_invocation_params
|
"""Get the parameters used to invoke the model FOR THE CALLBACKS."""
return {**self._default_params, **super()._get_invocation_params(stop=stop,
**kwargs)}
|
def _get_invocation_params(self, stop: Optional[List[str]]=None, **kwargs: Any
    ) ->Dict[str, Any]:
    """Get the parameters used to invoke the model FOR THE CALLBACKS."""
    # The base-class params are merged last, so they win on key collisions.
    return {**self._default_params, **super()._get_invocation_params(stop=
        stop, **kwargs)}
|
Get the parameters used to invoke the model FOR THE CALLBACKS.
|
on_tool_end
|
"""Do nothing when tool ends."""
pass
|
def on_tool_end(self, output: str, observation_prefix: Optional[str]=None,
    llm_prefix: Optional[str]=None, **kwargs: Any) ->None:
    """Do nothing when tool ends."""
    # Intentional no-op: parameters exist only to satisfy the callback API.
    pass
|
Do nothing when tool ends.
|
load
|
"""Load full dataframe."""
return list(self.lazy_load())
|
def load(self) ->List[Document]:
    """Load full dataframe."""
    # Eagerly materialize the lazy iterator into a list of Documents.
    return list(self.lazy_load())
|
Load full dataframe.
|
test_all_imports
|
"""Simple test to make sure all things can be imported."""
for cls in llms.__all__:
assert issubclass(getattr(llms, cls), BaseLLM)
assert set(llms.__all__) == set(EXPECT_ALL)
|
def test_all_imports() ->None:
    """Simple test to make sure all things can be imported."""
    # Every exported name must be an LLM subclass, and __all__ must match
    # the expected snapshot exactly (no additions or removals).
    for cls in llms.__all__:
        assert issubclass(getattr(llms, cls), BaseLLM)
    assert set(llms.__all__) == set(EXPECT_ALL)
|
Simple test to make sure all things can be imported.
|
validate_environment
|
"""Validate that api key exists in environment."""
serper_api_key = get_from_dict_or_env(values, 'serper_api_key',
'SERPER_API_KEY')
values['serper_api_key'] = serper_api_key
return values
|
@root_validator()
def validate_environment(cls, values: Dict) ->Dict:
    """Validate that api key exists in environment."""
    # Accepts the key either directly in `values` or via the
    # SERPER_API_KEY environment variable.
    serper_api_key = get_from_dict_or_env(values, 'serper_api_key',
        'SERPER_API_KEY')
    values['serper_api_key'] = serper_api_key
    return values
|
Validate that api key exists in environment.
|
test_list_raises_401_invalid_access_token
|
"""Test that a valid error is raised when the API Key is invalid."""
mock_response = MagicMock()
mock_response.status_code = 401
mock_response.raise_for_status.side_effect = requests.HTTPError(
'401 Client Error: Unauthorized for url: https://nla.zapier.com/api/v1/exposed/'
, response=mock_response)
mock_session = MagicMock()
mock_session.get.return_value = mock_response
with patch('requests.Session', return_value=mock_session):
wrapper = ZapierNLAWrapper(zapier_nla_oauth_access_token='test')
with pytest.raises(requests.HTTPError) as err:
wrapper.list()
assert str(err.value).startswith(
"An unauthorized response occurred. Check that your access token is correct and doesn't need to be refreshed. Err:"
)
|
def test_list_raises_401_invalid_access_token() ->None:
    """Test that a valid error is raised when the API Key is invalid."""
    # Simulate a 401 Unauthorized response from the Zapier NLA endpoint.
    mock_response = MagicMock()
    mock_response.status_code = 401
    mock_response.raise_for_status.side_effect = requests.HTTPError(
        '401 Client Error: Unauthorized for url: https://nla.zapier.com/api/v1/exposed/'
        , response=mock_response)
    mock_session = MagicMock()
    mock_session.get.return_value = mock_response
    with patch('requests.Session', return_value=mock_session):
        wrapper = ZapierNLAWrapper(zapier_nla_oauth_access_token='test')
        with pytest.raises(requests.HTTPError) as err:
            wrapper.list()
        # The wrapper should re-raise with a friendlier explanation prefix.
        assert str(err.value).startswith(
            "An unauthorized response occurred. Check that your access token is correct and doesn't need to be refreshed. Err:"
            )
|
Test that a valid error is raised when the API Key is invalid.
|
download
|
"""Download a file from an url.
Args:
path: Path to the file.
"""
response = self.session.get(self.base_url + path, allow_redirects=True)
filename = self.parse_filename(response.url)
with open(Path(self.folder_path) / filename, 'wb') as f:
f.write(response.content)
|
def download(self, path: str) ->None:
    """Download a file from an url.
    Args:
        path: Path to the file.
    """
    response = self.session.get(self.base_url + path, allow_redirects=True)
    # The local filename is derived from the final (post-redirect) URL.
    # NOTE(review): the HTTP status is not checked before writing — confirm
    # error pages cannot end up saved as downloaded files.
    filename = self.parse_filename(response.url)
    with open(Path(self.folder_path) / filename, 'wb') as f:
        f.write(response.content)
|
Download a file from an url.
Args:
path: Path to the file.
|
output_keys
|
"""Will always return text key.
:meta private:
"""
return [self.output_key]
|
@property
def output_keys(self) ->List[str]:
    """Will always return text key.
    :meta private:
    """
    # Single-element list wrapping the configured output key.
    return [self.output_key]
|
Will always return text key.
:meta private:
|
_get_elements
|
min_unstructured_version = '0.5.4'
if not satisfies_min_unstructured_version(min_unstructured_version):
raise ValueError(
f'Partitioning epub files is only supported in unstructured>={min_unstructured_version}.'
)
from unstructured.partition.epub import partition_epub
return partition_epub(filename=self.file_path, **self.unstructured_kwargs)
|
def _get_elements(self) ->List:
    """Partition the epub file into `unstructured` elements.

    Raises:
        ValueError: If the installed `unstructured` version is too old to
            support epub partitioning.
    """
    min_unstructured_version = '0.5.4'
    if not satisfies_min_unstructured_version(min_unstructured_version):
        raise ValueError(
            f'Partitioning epub files is only supported in unstructured>={min_unstructured_version}.'
            )
    # Imported lazily so the loader module works without unstructured.
    from unstructured.partition.epub import partition_epub
    return partition_epub(filename=self.file_path, **self.unstructured_kwargs)
| null |
format_request_payload
|
"""Formats the request according to the chosen api"""
prompt = ContentFormatterBase.escape_special_characters(prompt)
request_payload = json.dumps({'input_data': {'input_string': [f'"{prompt}"'
], 'parameters': model_kwargs}})
return str.encode(request_payload)
|
def format_request_payload(self, prompt: str, model_kwargs: Dict) ->bytes:
    """Formats the request according to the chosen api"""
    # Escape the prompt first so it can be embedded safely in the JSON body.
    prompt = ContentFormatterBase.escape_special_characters(prompt)
    request_payload = json.dumps({'input_data': {'input_string': [
        f'"{prompt}"'], 'parameters': model_kwargs}})
    # The endpoint expects the payload as UTF-8 bytes.
    return str.encode(request_payload)
|
Formats the request according to the chosen api
|
_format_func
|
self._validate_func(func)
map_dict = {Operator.AND: '$and', Operator.OR: '$or', Comparator.EQ: '$eq',
Comparator.NE: '$ne', Comparator.GTE: '$gte', Comparator.LTE: '$lte',
Comparator.LT: '$lt', Comparator.GT: '$gt', Comparator.IN: '$in',
Comparator.NIN: '$nin'}
return map_dict[func]
|
def _format_func(self, func: Union[Operator, Comparator]) ->str:
    """Map a supported Operator/Comparator to its '$'-prefixed query token.

    Raises (via _validate_func) if the operator is not supported.
    """
    self._validate_func(func)
    map_dict = {Operator.AND: '$and', Operator.OR: '$or', Comparator.EQ:
        '$eq', Comparator.NE: '$ne', Comparator.GTE: '$gte', Comparator.LTE:
        '$lte', Comparator.LT: '$lt', Comparator.GT: '$gt', Comparator.IN:
        '$in', Comparator.NIN: '$nin'}
    return map_dict[func]
| null |
_llm_type
|
"""Return type of llm."""
return 'ai21'
|
@property
def _llm_type(self) ->str:
"""Return type of llm."""
return 'ai21'
|
Return type of llm.
|
list_open_pull_requests
|
"""
Fetches all open PRs from the repo
Returns:
str: A plaintext report containing the number of PRs
and each PR's title and number.
"""
pull_requests = self.github_repo_instance.get_pulls(state='open')
if pull_requests.totalCount > 0:
parsed_prs = self.parse_pull_requests(pull_requests)
parsed_prs_str = 'Found ' + str(len(parsed_prs)
) + ' pull requests:\n' + str(parsed_prs)
return parsed_prs_str
else:
return 'No open pull requests available'
|
def list_open_pull_requests(self) ->str:
    """
    Fetches all open PRs from the repo
    Returns:
        str: A plaintext report containing the number of PRs
        and each PR's title and number.
    """
    open_prs = self.github_repo_instance.get_pulls(state='open')
    # Guard clause: nothing to report when the repo has no open PRs.
    if open_prs.totalCount <= 0:
        return 'No open pull requests available'
    parsed = self.parse_pull_requests(open_prs)
    return f'Found {len(parsed)} pull requests:\n{parsed}'
|
Fetches all open PRs from the repo
Returns:
str: A plaintext report containing the number of PRs
and each PR's title and number.
|
on_llm_end
|
"""Run when LLM ends running."""
|
def on_llm_end(self, response: LLMResult, *, run_id: UUID, parent_run_id:
    Optional[UUID]=None, **kwargs: Any) ->Any:
    """Run when LLM ends running."""
    # Intentional no-op base hook; subclasses override to react to
    # LLM completion.
|
Run when LLM ends running.
|
_build_payload
|
from gigachat.models import Chat
payload = Chat(messages=[_convert_message_to_dict(m) for m in messages],
profanity_check=self.profanity)
if self.temperature is not None:
payload.temperature = self.temperature
if self.max_tokens is not None:
payload.max_tokens = self.max_tokens
if self.verbose:
logger.info('Giga request: %s', payload.dict())
return payload
|
def _build_payload(self, messages: List[BaseMessage]) ->Any:
    """Assemble a GigaChat `Chat` payload from LangChain messages."""
    # Imported lazily so the module loads without the gigachat package.
    from gigachat.models import Chat
    payload = Chat(messages=[_convert_message_to_dict(m) for m in messages],
        profanity_check=self.profanity)
    # Optional knobs are only set when explicitly configured.
    if self.temperature is not None:
        payload.temperature = self.temperature
    if self.max_tokens is not None:
        payload.max_tokens = self.max_tokens
    if self.verbose:
        logger.info('Giga request: %s', payload.dict())
    return payload
| null |
_identifying_params
|
"""Get the identifying parameters."""
return {'base_url': self.base_url, **{}, **self._default_params}
|
@property
def _identifying_params(self) ->Mapping[str, Any]:
"""Get the identifying parameters."""
return {'base_url': self.base_url, **{}, **self._default_params}
|
Get the identifying parameters.
|
_chain_type
|
"""Return the chain type."""
return 'retrieval_qa'
|
@property
def _chain_type(self) ->str:
"""Return the chain type."""
return 'retrieval_qa'
|
Return the chain type.
|
test_create_events
|
assert create_events([EventDict(event='Test Event', properties={'test':
'test'})]) == {'status': 'success'}
|
def test_create_events() ->None:
    """create_events reports success for a single well-formed event."""
    assert create_events([EventDict(event='Test Event', properties={'test':
        'test'})]) == {'status': 'success'}
| null |
_prepare_message
|
"""Create a message for an email."""
mime_message = MIMEMultipart()
mime_message.attach(MIMEText(message, 'html'))
mime_message['To'] = ', '.join(to if isinstance(to, list) else [to])
mime_message['Subject'] = subject
if cc is not None:
mime_message['Cc'] = ', '.join(cc if isinstance(cc, list) else [cc])
if bcc is not None:
mime_message['Bcc'] = ', '.join(bcc if isinstance(bcc, list) else [bcc])
encoded_message = base64.urlsafe_b64encode(mime_message.as_bytes()).decode()
return {'raw': encoded_message}
|
def _prepare_message(self, message: str, to: Union[str, List[str]], subject:
str, cc: Optional[Union[str, List[str]]]=None, bcc: Optional[Union[str,
List[str]]]=None) ->Dict[str, Any]:
"""Create a message for an email."""
mime_message = MIMEMultipart()
mime_message.attach(MIMEText(message, 'html'))
mime_message['To'] = ', '.join(to if isinstance(to, list) else [to])
mime_message['Subject'] = subject
if cc is not None:
mime_message['Cc'] = ', '.join(cc if isinstance(cc, list) else [cc])
if bcc is not None:
mime_message['Bcc'] = ', '.join(bcc if isinstance(bcc, list) else [bcc]
)
encoded_message = base64.urlsafe_b64encode(mime_message.as_bytes()).decode(
)
return {'raw': encoded_message}
|
Create a message for an email.
|
test_file_names_exist
|
"""Verify that the file names exist."""
glob_pattern = params['glob']
suffixes = params['suffixes']
exclude = params['exclude']
relative_filenames = params['relative_filenames']
loader = FileSystemBlobLoader(toy_dir, glob=glob_pattern, suffixes=suffixes,
exclude=exclude)
blobs = list(loader.yield_blobs())
file_names = sorted(str(blob.path) for blob in blobs)
expected_filenames = sorted(str(Path(toy_dir) / relative_filename) for
relative_filename in relative_filenames)
assert file_names == expected_filenames
assert loader.count_matching_files() == len(relative_filenames)
|
@pytest.mark.parametrize('params', _TEST_CASES)
def test_file_names_exist(toy_dir: str, params: dict) ->None:
    """Verify that the file names exist."""
    glob_pattern = params['glob']
    suffixes = params['suffixes']
    exclude = params['exclude']
    relative_filenames = params['relative_filenames']
    loader = FileSystemBlobLoader(toy_dir, glob=glob_pattern, suffixes=
        suffixes, exclude=exclude)
    blobs = list(loader.yield_blobs())
    # Compare sorted absolute paths so iteration order does not matter.
    file_names = sorted(str(blob.path) for blob in blobs)
    expected_filenames = sorted(str(Path(toy_dir) / relative_filename) for
        relative_filename in relative_filenames)
    assert file_names == expected_filenames
    # The count helper must agree with the actual yielded blobs.
    assert loader.count_matching_files() == len(relative_filenames)
|
Verify that the file names exist.
|
input_keys
|
return self.prompt.input_variables
|
@property
def input_keys(self) ->List[str]:
    """Input variables required by the underlying prompt."""
    return self.prompt.input_variables
| null |
validate_environment
|
"""Validate that api key and python package exists in environment."""
banana_api_key = get_from_dict_or_env(values, 'banana_api_key',
'BANANA_API_KEY')
values['banana_api_key'] = banana_api_key
return values
|
@root_validator()
def validate_environment(cls, values: Dict) ->Dict:
    """Validate that api key and python package exists in environment."""
    # Accepts the key either directly in `values` or via the
    # BANANA_API_KEY environment variable.
    banana_api_key = get_from_dict_or_env(values, 'banana_api_key',
        'BANANA_API_KEY')
    values['banana_api_key'] = banana_api_key
    return values
|
Validate that api key and python package exists in environment.
|
similarity_search_with_score
|
"""Return docs most similar to query.
Args:
query: Text to look up documents similar to.
k: Number of Documents to return. Defaults to 4.
Returns:
List of documents most similar to the query text and
cosine distance in float for each.
Lower score represents more similarity.
"""
query_embedding = self.embedding.embed_query(query)
query_doc = self.doc_cls(embedding=query_embedding)
docs, scores = self.doc_index.find(query_doc, search_field='embedding', limit=k
)
result = [(Document(page_content=doc.text, metadata=doc.metadata), score) for
doc, score in zip(docs, scores)]
return result
|
def similarity_search_with_score(self, query: str, k: int=4, **kwargs: Any
    ) ->List[Tuple[Document, float]]:
    """Return docs most similar to query.
    Args:
        query: Text to look up documents similar to.
        k: Number of Documents to return. Defaults to 4.
    Returns:
        List of documents most similar to the query text and
        cosine distance in float for each.
        Lower score represents more similarity.
    """
    query_embedding = self.embedding.embed_query(query)
    # Wrap the embedding in the doc class so the index can search on it.
    query_doc = self.doc_cls(embedding=query_embedding)
    docs, scores = self.doc_index.find(query_doc, search_field='embedding',
        limit=k)
    # Pair each hit with its score as (Document, score) tuples.
    result = [(Document(page_content=doc.text, metadata=doc.metadata),
        score) for doc, score in zip(docs, scores)]
    return result
|
Return docs most similar to query.
Args:
query: Text to look up documents similar to.
k: Number of Documents to return. Defaults to 4.
Returns:
List of documents most similar to the query text and
cosine distance in float for each.
Lower score represents more similarity.
|
test_qianfan_key_masked_when_passed_from_env
|
"""Test initialization with an API key provided via an env variable"""
monkeypatch.setenv('QIANFAN_AK', 'test-api-key')
monkeypatch.setenv('QIANFAN_SK', 'test-secret-key')
chat = QianfanChatEndpoint()
print(chat.qianfan_ak, end='')
captured = capsys.readouterr()
assert captured.out == '**********'
print(chat.qianfan_sk, end='')
captured = capsys.readouterr()
assert captured.out == '**********'
|
def test_qianfan_key_masked_when_passed_from_env(monkeypatch: MonkeyPatch,
    capsys: CaptureFixture) ->None:
    """Test initialization with an API key provided via an env variable"""
    monkeypatch.setenv('QIANFAN_AK', 'test-api-key')
    monkeypatch.setenv('QIANFAN_SK', 'test-secret-key')
    chat = QianfanChatEndpoint()
    # Printing a secret must emit the mask, never the real key material.
    print(chat.qianfan_ak, end='')
    captured = capsys.readouterr()
    assert captured.out == '**********'
    print(chat.qianfan_sk, end='')
    captured = capsys.readouterr()
    assert captured.out == '**********'
|
Test initialization with an API key provided via an env variable
|
test_loadnotewithemptycontent_emptydocumentcontent
|
documents = EverNoteLoader(self.example_notebook_path(
'sample_notebook_emptynote.enex'), False).load()
note = documents[0]
assert note.page_content == ''
|
def test_loadnotewithemptycontent_emptydocumentcontent(self) ->None:
    """An empty note loads as a document with empty page_content."""
    documents = EverNoteLoader(self.example_notebook_path(
        'sample_notebook_emptynote.enex'), False).load()
    note = documents[0]
    assert note.page_content == ''
| null |
steps
|
"""All the runnables that make up the sequence in order."""
return [self.first] + self.middle + [self.last]
|
@property
def steps(self) ->List[Runnable[Any, Any]]:
    """Every runnable in the sequence, in execution order."""
    # Unpack the middle runnables between the two endpoints.
    return [self.first, *self.middle, self.last]
|
All the runnables that make up the sequence in order.
|
test_similarity_search_with_score_by_vector
|
"""Test vector similarity with score by vector."""
texts = ['foo', 'bar', 'baz']
docsearch = ScaNN.from_texts(texts, FakeEmbeddings())
index_to_id = docsearch.index_to_docstore_id
expected_docstore = InMemoryDocstore({index_to_id[0]: Document(page_content
='foo'), index_to_id[1]: Document(page_content='bar'), index_to_id[2]:
Document(page_content='baz')})
assert docsearch.docstore.__dict__ == expected_docstore.__dict__
query_vec = FakeEmbeddings().embed_query(text='foo')
output = docsearch.similarity_search_with_score_by_vector(query_vec, k=1)
assert len(output) == 1
assert output[0][0] == Document(page_content='foo')
|
def test_similarity_search_with_score_by_vector() ->None:
    """Test vector similarity with score by vector."""
    texts = ['foo', 'bar', 'baz']
    docsearch = ScaNN.from_texts(texts, FakeEmbeddings())
    # The docstore must map each generated id back to its original text.
    index_to_id = docsearch.index_to_docstore_id
    expected_docstore = InMemoryDocstore({index_to_id[0]: Document(
        page_content='foo'), index_to_id[1]: Document(page_content='bar'),
        index_to_id[2]: Document(page_content='baz')})
    assert docsearch.docstore.__dict__ == expected_docstore.__dict__
    # Searching with the embedding of 'foo' should return exactly 'foo'.
    query_vec = FakeEmbeddings().embed_query(text='foo')
    output = docsearch.similarity_search_with_score_by_vector(query_vec, k=1)
    assert len(output) == 1
    assert output[0][0] == Document(page_content='foo')
|
Test vector similarity with score by vector.
|
on_llm_end
|
"""Run when LLM ends running."""
aim = import_aim()
self.step += 1
self.llm_ends += 1
self.ends += 1
resp = {'action': 'on_llm_end'}
resp.update(self.get_custom_callback_meta())
response_res = deepcopy(response)
generated = [aim.Text(generation.text) for generations in response_res.
generations for generation in generations]
self._run.track(generated, name='on_llm_end', context=resp)
|
def on_llm_end(self, response: LLMResult, **kwargs: Any) ->None:
    """Run when LLM ends running."""
    aim = import_aim()
    # Bookkeeping counters for the Aim run.
    self.step += 1
    self.llm_ends += 1
    self.ends += 1
    resp = {'action': 'on_llm_end'}
    resp.update(self.get_custom_callback_meta())
    # Deep-copy first — presumably so tracking cannot mutate the shared
    # LLMResult; confirm against the other callback handlers.
    response_res = deepcopy(response)
    # Flatten all generations into Aim Text objects and track them.
    generated = [aim.Text(generation.text) for generations in response_res.
        generations for generation in generations]
    self._run.track(generated, name='on_llm_end', context=resp)
|
Run when LLM ends running.
|
_call
|
"""Call out to Titan Takeoff (Pro) generate endpoint.
Args:
prompt: The prompt to pass into the model.
stop: Optional list of stop words to use when generating.
Returns:
The string generated by the model.
Example:
.. code-block:: python
prompt = "What is the capital of the United Kingdom?"
response = model(prompt)
"""
try:
if self.streaming:
text_output = ''
for chunk in self._stream(prompt=prompt, stop=stop, run_manager=
run_manager):
text_output += chunk.text
return text_output
url = f'{self.base_url}/generate'
params = {'text': prompt, **self._default_params}
response = requests.post(url, json=params)
response.raise_for_status()
response.encoding = 'utf-8'
text = ''
if 'text' in response.json():
text = response.json()['text']
text = text.replace('</s>', '')
else:
raise ValueError('Something went wrong.')
if stop is not None:
text = enforce_stop_tokens(text, stop)
return text
except ConnectionError:
raise ConnectionError(
'Could not connect to Titan Takeoff (Pro) server. Please make sure that the server is running.'
)
|
def _call(self, prompt: str, stop: Optional[List[str]]=None, run_manager:
    Optional[CallbackManagerForLLMRun]=None, **kwargs: Any) ->str:
    """Call out to Titan Takeoff (Pro) generate endpoint.
    Args:
        prompt: The prompt to pass into the model.
        stop: Optional list of stop words to use when generating.
    Returns:
        The string generated by the model.
    Example:
        .. code-block:: python
            prompt = "What is the capital of the United Kingdom?"
            response = model(prompt)
    """
    try:
        if self.streaming:
            # Streaming mode: concatenate streamed chunks into one string.
            text_output = ''
            for chunk in self._stream(prompt=prompt, stop=stop, run_manager
                =run_manager):
                text_output += chunk.text
            return text_output
        url = f'{self.base_url}/generate'
        params = {'text': prompt, **self._default_params}
        response = requests.post(url, json=params)
        response.raise_for_status()
        response.encoding = 'utf-8'
        text = ''
        if 'text' in response.json():
            text = response.json()['text']
            # Strip the end-of-sequence marker from the model output.
            text = text.replace('</s>', '')
        else:
            raise ValueError('Something went wrong.')
        if stop is not None:
            # Truncate at the first occurrence of any stop token.
            text = enforce_stop_tokens(text, stop)
        return text
    except ConnectionError:
        # NOTE(review): this names the builtin ConnectionError; requests
        # raises requests.exceptions.ConnectionError, which is not a
        # subclass of the builtin — confirm this handler can actually fire.
        raise ConnectionError(
            'Could not connect to Titan Takeoff (Pro) server. Please make sure that the server is running.'
            )
|
Call out to Titan Takeoff (Pro) generate endpoint.
Args:
prompt: The prompt to pass into the model.
stop: Optional list of stop words to use when generating.
Returns:
The string generated by the model.
Example:
.. code-block:: python
prompt = "What is the capital of the United Kingdom?"
response = model(prompt)
|
on_agent_finish
|
if self.__has_valid_config is False:
return
try:
output = _parse_output(finish.return_values)
self.__track_event('agent', 'end', run_id=str(run_id), parent_run_id=
str(parent_run_id) if parent_run_id else None, output=output,
app_id=self.__app_id)
except Exception as e:
logger.error(f'[LLMonitor] An error occurred in on_agent_finish: {e}')
|
def on_agent_finish(self, finish: AgentFinish, *, run_id: UUID,
    parent_run_id: Union[UUID, None]=None, **kwargs: Any) ->Any:
    """Track the end of an agent run as an LLMonitor 'agent end' event.

    Returns silently when the callback configuration is invalid; tracking
    failures are logged rather than raised so monitoring never breaks the
    chain being observed.
    """
    if self.__has_valid_config is False:
        return
    try:
        output = _parse_output(finish.return_values)
        # Run ids are sent as strings; parent id only when one exists.
        self.__track_event('agent', 'end', run_id=str(run_id),
            parent_run_id=str(parent_run_id) if parent_run_id else None,
            output=output, app_id=self.__app_id)
    except Exception as e:
        logger.error(f'[LLMonitor] An error occurred in on_agent_finish: {e}')
| null |
_chat
|
if self.baichuan_secret_key is None:
raise ValueError('Baichuan secret key is not set.')
parameters = {**self._default_params, **kwargs}
model = parameters.pop('model')
headers = parameters.pop('headers', {})
payload = {'model': model, 'messages': [_convert_message_to_dict(m) for m in
messages], 'parameters': parameters}
timestamp = int(time.time())
url = self.baichuan_api_base
if self.streaming:
url = f'{url}/stream'
url = f'{url}/chat'
api_key = ''
if self.baichuan_api_key:
api_key = self.baichuan_api_key.get_secret_value()
res = requests.post(url=url, timeout=self.request_timeout, headers={
'Content-Type': 'application/json', 'Authorization':
f'Bearer {api_key}', 'X-BC-Timestamp': str(timestamp), 'X-BC-Signature':
_signature(secret_key=self.baichuan_secret_key, payload=payload,
timestamp=timestamp), 'X-BC-Sign-Algo': 'MD5', **headers}, json=payload,
stream=self.streaming)
return res
|
def _chat(self, messages: List[BaseMessage], **kwargs: Any
    ) ->requests.Response:
    """POST the chat messages to the Baichuan API and return the raw response.

    Builds a signed request (signature algorithm advertised as MD5 via the
    ``X-BC-Sign-Algo`` header) and targets the streaming endpoint when
    ``self.streaming`` is set.

    Raises:
        ValueError: If no Baichuan secret key is configured.
    """
    if self.baichuan_secret_key is None:
        raise ValueError('Baichuan secret key is not set.')
    # Call-time kwargs override the model's default parameters.
    parameters = {**self._default_params, **kwargs}
    model = parameters.pop('model')
    headers = parameters.pop('headers', {})
    payload = {'model': model, 'messages': [_convert_message_to_dict(m) for
        m in messages], 'parameters': parameters}
    timestamp = int(time.time())
    url = self.baichuan_api_base
    # Streaming requests go to .../stream/chat, others to .../chat.
    if self.streaming:
        url = f'{url}/stream'
    url = f'{url}/chat'
    api_key = ''
    if self.baichuan_api_key:
        api_key = self.baichuan_api_key.get_secret_value()
    res = requests.post(url=url, timeout=self.request_timeout, headers={
        'Content-Type': 'application/json', 'Authorization':
        f'Bearer {api_key}', 'X-BC-Timestamp': str(timestamp),
        'X-BC-Signature': _signature(secret_key=self.baichuan_secret_key,
        payload=payload, timestamp=timestamp), 'X-BC-Sign-Algo': 'MD5', **
        headers}, json=payload, stream=self.streaming)
    return res
| null |
_generate_text
|
"""Inference function to send to the remote hardware.
Accepts a pipeline callable (or, more likely,
a key pointing to the model on the cluster's object store)
and returns text predictions for each document
in the batch.
"""
text = pipeline(prompt, *args, **kwargs)
if stop is not None:
text = enforce_stop_tokens(text, stop)
return text
|
def _generate_text(pipeline: Any, prompt: str, *args: Any, stop: Optional[
    List[str]]=None, **kwargs: Any) ->str:
    """Inference function to send to the remote hardware.

    Runs the pipeline callable (or, more likely, a key pointing to the
    model on the cluster's object store) on ``prompt`` and returns the text
    prediction, truncated at the first stop token when ``stop`` is given.
    """
    prediction = pipeline(prompt, *args, **kwargs)
    return prediction if stop is None else enforce_stop_tokens(prediction, stop)
|
Inference function to send to the remote hardware.
Accepts a pipeline callable (or, more likely,
a key pointing to the model on the cluster's object store)
and returns text predictions for each document
in the batch.
|
validate_environment
|
"""Validate that api key and python package exists in environment."""
serp_api_key = get_from_dict_or_env(values, 'serp_api_key', 'SERP_API_KEY')
values['SERP_API_KEY'] = serp_api_key
try:
from serpapi import GoogleScholarSearch
except ImportError:
raise ImportError(
'google-search-results is not installed. Please install it with `pip install google-search-results>=2.4.2`'
)
GoogleScholarSearch.SERP_API_KEY = serp_api_key
values['google_scholar_engine'] = GoogleScholarSearch
return values
|
@root_validator()
def validate_environment(cls, values: Dict) ->Dict:
    """Validate that api key and python package exists in environment.

    Resolves the SerpAPI key from the field or the SERP_API_KEY env var,
    checks that ``google-search-results`` is importable, and stores the
    GoogleScholarSearch client class on the values dict.

    Raises:
        ImportError: If ``google-search-results`` is not installed.
    """
    serp_api_key = get_from_dict_or_env(values, 'serp_api_key', 'SERP_API_KEY')
    values['SERP_API_KEY'] = serp_api_key
    try:
        from serpapi import GoogleScholarSearch
    except ImportError:
        raise ImportError(
            'google-search-results is not installed. Please install it with `pip install google-search-results>=2.4.2`'
            )
    # Configure the client class globally and expose it for later queries.
    GoogleScholarSearch.SERP_API_KEY = serp_api_key
    values['google_scholar_engine'] = GoogleScholarSearch
    return values
|
Validate that api key and python package exists in environment.
|
_import_ai21
|
from langchain_community.llms.ai21 import AI21
return AI21
|
def _import_ai21() ->Any:
    """Lazily import and return the AI21 LLM class."""
    from langchain_community.llms.ai21 import AI21 as _AI21
    return _AI21
| null |
process_value
|
"""Convert a value to a string and add single quotes if it is a string."""
if isinstance(value, str):
return f"'{value}'"
else:
return str(value)
|
def process_value(value: Union[int, float, str]) ->str:
    """Convert a value to a string and add single quotes if it is a string."""
    rendered = f"'{value}'" if isinstance(value, str) else str(value)
    return rendered
|
Convert a value to a string and add single quotes if it is a string.
|
enter
|
"""Print ':', and increase the indentation."""
self.write(':')
self._indent += 1
|
def enter(self):
    """Print ':', and increase the indentation."""
    self.write(':')
    self._indent = self._indent + 1
|
Print ':', and increase the indentation.
|
_identifying_params
|
"""Get the identifying parameters."""
return {**{'model': self.model}, **self._default_params}
|
@property
def _identifying_params(self) ->Mapping[str, Any]:
    """Get the identifying parameters."""
    merged = {'model': self.model}
    merged.update(self._default_params)
    return merged
|
Get the identifying parameters.
|
__init__
|
"""Initialize with a Spark DataFrame object.
Args:
spark_session: The SparkSession object.
df: The Spark DataFrame object.
page_content_column: The name of the column containing the page content.
Defaults to "text".
fraction_of_memory: The fraction of memory to use. Defaults to 0.1.
"""
try:
from pyspark.sql import DataFrame, SparkSession
except ImportError:
raise ImportError(
'pyspark is not installed. Please install it with `pip install pyspark`'
)
self.spark = (spark_session if spark_session else SparkSession.builder.
getOrCreate())
if not isinstance(df, DataFrame):
raise ValueError(
f'Expected data_frame to be a PySpark DataFrame, got {type(df)}')
self.df = df
self.page_content_column = page_content_column
self.fraction_of_memory = fraction_of_memory
self.num_rows, self.max_num_rows = self.get_num_rows()
self.rdd_df = self.df.rdd.map(list)
self.column_names = self.df.columns
|
def __init__(self, spark_session: Optional['SparkSession']=None, df:
    Optional[Any]=None, page_content_column: str='text', fraction_of_memory:
    float=0.1):
    """Initialize with a Spark DataFrame object.

    Args:
        spark_session: The SparkSession object. The active (or a new)
            session is used when omitted.
        df: The Spark DataFrame object.
        page_content_column: The name of the column containing the page content.
            Defaults to "text".
        fraction_of_memory: The fraction of memory to use. Defaults to 0.1.

    Raises:
        ImportError: If pyspark is not installed.
        ValueError: If ``df`` is not a PySpark DataFrame.
    """
    try:
        from pyspark.sql import DataFrame, SparkSession
    except ImportError:
        raise ImportError(
            'pyspark is not installed. Please install it with `pip install pyspark`'
            )
    self.spark = (spark_session if spark_session else SparkSession.builder.
        getOrCreate())
    if not isinstance(df, DataFrame):
        raise ValueError(
            f'Expected data_frame to be a PySpark DataFrame, got {type(df)}')
    self.df = df
    self.page_content_column = page_content_column
    self.fraction_of_memory = fraction_of_memory
    # Row counts come from get_num_rows(); presumably bounded by
    # fraction_of_memory — confirm against get_num_rows's implementation.
    self.num_rows, self.max_num_rows = self.get_num_rows()
    # Row-wise RDD view used when iterating the frame's contents.
    self.rdd_df = self.df.rdd.map(list)
    self.column_names = self.df.columns
|
Initialize with a Spark DataFrame object.
Args:
spark_session: The SparkSession object.
df: The Spark DataFrame object.
page_content_column: The name of the column containing the page content.
Defaults to "text".
fraction_of_memory: The fraction of memory to use. Defaults to 0.1.
|
test_lambda_schemas
|
first_lambda = lambda x: x['hello']
assert RunnableLambda(first_lambda).input_schema.schema() == {'title':
'RunnableLambdaInput', 'type': 'object', 'properties': {'hello': {
'title': 'Hello'}}}
second_lambda = lambda x, y: (x['hello'], x['bye'], y['bah'])
assert RunnableLambda(second_lambda).input_schema.schema() == {'title':
'RunnableLambdaInput', 'type': 'object', 'properties': {'hello': {
'title': 'Hello'}, 'bye': {'title': 'Bye'}}}
def get_value(input):
return input['variable_name']
assert RunnableLambda(get_value).input_schema.schema() == {'title':
'get_value_input', 'type': 'object', 'properties': {'variable_name': {
'title': 'Variable Name'}}}
async def aget_value(input):
return input['variable_name'], input.get('another')
assert RunnableLambda(aget_value).input_schema.schema() == {'title':
'aget_value_input', 'type': 'object', 'properties': {'another': {
'title': 'Another'}, 'variable_name': {'title': 'Variable Name'}}}
async def aget_values(input):
return {'hello': input['variable_name'], 'bye': input['variable_name'],
'byebye': input['yo']}
assert RunnableLambda(aget_values).input_schema.schema() == {'title':
'aget_values_input', 'type': 'object', 'properties': {'variable_name':
{'title': 'Variable Name'}, 'yo': {'title': 'Yo'}}}
class InputType(TypedDict):
variable_name: str
yo: int
class OutputType(TypedDict):
hello: str
bye: str
byebye: int
async def aget_values_typed(input: InputType) ->OutputType:
return {'hello': input['variable_name'], 'bye': input['variable_name'],
'byebye': input['yo']}
assert RunnableLambda(aget_values_typed).input_schema.schema() == {'title':
'aget_values_typed_input', '$ref': '#/definitions/InputType',
'definitions': {'InputType': {'properties': {'variable_name': {'title':
'Variable Name', 'type': 'string'}, 'yo': {'title': 'Yo', 'type':
'integer'}}, 'required': ['variable_name', 'yo'], 'title': 'InputType',
'type': 'object'}}}
assert RunnableLambda(aget_values_typed).output_schema.schema() == {'title':
'aget_values_typed_output', '$ref': '#/definitions/OutputType',
'definitions': {'OutputType': {'properties': {'bye': {'title': 'Bye',
'type': 'string'}, 'byebye': {'title': 'Byebye', 'type': 'integer'},
'hello': {'title': 'Hello', 'type': 'string'}}, 'required': ['hello',
'bye', 'byebye'], 'title': 'OutputType', 'type': 'object'}}}
|
@pytest.mark.skipif(sys.version_info < (3, 9), reason=
    'Requires python version >= 3.9 to run.')
def test_lambda_schemas() ->None:
    """RunnableLambda infers input/output JSON schemas from lambdas,
    plain and async functions, and TypedDict annotations."""
    # Dict-key access inside a lambda surfaces each key as a property.
    first_lambda = lambda x: x['hello']
    assert RunnableLambda(first_lambda).input_schema.schema() == {'title':
        'RunnableLambdaInput', 'type': 'object', 'properties': {'hello': {
        'title': 'Hello'}}}
    # Only keys read from the first argument appear; 'bah' on y does not.
    second_lambda = lambda x, y: (x['hello'], x['bye'], y['bah'])
    assert RunnableLambda(second_lambda).input_schema.schema() == {'title':
        'RunnableLambdaInput', 'type': 'object', 'properties': {'hello': {
        'title': 'Hello'}, 'bye': {'title': 'Bye'}}}
    # Named functions title the schema after the function name.
    def get_value(input):
        return input['variable_name']
    assert RunnableLambda(get_value).input_schema.schema() == {'title':
        'get_value_input', 'type': 'object', 'properties': {'variable_name':
        {'title': 'Variable Name'}}}
    async def aget_value(input):
        return input['variable_name'], input.get('another')
    assert RunnableLambda(aget_value).input_schema.schema() == {'title':
        'aget_value_input', 'type': 'object', 'properties': {'another': {
        'title': 'Another'}, 'variable_name': {'title': 'Variable Name'}}}
    async def aget_values(input):
        return {'hello': input['variable_name'], 'bye': input[
            'variable_name'], 'byebye': input['yo']}
    assert RunnableLambda(aget_values).input_schema.schema() == {'title':
        'aget_values_input', 'type': 'object', 'properties': {
        'variable_name': {'title': 'Variable Name'}, 'yo': {'title': 'Yo'}}}
    class InputType(TypedDict):
        variable_name: str
        yo: int
    class OutputType(TypedDict):
        hello: str
        bye: str
        byebye: int
    # TypedDict annotations produce full typed schemas via $ref definitions.
    async def aget_values_typed(input: InputType) ->OutputType:
        return {'hello': input['variable_name'], 'bye': input[
            'variable_name'], 'byebye': input['yo']}
    assert RunnableLambda(aget_values_typed).input_schema.schema() == {'title':
        'aget_values_typed_input', '$ref': '#/definitions/InputType',
        'definitions': {'InputType': {'properties': {'variable_name': {
        'title': 'Variable Name', 'type': 'string'}, 'yo': {'title': 'Yo',
        'type': 'integer'}}, 'required': ['variable_name', 'yo'], 'title':
        'InputType', 'type': 'object'}}}
    assert RunnableLambda(aget_values_typed).output_schema.schema() == {'title'
        : 'aget_values_typed_output', '$ref': '#/definitions/OutputType',
        'definitions': {'OutputType': {'properties': {'bye': {'title':
        'Bye', 'type': 'string'}, 'byebye': {'title': 'Byebye', 'type':
        'integer'}, 'hello': {'title': 'Hello', 'type': 'string'}},
        'required': ['hello', 'bye', 'byebye'], 'title': 'OutputType',
        'type': 'object'}}}
| null |
clean_body
|
"""Clean body of a message or event."""
try:
from bs4 import BeautifulSoup
try:
soup = BeautifulSoup(str(body), 'html.parser')
body = soup.get_text()
body = ''.join(body.splitlines())
body = ' '.join(body.split())
return str(body)
except Exception:
return str(body)
except ImportError:
return str(body)
|
def clean_body(body: str) ->str:
    """Clean body of a message or event."""
    # Best-effort: fall back to the raw text when bs4 is unavailable
    # or parsing fails for any reason.
    try:
        from bs4 import BeautifulSoup
    except ImportError:
        return str(body)
    try:
        stripped = BeautifulSoup(str(body), 'html.parser').get_text()
        stripped = ''.join(stripped.splitlines())
        return ' '.join(stripped.split())
    except Exception:
        return str(body)
|
Clean body of a message or event.
|
test_load_uses_page_content_column_to_create_document_text
|
sample_data_frame = sample_data_frame.rename(columns={'text':
'dummy_test_column'})
loader = DataFrameLoader(sample_data_frame, page_content_column=
'dummy_test_column')
docs = loader.load()
assert docs[0].page_content == 'Hello'
assert docs[1].page_content == 'World'
|
def test_load_uses_page_content_column_to_create_document_text(
    sample_data_frame: pd.DataFrame) ->None:
    """The configured page_content_column supplies each document's text."""
    renamed = sample_data_frame.rename(columns={'text': 'dummy_test_column'})
    loader = DataFrameLoader(renamed, page_content_column='dummy_test_column')
    documents = loader.load()
    assert documents[0].page_content == 'Hello'
    assert documents[1].page_content == 'World'
| null |
create_llm_string
|
_dict: Dict = llm.dict()
_dict['stop'] = None
return str(sorted([(k, v) for k, v in _dict.items()]))
|
def create_llm_string(llm: Union[BaseLLM, BaseChatModel]) ->str:
    """Serialize a model's config (with ``stop`` nulled) to a stable string."""
    config: Dict = llm.dict()
    config['stop'] = None
    return str(sorted(config.items()))
| null |
on_llm_start_common
|
self.llm_starts += 1
self.starts += 1
|
def on_llm_start_common(self) ->None:
    """Bump the LLM-start counter and the overall start counter."""
    self.llm_starts = self.llm_starts + 1
    self.starts = self.starts + 1
| null |
lc_secrets
|
return {'google_api_key': 'GOOGLE_API_KEY'}
|
@property
def lc_secrets(self) ->Dict[str, str]:
    """Map constructor arg names to the env vars holding their secrets."""
    secret_map = {'google_api_key': 'GOOGLE_API_KEY'}
    return secret_map
| null |
test__convert_delta_to_message_human
|
delta = {'role': 'user', 'content': 'foo'}
result = _convert_delta_to_message_chunk(delta, HumanMessageChunk)
expected_output = HumanMessageChunk(content='foo')
assert result == expected_output
|
def test__convert_delta_to_message_human() ->None:
    """A 'user' delta converts to a HumanMessageChunk with the same content."""
    chunk = _convert_delta_to_message_chunk({'role': 'user', 'content':
        'foo'}, HumanMessageChunk)
    assert chunk == HumanMessageChunk(content='foo')
| null |
_get_prompt_output_key
|
"""Get the output key for the prompt."""
if self.output_key is None:
if len(outputs) != 1:
raise ValueError(f'One output key expected, got {outputs.keys()}')
return list(outputs.keys())[0]
return self.output_key
|
def _get_prompt_output_key(self, outputs: Dict[str, Any]) ->str:
    """Get the output key for the prompt."""
    if self.output_key is not None:
        return self.output_key
    # No explicit key configured: the outputs dict must be unambiguous.
    if len(outputs) != 1:
        raise ValueError(f'One output key expected, got {outputs.keys()}')
    return next(iter(outputs))
|
Get the output key for the prompt.
|
_embedding_func
|
"""Call out to SageMaker Inference embedding endpoint."""
texts = list(map(lambda x: x.replace('\n', ' '), texts))
_model_kwargs = self.model_kwargs or {}
_endpoint_kwargs = self.endpoint_kwargs or {}
body = self.content_handler.transform_input(texts, _model_kwargs)
content_type = self.content_handler.content_type
accepts = self.content_handler.accepts
try:
response = self.client.invoke_endpoint(EndpointName=self.endpoint_name,
Body=body, ContentType=content_type, Accept=accepts, **_endpoint_kwargs
)
except Exception as e:
raise ValueError(f'Error raised by inference endpoint: {e}')
return self.content_handler.transform_output(response['Body'])
|
def _embedding_func(self, texts: List[str]) ->List[List[float]]:
    """Call out to SageMaker Inference embedding endpoint.

    Args:
        texts: Strings to embed; sent to the endpoint as a single batch.

    Returns:
        The embeddings as decoded by the configured content handler.

    Raises:
        ValueError: Wrapping any error raised by the endpoint invocation.
    """
    # Collapse newlines to spaces before sending the texts.
    texts = list(map(lambda x: x.replace('\n', ' '), texts))
    _model_kwargs = self.model_kwargs or {}
    _endpoint_kwargs = self.endpoint_kwargs or {}
    # The content handler serializes the request and sets the MIME types.
    body = self.content_handler.transform_input(texts, _model_kwargs)
    content_type = self.content_handler.content_type
    accepts = self.content_handler.accepts
    try:
        response = self.client.invoke_endpoint(EndpointName=self.
            endpoint_name, Body=body, ContentType=content_type, Accept=
            accepts, **_endpoint_kwargs)
    except Exception as e:
        raise ValueError(f'Error raised by inference endpoint: {e}')
    return self.content_handler.transform_output(response['Body'])
|
Call out to SageMaker Inference embedding endpoint.
|
load_docs
|
return list(self.lazy_load_docs(query=query))
|
def load_docs(self, query: str) ->List[Document]:
    """Materialize the lazy document stream for ``query`` into a list."""
    stream = self.lazy_load_docs(query=query)
    return list(stream)
| null |
completion_with_retry
|
"""Use tenacity to retry the completion call."""
retry_decorator = self._create_retry_decorator()
@retry_decorator
def _completion_with_retry(**kwargs: Any) ->Any:
return self.client.create(**kwargs)
return _completion_with_retry(**kwargs)
|
def completion_with_retry(self, **kwargs: Any) ->Any:
    """Use tenacity to retry the completion call."""
    retrying = self._create_retry_decorator()

    @retrying
    def _invoke(**inner_kwargs: Any) ->Any:
        return self.client.create(**inner_kwargs)
    return _invoke(**kwargs)
|
Use tenacity to retry the completion call.
|
_cosine_relevance_score_fn
|
"""Normalize the distance to a score on a scale [0, 1]."""
return 1.0 - distance
|
@staticmethod
def _cosine_relevance_score_fn(distance: float) ->float:
    """Normalize the distance to a score on a scale [0, 1]."""
    score = 1.0 - distance
    return score
|
Normalize the distance to a score on a scale [0, 1].
|
is_lc_serializable
|
return True
|
@classmethod
def is_lc_serializable(cls) ->bool:
    """Return True: this class supports LangChain serialization."""
    return True
| null |
load
|
"""
Run Arxiv search and get the article texts plus the article meta information.
See https://lukasschwab.me/arxiv.py/index.html#Search
Returns: a list of documents with the document.page_content in text format
Performs an arxiv search, downloads the top k results as PDFs, loads
them as Documents, and returns them in a List.
Args:
query: a plaintext search query
"""
try:
import fitz
except ImportError:
raise ImportError(
'PyMuPDF package not found, please install it with `pip install pymupdf`'
)
try:
query = query.replace(':', '').replace('-', '')
if self.is_arxiv_identifier(query):
results = self.arxiv_search(id_list=query[:self.
ARXIV_MAX_QUERY_LENGTH].split(), max_results=self.load_max_docs
).results()
else:
results = self.arxiv_search(query[:self.ARXIV_MAX_QUERY_LENGTH],
max_results=self.load_max_docs).results()
except self.arxiv_exceptions as ex:
logger.debug('Error on arxiv: %s', ex)
return []
docs: List[Document] = []
for result in results:
try:
doc_file_name: str = result.download_pdf()
with fitz.open(doc_file_name) as doc_file:
text: str = ''.join(page.get_text() for page in doc_file)
except (FileNotFoundError, fitz.fitz.FileDataError) as f_ex:
logger.debug(f_ex)
continue
if self.load_all_available_meta:
extra_metadata = {'entry_id': result.entry_id,
'published_first_time': str(result.published.date()), 'comment':
result.comment, 'journal_ref': result.journal_ref, 'doi':
result.doi, 'primary_category': result.primary_category,
'categories': result.categories, 'links': [link.href for link in
result.links]}
else:
extra_metadata = {}
metadata = {'Published': str(result.updated.date()), 'Title': result.
title, 'Authors': ', '.join(a.name for a in result.authors),
'Summary': result.summary, **extra_metadata}
doc = Document(page_content=text[:self.doc_content_chars_max], metadata
=metadata)
docs.append(doc)
os.remove(doc_file_name)
return docs
|
def load(self, query: str) ->List[Document]:
    """
    Run Arxiv search and get the article texts plus the article meta information.
    See https://lukasschwab.me/arxiv.py/index.html#Search
    Returns: a list of documents with the document.page_content in text format
    Performs an arxiv search, downloads the top k results as PDFs, loads
    them as Documents, and returns them in a List.
    Args:
        query: a plaintext search query
    """
    try:
        import fitz
    except ImportError:
        raise ImportError(
            'PyMuPDF package not found, please install it with `pip install pymupdf`'
            )
    try:
        # ':' and '-' are stripped from the query — presumably because the
        # arxiv query parser mishandles them; confirm against arxiv.py docs.
        query = query.replace(':', '').replace('-', '')
        if self.is_arxiv_identifier(query):
            # Identifier query: pass whitespace-separated ids via id_list.
            results = self.arxiv_search(id_list=query[:self.
                ARXIV_MAX_QUERY_LENGTH].split(), max_results=self.load_max_docs
                ).results()
        else:
            results = self.arxiv_search(query[:self.ARXIV_MAX_QUERY_LENGTH],
                max_results=self.load_max_docs).results()
    except self.arxiv_exceptions as ex:
        # Search errors are swallowed: log and return no documents.
        logger.debug('Error on arxiv: %s', ex)
        return []
    docs: List[Document] = []
    for result in results:
        try:
            doc_file_name: str = result.download_pdf()
            with fitz.open(doc_file_name) as doc_file:
                text: str = ''.join(page.get_text() for page in doc_file)
        except (FileNotFoundError, fitz.fitz.FileDataError) as f_ex:
            # Skip results whose PDF is missing or unreadable.
            logger.debug(f_ex)
            continue
        if self.load_all_available_meta:
            extra_metadata = {'entry_id': result.entry_id,
                'published_first_time': str(result.published.date()),
                'comment': result.comment, 'journal_ref': result.
                journal_ref, 'doi': result.doi, 'primary_category': result.
                primary_category, 'categories': result.categories, 'links':
                [link.href for link in result.links]}
        else:
            extra_metadata = {}
        metadata = {'Published': str(result.updated.date()), 'Title':
            result.title, 'Authors': ', '.join(a.name for a in result.
            authors), 'Summary': result.summary, **extra_metadata}
        # Page content is truncated to doc_content_chars_max characters;
        # the downloaded PDF is removed once its text is captured.
        doc = Document(page_content=text[:self.doc_content_chars_max],
            metadata=metadata)
        docs.append(doc)
        os.remove(doc_file_name)
    return docs
|
Run Arxiv search and get the article texts plus the article meta information.
See https://lukasschwab.me/arxiv.py/index.html#Search
Returns: a list of documents with the document.page_content in text format
Performs an arxiv search, downloads the top k results as PDFs, loads
them as Documents, and returns them in a List.
Args:
query: a plaintext search query
|
delete_documents_with_texts
|
"""Delete documents based on their page content.
Args:
texts: List of document page content.
Returns:
Whether the deletion was successful or not.
"""
id_list = [sha1(t.encode('utf-8')).hexdigest() for t in texts]
return self.delete_documents_with_document_id(id_list)
|
def delete_documents_with_texts(self, texts: List[str]) ->bool:
    """Delete documents based on their page content.

    Args:
        texts: List of document page content.

    Returns:
        Whether the deletion was successful or not.
    """
    # Document ids are the SHA-1 hex digests of the UTF-8 encoded texts.
    ids = []
    for text in texts:
        ids.append(sha1(text.encode('utf-8')).hexdigest())
    return self.delete_documents_with_document_id(ids)
|
Delete documents based on their page content.
Args:
texts: List of document page content.
Returns:
Whether the deletion was successful or not.
|
test_all_imports
|
assert set(__all__) == set(EXPECTED_ALL)
|
def test_all_imports() ->None:
    """The module's public exports match the expected list exactly."""
    assert set(EXPECTED_ALL) == set(__all__)
| null |
add_tags
|
for tag in tags:
if tag in self.tags:
self.remove_tags([tag])
self.tags.extend(tags)
if inherit:
self.inheritable_tags.extend(tags)
|
def add_tags(self, tags: List[str], inherit: bool=True) ->None:
    """Append tags, first removing any that are already present.

    The duplicate check is done one tag at a time because remove_tags
    mutates self.tags as the loop runs.
    """
    for new_tag in tags:
        if new_tag in self.tags:
            self.remove_tags([new_tag])
    self.tags.extend(tags)
    if inherit:
        self.inheritable_tags.extend(tags)
| null |
_import_sql_database_tool_QuerySQLDataBaseTool
|
from langchain_community.tools.sql_database.tool import QuerySQLDataBaseTool
return QuerySQLDataBaseTool
|
def _import_sql_database_tool_QuerySQLDataBaseTool() ->Any:
    """Lazily import and return the QuerySQLDataBaseTool class."""
    from langchain_community.tools.sql_database.tool import QuerySQLDataBaseTool as _Tool
    return _Tool
| null |
_get_next_response_in_sequence
|
queries = cast(Mapping, self.queries)
response = queries[list(queries.keys())[self.response_index]]
self.response_index = self.response_index + 1
return response
|
@property
def _get_next_response_in_sequence(self) ->str:
    """Return the response at the current index, then advance the index."""
    queries = cast(Mapping, self.queries)
    key = list(queries.keys())[self.response_index]
    self.response_index += 1
    return queries[key]
| null |
test_create_sql_agent
|
db = SQLDatabase.from_uri('sqlite:///:memory:')
queries = {'foo': 'Final Answer: baz'}
llm = FakeLLM(queries=queries, sequential_responses=True)
toolkit = SQLDatabaseToolkit(db=db, llm=llm)
agent_executor = create_sql_agent(llm=llm, toolkit=toolkit)
assert agent_executor.run('hello') == 'baz'
|
def test_create_sql_agent() ->None:
    """A SQL agent built over an in-memory DB returns the fake final answer."""
    database = SQLDatabase.from_uri('sqlite:///:memory:')
    fake_llm = FakeLLM(queries={'foo': 'Final Answer: baz'},
        sequential_responses=True)
    executor = create_sql_agent(llm=fake_llm, toolkit=SQLDatabaseToolkit(
        db=database, llm=fake_llm))
    assert executor.run('hello') == 'baz'
| null |
clear
|
"""
Delete all records in jaguardb
Args: No args
Returns: None
"""
podstore = self._pod + '.' + self._store
q = 'truncate store ' + podstore
self.run(q)
|
def clear(self) ->None:
    """
    Delete all records in jaguardb
    Args: No args
    Returns: None
    """
    store_path = self._pod + '.' + self._store
    self.run('truncate store ' + store_path)
|
Delete all records in jaguardb
Args: No args
Returns: None
|
on_chain_start
|
"""Run when chain starts running."""
self.step += 1
self.chain_starts += 1
self.starts += 1
resp = self._init_resp()
resp.update({'action': 'on_chain_start'})
resp.update(flatten_dict(serialized))
resp.update(self.get_custom_callback_meta())
chain_input = inputs.get('input', inputs.get('human_input'))
if isinstance(chain_input, str):
input_resp = deepcopy(resp)
input_resp['input'] = chain_input
self.on_chain_start_records.append(input_resp)
self.action_records.append(input_resp)
if self.stream_logs:
self.logger.report_text(input_resp)
elif isinstance(chain_input, list):
for inp in chain_input:
input_resp = deepcopy(resp)
input_resp.update(inp)
self.on_chain_start_records.append(input_resp)
self.action_records.append(input_resp)
if self.stream_logs:
self.logger.report_text(input_resp)
else:
raise ValueError('Unexpected data format provided!')
|
def on_chain_start(self, serialized: Dict[str, Any], inputs: Dict[str, Any],
    **kwargs: Any) ->None:
    """Run when chain starts running.

    Records a flattened 'on_chain_start' entry per chain input (the value
    under ``input`` or ``human_input``), optionally streaming it to the
    logger.

    Raises:
        ValueError: If the chain input is neither a string nor a list.
    """
    self.step += 1
    self.chain_starts += 1
    self.starts += 1
    resp = self._init_resp()
    resp.update({'action': 'on_chain_start'})
    resp.update(flatten_dict(serialized))
    resp.update(self.get_custom_callback_meta())
    # Prefer 'input'; fall back to 'human_input' when absent.
    chain_input = inputs.get('input', inputs.get('human_input'))
    if isinstance(chain_input, str):
        # deepcopy so each record owns an independent response dict.
        input_resp = deepcopy(resp)
        input_resp['input'] = chain_input
        self.on_chain_start_records.append(input_resp)
        self.action_records.append(input_resp)
        if self.stream_logs:
            self.logger.report_text(input_resp)
    elif isinstance(chain_input, list):
        # One record per list element, merged into the base response.
        for inp in chain_input:
            input_resp = deepcopy(resp)
            input_resp.update(inp)
            self.on_chain_start_records.append(input_resp)
            self.action_records.append(input_resp)
            if self.stream_logs:
                self.logger.report_text(input_resp)
    else:
        raise ValueError('Unexpected data format provided!')
|
Run when chain starts running.
|
_transform
|
mapper_keys = set(self.mapper.steps.keys())
for_passthrough, for_map = safetee(input, 2, lock=threading.Lock())
map_output = self.mapper.transform(for_map, patch_config(config, callbacks=
run_manager.get_child()), **kwargs)
with get_executor_for_config(config) as executor:
first_map_chunk_future = executor.submit(next, map_output, None)
for chunk in for_passthrough:
assert isinstance(chunk, dict
), 'The input to RunnablePassthrough.assign() must be a dict.'
filtered = AddableDict({k: v for k, v in chunk.items() if k not in
mapper_keys})
if filtered:
yield filtered
yield cast(Dict[str, Any], first_map_chunk_future.result())
for chunk in map_output:
yield chunk
|
def _transform(self, input: Iterator[Dict[str, Any]], run_manager:
    CallbackManagerForChainRun, config: RunnableConfig, **kwargs: Any
    ) ->Iterator[Dict[str, Any]]:
    """Stream assign()-style output: pass through input keys the mapper
    does not produce, then yield the mapper's own output chunks."""
    mapper_keys = set(self.mapper.steps.keys())
    # Duplicate the input stream: one copy is forwarded unchanged, the
    # other feeds the mapper; the lock guards the shared tee'd iterator.
    for_passthrough, for_map = safetee(input, 2, lock=threading.Lock())
    map_output = self.mapper.transform(for_map, patch_config(config,
        callbacks=run_manager.get_child()), **kwargs)
    with get_executor_for_config(config) as executor:
        # Pull the mapper's first chunk concurrently so it consumes
        # for_map while we drain the passthrough copy below.
        first_map_chunk_future = executor.submit(next, map_output, None)
        for chunk in for_passthrough:
            assert isinstance(chunk, dict
                ), 'The input to RunnablePassthrough.assign() must be a dict.'
            # Forward only the keys the mapper will not overwrite.
            filtered = AddableDict({k: v for k, v in chunk.items() if k not in
                mapper_keys})
            if filtered:
                yield filtered
        # May yield the None sentinel when the mapper produced no chunks.
        yield cast(Dict[str, Any], first_map_chunk_future.result())
        for chunk in map_output:
            yield chunk
| null |
test_all_imports
|
assert set(__all__) == set(EXPECTED_ALL)
|
def test_all_imports() ->None:
    """Public exports and the expected export list must agree as sets."""
    assert set(EXPECTED_ALL) == set(__all__)
| null |
_import_wikipedia
|
from langchain_community.utilities.wikipedia import WikipediaAPIWrapper
return WikipediaAPIWrapper
|
def _import_wikipedia() ->Any:
    """Lazily import and return the WikipediaAPIWrapper class."""
    from langchain_community.utilities.wikipedia import WikipediaAPIWrapper as _Wrapper
    return _Wrapper
| null |
_generate
|
completion = ''
if self.streaming:
for chunk in self._stream(messages, stop, run_manager, **kwargs):
completion += chunk.text
else:
params = self._convert_prompt_msg_params(messages, **kwargs)
res = self.client.chat(params)
msg = convert_dict_to_message(res)
completion = cast(str, msg.content)
message = AIMessage(content=completion)
return ChatResult(generations=[ChatGeneration(message=message)])
|
def _generate(self, messages: List[BaseMessage], stop: Optional[List[str]]=
    None, run_manager: Optional[CallbackManagerForLLMRun]=None, **kwargs: Any
    ) ->ChatResult:
    """Generate a chat completion, via streaming when enabled.

    Note: ``stop`` and ``run_manager`` are forwarded to ``_stream`` in
    streaming mode but are unused in the non-streaming branch.
    """
    completion = ''
    if self.streaming:
        # Accumulate streamed chunk texts into the final completion.
        for chunk in self._stream(messages, stop, run_manager, **kwargs):
            completion += chunk.text
    else:
        params = self._convert_prompt_msg_params(messages, **kwargs)
        res = self.client.chat(params)
        msg = convert_dict_to_message(res)
        completion = cast(str, msg.content)
    message = AIMessage(content=completion)
    return ChatResult(generations=[ChatGeneration(message=message)])
| null |
validate_environment
|
"""Validate that api key and python package exists in environment."""
values['serp_api_key'] = convert_to_secret_str(get_from_dict_or_env(values,
'serp_api_key', 'SERPAPI_API_KEY'))
try:
from serpapi import SerpApiClient
except ImportError:
raise ImportError(
'google-search-results is not installed. Please install it with `pip install google-search-results>=2.4.2`'
)
serp_search_engine = SerpApiClient
values['serp_search_engine'] = serp_search_engine
return values
|
@root_validator()
def validate_environment(cls, values: Dict) ->Dict:
    """Validate that api key and python package exists in environment.

    Resolves the SerpAPI key (field or SERPAPI_API_KEY env var) into a
    secret value and stores the SerpApiClient class for later use.

    Raises:
        ImportError: If ``google-search-results`` is not installed.
    """
    values['serp_api_key'] = convert_to_secret_str(get_from_dict_or_env(
        values, 'serp_api_key', 'SERPAPI_API_KEY'))
    try:
        from serpapi import SerpApiClient
    except ImportError:
        raise ImportError(
            'google-search-results is not installed. Please install it with `pip install google-search-results>=2.4.2`'
            )
    serp_search_engine = SerpApiClient
    values['serp_search_engine'] = serp_search_engine
    return values
|
Validate that api key and python package exists in environment.
|
name_to_tool_map
|
return {tool.name: tool for tool in self.agent_executor.tools}
|
@property
def name_to_tool_map(self) ->Dict[str, BaseTool]:
    """Index the executor's tools by their names."""
    return {t.name: t for t in self.agent_executor.tools}
| null |
test_multiple_history
|
"""Tests multiple history works."""
chat = VolcEngineMaasChat()
response = chat(messages=[HumanMessage(content='Hello'), AIMessage(content=
'Hello!'), HumanMessage(content='How are you?')])
assert isinstance(response, BaseMessage)
assert isinstance(response.content, str)
|
def test_multiple_history() ->None:
    """Tests multiple history works."""
    history = [HumanMessage(content='Hello'), AIMessage(content='Hello!'),
        HumanMessage(content='How are you?')]
    chat = VolcEngineMaasChat()
    reply = chat(messages=history)
    assert isinstance(reply, BaseMessage)
    assert isinstance(reply.content, str)
|
Tests multiple history works.
|
_run
|
"""Run commands and return final output."""
return self.process.run(commands)
|
def _run(self, commands: Union[str, List[str]], run_manager: Optional[
    CallbackManagerForToolRun]=None) ->str:
    """Run commands and return final output."""
    # Delegate execution to the wrapped process; run_manager is accepted for
    # the tool interface but not used here.
    output = self.process.run(commands)
    return output
|
Run commands and return final output.
|
map
|
"""Maps the Run to a dictionary."""
if not run.outputs:
raise ValueError(
f'Run with ID {run.id} lacks outputs required for evaluation. Ensure the Run has valid outputs.'
)
if self.input_key is not None and self.input_key not in run.inputs:
raise ValueError(
f"""Run with ID {run.id} is missing the expected input key '{self.input_key}'.
Available input keys in this Run are: {run.inputs.keys()}.
Adjust the evaluator's input_key or ensure your input data includes key '{self.input_key}'."""
)
elif self.prediction_key is not None and self.prediction_key not in run.outputs:
available_keys = ', '.join(run.outputs.keys())
raise ValueError(
f"Run with ID {run.id} doesn't have the expected prediction key '{self.prediction_key}'. Available prediction keys in this Run are: {available_keys}. Adjust the evaluator's prediction_key or ensure the Run object's outputs the expected key."
)
else:
input_ = self._get_key(run.inputs, self.input_key, 'input')
prediction = self._get_key(run.outputs, self.prediction_key, 'prediction')
return {'input': input_, 'prediction': prediction}
|
def map(self, run: Run) ->Dict[str, str]:
    """Maps the Run to a dictionary.

    Extracts the evaluation input from ``run.inputs`` and the prediction
    from ``run.outputs`` (honoring the configured ``input_key`` /
    ``prediction_key`` when set) and returns them under the fixed keys
    ``'input'`` and ``'prediction'``.

    Raises:
        ValueError: If the run has no outputs, or a configured key is
            missing from the run's inputs/outputs.
    """
    # A run without outputs has nothing to evaluate at all.
    if not run.outputs:
        raise ValueError(
            f'Run with ID {run.id} lacks outputs required for evaluation. Ensure the Run has valid outputs.'
            )
    # Fail fast on a misconfigured input_key, listing what IS available so
    # the configuration error is easy to diagnose.
    if self.input_key is not None and self.input_key not in run.inputs:
        raise ValueError(
            f"""Run with ID {run.id} is missing the expected input key '{self.input_key}'.
Available input keys in this Run are: {run.inputs.keys()}.
Adjust the evaluator's input_key or ensure your input data includes key '{self.input_key}'."""
            )
    elif self.prediction_key is not None and self.prediction_key not in run.outputs:
        available_keys = ', '.join(run.outputs.keys())
        raise ValueError(
            f"Run with ID {run.id} doesn't have the expected prediction key '{self.prediction_key}'. Available prediction keys in this Run are: {available_keys}. Adjust the evaluator's prediction_key or ensure the Run object's outputs the expected key."
            )
    else:
        # _get_key presumably falls back to a default field when the key is
        # None — TODO confirm against the helper's definition.
        input_ = self._get_key(run.inputs, self.input_key, 'input')
        prediction = self._get_key(run.outputs, self.prediction_key,
            'prediction')
        return {'input': input_, 'prediction': prediction}
|
Maps the Run to a dictionary.
|
load
|
"""Load documents."""
p = Path(self.file_path)
with open(p, encoding='utf8') as f:
d = json.load(f)
text = ''.join(concatenate_rows(message) for message in d['messages'] if
message['type'] == 'message' and isinstance(message['text'], str))
metadata = {'source': str(p)}
return [Document(page_content=text, metadata=metadata)]
|
def load(self) ->List[Document]:
    """Load the chat-export JSON file as a single Document."""
    path = Path(self.file_path)
    with open(path, encoding='utf8') as fh:
        data = json.load(fh)
    # Only plain-text 'message' entries contribute to the document body.
    parts = []
    for message in data['messages']:
        if message['type'] == 'message' and isinstance(message['text'], str):
            parts.append(concatenate_rows(message))
    text = ''.join(parts)
    return [Document(page_content=text, metadata={'source': str(path)})]
|
Load documents.
|
_default_params
|
"""Get the default parameters for calling Tongyi Qwen API."""
return {'model': self.model_name, 'top_p': self.top_p, 'api_key': self.
dashscope_api_key.get_secret_value(), 'result_format': 'message', **
self.model_kwargs}
|
@property
def _default_params(self) ->Dict[str, Any]:
"""Get the default parameters for calling Tongyi Qwen API."""
return {'model': self.model_name, 'top_p': self.top_p, 'api_key': self.
dashscope_api_key.get_secret_value(), 'result_format': 'message',
**self.model_kwargs}
|
Get the default parameters for calling Tongyi Qwen API.
|
from_texts
|
"""Create an DocArrayHnswSearch store and insert data.
Args:
texts (List[str]): Text data.
embedding (Embeddings): Embedding function.
metadatas (Optional[List[dict]]): Metadata for each text if it exists.
Defaults to None.
work_dir (str): path to the location where all the data will be stored.
n_dim (int): dimension of an embedding.
**kwargs: Other keyword arguments to be passed to the __init__ method.
Returns:
DocArrayHnswSearch Vector Store
"""
if work_dir is None:
raise ValueError('`work_dir` parameter has not been set.')
if n_dim is None:
raise ValueError('`n_dim` parameter has not been set.')
store = cls.from_params(embedding, work_dir, n_dim, **kwargs)
store.add_texts(texts=texts, metadatas=metadatas)
return store
|
@classmethod
def from_texts(cls, texts: List[str], embedding: Embeddings, metadatas:
    Optional[List[dict]]=None, work_dir: Optional[str]=None, n_dim:
    Optional[int]=None, **kwargs: Any) ->DocArrayHnswSearch:
    """Create an DocArrayHnswSearch store and insert data.

    Args:
        texts (List[str]): Text data.
        embedding (Embeddings): Embedding function.
        metadatas (Optional[List[dict]]): Metadata for each text if it exists.
            Defaults to None.
        work_dir (str): path to the location where all the data will be stored.
        n_dim (int): dimension of an embedding.
        **kwargs: Other keyword arguments to be passed to the __init__ method.

    Returns:
        DocArrayHnswSearch Vector Store

    Raises:
        ValueError: If ``work_dir`` or ``n_dim`` is not provided.
    """
    # work_dir and n_dim default to None only for keyword convenience; both
    # are mandatory and validated up front.
    if work_dir is None:
        raise ValueError('`work_dir` parameter has not been set.')
    if n_dim is None:
        raise ValueError('`n_dim` parameter has not been set.')
    # Build an empty store first, then bulk-insert the texts (presumably
    # embedded via the store's embedding function — confirm in add_texts).
    store = cls.from_params(embedding, work_dir, n_dim, **kwargs)
    store.add_texts(texts=texts, metadatas=metadatas)
    return store
|
Create an DocArrayHnswSearch store and insert data.
Args:
texts (List[str]): Text data.
embedding (Embeddings): Embedding function.
metadatas (Optional[List[dict]]): Metadata for each text if it exists.
Defaults to None.
work_dir (str): path to the location where all the data will be stored.
n_dim (int): dimension of an embedding.
**kwargs: Other keyword arguments to be passed to the __init__ method.
Returns:
DocArrayHnswSearch Vector Store
|
_type
|
return 'openai-tools-agent-output-parser'
|
@property
def _type(self) ->str:
return 'openai-tools-agent-output-parser'
| null |
_validate_embeddings_and_bulk_size
|
"""Validate Embeddings Length and Bulk Size."""
if embeddings_length == 0:
raise RuntimeError('Embeddings size is zero')
if bulk_size < embeddings_length:
raise RuntimeError(
f'The embeddings count, {embeddings_length} is more than the [bulk_size], {bulk_size}. Increase the value of [bulk_size].'
)
|
def _validate_embeddings_and_bulk_size(embeddings_length: int, bulk_size: int
) ->None:
"""Validate Embeddings Length and Bulk Size."""
if embeddings_length == 0:
raise RuntimeError('Embeddings size is zero')
if bulk_size < embeddings_length:
raise RuntimeError(
f'The embeddings count, {embeddings_length} is more than the [bulk_size], {bulk_size}. Increase the value of [bulk_size].'
)
|
Validate Embeddings Length and Bulk Size.
|
_run
|
train_result = self.llm.train_unsupervised((information_to_learn,))
return f"Train complete. Loss: {train_result['loss']}"
|
def _run(self, information_to_learn: str, run_manager: Optional[
CallbackManagerForToolRun]=None) ->str:
train_result = self.llm.train_unsupervised((information_to_learn,))
return f"Train complete. Loss: {train_result['loss']}"
| null |
test_unstructured_pdf_loader_elements_mode
|
"""Test unstructured loader with various modes."""
file_path = Path(__file__).parent.parent / 'examples/hello.pdf'
loader = UnstructuredPDFLoader(str(file_path), mode='elements')
docs = loader.load()
assert len(docs) == 2
|
def test_unstructured_pdf_loader_elements_mode() ->None:
    """Test unstructured loader with various modes."""
    pdf_path = Path(__file__).parent.parent / 'examples/hello.pdf'
    documents = UnstructuredPDFLoader(str(pdf_path), mode='elements').load()
    # 'elements' mode splits the sample file into exactly two documents.
    assert len(documents) == 2
|
Test unstructured loader with various modes.
|
generate_ngrams
|
"""Generate n-grams from a list of words"""
return [' '.join(words_list[i:i + n]) for i in range(len(words_list) - (n - 1))
]
|
def generate_ngrams(words_list: List[str], n: int) ->list:
    """Generate n-grams from a list of words.

    Args:
        words_list: The tokenized words to combine.
        n: Size of each n-gram; must be a positive integer.

    Returns:
        A list of space-joined n-grams. Empty when ``words_list`` has fewer
        than ``n`` words or when ``n`` is not positive.
    """
    # Guard against non-positive n, which would otherwise produce
    # meaningless empty-string "grams" (e.g. n == 0 yielded len+1 of them).
    if n < 1:
        return []
    return [' '.join(words_list[i:i + n]) for i in range(len(words_list) -
        (n - 1))]
|
Generate n-grams from a list of words
|
get_final_agent_thought_label
|
"""Return the markdown label for the agent's final thought -
the "Now I have the answer" thought, that doesn't involve
a tool.
"""
return f'{CHECKMARK_EMOJI} **Complete!**'
|
def get_final_agent_thought_label(self) ->str:
    """Markdown label for the agent's final, tool-free "answer ready" thought."""
    label = f'{CHECKMARK_EMOJI} **Complete!**'
    return label
|
Return the markdown label for the agent's final thought -
the "Now I have the answer" thought, that doesn't involve
a tool.
|
assert_llm_equality
|
"""Assert LLM Equality for tests."""
assert type(llm) == type(loaded_llm)
for field in llm.__fields__.keys():
if field != 'client' and field != 'pipeline':
val = getattr(llm, field)
new_val = getattr(loaded_llm, field)
assert new_val == val
|
def assert_llm_equality(llm: BaseLLM, loaded_llm: BaseLLM) ->None:
    """Assert LLM Equality for tests.

    Compares every declared field of the two models except the runtime-only
    'client' and 'pipeline' attributes, which are not expected to survive a
    serialization round trip.

    Raises:
        AssertionError: If the two objects' types differ or any compared
            field value differs.
    """
    # `is` is the correct identity comparison for classes; `type() ==` is a
    # common lint smell (use isinstance/`is`).
    assert type(llm) is type(loaded_llm)
    excluded = {'client', 'pipeline'}
    for field in llm.__fields__.keys():
        if field not in excluded:
            assert getattr(loaded_llm, field) == getattr(llm, field)
|
Assert LLM Equality for tests.
|
_llm_type
|
"""Return type of chat model."""
return 'gpt-router-chat'
|
@property
def _llm_type(self) ->str:
"""Return type of chat model."""
return 'gpt-router-chat'
|
Return type of chat model.
|
rank_fusion
|
"""
Retrieve the results of the retrievers and use rank_fusion_func to get
the final result.
Args:
query: The query to search for.
Returns:
A list of reranked documents.
"""
retriever_docs = [retriever.get_relevant_documents(query, callbacks=
run_manager.get_child(tag=f'retriever_{i + 1}')) for i, retriever in
enumerate(self.retrievers)]
for i in range(len(retriever_docs)):
retriever_docs[i] = [(Document(page_content=doc) if not isinstance(doc,
Document) else doc) for doc in retriever_docs[i]]
fused_documents = self.weighted_reciprocal_rank(retriever_docs)
return fused_documents
|
def rank_fusion(self, query: str, run_manager: CallbackManagerForRetrieverRun
    ) ->List[Document]:
    """
    Retrieve the results of the retrievers and use rank_fusion_func to get
    the final result.
    Args:
        query: The query to search for.
    Returns:
        A list of reranked documents.
    """
    # Query every configured retriever, tagging each child callback as
    # retriever_1, retriever_2, ... (1-based) so traces stay distinguishable.
    retriever_docs = [retriever.get_relevant_documents(query, callbacks=
        run_manager.get_child(tag=f'retriever_{i + 1}')) for i, retriever in
        enumerate(self.retrievers)]
    # Normalize results: retrievers may return raw strings, so wrap any
    # non-Document value in a Document before fusion.
    for i in range(len(retriever_docs)):
        retriever_docs[i] = [(Document(page_content=doc) if not isinstance(
            doc, Document) else doc) for doc in retriever_docs[i]]
    # Fuse the per-retriever rankings via weighted reciprocal rank.
    fused_documents = self.weighted_reciprocal_rank(retriever_docs)
    return fused_documents
|
Retrieve the results of the retrievers and use rank_fusion_func to get
the final result.
Args:
query: The query to search for.
Returns:
A list of reranked documents.
|
_import_nasa
|
from langchain_community.utilities.nasa import NasaAPIWrapper
return NasaAPIWrapper
|
def _import_nasa() ->Any:
    """Lazily import NasaAPIWrapper so the dependency is only loaded on use."""
    from langchain_community.utilities.nasa import NasaAPIWrapper as wrapper_cls
    return wrapper_cls
| null |
test_two_thoughts_invalid
|
memory = ToTDFSMemory([Thought(text='a', validity=ThoughtValidity.
VALID_INTERMEDIATE), Thought(text='b', validity=ThoughtValidity.INVALID)])
self.assertEqual(self.controller(memory), ('a',))
|
def test_two_thoughts_invalid(self) ->None:
    """An invalid second thought backtracks the controller to the first."""
    thoughts = [Thought(text='a', validity=ThoughtValidity.
        VALID_INTERMEDIATE), Thought(text='b', validity=ThoughtValidity.
        INVALID)]
    memory = ToTDFSMemory(thoughts)
    self.assertEqual(self.controller(memory), ('a',))
| null |
get_resized_images
|
"""
Resize images from base64-encoded strings.
:param docs: A list of base64-encoded image to be resized.
:return: Dict containing a list of resized base64-encoded strings.
"""
b64_images = []
for doc in docs:
if isinstance(doc, Document):
doc = doc.page_content
resized_image = resize_base64_image(doc, size=(1280, 720))
b64_images.append(resized_image)
return {'images': b64_images}
|
def get_resized_images(docs):
    """
    Resize images from base64-encoded strings.
    :param docs: A list of base64-encoded image to be resized.
    :return: Dict containing a list of resized base64-encoded strings.
    """
    # Accept either Document objects (use their page_content) or raw
    # base64 strings; resize each to 1280x720.
    resized = [resize_base64_image(item.page_content if isinstance(item,
        Document) else item, size=(1280, 720)) for item in docs]
    return {'images': resized}
|
Resize images from base64-encoded strings.
:param docs: A list of base64-encoded image to be resized.
:return: Dict containing a list of resized base64-encoded strings.
|
getERC20Tx
|
url = (
f'https://api.etherscan.io/api?module=account&action=tokentx&address={self.account_address}&startblock={self.start_block}&endblock={self.end_block}&page={self.page}&offset={self.offset}&sort={self.sort}&apikey={self.api_key}'
)
try:
response = requests.get(url)
response.raise_for_status()
except requests.exceptions.RequestException as e:
print('Error occurred while making the request:', e)
items = response.json()['result']
result = []
if len(items) == 0:
return [Document(page_content='')]
for item in items:
content = str(item)
metadata = {'from': item['from'], 'tx_hash': item['hash'], 'to': item['to']
}
result.append(Document(page_content=content, metadata=metadata))
return result
|
def getERC20Tx(self) ->List[Document]:
    """Fetch ERC-20 token transactions for the account from the Etherscan API.

    Returns:
        One Document per transaction: page_content is the raw item dict as a
        string, metadata carries 'from', 'tx_hash' and 'to'. A single empty
        Document is returned when there are no transactions or the HTTP
        request fails.
    """
    url = (
        f'https://api.etherscan.io/api?module=account&action=tokentx&address={self.account_address}&startblock={self.start_block}&endblock={self.end_block}&page={self.page}&offset={self.offset}&sort={self.sort}&apikey={self.api_key}'
        )
    try:
        response = requests.get(url)
        response.raise_for_status()
    except requests.exceptions.RequestException as e:
        print('Error occurred while making the request:', e)
        # Bug fix: the original fell through after a failed request and
        # dereferenced an undefined/invalid `response` (NameError). Degrade
        # gracefully to the same shape used for an empty result set.
        return [Document(page_content='')]
    items = response.json()['result']
    if not items:
        return [Document(page_content='')]
    result = []
    for item in items:
        metadata = {'from': item['from'], 'tx_hash': item['hash'], 'to':
            item['to']}
        result.append(Document(page_content=str(item), metadata=metadata))
    return result
| null |
create_documents
|
"""Create documents from a list of texts."""
_metadatas = metadatas or [{}] * len(texts)
documents = []
for i, text in enumerate(texts):
index = -1
for chunk in self.split_text(text):
metadata = copy.deepcopy(_metadatas[i])
if self._add_start_index:
index = text.find(chunk, index + 1)
metadata['start_index'] = index
new_doc = Document(page_content=chunk, metadata=metadata)
documents.append(new_doc)
return documents
|
def create_documents(self, texts: List[str], metadatas: Optional[List[dict]
    ]=None) ->List[Document]:
    """Create documents from a list of texts.

    Each text is split via ``self.split_text`` and every chunk becomes one
    Document carrying a deep copy of that text's metadata (plus
    ``start_index`` when ``self._add_start_index`` is enabled).
    """
    # Default to an empty metadata dict per text when none are supplied.
    _metadatas = metadatas or [{}] * len(texts)
    documents = []
    for i, text in enumerate(texts):
        index = -1
        for chunk in self.split_text(text):
            # Deep-copy so chunks never share (and mutate) one metadata dict.
            metadata = copy.deepcopy(_metadatas[i])
            if self._add_start_index:
                # Search from just past the previous chunk's start so
                # repeated chunk text maps to successive occurrences.
                index = text.find(chunk, index + 1)
                metadata['start_index'] = index
            new_doc = Document(page_content=chunk, metadata=metadata)
            documents.append(new_doc)
    return documents
|
Create documents from a list of texts.
|
visit_Subscript
|
if isinstance(node.ctx, ast.Load) and isinstance(node.value, ast.Name
) and node.value.id == self.name and isinstance(node.slice, ast.Constant
) and isinstance(node.slice.value, str):
self.keys.add(node.slice.value)
|
def visit_Subscript(self, node: ast.Subscript) ->Any:
    """Record string keys used to read-subscript the tracked variable name."""
    # Only read accesses count; ignore stores/deletes.
    if not isinstance(node.ctx, ast.Load):
        return
    # The subscripted value must be the bare name we are tracking.
    if not (isinstance(node.value, ast.Name) and node.value.id == self.name):
        return
    # Collect only literal string keys, e.g. name['key'].
    if isinstance(node.slice, ast.Constant) and isinstance(node.slice.value,
        str):
        self.keys.add(node.slice.value)
| null |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.