method_name
stringlengths 1
78
| method_body
stringlengths 3
9.66k
| full_code
stringlengths 31
10.7k
| docstring
stringlengths 4
4.74k
⌀ |
|---|---|---|---|
visit_comparison
|
comparator = self._format_func(comparison.comparator)
processed_value = process_value(comparison.value)
attribute = comparison.attribute
return '( ' + 'doc.' + attribute + ' ' + comparator + ' ' + processed_value + ' )'
|
def visit_comparison(self, comparison: Comparison) ->str:
comparator = self._format_func(comparison.comparator)
processed_value = process_value(comparison.value)
attribute = comparison.attribute
return ('( ' + 'doc.' + attribute + ' ' + comparator + ' ' +
processed_value + ' )')
| null |
test_comet_tracer__trace_chain_with_single_span__happyflow
|
chain_module_mock = mock.Mock()
chain_instance_mock = mock.Mock()
chain_module_mock.Chain.return_value = chain_instance_mock
span_module_mock = mock.Mock()
span_instance_mock = mock.MagicMock()
span_instance_mock.__api__start__ = mock.Mock()
span_instance_mock.__api__end__ = mock.Mock()
span_module_mock.Span.return_value = span_instance_mock
experiment_info_module_mock = mock.Mock()
experiment_info_module_mock.get.return_value = 'the-experiment-info'
chain_api_module_mock = mock.Mock()
comet_ml_api_mock = SimpleNamespace(chain=chain_module_mock, span=
span_module_mock, experiment_info=experiment_info_module_mock,
chain_api=chain_api_module_mock, flush='not-used-in-this-test')
with mock.patch.object(comet, 'import_comet_llm_api', return_value=
comet_ml_api_mock):
tracer = comet.CometTracer()
run_id_1 = uuid.UUID('9d878ab3-e5ca-4218-aef6-44cbdc90160a')
run_id_2 = uuid.UUID('4f31216e-7c26-4027-a5fd-0bbf9ace17dc')
tracer.on_chain_start({'name': 'chain-input'}, {'input':
'chain-input-prompt'}, parent_run_id=None, run_id=run_id_1)
chain_module_mock.Chain.assert_called_once_with(inputs={'input':
'chain-input-prompt'}, metadata=None, experiment_info='the-experiment-info'
)
tracer.on_llm_start({'name': 'span-input'}, ['span-input-prompt'],
parent_run_id=run_id_1, run_id=run_id_2)
span_module_mock.Span.assert_called_once_with(inputs={'prompts': [
'span-input-prompt']}, category=mock.ANY, metadata=mock.ANY, name=mock.ANY)
span_instance_mock.__api__start__(chain_instance_mock)
tracer.on_llm_end(LLMResult(generations=[], llm_output={'span-output-key':
'span-output-value'}), run_id=run_id_2)
span_instance_mock.set_outputs.assert_called_once()
actual_span_outputs = span_instance_mock.set_outputs.call_args[1]['outputs']
assert {'llm_output': {'span-output-key': 'span-output-value'},
'generations': []}.items() <= actual_span_outputs.items()
span_instance_mock.__api__end__()
tracer.on_chain_end({'chain-output-key': 'chain-output-value'}, run_id=run_id_1
)
chain_instance_mock.set_outputs.assert_called_once()
actual_chain_outputs = chain_instance_mock.set_outputs.call_args[1]['outputs']
assert ('chain-output-key', 'chain-output-value'
) in actual_chain_outputs.items()
chain_api_module_mock.log_chain.assert_called_once_with(chain_instance_mock)
|
def test_comet_tracer__trace_chain_with_single_span__happyflow() ->None:
chain_module_mock = mock.Mock()
chain_instance_mock = mock.Mock()
chain_module_mock.Chain.return_value = chain_instance_mock
span_module_mock = mock.Mock()
span_instance_mock = mock.MagicMock()
span_instance_mock.__api__start__ = mock.Mock()
span_instance_mock.__api__end__ = mock.Mock()
span_module_mock.Span.return_value = span_instance_mock
experiment_info_module_mock = mock.Mock()
experiment_info_module_mock.get.return_value = 'the-experiment-info'
chain_api_module_mock = mock.Mock()
comet_ml_api_mock = SimpleNamespace(chain=chain_module_mock, span=
span_module_mock, experiment_info=experiment_info_module_mock,
chain_api=chain_api_module_mock, flush='not-used-in-this-test')
with mock.patch.object(comet, 'import_comet_llm_api', return_value=
comet_ml_api_mock):
tracer = comet.CometTracer()
run_id_1 = uuid.UUID('9d878ab3-e5ca-4218-aef6-44cbdc90160a')
run_id_2 = uuid.UUID('4f31216e-7c26-4027-a5fd-0bbf9ace17dc')
tracer.on_chain_start({'name': 'chain-input'}, {'input':
'chain-input-prompt'}, parent_run_id=None, run_id=run_id_1)
chain_module_mock.Chain.assert_called_once_with(inputs={'input':
'chain-input-prompt'}, metadata=None, experiment_info=
'the-experiment-info')
tracer.on_llm_start({'name': 'span-input'}, ['span-input-prompt'],
parent_run_id=run_id_1, run_id=run_id_2)
span_module_mock.Span.assert_called_once_with(inputs={'prompts': [
'span-input-prompt']}, category=mock.ANY, metadata=mock.ANY, name=
mock.ANY)
span_instance_mock.__api__start__(chain_instance_mock)
tracer.on_llm_end(LLMResult(generations=[], llm_output={
'span-output-key': 'span-output-value'}), run_id=run_id_2)
span_instance_mock.set_outputs.assert_called_once()
actual_span_outputs = span_instance_mock.set_outputs.call_args[1]['outputs'
]
assert {'llm_output': {'span-output-key': 'span-output-value'},
'generations': []}.items() <= actual_span_outputs.items()
span_instance_mock.__api__end__()
tracer.on_chain_end({'chain-output-key': 'chain-output-value'}, run_id=
run_id_1)
chain_instance_mock.set_outputs.assert_called_once()
actual_chain_outputs = chain_instance_mock.set_outputs.call_args[1][
'outputs']
assert ('chain-output-key', 'chain-output-value'
) in actual_chain_outputs.items()
chain_api_module_mock.log_chain.assert_called_once_with(chain_instance_mock
)
| null |
batch
|
configs = get_config_list(config, len(inputs))
prepared = [self._prepare(c) for c in configs]
if all(p is self.default for p, _ in prepared):
return self.default.batch(inputs, [c for _, c in prepared],
return_exceptions=return_exceptions, **kwargs)
if not inputs:
return []
def invoke(prepared: Tuple[Runnable[Input, Output], RunnableConfig], input:
Input) ->Union[Output, Exception]:
bound, config = prepared
if return_exceptions:
try:
return bound.invoke(input, config, **kwargs)
except Exception as e:
return e
else:
return bound.invoke(input, config, **kwargs)
if len(inputs) == 1:
return cast(List[Output], [invoke(prepared[0], inputs[0])])
with get_executor_for_config(configs[0]) as executor:
return cast(List[Output], list(executor.map(invoke, prepared, inputs)))
|
def batch(self, inputs: List[Input], config: Optional[Union[RunnableConfig,
List[RunnableConfig]]]=None, *, return_exceptions: bool=False, **kwargs:
Optional[Any]) ->List[Output]:
configs = get_config_list(config, len(inputs))
prepared = [self._prepare(c) for c in configs]
if all(p is self.default for p, _ in prepared):
return self.default.batch(inputs, [c for _, c in prepared],
return_exceptions=return_exceptions, **kwargs)
if not inputs:
return []
def invoke(prepared: Tuple[Runnable[Input, Output], RunnableConfig],
input: Input) ->Union[Output, Exception]:
bound, config = prepared
if return_exceptions:
try:
return bound.invoke(input, config, **kwargs)
except Exception as e:
return e
else:
return bound.invoke(input, config, **kwargs)
if len(inputs) == 1:
return cast(List[Output], [invoke(prepared[0], inputs[0])])
with get_executor_for_config(configs[0]) as executor:
return cast(List[Output], list(executor.map(invoke, prepared, inputs)))
| null |
lc_secrets
|
return {'openai_api_key': 'OPENAI_API_KEY'}
|
@property
def lc_secrets(self) ->Dict[str, str]:
return {'openai_api_key': 'OPENAI_API_KEY'}
| null |
input_keys
|
"""Expect input key.
:meta private:
"""
return [self.instructions_key]
|
@property
def input_keys(self) ->List[str]:
"""Expect input key.
:meta private:
"""
return [self.instructions_key]
|
Expect input key.
:meta private:
|
embed_query
|
"""Compute query embeddings using a HuggingFace instruct model.
Args:
text: The text to embed.
Returns:
Embeddings for the text.
"""
instruction_pair = [self.query_instruction, text]
embedding = self.client(self.pipeline_ref, [instruction_pair])[0]
return embedding.tolist()
|
def embed_query(self, text: str) ->List[float]:
"""Compute query embeddings using a HuggingFace instruct model.
Args:
text: The text to embed.
Returns:
Embeddings for the text.
"""
instruction_pair = [self.query_instruction, text]
embedding = self.client(self.pipeline_ref, [instruction_pair])[0]
return embedding.tolist()
|
Compute query embeddings using a HuggingFace instruct model.
Args:
text: The text to embed.
Returns:
Embeddings for the text.
|
process_pages
|
"""Process a list of pages into a list of documents."""
docs = []
for page in pages:
if not include_restricted_content and not self.is_public_page(page):
continue
doc = self.process_page(page, include_attachments, include_comments,
content_format, ocr_languages=ocr_languages, keep_markdown_format=
keep_markdown_format, keep_newlines=keep_newlines)
docs.append(doc)
return docs
|
def process_pages(self, pages: List[dict], include_restricted_content: bool,
include_attachments: bool, include_comments: bool, content_format:
ContentFormat, ocr_languages: Optional[str]=None, keep_markdown_format:
Optional[bool]=False, keep_newlines: bool=False) ->List[Document]:
"""Process a list of pages into a list of documents."""
docs = []
for page in pages:
if not include_restricted_content and not self.is_public_page(page):
continue
doc = self.process_page(page, include_attachments, include_comments,
content_format, ocr_languages=ocr_languages,
keep_markdown_format=keep_markdown_format, keep_newlines=
keep_newlines)
docs.append(doc)
return docs
|
Process a list of pages into a list of documents.
|
test_continue_on_failure_false
|
"""Test exception is raised when continue_on_failure=False."""
loader = RSSFeedLoader(['badurl.foobar'], continue_on_failure=False)
with pytest.raises(Exception):
loader.load()
|
@pytest.mark.requires('feedparser', 'newspaper')
def test_continue_on_failure_false() ->None:
"""Test exception is raised when continue_on_failure=False."""
loader = RSSFeedLoader(['badurl.foobar'], continue_on_failure=False)
with pytest.raises(Exception):
loader.load()
|
Test exception is raised when continue_on_failure=False.
|
test_datetime_output_parser_parse
|
parser = DatetimeOutputParser()
date = datetime.now()
datestr = date.strftime(parser.format)
result = parser.parse_folder(datestr)
assert result == date
parser.format = '%Y-%m-%dT%H:%M:%S'
date = datetime.now()
datestr = date.strftime(parser.format)
result = parser.parse_folder(datestr)
assert result.year == date.year and result.month == date.month and result.day == date.day and result.hour == date.hour and result.minute == date.minute and result.second == date.second
parser.format = '%H:%M:%S'
date = datetime.now()
datestr = date.strftime(parser.format)
result = parser.parse_folder(datestr)
assert result.hour == date.hour and result.minute == date.minute and result.second == date.second
try:
sleep(0.001)
datestr = date.strftime(parser.format)
result = parser.parse_folder(datestr)
assert result == date
assert False, 'Should have raised AssertionError'
except AssertionError:
pass
|
def test_datetime_output_parser_parse() ->None:
parser = DatetimeOutputParser()
date = datetime.now()
datestr = date.strftime(parser.format)
result = parser.parse_folder(datestr)
assert result == date
parser.format = '%Y-%m-%dT%H:%M:%S'
date = datetime.now()
datestr = date.strftime(parser.format)
result = parser.parse_folder(datestr)
assert result.year == date.year and result.month == date.month and result.day == date.day and result.hour == date.hour and result.minute == date.minute and result.second == date.second
parser.format = '%H:%M:%S'
date = datetime.now()
datestr = date.strftime(parser.format)
result = parser.parse_folder(datestr)
assert result.hour == date.hour and result.minute == date.minute and result.second == date.second
try:
sleep(0.001)
datestr = date.strftime(parser.format)
result = parser.parse_folder(datestr)
assert result == date
assert False, 'Should have raised AssertionError'
except AssertionError:
pass
| null |
texts
|
return ['foo', 'bar', 'baz']
|
@pytest.fixture
def texts() ->List[str]:
return ['foo', 'bar', 'baz']
| null |
_llm_type
|
"""Return type of chat model."""
return 'openai-chat'
|
@property
def _llm_type(self) ->str:
"""Return type of chat model."""
return 'openai-chat'
|
Return type of chat model.
|
_generate
|
if self.streaming:
stream_iter = self._stream(messages=messages, stop=stop, run_manager=
run_manager, **kwargs)
return generate_from_stream(stream_iter)
res = self._chat(messages, **kwargs)
response = res.json()
if response.get('code') != 0:
raise ValueError(f'Error from Baichuan api response: {response}')
return self._create_chat_result(response)
|
def _generate(self, messages: List[BaseMessage], stop: Optional[List[str]]=
None, run_manager: Optional[CallbackManagerForLLMRun]=None, **kwargs: Any
) ->ChatResult:
if self.streaming:
stream_iter = self._stream(messages=messages, stop=stop,
run_manager=run_manager, **kwargs)
return generate_from_stream(stream_iter)
res = self._chat(messages, **kwargs)
response = res.json()
if response.get('code') != 0:
raise ValueError(f'Error from Baichuan api response: {response}')
return self._create_chat_result(response)
| null |
test__convert_message_to_dict_ai
|
message = AIMessage(content='foo')
result = _convert_message_to_dict(message)
expected_output = {'role': 'assistant', 'content': 'foo'}
assert result == expected_output
|
def test__convert_message_to_dict_ai() ->None:
message = AIMessage(content='foo')
result = _convert_message_to_dict(message)
expected_output = {'role': 'assistant', 'content': 'foo'}
assert result == expected_output
| null |
test_alibabacloud_opensearch_delete_doc
|
opensearch = create_alibabacloud_opensearch()
delete_result = opensearch.delete_documents_with_texts(['bar'])
assert delete_result
time.sleep(1)
search_result = opensearch.similarity_search(query='bar', search_filter={
'int_field': 2}, k=1)
assert len(search_result) == 0
|
def test_alibabacloud_opensearch_delete_doc() ->None:
opensearch = create_alibabacloud_opensearch()
delete_result = opensearch.delete_documents_with_texts(['bar'])
assert delete_result
time.sleep(1)
search_result = opensearch.similarity_search(query='bar', search_filter
={'int_field': 2}, k=1)
assert len(search_result) == 0
| null |
assert_docs
|
for doc in docs:
assert doc.page_content
assert doc.metadata
main_meta = {'title', 'summary', 'source'}
assert set(doc.metadata).issuperset(main_meta)
if all_meta:
assert len(set(doc.metadata)) > len(main_meta)
else:
assert len(set(doc.metadata)) == len(main_meta)
|
def assert_docs(docs: List[Document], all_meta: bool=False) ->None:
for doc in docs:
assert doc.page_content
assert doc.metadata
main_meta = {'title', 'summary', 'source'}
assert set(doc.metadata).issuperset(main_meta)
if all_meta:
assert len(set(doc.metadata)) > len(main_meta)
else:
assert len(set(doc.metadata)) == len(main_meta)
| null |
test_python_ast_repl_one_line_print
|
program = 'print("The square of {} is {:.2f}".format(3, 3**2))'
tool = PythonAstREPLTool()
assert tool.run(program) == 'The square of 3 is 9.00\n'
|
@pytest.mark.skipif(sys.version_info < (3, 9), reason=
'Requires python version >= 3.9 to run.')
def test_python_ast_repl_one_line_print() ->None:
program = 'print("The square of {} is {:.2f}".format(3, 3**2))'
tool = PythonAstREPLTool()
assert tool.run(program) == 'The square of 3 is 9.00\n'
| null |
_speech2text
|
try:
import azure.cognitiveservices.speech as speechsdk
except ImportError:
pass
audio_src_type = detect_file_src_type(audio_path)
if audio_src_type == 'local':
audio_config = speechsdk.AudioConfig(filename=audio_path)
elif audio_src_type == 'remote':
tmp_audio_path = download_audio_from_url(audio_path)
audio_config = speechsdk.AudioConfig(filename=tmp_audio_path)
else:
raise ValueError(f'Invalid audio path: {audio_path}')
self.speech_config.speech_recognition_language = speech_language
speech_recognizer = speechsdk.SpeechRecognizer(self.speech_config, audio_config
)
return self._continuous_recognize(speech_recognizer)
|
def _speech2text(self, audio_path: str, speech_language: str) ->str:
try:
import azure.cognitiveservices.speech as speechsdk
except ImportError:
pass
audio_src_type = detect_file_src_type(audio_path)
if audio_src_type == 'local':
audio_config = speechsdk.AudioConfig(filename=audio_path)
elif audio_src_type == 'remote':
tmp_audio_path = download_audio_from_url(audio_path)
audio_config = speechsdk.AudioConfig(filename=tmp_audio_path)
else:
raise ValueError(f'Invalid audio path: {audio_path}')
self.speech_config.speech_recognition_language = speech_language
speech_recognizer = speechsdk.SpeechRecognizer(self.speech_config,
audio_config)
return self._continuous_recognize(speech_recognizer)
| null |
test_get_nfts_with_pagination
|
contract_address = '0x1a92f7381b9f03921564a437210bb9396471050c'
startToken = (
'0x0000000000000000000000000000000000000000000000000000000000000077')
result = BlockchainDocumentLoader(contract_address, BlockchainType.
ETH_MAINNET, api_key=apiKey, startToken=startToken).load()
print('Tokens returned for contract with offset: ', len(result))
assert len(result) > 0, 'No NFTs returned'
|
@pytest.mark.skipif(not alchemyKeySet, reason='Alchemy API key not provided.')
def test_get_nfts_with_pagination() ->None:
contract_address = '0x1a92f7381b9f03921564a437210bb9396471050c'
startToken = (
'0x0000000000000000000000000000000000000000000000000000000000000077')
result = BlockchainDocumentLoader(contract_address, BlockchainType.
ETH_MAINNET, api_key=apiKey, startToken=startToken).load()
print('Tokens returned for contract with offset: ', len(result))
assert len(result) > 0, 'No NFTs returned'
| null |
create_client
|
if values.get('client') is not None:
return values
try:
import boto3
if values.get('credentials_profile_name'):
session = boto3.Session(profile_name=values['credentials_profile_name']
)
else:
session = boto3.Session()
client_params = {}
if values.get('region_name'):
client_params['region_name'] = values['region_name']
values['client'] = session.client('kendra', **client_params)
return values
except ImportError:
raise ModuleNotFoundError(
'Could not import boto3 python package. Please install it with `pip install boto3`.'
)
except Exception as e:
raise ValueError(
'Could not load credentials to authenticate with AWS client. Please check that credentials in the specified profile name are valid.'
) from e
|
@root_validator(pre=True)
def create_client(cls, values: Dict[str, Any]) ->Dict[str, Any]:
if values.get('client') is not None:
return values
try:
import boto3
if values.get('credentials_profile_name'):
session = boto3.Session(profile_name=values[
'credentials_profile_name'])
else:
session = boto3.Session()
client_params = {}
if values.get('region_name'):
client_params['region_name'] = values['region_name']
values['client'] = session.client('kendra', **client_params)
return values
except ImportError:
raise ModuleNotFoundError(
'Could not import boto3 python package. Please install it with `pip install boto3`.'
)
except Exception as e:
raise ValueError(
'Could not load credentials to authenticate with AWS client. Please check that credentials in the specified profile name are valid.'
) from e
| null |
test_run_arg_with_memory
|
"""Test run method works when arg is passed."""
chain = FakeChain(the_input_keys=['foo', 'baz'], memory=FakeMemory())
chain.run('bar')
|
def test_run_arg_with_memory() ->None:
"""Test run method works when arg is passed."""
chain = FakeChain(the_input_keys=['foo', 'baz'], memory=FakeMemory())
chain.run('bar')
|
Test run method works when arg is passed.
|
add_ai_message
|
"""Convenience method for adding an AI message string to the store.
Args:
message: The AI message to add.
"""
if isinstance(message, AIMessage):
self.add_message(message)
else:
self.add_message(AIMessage(content=message))
|
def add_ai_message(self, message: Union[AIMessage, str]) ->None:
"""Convenience method for adding an AI message string to the store.
Args:
message: The AI message to add.
"""
if isinstance(message, AIMessage):
self.add_message(message)
else:
self.add_message(AIMessage(content=message))
|
Convenience method for adding an AI message string to the store.
Args:
message: The AI message to add.
|
test_add_texts
|
index = mock_index(DIRECT_ACCESS_INDEX)
vectorsearch = DatabricksVectorSearch(index, embedding=
DEFAULT_EMBEDDING_MODEL, text_column=DEFAULT_TEXT_COLUMN)
ids = [idx for idx, i in enumerate(fake_texts)]
vectors = DEFAULT_EMBEDDING_MODEL.embed_documents(fake_texts)
added_ids = vectorsearch.add_texts(fake_texts, ids=ids)
index.upsert.assert_called_once_with([{DEFAULT_PRIMARY_KEY: id_,
DEFAULT_TEXT_COLUMN: text, DEFAULT_VECTOR_COLUMN: vector} for text,
vector, id_ in zip(fake_texts, vectors, ids)])
assert len(added_ids) == len(fake_texts)
assert added_ids == ids
|
@pytest.mark.requires('databricks', 'databricks.vector_search')
def test_add_texts() ->None:
index = mock_index(DIRECT_ACCESS_INDEX)
vectorsearch = DatabricksVectorSearch(index, embedding=
DEFAULT_EMBEDDING_MODEL, text_column=DEFAULT_TEXT_COLUMN)
ids = [idx for idx, i in enumerate(fake_texts)]
vectors = DEFAULT_EMBEDDING_MODEL.embed_documents(fake_texts)
added_ids = vectorsearch.add_texts(fake_texts, ids=ids)
index.upsert.assert_called_once_with([{DEFAULT_PRIMARY_KEY: id_,
DEFAULT_TEXT_COLUMN: text, DEFAULT_VECTOR_COLUMN: vector} for text,
vector, id_ in zip(fake_texts, vectors, ids)])
assert len(added_ids) == len(fake_texts)
assert added_ids == ids
| null |
validate_environment
|
"""Validate that api key and endpoint exists in environment."""
azure_cogs_key = get_from_dict_or_env(values, 'azure_cogs_key',
'AZURE_COGS_KEY')
azure_cogs_region = get_from_dict_or_env(values, 'azure_cogs_region',
'AZURE_COGS_REGION')
try:
import azure.cognitiveservices.speech as speechsdk
values['speech_config'] = speechsdk.SpeechConfig(subscription=
azure_cogs_key, region=azure_cogs_region)
except ImportError:
raise ImportError(
'azure-cognitiveservices-speech is not installed. Run `pip install azure-cognitiveservices-speech` to install.'
)
return values
|
@root_validator(pre=True)
def validate_environment(cls, values: Dict) ->Dict:
"""Validate that api key and endpoint exists in environment."""
azure_cogs_key = get_from_dict_or_env(values, 'azure_cogs_key',
'AZURE_COGS_KEY')
azure_cogs_region = get_from_dict_or_env(values, 'azure_cogs_region',
'AZURE_COGS_REGION')
try:
import azure.cognitiveservices.speech as speechsdk
values['speech_config'] = speechsdk.SpeechConfig(subscription=
azure_cogs_key, region=azure_cogs_region)
except ImportError:
raise ImportError(
'azure-cognitiveservices-speech is not installed. Run `pip install azure-cognitiveservices-speech` to install.'
)
return values
|
Validate that api key and endpoint exists in environment.
|
test_pandas_output_parser_row_no_array
|
try:
parser.parse_folder('row:5')
assert False, 'Should have raised OutputParserException'
except OutputParserException:
assert True
|
def test_pandas_output_parser_row_no_array() ->None:
try:
parser.parse_folder('row:5')
assert False, 'Should have raised OutputParserException'
except OutputParserException:
assert True
| null |
_convert_delta_to_message_chunk
|
role = _dict.get('role')
content = _dict.get('content') or ''
additional_kwargs: Dict = {}
if _dict.get('function_call'):
function_call = dict(_dict['function_call'])
if 'name' in function_call and function_call['name'] is None:
function_call['name'] = ''
additional_kwargs['function_call'] = function_call
if _dict.get('tool_calls'):
additional_kwargs['tool_calls'] = _dict['tool_calls']
if role == 'user' or default_class == HumanMessageChunk:
return HumanMessageChunk(content=content)
elif role == 'assistant' or default_class == AIMessageChunk:
return AIMessageChunk(content=content, additional_kwargs=additional_kwargs)
elif role == 'system' or default_class == SystemMessageChunk:
return SystemMessageChunk(content=content)
elif role == 'function' or default_class == FunctionMessageChunk:
return FunctionMessageChunk(content=content, name=_dict['name'])
elif role == 'tool' or default_class == ToolMessageChunk:
return ToolMessageChunk(content=content, tool_call_id=_dict['tool_call_id']
)
elif role or default_class == ChatMessageChunk:
return ChatMessageChunk(content=content, role=role)
else:
return default_class(content=content)
|
def _convert_delta_to_message_chunk(_dict: Mapping[str, Any], default_class:
Type[BaseMessageChunk]) ->BaseMessageChunk:
role = _dict.get('role')
content = _dict.get('content') or ''
additional_kwargs: Dict = {}
if _dict.get('function_call'):
function_call = dict(_dict['function_call'])
if 'name' in function_call and function_call['name'] is None:
function_call['name'] = ''
additional_kwargs['function_call'] = function_call
if _dict.get('tool_calls'):
additional_kwargs['tool_calls'] = _dict['tool_calls']
if role == 'user' or default_class == HumanMessageChunk:
return HumanMessageChunk(content=content)
elif role == 'assistant' or default_class == AIMessageChunk:
return AIMessageChunk(content=content, additional_kwargs=
additional_kwargs)
elif role == 'system' or default_class == SystemMessageChunk:
return SystemMessageChunk(content=content)
elif role == 'function' or default_class == FunctionMessageChunk:
return FunctionMessageChunk(content=content, name=_dict['name'])
elif role == 'tool' or default_class == ToolMessageChunk:
return ToolMessageChunk(content=content, tool_call_id=_dict[
'tool_call_id'])
elif role or default_class == ChatMessageChunk:
return ChatMessageChunk(content=content, role=role)
else:
return default_class(content=content)
| null |
_import_reddit_search_RedditSearchRun
|
from langchain_community.tools.reddit_search.tool import RedditSearchRun
return RedditSearchRun
|
def _import_reddit_search_RedditSearchRun() ->Any:
from langchain_community.tools.reddit_search.tool import RedditSearchRun
return RedditSearchRun
| null |
append_to_last_tokens
|
self.last_tokens.append(token)
self.last_tokens_stripped.append(token.strip())
if len(self.last_tokens) > len(self.answer_prefix_tokens):
self.last_tokens.pop(0)
self.last_tokens_stripped.pop(0)
|
def append_to_last_tokens(self, token: str) ->None:
self.last_tokens.append(token)
self.last_tokens_stripped.append(token.strip())
if len(self.last_tokens) > len(self.answer_prefix_tokens):
self.last_tokens.pop(0)
self.last_tokens_stripped.pop(0)
| null |
clear
|
"""Nothing to clear, got a memory like a vault."""
pass
|
def clear(self) ->None:
"""Nothing to clear, got a memory like a vault."""
pass
|
Nothing to clear, got a memory like a vault.
|
_import_nlp_engine_provider
|
try:
from presidio_analyzer.nlp_engine import NlpEngineProvider
except ImportError as e:
raise ImportError(
'Could not import presidio_analyzer, please install with `pip install presidio-analyzer`. You will also need to download a spaCy model to use the analyzer, e.g. `python -m spacy download en_core_web_lg`.'
) from e
return NlpEngineProvider
|
def _import_nlp_engine_provider() ->'NlpEngineProvider':
try:
from presidio_analyzer.nlp_engine import NlpEngineProvider
except ImportError as e:
raise ImportError(
'Could not import presidio_analyzer, please install with `pip install presidio-analyzer`. You will also need to download a spaCy model to use the analyzer, e.g. `python -m spacy download en_core_web_lg`.'
) from e
return NlpEngineProvider
| null |
on_llm_new_token_common
|
self.llm_streams += 1
|
def on_llm_new_token_common(self) ->None:
self.llm_streams += 1
| null |
_import_sql_database
|
from langchain_community.utilities.sql_database import SQLDatabase
return SQLDatabase
|
def _import_sql_database() ->Any:
from langchain_community.utilities.sql_database import SQLDatabase
return SQLDatabase
| null |
on_llm_new_token
|
"""Run when LLM generates a new token."""
self.step += 1
self.llm_streams += 1
|
def on_llm_new_token(self, token: str, **kwargs: Any) ->None:
"""Run when LLM generates a new token."""
self.step += 1
self.llm_streams += 1
|
Run when LLM generates a new token.
|
test_add_documents_with_ids
|
"""Test end to end construction and search with scores and IDs."""
from momento.responses.vector_index import Search
texts = ['apple', 'orange', 'hammer']
ids = [random_string() for _ in range(len(texts))]
metadatas = [{'page': f'{i}'} for i in range(len(texts))]
stored_ids = vector_store.add_texts(texts, metadatas, ids=ids)
assert stored_ids == ids
wait()
response = vector_store._client.search(vector_store.index_name,
vector_store.embeddings.embed_query('apple'))
assert isinstance(response, Search.Success)
assert [hit.id for hit in response.hits] == ids
|
def test_add_documents_with_ids(vector_store: MomentoVectorIndex) ->None:
"""Test end to end construction and search with scores and IDs."""
from momento.responses.vector_index import Search
texts = ['apple', 'orange', 'hammer']
ids = [random_string() for _ in range(len(texts))]
metadatas = [{'page': f'{i}'} for i in range(len(texts))]
stored_ids = vector_store.add_texts(texts, metadatas, ids=ids)
assert stored_ids == ids
wait()
response = vector_store._client.search(vector_store.index_name,
vector_store.embeddings.embed_query('apple'))
assert isinstance(response, Search.Success)
assert [hit.id for hit in response.hits] == ids
|
Test end to end construction and search with scores and IDs.
|
test_pdfplumber_parser
|
"""Test PDFPlumber parser."""
_assert_with_parser(PDFPlumberParser())
_assert_with_duplicate_parser(PDFPlumberParser())
_assert_with_duplicate_parser(PDFPlumberParser(dedupe=True), dedupe=True)
|
def test_pdfplumber_parser() ->None:
"""Test PDFPlumber parser."""
_assert_with_parser(PDFPlumberParser())
_assert_with_duplicate_parser(PDFPlumberParser())
_assert_with_duplicate_parser(PDFPlumberParser(dedupe=True), dedupe=True)
|
Test PDFPlumber parser.
|
create_schema
|
"""Create the database schema for the record manager."""
|
@abstractmethod
def create_schema(self) ->None:
"""Create the database schema for the record manager."""
|
Create the database schema for the record manager.
|
__init__
|
super().__init__()
self.dimension = dimension
|
def __init__(self, dimension: int=DEFAULT_VECTOR_DIMENSION):
super().__init__()
self.dimension = dimension
| null |
_type
|
return 'tot_llm_checker_output'
|
@property
def _type(self) ->str:
return 'tot_llm_checker_output'
| null |
test_visit_structured_query
|
query = 'What is the capital of France?'
operation = Operation(operator=Operator.AND, arguments=[Comparison(
comparator=Comparator.EQ, attribute='foo', value='20'), Operation(
operator=Operator.OR, arguments=[Comparison(comparator=Comparator.LTE,
attribute='bar', value=7), Comparison(comparator=Comparator.LIKE,
attribute='baz', value='abc')])])
structured_query = StructuredQuery(query=query, filter=operation, limit=None)
expected = query, {'filter': {'bool': {'must': [{'term': {
'metadata.foo.keyword': '20'}}, {'bool': {'should': [{'range': {
'metadata.bar': {'lte': 7}}}, {'fuzzy': {'metadata.baz': {'value':
'abc'}}}]}}]}}}
actual = DEFAULT_TRANSLATOR.visit_structured_query(structured_query)
assert expected == actual
|
def test_visit_structured_query() ->None:
query = 'What is the capital of France?'
operation = Operation(operator=Operator.AND, arguments=[Comparison(
comparator=Comparator.EQ, attribute='foo', value='20'), Operation(
operator=Operator.OR, arguments=[Comparison(comparator=Comparator.
LTE, attribute='bar', value=7), Comparison(comparator=Comparator.
LIKE, attribute='baz', value='abc')])])
structured_query = StructuredQuery(query=query, filter=operation, limit
=None)
expected = query, {'filter': {'bool': {'must': [{'term': {
'metadata.foo.keyword': '20'}}, {'bool': {'should': [{'range': {
'metadata.bar': {'lte': 7}}}, {'fuzzy': {'metadata.baz': {'value':
'abc'}}}]}}]}}}
actual = DEFAULT_TRANSLATOR.visit_structured_query(structured_query)
assert expected == actual
| null |
_invocation_params
|
params = {**self._default_params, **kwargs}
params['stop_sequences'] = params['stop_sequences'] + (runtime_stop or [])
return params
|
def _invocation_params(self, runtime_stop: Optional[List[str]], **kwargs: Any
) ->Dict[str, Any]:
params = {**self._default_params, **kwargs}
params['stop_sequences'] = params['stop_sequences'] + (runtime_stop or [])
return params
| null |
_get_default_embeddings
|
"""This function returns the default embedding.
Returns:
Default TensorflowHubEmbeddings to use.
"""
from langchain_community.embeddings import TensorflowHubEmbeddings
return TensorflowHubEmbeddings()
|
@classmethod
def _get_default_embeddings(cls) ->'TensorflowHubEmbeddings':
    """This function returns the default embedding.
    Returns:
        Default TensorflowHubEmbeddings to use.
    """
    # Imported lazily so langchain_community is only required when the
    # caller does not supply an embedding of their own.
    from langchain_community.embeddings import TensorflowHubEmbeddings
    return TensorflowHubEmbeddings()
|
This function returns the default embedding.
Returns:
Default TensorflowHubEmbeddings to use.
|
pipe
|
"""Compose this runnable with another object to create a RunnableSequence."""
return RunnableSequence(self, *others, name=name)
|
def pipe(self, *others: Union[Runnable[Any, Other], Callable[[Any], Other]],
    name: Optional[str]=None) ->RunnableSerializable[Input, Other]:
    """Compose this runnable with another object to create a RunnableSequence.

    Args:
        *others: Runnables (or plain callables) to run after this one.
        name: Optional name for the resulting sequence.
    """
    return RunnableSequence(self, *others, name=name)
|
Compose this runnable with another object to create a RunnableSequence.
|
test_daxquery
|
from azure.identity import DefaultAzureCredential
DATASET_ID = get_from_env('', 'POWERBI_DATASET_ID')
TABLE_NAME = get_from_env('', 'POWERBI_TABLE_NAME')
NUM_ROWS = get_from_env('', 'POWERBI_NUMROWS')
fast_llm = ChatOpenAI(temperature=0.5, max_tokens=1000, model_name=
'gpt-3.5-turbo', verbose=True)
smart_llm = ChatOpenAI(temperature=0, max_tokens=100, model_name='gpt-4',
verbose=True)
toolkit = PowerBIToolkit(powerbi=PowerBIDataset(dataset_id=DATASET_ID,
table_names=[TABLE_NAME], credential=DefaultAzureCredential()), llm=
smart_llm)
agent_executor = create_pbi_agent(llm=fast_llm, toolkit=toolkit, verbose=True)
output = agent_executor.run(f'How many rows are in the table, {TABLE_NAME}')
assert NUM_ROWS in output
|
@pytest.mark.skipif(not azure_installed(), reason='requires azure package')
def test_daxquery() ->None:
    """Integration test: a PowerBI agent answers a table row-count question.

    Requires the POWERBI_DATASET_ID / POWERBI_TABLE_NAME / POWERBI_NUMROWS
    environment variables and working Azure default credentials.
    """
    from azure.identity import DefaultAzureCredential
    DATASET_ID = get_from_env('', 'POWERBI_DATASET_ID')
    TABLE_NAME = get_from_env('', 'POWERBI_TABLE_NAME')
    NUM_ROWS = get_from_env('', 'POWERBI_NUMROWS')
    # The cheaper model drives the agent loop; GPT-4 backs the toolkit.
    fast_llm = ChatOpenAI(temperature=0.5, max_tokens=1000, model_name=
        'gpt-3.5-turbo', verbose=True)
    smart_llm = ChatOpenAI(temperature=0, max_tokens=100, model_name=
        'gpt-4', verbose=True)
    toolkit = PowerBIToolkit(powerbi=PowerBIDataset(dataset_id=DATASET_ID,
        table_names=[TABLE_NAME], credential=DefaultAzureCredential()), llm
        =smart_llm)
    agent_executor = create_pbi_agent(llm=fast_llm, toolkit=toolkit,
        verbose=True)
    output = agent_executor.run(f'How many rows are in the table, {TABLE_NAME}'
        )
    assert NUM_ROWS in output
| null |
_split_sources
|
"""Split sources from answer."""
if re.search('SOURCES?:', answer, re.IGNORECASE):
answer, sources = re.split('SOURCES?:|QUESTION:\\s', answer, flags=re.
IGNORECASE)[:2]
sources = re.split('\\n', sources)[0].strip()
else:
sources = ''
return answer, sources
|
def _split_sources(self, answer: str) ->Tuple[str, str]:
"""Split sources from answer."""
if re.search('SOURCES?:', answer, re.IGNORECASE):
answer, sources = re.split('SOURCES?:|QUESTION:\\s', answer, flags=
re.IGNORECASE)[:2]
sources = re.split('\\n', sources)[0].strip()
else:
sources = ''
return answer, sources
|
Split sources from answer.
|
lazy_load
|
"""Lazy load Documents from table."""
result = []
if self.filter == 'normal_transaction':
result = self.getNormTx()
elif self.filter == 'internal_transaction':
result = self.getInternalTx()
elif self.filter == 'erc20_transaction':
result = self.getERC20Tx()
elif self.filter == 'eth_balance':
result = self.getEthBalance()
elif self.filter == 'erc721_transaction':
result = self.getERC721Tx()
elif self.filter == 'erc1155_transaction':
result = self.getERC1155Tx()
else:
raise ValueError(f'Invalid filter {filter}')
for doc in result:
yield doc
|
def lazy_load(self) ->Iterator[Document]:
    """Lazy load Documents from table.

    Dispatches on ``self.filter`` to the matching fetch method and yields
    the resulting documents one at a time.

    Raises:
        ValueError: If ``self.filter`` names no supported transaction type.
    """
    # Map each supported filter name to its fetch method; a dict keeps the
    # dispatch flat and the supported set explicit.
    fetchers = {'normal_transaction': self.getNormTx,
        'internal_transaction': self.getInternalTx, 'erc20_transaction':
        self.getERC20Tx, 'eth_balance': self.getEthBalance,
        'erc721_transaction': self.getERC721Tx, 'erc1155_transaction':
        self.getERC1155Tx}
    try:
        fetch = fetchers[self.filter]
    except KeyError:
        # Bug fix: the original interpolated the *builtin* ``filter``
        # function into the message instead of ``self.filter``.
        raise ValueError(f'Invalid filter {self.filter}')
    for doc in fetch():
        yield doc
|
Lazy load Documents from table.
|
__eq__
|
"""Create a RedisTag equality filter expression.
Args:
other (Union[List[str], Set[str], Tuple[str], str]):
The tag(s) to filter on.
Example:
>>> from langchain_community.vectorstores.redis import RedisTag
>>> filter = RedisTag("brand") == "nike"
"""
self._set_tag_value(other, RedisFilterOperator.EQ)
return RedisFilterExpression(str(self))
|
@check_operator_misuse
def __eq__(self, other: Union[List[str], Set[str], Tuple[str], str]
    ) ->'RedisFilterExpression':
    """Create a RedisTag equality filter expression.
    Args:
        other (Union[List[str], Set[str], Tuple[str], str]):
            The tag(s) to filter on.
    Returns:
        RedisFilterExpression: Expression matching documents whose tag
        field equals (any of) the given value(s).
    Example:
        >>> from langchain_community.vectorstores.redis import RedisTag
        >>> filter = RedisTag("brand") == "nike"
    """
    self._set_tag_value(other, RedisFilterOperator.EQ)
    return RedisFilterExpression(str(self))
|
Create a RedisTag equality filter expression.
Args:
other (Union[List[str], Set[str], Tuple[str], str]):
The tag(s) to filter on.
Example:
>>> from langchain_community.vectorstores.redis import RedisTag
>>> filter = RedisTag("brand") == "nike"
|
query
|
g = self.client.gremlin()
res = g.exec(query)
return res['data']
|
def query(self, query: str) ->List[Dict[str, Any]]:
    """Execute a Gremlin query against the graph.

    Args:
        query: Gremlin query string.

    Returns:
        The ``data`` payload of the Gremlin response.
    """
    gremlin_client = self.client.gremlin()
    response = gremlin_client.exec(query)
    return response['data']
| null |
configurable_fields
|
return self.__class__(which=self.which, default=self.default.
configurable_fields(**kwargs), alternatives=self.alternatives)
|
def configurable_fields(self, **kwargs: AnyConfigurableField
    ) ->RunnableSerializable[Input, Output]:
    """Return a copy with configurable fields applied to the default runnable.

    ``which`` and ``alternatives`` are carried over unchanged; only the
    default runnable receives the new configurable fields.
    """
    return self.__class__(which=self.which, default=self.default.
        configurable_fields(**kwargs), alternatives=self.alternatives)
| null |
_validate_content_key
|
"""Check if a content key is valid"""
sample = data.first()
if not isinstance(sample, dict):
raise ValueError(
f'Expected the jq schema to result in a list of objects (dict), so sample must be a dict but got `{type(sample)}`'
)
if sample.get(self._content_key) is None:
raise ValueError(
f'Expected the jq schema to result in a list of objects (dict) with the key `{self._content_key}`'
)
|
def _validate_content_key(self, data: Any) ->None:
"""Check if a content key is valid"""
sample = data.first()
if not isinstance(sample, dict):
raise ValueError(
f'Expected the jq schema to result in a list of objects (dict), so sample must be a dict but got `{type(sample)}`'
)
if sample.get(self._content_key) is None:
raise ValueError(
f'Expected the jq schema to result in a list of objects (dict) with the key `{self._content_key}`'
)
|
Check if a content key is valid
|
learn
|
pass
|
def learn(self, event: TEvent) ->None:
    """No-op: this implementation does not update any state from events."""
    pass
| null |
__init__
|
super().__init__(**kwargs)
from google.cloud.discoveryengine_v1beta import ConversationalSearchServiceClient
self._client = ConversationalSearchServiceClient(credentials=self.
credentials, client_options=self.client_options, client_info=
get_client_info(module='vertex-ai-search'))
self._serving_config = self._client.serving_config_path(project=self.
project_id, location=self.location_id, data_store=self.data_store_id,
serving_config=self.serving_config_id)
if self.engine_data_type == 1:
raise NotImplementedError(
'Data store type 1 (Structured)is not currently supported for multi-turn search.'
+ f' Got {self.engine_data_type}')
|
def __init__(self, **kwargs: Any):
    """Initialize the search client and serving-config path.

    Raises:
        NotImplementedError: If the data store is type 1 (structured),
            which multi-turn search does not support.
    """
    super().__init__(**kwargs)
    # Imported lazily so google-cloud is only required when instantiated.
    from google.cloud.discoveryengine_v1beta import ConversationalSearchServiceClient
    self._client = ConversationalSearchServiceClient(credentials=self.
        credentials, client_options=self.client_options, client_info=
        get_client_info(module='vertex-ai-search'))
    # Fully-qualified serving-config resource name used by every request.
    self._serving_config = self._client.serving_config_path(project=self.
        project_id, location=self.location_id, data_store=self.
        data_store_id, serving_config=self.serving_config_id)
    if self.engine_data_type == 1:
        raise NotImplementedError(
            'Data store type 1 (Structured)is not currently supported for multi-turn search.'
             + f' Got {self.engine_data_type}')
| null |
test_openai_streaming
|
"""Test streaming tokens from AzureOpenAI."""
generator = llm.stream("I'm Pickle Rick")
assert isinstance(generator, Generator)
full_response = ''
for token in generator:
assert isinstance(token, str)
full_response += token
assert full_response
|
@pytest.mark.scheduled
def test_openai_streaming(llm: AzureOpenAI) ->None:
    """Test streaming tokens from AzureOpenAI."""
    generator = llm.stream("I'm Pickle Rick")
    assert isinstance(generator, Generator)
    full_response = ''
    for token in generator:
        # Every streamed chunk must be a string token.
        assert isinstance(token, str)
        full_response += token
    # The accumulated stream must be non-empty.
    assert full_response
|
Test streaming tokens from AzureOpenAI.
|
test_convert_to_message
|
"""Test convert to message."""
assert _convert_to_message(args) == expected
|
@pytest.mark.parametrize('args,expected', [(('human', '{question}'),
    HumanMessagePromptTemplate(prompt=PromptTemplate.from_template(
    '{question}'))), ('{question}', HumanMessagePromptTemplate(prompt=
    PromptTemplate.from_template('{question}'))), (HumanMessage(content=
    'question'), HumanMessage(content='question')), (
    HumanMessagePromptTemplate(prompt=PromptTemplate.from_template(
    '{question}')), HumanMessagePromptTemplate(prompt=PromptTemplate.
    from_template('{question}')))])
def test_convert_to_message(args: Any, expected: Union[BaseMessage,
    BaseMessagePromptTemplate]) ->None:
    """Test convert to message.

    Covers the accepted input forms: a (role, template) tuple, a bare
    template string, a BaseMessage instance, and an existing
    message prompt template (returned as-is).
    """
    assert _convert_to_message(args) == expected
|
Test convert to message.
|
is_stringtype_instance
|
"""Helper function to check if an item is a string."""
return isinstance(item, str) or isinstance(item, _Embed) and isinstance(item
.value, str)
|
def is_stringtype_instance(item: Any) ->bool:
    """Helper function to check if an item is a string.

    True for plain ``str`` values, and for ``_Embed`` wrappers whose
    ``value`` attribute is a ``str``.
    """
    if isinstance(item, str):
        return True
    return isinstance(item, _Embed) and isinstance(item.value, str)
|
Helper function to check if an item is a string.
|
run_query
|
return db.run(query)
|
def run_query(query):
    # Thin helper: execute `query` via the module-level `db` handle
    # (defined elsewhere in this file) and return its result.
    return db.run(query)
| null |
_process_results
|
typed_results = cast(List[dict], results)
sorted_res = sorted(zip(typed_results, docs), key=lambda x: -int(x[0][self.
rank_key]))
output, document = sorted_res[0]
extra_info = {}
if self.metadata_keys is not None:
for key in self.metadata_keys:
extra_info[key] = document.metadata[key]
if self.return_intermediate_steps:
extra_info['intermediate_steps'] = results
return output[self.answer_key], extra_info
|
def _process_results(self, docs: List[Document], results: Sequence[Union[
    str, List[str], Dict[str, str]]]) ->Tuple[str, dict]:
    """Select the highest-ranked result and assemble its answer + metadata.

    Each result is paired with its source document and ranked by the
    integer under ``self.rank_key`` (highest first). The winning result's
    ``self.answer_key`` value is returned, together with any requested
    metadata keys from the winning document and, optionally, the raw
    intermediate results.
    """
    typed_results = cast(List[dict], results)
    # Negate the rank so the highest-ranked pair sorts first.
    sorted_res = sorted(zip(typed_results, docs), key=lambda x: -int(x[0][
        self.rank_key]))
    output, document = sorted_res[0]
    extra_info = {}
    if self.metadata_keys is not None:
        for key in self.metadata_keys:
            extra_info[key] = document.metadata[key]
    if self.return_intermediate_steps:
        extra_info['intermediate_steps'] = results
    return output[self.answer_key], extra_info
| null |
test_check_instances
|
"""Test anonymizing multiple items in a sentence"""
from langchain_experimental.data_anonymizer import PresidioAnonymizer
text = (
'This is John Smith. John Smith works in a bakery.John Smith is a good guy'
)
anonymizer = PresidioAnonymizer(['PERSON'], faker_seed=42)
anonymized_text = anonymizer.anonymize(text)
assert anonymized_text.count('Connie Lawrence') == 3
anonymized_text = anonymizer.anonymize(text)
assert anonymized_text.count('Connie Lawrence') == 0
|
@pytest.mark.requires('presidio_analyzer', 'presidio_anonymizer', 'faker')
def test_check_instances() ->None:
    """Test anonymizing multiple items in a sentence"""
    from langchain_experimental.data_anonymizer import PresidioAnonymizer
    text = (
        'This is John Smith. John Smith works in a bakery.John Smith is a good guy'
        )
    anonymizer = PresidioAnonymizer(['PERSON'], faker_seed=42)
    anonymized_text = anonymizer.anonymize(text)
    # All three mentions of the same person get one consistent fake name.
    assert anonymized_text.count('Connie Lawrence') == 3
    # NOTE(review): a second pass is expected to draw fresh fake values,
    # so the first replacement should no longer appear.
    anonymized_text = anonymizer.anonymize(text)
    assert anonymized_text.count('Connie Lawrence') == 0
|
Test anonymizing multiple items in a sentence
|
client
|
import marqo
client = marqo.Client(url=DEFAULT_MARQO_URL, api_key=DEFAULT_MARQO_API_KEY)
try:
client.index(INDEX_NAME).delete()
except Exception:
pass
client.create_index(INDEX_NAME)
return client
|
@pytest.fixture
def client() ->Marqo:
    """Provide a Marqo client with a freshly (re)created test index."""
    import marqo
    client = marqo.Client(url=DEFAULT_MARQO_URL, api_key=DEFAULT_MARQO_API_KEY)
    # Drop any leftover index from a previous run; ignore "does not exist".
    try:
        client.index(INDEX_NAME).delete()
    except Exception:
        pass
    client.create_index(INDEX_NAME)
    return client
| null |
test_sql_query
|
import rockset
assert os.environ.get('ROCKSET_API_KEY') is not None
assert os.environ.get('ROCKSET_REGION') is not None
api_key = os.environ.get('ROCKSET_API_KEY')
region = os.environ.get('ROCKSET_REGION')
if region == 'use1a1':
host = rockset.Regions.use1a1
elif region == 'usw2a1':
host = rockset.Regions.usw2a1
elif region == 'euc1a1':
host = rockset.Regions.euc1a1
elif region == 'dev':
host = rockset.DevRegions.usw2a1
else:
logger.warning(
"Using ROCKSET_REGION:%s as it is.. You should know what you're doing..."
, region)
host = region
client = rockset.RocksetClient(host, api_key)
col_1 = 'Rockset is a real-time analytics database'
col_2 = 2
col_3 = 'e903e069-b0b5-4b80-95e2-86471b41f55f'
id = 7320132
"""Run a simple SQL query"""
loader = RocksetLoader(client, rockset.models.QueryRequestSql(query=
f"SELECT '{col_1}' AS col_1, {col_2} AS col_2, '{col_3}' AS col_3, {id} AS id"
), ['col_1'], metadata_keys=['col_2', 'col_3', 'id'])
output = loader.load()
assert len(output) == 1
assert isinstance(output[0], Document)
assert output[0].page_content == col_1
assert output[0].metadata == {'col_2': col_2, 'col_3': col_3, 'id': id}
|
def test_sql_query() ->None:
    """Load a single document from a literal-valued Rockset SQL query.

    Requires ROCKSET_API_KEY and ROCKSET_REGION in the environment.
    """
    import rockset
    assert os.environ.get('ROCKSET_API_KEY') is not None
    assert os.environ.get('ROCKSET_REGION') is not None
    api_key = os.environ.get('ROCKSET_API_KEY')
    region = os.environ.get('ROCKSET_REGION')
    # Map the region name to the SDK's host constant; unknown values are
    # passed through verbatim (with a warning).
    if region == 'use1a1':
        host = rockset.Regions.use1a1
    elif region == 'usw2a1':
        host = rockset.Regions.usw2a1
    elif region == 'euc1a1':
        host = rockset.Regions.euc1a1
    elif region == 'dev':
        host = rockset.DevRegions.usw2a1
    else:
        logger.warning(
            "Using ROCKSET_REGION:%s as it is.. You should know what you're doing..."
            , region)
        host = region
    client = rockset.RocksetClient(host, api_key)
    col_1 = 'Rockset is a real-time analytics database'
    col_2 = 2
    col_3 = 'e903e069-b0b5-4b80-95e2-86471b41f55f'
    id = 7320132
    """Run a simple SQL query"""
    # col_1 becomes the page content; the remaining columns are metadata.
    loader = RocksetLoader(client, rockset.models.QueryRequestSql(query=
        f"SELECT '{col_1}' AS col_1, {col_2} AS col_2, '{col_3}' AS col_3, {id} AS id"
        ), ['col_1'], metadata_keys=['col_2', 'col_3', 'id'])
    output = loader.load()
    assert len(output) == 1
    assert isinstance(output[0], Document)
    assert output[0].page_content == col_1
    assert output[0].metadata == {'col_2': col_2, 'col_3': col_3, 'id': id}
| null |
_convert_prompt_msg_params
|
model_req = {'model': {'name': self.model}}
if self.model_version is not None:
model_req['model']['version'] = self.model_version
return {**model_req, 'messages': [_convert_message_to_dict(message) for
message in messages], 'parameters': {**self._default_params, **kwargs}}
|
def _convert_prompt_msg_params(self, messages: List[BaseMessage], **kwargs: Any
    ) ->Dict[str, Any]:
    """Assemble the request payload for a chat call.

    The payload carries the model name (plus version when configured), the
    messages converted to plain dicts, and the default parameters merged
    with call-time overrides.
    """
    model_spec: Dict[str, Any] = {'name': self.model}
    if self.model_version is not None:
        model_spec['version'] = self.model_version
    converted = [_convert_message_to_dict(msg) for msg in messages]
    parameters = {**self._default_params, **kwargs}
    return {'model': model_spec, 'messages': converted, 'parameters':
        parameters}
| null |
extract_node_variable
|
"""
Args:
part: node in string format
"""
part = part.lstrip('(').rstrip(')')
idx = part.find(':')
if idx != -1:
part = part[:idx]
return None if part == '' else part
|
def extract_node_variable(self, part: str) ->Optional[str]:
    """Extract the variable name from a Cypher-style node string.

    Args:
        part: node in string format, e.g. ``(n:Person)``.

    Returns:
        The variable name (text before the first ``:``), or None when the
        node declares no variable.
    """
    inner = part.lstrip('(').rstrip(')')
    colon_pos = inner.find(':')
    if colon_pos != -1:
        inner = inner[:colon_pos]
    return inner or None
|
Args:
part: node in string format
|
mset
|
"""Set the given key-value pairs."""
for key, value in key_value_pairs:
self.client.set(self._get_prefixed_key(key), value, ex=self.ttl)
|
def mset(self, key_value_pairs: Sequence[Tuple[str, str]]) ->None:
    """Set the given key-value pairs.

    Each key is namespaced through ``_get_prefixed_key`` and written with
    the configured TTL.
    """
    for key, value in key_value_pairs:
        prefixed_key = self._get_prefixed_key(key)
        self.client.set(prefixed_key, value, ex=self.ttl)
|
Set the given key-value pairs.
|
on_text
|
pass
|
def on_text(self, text: str, **kwargs: Any) ->None:
    """Intentionally a no-op for text events."""
    pass
| null |
test_multiple_namespaces
|
loader = MWDumpLoader(file_path=(PARENT_DIR / 'mwtest_current_pages.xml').
absolute(), namespaces=[0, 6], skip_redirects=True, stop_on_error=False)
documents = loader.load()
[print(doc) for doc in documents]
assert len(documents) == 2
|
@pytest.mark.requires('mwparserfromhell', 'mwxml')
def test_multiple_namespaces() ->None:
    """Loading namespaces 0 and 6 from the test dump yields two documents."""
    loader = MWDumpLoader(file_path=(PARENT_DIR /
        'mwtest_current_pages.xml').absolute(), namespaces=[0, 6],
        skip_redirects=True, stop_on_error=False)
    documents = loader.load()
    # Idiom fix: a plain loop instead of a side-effect-only list
    # comprehension (the built list was discarded).
    for doc in documents:
        print(doc)
    assert len(documents) == 2
| null |
from_llm
|
"""Initialize from LLM."""
_prompt = prompt or PROMPT_SELECTOR.get_prompt(llm)
llm_chain = LLMChain(llm=llm, prompt=_prompt, callbacks=callbacks, **
llm_chain_kwargs or {})
document_prompt = PromptTemplate(input_variables=['page_content'], template
="""Context:
{page_content}""")
combine_documents_chain = StuffDocumentsChain(llm_chain=llm_chain,
document_variable_name='context', document_prompt=document_prompt,
callbacks=callbacks)
return cls(combine_documents_chain=combine_documents_chain, callbacks=
callbacks, **kwargs)
|
@classmethod
def from_llm(cls, llm: BaseLanguageModel, prompt: Optional[PromptTemplate]=
    None, callbacks: Callbacks=None, llm_chain_kwargs: Optional[dict]=None,
    **kwargs: Any) ->BaseRetrievalQA:
    """Initialize from LLM.

    Args:
        llm: Language model to answer with.
        prompt: Optional prompt; when omitted, one is chosen for the LLM
            via PROMPT_SELECTOR.
        callbacks: Callbacks propagated to both chains.
        llm_chain_kwargs: Extra keyword arguments for the inner LLMChain.
    """
    _prompt = prompt or PROMPT_SELECTOR.get_prompt(llm)
    llm_chain = LLMChain(llm=llm, prompt=_prompt, callbacks=callbacks, **
        llm_chain_kwargs or {})
    # Each retrieved document is rendered under a "Context:" header before
    # being stuffed into the combine chain.
    document_prompt = PromptTemplate(input_variables=['page_content'],
        template="""Context:
{page_content}""")
    combine_documents_chain = StuffDocumentsChain(llm_chain=llm_chain,
        document_variable_name='context', document_prompt=document_prompt,
        callbacks=callbacks)
    return cls(combine_documents_chain=combine_documents_chain, callbacks=
        callbacks, **kwargs)
|
Initialize from LLM.
|
_chunk
|
for i in range(0, len(texts), size):
yield texts[i:i + size]
|
def _chunk(texts: List[str], size: int) ->Iterator[List[str]]:
for i in range(0, len(texts), size):
yield texts[i:i + size]
| null |
_create_filter_clause
|
IN, NIN, BETWEEN, GT, LT, NE = 'in', 'nin', 'between', 'gt', 'lt', 'ne'
EQ, LIKE, CONTAINS, OR, AND = 'eq', 'like', 'contains', 'or', 'and'
value_case_insensitive = {k.lower(): v for k, v in value.items()}
if IN in map(str.lower, value):
filter_by_metadata = self.EmbeddingStore.cmetadata[key].astext.in_(
value_case_insensitive[IN])
elif NIN in map(str.lower, value):
filter_by_metadata = self.EmbeddingStore.cmetadata[key].astext.not_in(
value_case_insensitive[NIN])
elif BETWEEN in map(str.lower, value):
filter_by_metadata = self.EmbeddingStore.cmetadata[key].astext.between(str
(value_case_insensitive[BETWEEN][0]), str(value_case_insensitive[
BETWEEN][1]))
elif GT in map(str.lower, value):
filter_by_metadata = self.EmbeddingStore.cmetadata[key].astext > str(
value_case_insensitive[GT])
elif LT in map(str.lower, value):
filter_by_metadata = self.EmbeddingStore.cmetadata[key].astext < str(
value_case_insensitive[LT])
elif NE in map(str.lower, value):
filter_by_metadata = self.EmbeddingStore.cmetadata[key].astext != str(
value_case_insensitive[NE])
elif EQ in map(str.lower, value):
filter_by_metadata = self.EmbeddingStore.cmetadata[key].astext == str(
value_case_insensitive[EQ])
elif LIKE in map(str.lower, value):
filter_by_metadata = self.EmbeddingStore.cmetadata[key].astext.like(
value_case_insensitive[LIKE])
elif CONTAINS in map(str.lower, value):
filter_by_metadata = self.EmbeddingStore.cmetadata[key].astext.contains(
value_case_insensitive[CONTAINS])
elif OR in map(str.lower, value):
or_clauses = [self._create_filter_clause(key, sub_value) for sub_value in
value_case_insensitive[OR]]
filter_by_metadata = sqlalchemy.or_(or_clauses)
elif AND in map(str.lower, value):
and_clauses = [self._create_filter_clause(key, sub_value) for sub_value in
value_case_insensitive[AND]]
filter_by_metadata = sqlalchemy.and_(and_clauses)
else:
filter_by_metadata = None
return filter_by_metadata
|
def _create_filter_clause(self, key, value):
    """Translate one metadata filter spec into a SQLAlchemy clause.

    ``value`` is a dict whose (case-insensitive) single key names the
    operator: in/nin/between/gt/lt/ne/eq/like/contains compare the JSONB
    text of ``cmetadata[key]``, while or/and recurse over a list of
    sub-specs. Returns ``None`` when no known operator is present.
    """
    IN, NIN, BETWEEN, GT, LT, NE = 'in', 'nin', 'between', 'gt', 'lt', 'ne'
    EQ, LIKE, CONTAINS, OR, AND = 'eq', 'like', 'contains', 'or', 'and'
    value_case_insensitive = {k.lower(): v for k, v in value.items()}
    if IN in map(str.lower, value):
        filter_by_metadata = self.EmbeddingStore.cmetadata[key].astext.in_(
            value_case_insensitive[IN])
    elif NIN in map(str.lower, value):
        filter_by_metadata = self.EmbeddingStore.cmetadata[key].astext.not_in(
            value_case_insensitive[NIN])
    elif BETWEEN in map(str.lower, value):
        filter_by_metadata = self.EmbeddingStore.cmetadata[key].astext.between(
            str(value_case_insensitive[BETWEEN][0]), str(
            value_case_insensitive[BETWEEN][1]))
    elif GT in map(str.lower, value):
        filter_by_metadata = self.EmbeddingStore.cmetadata[key].astext > str(
            value_case_insensitive[GT])
    elif LT in map(str.lower, value):
        filter_by_metadata = self.EmbeddingStore.cmetadata[key].astext < str(
            value_case_insensitive[LT])
    elif NE in map(str.lower, value):
        filter_by_metadata = self.EmbeddingStore.cmetadata[key].astext != str(
            value_case_insensitive[NE])
    elif EQ in map(str.lower, value):
        filter_by_metadata = self.EmbeddingStore.cmetadata[key].astext == str(
            value_case_insensitive[EQ])
    elif LIKE in map(str.lower, value):
        filter_by_metadata = self.EmbeddingStore.cmetadata[key].astext.like(
            value_case_insensitive[LIKE])
    elif CONTAINS in map(str.lower, value):
        filter_by_metadata = self.EmbeddingStore.cmetadata[key
            ].astext.contains(value_case_insensitive[CONTAINS])
    elif OR in map(str.lower, value):
        or_clauses = [self._create_filter_clause(key, sub_value) for
            sub_value in value_case_insensitive[OR]]
        # Bug fix: sqlalchemy.or_()/and_() take variadic *clauses, not a
        # single list argument — unpack the recursive sub-clauses.
        filter_by_metadata = sqlalchemy.or_(*or_clauses)
    elif AND in map(str.lower, value):
        and_clauses = [self._create_filter_clause(key, sub_value) for
            sub_value in value_case_insensitive[AND]]
        filter_by_metadata = sqlalchemy.and_(*and_clauses)
    else:
        filter_by_metadata = None
    return filter_by_metadata
| null |
_import_vertex_model_garden
|
from langchain_community.llms.vertexai import VertexAIModelGarden
return VertexAIModelGarden
|
def _import_vertex_model_garden() ->Any:
    """Lazily import and return the VertexAIModelGarden class."""
    from langchain_community.llms.vertexai import VertexAIModelGarden
    return VertexAIModelGarden
| null |
__init__
|
self.id = id
self.name = name
self.cards = cards
self.lists = lists
|
def __init__(self, id: str, name: str, cards: list, lists: list):
    """Lightweight container for a board and its contents.

    Args:
        id: Unique identifier of the board.
        name: Human-readable board name.
        cards: Cards belonging to the board.
        lists: Lists (columns) belonging to the board.
    """
    self.id = id
    self.name = name
    self.cards = cards
    self.lists = lists
| null |
test_faiss_vector_sim
|
"""Test vector similarity."""
texts = ['foo', 'bar', 'baz']
docsearch = FAISS.from_texts(texts, FakeEmbeddings())
index_to_id = docsearch.index_to_docstore_id
expected_docstore = InMemoryDocstore({index_to_id[0]: Document(page_content
='foo'), index_to_id[1]: Document(page_content='bar'), index_to_id[2]:
Document(page_content='baz')})
assert docsearch.docstore.__dict__ == expected_docstore.__dict__
query_vec = FakeEmbeddings().embed_query(text='foo')
output = docsearch.similarity_search_by_vector(query_vec, k=1)
assert output == [Document(page_content='foo')]
|
@pytest.mark.requires('faiss')
def test_faiss_vector_sim() ->None:
    """Test vector similarity."""
    texts = ['foo', 'bar', 'baz']
    docsearch = FAISS.from_texts(texts, FakeEmbeddings())
    # The docstore should hold one Document per input text, keyed by the
    # index-to-id mapping.
    index_to_id = docsearch.index_to_docstore_id
    expected_docstore = InMemoryDocstore({index_to_id[0]: Document(
        page_content='foo'), index_to_id[1]: Document(page_content='bar'),
        index_to_id[2]: Document(page_content='baz')})
    assert docsearch.docstore.__dict__ == expected_docstore.__dict__
    # Searching by the raw query vector must return the matching document.
    query_vec = FakeEmbeddings().embed_query(text='foo')
    output = docsearch.similarity_search_by_vector(query_vec, k=1)
    assert output == [Document(page_content='foo')]
|
Test vector similarity.
|
test_dependency_string_both
|
_assert_dependency_equals(parse_dependency_string(
'git+https://github.com/efriis/myrepo.git@branch#subdirectory=src',
None, None, None), git='https://github.com/efriis/myrepo.git',
subdirectory='src', ref='branch')
|
def test_dependency_string_both() ->None:
    """A git URL with both an @ref and a #subdirectory parses into all three parts."""
    _assert_dependency_equals(parse_dependency_string(
        'git+https://github.com/efriis/myrepo.git@branch#subdirectory=src',
        None, None, None), git='https://github.com/efriis/myrepo.git',
        subdirectory='src', ref='branch')
| null |
_import_vertex
|
from langchain_community.llms.vertexai import VertexAI
return VertexAI
|
def _import_vertex() ->Any:
    """Lazily import and return the VertexAI class."""
    from langchain_community.llms.vertexai import VertexAI
    return VertexAI
| null |
_llm_type
|
"""Return type of llm."""
return 'ctransformers'
|
@property
def _llm_type(self) ->str:
    """Return type of llm.

    Constant identifier for this LLM implementation.
    """
    return 'ctransformers'
|
Return type of llm.
|
close
|
for child in self._children:
child.close()
|
def close(self) ->None:
    """Close this node by closing each of its children in order."""
    for child in self._children:
        child.close()
| null |
_comprehension
|
if t.is_async:
self.write(' async for ')
else:
self.write(' for ')
self.dispatch(t.target)
self.write(' in ')
self.dispatch(t.iter)
for if_clause in t.ifs:
self.write(' if ')
self.dispatch(if_clause)
|
def _comprehension(self, t):
    # Emit one comprehension clause: '[async] for <target> in <iter>'
    # followed by zero or more ' if <cond>' filters.
    if t.is_async:
        self.write(' async for ')
    else:
        self.write(' for ')
    self.dispatch(t.target)
    self.write(' in ')
    self.dispatch(t.iter)
    for if_clause in t.ifs:
        self.write(' if ')
        self.dispatch(if_clause)
| null |
test_selector_trims_one_example
|
"""Test LengthBasedExampleSelector can trim one example."""
long_question = """I am writing a really long question,
this probably is going to affect the example right?"""
output = selector.select_examples({'question': long_question})
assert output == EXAMPLES[:1]
|
def test_selector_trims_one_example(selector: LengthBasedExampleSelector
    ) ->None:
    """Test LengthBasedExampleSelector can trim one example."""
    # The long input should push total length over budget so only the
    # first example survives.
    long_question = """I am writing a really long question,
this probably is going to affect the example right?"""
    output = selector.select_examples({'question': long_question})
    assert output == EXAMPLES[:1]
|
Test LengthBasedExampleSelector can trim one example.
|
test_get_input_schema_input_dict
|
class RunnableWithChatHistoryInput(BaseModel):
input: Union[str, BaseMessage, Sequence[BaseMessage]]
runnable = RunnableLambda(lambda input: {'output': [AIMessage(content=
'you said: ' + '\n'.join([str(m.content) for m in input['history'] if
isinstance(m, HumanMessage)] + [input['input']]))]})
get_session_history = _get_get_session_history()
with_history = RunnableWithMessageHistory(runnable, get_session_history,
input_messages_key='input', history_messages_key='history',
output_messages_key='output')
assert with_history.get_input_schema().schema(
) == RunnableWithChatHistoryInput.schema()
|
def test_get_input_schema_input_dict() ->None:
    """The wrapped runnable's input schema accepts a str, a message, or a
    sequence of messages under the 'input' key."""

    class RunnableWithChatHistoryInput(BaseModel):
        input: Union[str, BaseMessage, Sequence[BaseMessage]]
    # Echo runnable: replies with everything the human said so far.
    runnable = RunnableLambda(lambda input: {'output': [AIMessage(content=
        'you said: ' + '\n'.join([str(m.content) for m in input['history'] if
        isinstance(m, HumanMessage)] + [input['input']]))]})
    get_session_history = _get_get_session_history()
    with_history = RunnableWithMessageHistory(runnable, get_session_history,
        input_messages_key='input', history_messages_key='history',
        output_messages_key='output')
    assert with_history.get_input_schema().schema(
        ) == RunnableWithChatHistoryInput.schema()
| null |
invoke
|
return self._call_with_config(self._invoke, input, config, **kwargs)
|
def invoke(self, input: Input, config: Optional[RunnableConfig]=None, **
    kwargs: Any) ->Output:
    """Invoke this runnable, delegating to ``_invoke`` with config handling."""
    return self._call_with_config(self._invoke, input, config, **kwargs)
| null |
_llm_type
|
"""Return type of llm."""
return 'huggingface_endpoint'
|
@property
def _llm_type(self) ->str:
    """Return type of llm.

    Constant identifier for this LLM implementation.
    """
    return 'huggingface_endpoint'
|
Return type of llm.
|
messages
|
"""Retrieve the messages from Upstash Redis"""
_items = self.redis_client.lrange(self.key, 0, -1)
items = [json.loads(m) for m in _items[::-1]]
messages = messages_from_dict(items)
return messages
|
@property
def messages(self) ->List[BaseMessage]:
    """Retrieve the messages from Upstash Redis"""
    # lrange(key, 0, -1) fetches the whole list for this session key.
    _items = self.redis_client.lrange(self.key, 0, -1)
    # NOTE(review): items are reversed before decoding — presumably the
    # list is stored newest-first; confirm against the writer side.
    items = [json.loads(m) for m in _items[::-1]]
    messages = messages_from_dict(items)
    return messages
|
Retrieve the messages from Upstash Redis
|
sort_by_index_name
|
"""Sort first element to match the index_name if exists"""
return sorted(lst, key=lambda x: x.get('index_name') != index_name)
|
def sort_by_index_name(lst: List[Dict[str, Any]], index_name: str) ->List[Dict
    [str, Any]]:
    """Sort first element to match the index_name if exists.

    Entries whose ``index_name`` equals the target sort to the front;
    the sort is stable, so all other relative order is preserved.
    """

    def _not_target(entry: Dict[str, Any]) ->bool:
        # False (0) sorts before True (1), putting matches first.
        return entry.get('index_name') != index_name
    return sorted(lst, key=_not_target)
|
Sort first element to match the index_name if exists
|
_import_powerbi_tool_InfoPowerBITool
|
from langchain_community.tools.powerbi.tool import InfoPowerBITool
return InfoPowerBITool
|
def _import_powerbi_tool_InfoPowerBITool() ->Any:
    """Lazily import and return the InfoPowerBITool class."""
    from langchain_community.tools.powerbi.tool import InfoPowerBITool
    return InfoPowerBITool
| null |
_import_scann
|
from langchain_community.vectorstores.scann import ScaNN
return ScaNN
|
def _import_scann() ->Any:
    """Lazily import and return the ScaNN vector store class."""
    from langchain_community.vectorstores.scann import ScaNN
    return ScaNN
| null |
on_chain_end
|
"""Do nothing when LLM chain ends."""
pass
|
def on_chain_end(self, outputs: Dict[str, Any], **kwargs: Any) ->None:
    """Do nothing when LLM chain ends. Intentionally a no-op."""
    pass
|
Do nothing when LLM chain ends.
|
_create_and_run_api_controller_agent
|
pattern = '\\b(GET|POST|PATCH|DELETE)\\s+(/\\S+)*'
matches = re.findall(pattern, plan_str)
endpoint_names = ['{method} {route}'.format(method=method, route=route.
split('?')[0]) for method, route in matches]
docs_str = ''
for endpoint_name in endpoint_names:
found_match = False
for name, _, docs in api_spec.endpoints:
regex_name = re.compile(re.sub('\\{.*?\\}', '.*', name))
if regex_name.match(endpoint_name):
found_match = True
docs_str += f'== Docs for {endpoint_name} == \n{yaml.dump(docs)}\n'
if not found_match:
raise ValueError(f'{endpoint_name} endpoint does not exist.')
agent = _create_api_controller_agent(base_url, docs_str, requests_wrapper, llm)
return agent.run(plan_str)
|
def _create_and_run_api_controller_agent(plan_str: str) ->str:
    """Collect docs for each endpoint named in the plan and run the controller.

    Extracts ``METHOD /route`` pairs from the plan text, gathers spec docs
    for each (matching templated paths like ``/users/{id}`` via regex),
    then delegates execution to a freshly created API controller agent.
    ``api_spec``, ``base_url``, ``requests_wrapper`` and ``llm`` are
    captured from the enclosing scope.

    Raises:
        ValueError: If the plan references an endpoint absent from the spec.
    """
    pattern = '\\b(GET|POST|PATCH|DELETE)\\s+(/\\S+)*'
    matches = re.findall(pattern, plan_str)
    # Drop any query string so names line up with the spec's routes.
    endpoint_names = ['{method} {route}'.format(method=method, route=route.
        split('?')[0]) for method, route in matches]
    docs_str = ''
    for endpoint_name in endpoint_names:
        found_match = False
        for name, _, docs in api_spec.endpoints:
            # Templated path params ('{id}') become wildcards for matching.
            regex_name = re.compile(re.sub('\\{.*?\\}', '.*', name))
            if regex_name.match(endpoint_name):
                found_match = True
                docs_str += (
                    f'== Docs for {endpoint_name} == \n{yaml.dump(docs)}\n')
        if not found_match:
            raise ValueError(f'{endpoint_name} endpoint does not exist.')
    agent = _create_api_controller_agent(base_url, docs_str,
        requests_wrapper, llm)
    return agent.run(plan_str)
| null |
_on_tool_end
|
"""Process the Tool Run."""
self._submit(self._update_run_single, _copy(run))
|
def _on_tool_end(self, run: Run) ->None:
    """Process the Tool Run by queueing an update for a copy of it."""
    run_snapshot = _copy(run)
    self._submit(self._update_run_single, run_snapshot)
|
Process the Tool Run.
|
mock_documents
|
return [Document(page_content='Test Document', metadata={'key': 'value'}) for
_ in range(2)]
|
@pytest.fixture
def mock_documents() ->List[Document]:
    """Fixture: two identical test documents, each with one metadata entry."""
    return [
        Document(page_content='Test Document', metadata={'key': 'value'}),
        Document(page_content='Test Document', metadata={'key': 'value'}),
    ]
| null |
requires_reference
|
"""Return whether the chain requires a reference.
Returns:
bool: True if the chain requires a reference, False otherwise.
"""
return True
|
@property
def requires_reference(self) ->bool:
    """Whether this chain needs a reference answer to evaluate.

    Returns:
        bool: Always True for this evaluator.
    """
    return True
|
Return whether the chain requires a reference.
Returns:
bool: True if the chain requires a reference, False otherwise.
|
_Compare
|
self.write('(')
self.dispatch(t.left)
for o, e in zip(t.ops, t.comparators):
self.write(' ' + self.cmpops[o.__class__.__name__] + ' ')
self.dispatch(e)
self.write(')')
|
def _Compare(self, t):
self.write('(')
self.dispatch(t.left)
for o, e in zip(t.ops, t.comparators):
self.write(' ' + self.cmpops[o.__class__.__name__] + ' ')
self.dispatch(e)
self.write(')')
| null |
_validate_example_inputs_for_chain
|
"""Validate that the example inputs match the chain input keys."""
if input_mapper:
first_inputs = input_mapper(first_example.inputs)
missing_keys = set(chain.input_keys).difference(first_inputs)
if not isinstance(first_inputs, dict):
raise InputFormatError(
f"""When using an input_mapper to prepare dataset example inputs for a chain, the mapped value must be a dictionary.
Got: {first_inputs} of type {type(first_inputs)}."""
)
if missing_keys:
raise InputFormatError(
f"""Missing keys after loading example using input_mapper.
Expected: {chain.input_keys}. Got: {first_inputs.keys()}"""
)
else:
first_inputs = first_example.inputs
missing_keys = set(chain.input_keys).difference(first_inputs)
if len(first_inputs) == 1 and len(chain.input_keys) == 1:
pass
elif missing_keys:
raise InputFormatError(
f'Example inputs missing expected chain input keys. Please provide an input_mapper to convert the example.inputs to a compatible format for the chain you wish to evaluate.Expected: {chain.input_keys}. Got: {first_inputs.keys()}'
)
|
def _validate_example_inputs_for_chain(first_example: Example, chain: Chain,
    input_mapper: Optional[Callable[[Dict], Any]]) ->None:
    """Validate that the example inputs match the chain input keys.

    Args:
        first_example: A representative example whose inputs are checked.
        chain: The chain whose ``input_keys`` must be satisfied.
        input_mapper: Optional callable converting ``example.inputs`` into the
            dict of inputs the chain expects.

    Raises:
        InputFormatError: If the (possibly mapped) inputs are not a dict or
            are missing keys the chain requires.
    """
    if input_mapper:
        first_inputs = input_mapper(first_example.inputs)
        # Check the mapped value's type *before* computing missing keys:
        # ``set.difference`` on a non-iterable mapped value would raise a
        # confusing TypeError instead of the intended InputFormatError.
        if not isinstance(first_inputs, dict):
            raise InputFormatError(
                f"""When using an input_mapper to prepare dataset example inputs for a chain, the mapped value must be a dictionary.
Got: {first_inputs} of type {type(first_inputs)}."""
                )
        missing_keys = set(chain.input_keys).difference(first_inputs)
        if missing_keys:
            raise InputFormatError(
                f"""Missing keys after loading example using input_mapper.
Expected: {chain.input_keys}. Got: {first_inputs.keys()}"""
                )
    else:
        first_inputs = first_example.inputs
        missing_keys = set(chain.input_keys).difference(first_inputs)
        # NOTE(review): a single-input chain paired with a single-field example
        # is accepted even when the key names differ — presumably mapped
        # positionally downstream; confirm with the runner's callers.
        if len(first_inputs) == 1 and len(chain.input_keys) == 1:
            pass
        elif missing_keys:
            raise InputFormatError(
                f'Example inputs missing expected chain input keys. Please provide an input_mapper to convert the example.inputs to a compatible format for the chain you wish to evaluate.Expected: {chain.input_keys}. Got: {first_inputs.keys()}'
                )
|
Validate that the example inputs match the chain input keys.
|
test_jinachat_streaming
|
"""Test that streaming correctly invokes on_llm_new_token callback."""
callback_handler = FakeCallbackHandler()
callback_manager = CallbackManager([callback_handler])
chat = JinaChat(max_tokens=10, streaming=True, temperature=0,
callback_manager=callback_manager, verbose=True)
message = HumanMessage(content='Hello')
response = chat([message])
assert callback_handler.llm_streams > 0
assert isinstance(response, BaseMessage)
|
def test_jinachat_streaming() ->None:
    """Test that streaming correctly invokes on_llm_new_token callback."""
    handler = FakeCallbackHandler()
    chat = JinaChat(max_tokens=10, streaming=True, temperature=0,
        callback_manager=CallbackManager([handler]), verbose=True)
    response = chat([HumanMessage(content='Hello')])
    assert handler.llm_streams > 0
    assert isinstance(response, BaseMessage)
|
Test that streaming correctly invokes on_llm_new_token callback.
|
test_parse_float_value
|
_test_parse_value(x)
|
@pytest.mark.parametrize('x', (-1.001, 2e-08, 1234567.654321))
def test_parse_float_value(x: float) ->None:
    """Parse-check representative floats: negative, tiny scientific-notation,
    and large fractional values (delegates to the shared ``_test_parse_value``)."""
    _test_parse_value(x)
| null |
validate_environment
|
volc_engine_maas_ak = convert_to_secret_str(get_from_dict_or_env(values,
'volc_engine_maas_ak', 'VOLC_ACCESSKEY'))
volc_engine_maas_sk = convert_to_secret_str(get_from_dict_or_env(values,
'volc_engine_maas_sk', 'VOLC_SECRETKEY'))
endpoint = values['endpoint']
if values['endpoint'] is not None and values['endpoint'] != '':
endpoint = values['endpoint']
try:
from volcengine.maas import MaasService
maas = MaasService(endpoint, values['region'], connection_timeout=
values['connect_timeout'], socket_timeout=values['read_timeout'])
maas.set_ak(volc_engine_maas_ak.get_secret_value())
maas.set_sk(volc_engine_maas_sk.get_secret_value())
values['volc_engine_maas_ak'] = volc_engine_maas_ak
values['volc_engine_maas_sk'] = volc_engine_maas_sk
values['client'] = maas
except ImportError:
raise ImportError(
'volcengine package not found, please install it with `pip install volcengine`'
)
return values
|
@root_validator()
def validate_environment(cls, values: Dict) ->Dict:
    """Resolve credentials, build the volcengine Maas client, and store it.

    Reads the access/secret keys from ``values`` or the ``VOLC_ACCESSKEY`` /
    ``VOLC_SECRETKEY`` environment variables, wraps them as secrets, and
    attaches a configured ``MaasService`` client under ``values['client']``.

    Raises:
        ImportError: If the ``volcengine`` package is not installed.
    """
    volc_engine_maas_ak = convert_to_secret_str(get_from_dict_or_env(
        values, 'volc_engine_maas_ak', 'VOLC_ACCESSKEY'))
    volc_engine_maas_sk = convert_to_secret_str(get_from_dict_or_env(
        values, 'volc_engine_maas_sk', 'VOLC_SECRETKEY'))
    # The original code re-assigned `endpoint` from the same key inside a
    # truthiness check, which was a no-op; a single read suffices.
    endpoint = values['endpoint']
    try:
        # Keep the try block to the import alone so errors from client
        # construction are not misreported as a missing package.
        from volcengine.maas import MaasService
    except ImportError:
        raise ImportError(
            'volcengine package not found, please install it with `pip install volcengine`'
            )
    maas = MaasService(endpoint, values['region'], connection_timeout=
        values['connect_timeout'], socket_timeout=values['read_timeout'])
    maas.set_ak(volc_engine_maas_ak.get_secret_value())
    maas.set_sk(volc_engine_maas_sk.get_secret_value())
    values['volc_engine_maas_ak'] = volc_engine_maas_ak
    values['volc_engine_maas_sk'] = volc_engine_maas_sk
    values['client'] = maas
    return values
| null |
_import_python
|
from langchain_community.utilities.python import PythonREPL
return PythonREPL
|
def _import_python() ->Any:
    """Lazily import and return the PythonREPL utility class."""
    from langchain_community.utilities.python import PythonREPL as repl_cls
    return repl_cls
| null |
h
|
"""Height of the box."""
return self._h
|
@property
def h(self) ->int:
    """Height of the box."""
    # Read-only accessor over the private ``_h`` attribute; no validation.
    return self._h
|
Height of the box.
|
_transform_llama2_chat
|
return response['candidates'][0]['text']
|
def _transform_llama2_chat(response: Dict[str, Any]) ->str:
return response['candidates'][0]['text']
| null |
_run
|
"""Use the tool."""
if self.sync_browser is None:
raise ValueError(f'Synchronous browser not provided to {self.name}')
page = get_current_page(self.sync_browser)
selector_effective = self._selector_effective(selector=selector)
from playwright.sync_api import TimeoutError as PlaywrightTimeoutError
try:
page.click(selector_effective, strict=self.playwright_strict, timeout=
self.playwright_timeout)
except PlaywrightTimeoutError:
return f"Unable to click on element '{selector}'"
return f"Clicked element '{selector}'"
|
def _run(self, selector: str, run_manager: Optional[
    CallbackManagerForToolRun]=None) ->str:
    """Use the tool."""
    if self.sync_browser is None:
        raise ValueError(f'Synchronous browser not provided to {self.name}')
    from playwright.sync_api import TimeoutError as PlaywrightTimeoutError
    page = get_current_page(self.sync_browser)
    effective_selector = self._selector_effective(selector=selector)
    try:
        page.click(effective_selector, strict=self.playwright_strict,
            timeout=self.playwright_timeout)
    except PlaywrightTimeoutError:
        return f"Unable to click on element '{selector}'"
    return f"Clicked element '{selector}'"
|
Use the tool.
|
test_chat_baichuan_with_kwargs
|
chat = ChatBaichuan()
message = HumanMessage(content='Hello')
response = chat([message], temperature=0.88, top_p=0.7)
assert isinstance(response, AIMessage)
assert isinstance(response.content, str)
|
def test_chat_baichuan_with_kwargs() ->None:
    """Smoke-test ChatBaichuan with per-call sampling kwargs."""
    chat = ChatBaichuan()
    response = chat([HumanMessage(content='Hello')], temperature=0.88,
        top_p=0.7)
    assert isinstance(response, AIMessage)
    assert isinstance(response.content, str)
| null |
set
|
self.store[key] = value
|
def set(self, key: str, value: Optional[str]) ->None:
    """Store ``value`` under ``key`` in the backing ``self.store`` mapping.

    A ``None`` value is stored as-is; the key is not removed.
    """
    self.store[key] = value
| null |
_import_google_cloud_texttospeech
|
try:
from google.cloud import texttospeech
except ImportError as e:
raise ImportError(
'Cannot import google.cloud.texttospeech, please install `pip install google-cloud-texttospeech`.'
) from e
return texttospeech
|
def _import_google_cloud_texttospeech() ->Any:
    """Import and return the ``google.cloud.texttospeech`` module, raising a
    helpful ImportError when the dependency is missing."""
    try:
        from google.cloud import texttospeech
    except ImportError as e:
        raise ImportError(
            'Cannot import google.cloud.texttospeech, please install `pip install google-cloud-texttospeech`.'
            ) from e
    else:
        return texttospeech
| null |
indent_lines_after_first
|
"""Indent all lines of text after the first line.
Args:
text: The text to indent
prefix: Used to determine the number of spaces to indent
Returns:
str: The indented text
"""
n_spaces = len(prefix)
spaces = ' ' * n_spaces
lines = text.splitlines()
return '\n'.join([lines[0]] + [(spaces + line) for line in lines[1:]])
|
def indent_lines_after_first(text: str, prefix: str) ->str:
    """Indent all lines of text after the first line.
    Args:
        text: The text to indent
        prefix: Used to determine the number of spaces to indent
    Returns:
        str: The indented text
    """
    pad = ' ' * len(prefix)
    lines = text.splitlines()
    indented = [lines[0]]
    indented.extend(pad + line for line in lines[1:])
    return '\n'.join(indented)
|
Indent all lines of text after the first line.
Args:
text: The text to indent
prefix: Used to determine the number of spaces to indent
Returns:
str: The indented text
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.