method_name
stringlengths 1
78
| method_body
stringlengths 3
9.66k
| full_code
stringlengths 31
10.7k
| docstring
stringlengths 4
4.74k
⌀ |
|---|---|---|---|
format_agent_scratchpad
|
thoughts = ''
for action, observation in intermediate_steps:
thoughts += action.log
thoughts += '</search_query>' + _format_docs(observation)
return thoughts
|
def format_agent_scratchpad(intermediate_steps):
thoughts = ''
for action, observation in intermediate_steps:
thoughts += action.log
thoughts += '</search_query>' + _format_docs(observation)
return thoughts
| null |
is_public_page
|
"""Check if a page is publicly accessible."""
restrictions = self.confluence.get_all_restrictions_for_content(page['id'])
return page['status'] == 'current' and not restrictions['read']['restrictions'
]['user']['results'] and not restrictions['read']['restrictions']['group'][
'results']
|
def is_public_page(self, page: dict) ->bool:
"""Check if a page is publicly accessible."""
restrictions = self.confluence.get_all_restrictions_for_content(page['id'])
return page['status'] == 'current' and not restrictions['read'][
'restrictions']['user']['results'] and not restrictions['read'][
'restrictions']['group']['results']
|
Check if a page is publicly accessible.
|
validate_environment
|
"""Validate that service name, index name and api key exists in environment."""
values['service_name'] = get_from_dict_or_env(values, 'service_name',
'AZURE_COGNITIVE_SEARCH_SERVICE_NAME')
values['index_name'] = get_from_dict_or_env(values, 'index_name',
'AZURE_COGNITIVE_SEARCH_INDEX_NAME')
values['api_key'] = get_from_dict_or_env(values, 'api_key',
'AZURE_COGNITIVE_SEARCH_API_KEY')
return values
|
@root_validator(pre=True)
def validate_environment(cls, values: Dict) ->Dict:
"""Validate that service name, index name and api key exists in environment."""
values['service_name'] = get_from_dict_or_env(values, 'service_name',
'AZURE_COGNITIVE_SEARCH_SERVICE_NAME')
values['index_name'] = get_from_dict_or_env(values, 'index_name',
'AZURE_COGNITIVE_SEARCH_INDEX_NAME')
values['api_key'] = get_from_dict_or_env(values, 'api_key',
'AZURE_COGNITIVE_SEARCH_API_KEY')
return values
|
Validate that service name, index name and api key exists in environment.
|
similarity_search_by_vector
|
results = self.similarity_search_by_vector_with_score(embedding, k, **kwargs)
return [r[0] for r in results]
|
def similarity_search_by_vector(self, embedding: List[float], k: int=4, **
kwargs: Any) ->List[Document]:
results = self.similarity_search_by_vector_with_score(embedding, k, **
kwargs)
return [r[0] for r in results]
| null |
test_all_imports
|
assert set(__all__) == set(EXPECTED_ALL)
|
def test_all_imports() ->None:
assert set(__all__) == set(EXPECTED_ALL)
| null |
_generate
|
return LLMResult(generations=[[Generation(text='foo') for _ in range(self.n)]])
|
def _generate(self, prompts: List[str], stop: Optional[List[str]]=None,
run_manager: Optional[CallbackManagerForLLMRun]=None, **kwargs: Any
) ->LLMResult:
return LLMResult(generations=[[Generation(text='foo') for _ in range(
self.n)]])
| null |
transform_documents
|
"""Extracts QA from text documents using doctran."""
try:
from doctran import Doctran
doctran = Doctran(openai_api_key=self.openai_api_key, openai_model=self
.openai_api_model)
except ImportError:
raise ImportError(
'Install doctran to use this parser. (pip install doctran)')
for d in documents:
doctran_doc = doctran.parse_folder(content=d.page_content).interrogate(
).execute()
questions_and_answers = doctran_doc.extracted_properties.get(
'questions_and_answers')
d.metadata['questions_and_answers'] = questions_and_answers
return documents
|
def transform_documents(self, documents: Sequence[Document], **kwargs: Any
) ->Sequence[Document]:
"""Extracts QA from text documents using doctran."""
try:
from doctran import Doctran
doctran = Doctran(openai_api_key=self.openai_api_key, openai_model=
self.openai_api_model)
except ImportError:
raise ImportError(
'Install doctran to use this parser. (pip install doctran)')
for d in documents:
doctran_doc = doctran.parse_folder(content=d.page_content).interrogate(
).execute()
questions_and_answers = doctran_doc.extracted_properties.get(
'questions_and_answers')
d.metadata['questions_and_answers'] = questions_and_answers
return documents
|
Extracts QA from text documents using doctran.
|
_import_volcengine_maas
|
from langchain_community.llms.volcengine_maas import VolcEngineMaasLLM
return VolcEngineMaasLLM
|
def _import_volcengine_maas() ->Any:
from langchain_community.llms.volcengine_maas import VolcEngineMaasLLM
return VolcEngineMaasLLM
| null |
_import_octoai_endpoint
|
from langchain_community.llms.octoai_endpoint import OctoAIEndpoint
return OctoAIEndpoint
|
def _import_octoai_endpoint() ->Any:
from langchain_community.llms.octoai_endpoint import OctoAIEndpoint
return OctoAIEndpoint
| null |
test_default_system_message
|
messages = [HumanMessage(content='usr-msg-1')]
actual = model.predict_messages(messages).content
expected = f"""<s>[INST] <<SYS>>
{DEFAULT_SYSTEM_PROMPT}
<</SYS>>
usr-msg-1 [/INST]"""
assert actual == expected
|
def test_default_system_message(model: Llama2Chat) ->None:
messages = [HumanMessage(content='usr-msg-1')]
actual = model.predict_messages(messages).content
expected = (
f'<s>[INST] <<SYS>>\n{DEFAULT_SYSTEM_PROMPT}\n<</SYS>>\n\nusr-msg-1 [/INST]'
)
assert actual == expected
| null |
__init__
|
super().__init__(**kwargs)
self._lc_kwargs = kwargs
|
def __init__(self, **kwargs: Any) ->None:
super().__init__(**kwargs)
self._lc_kwargs = kwargs
| null |
_identifying_params
|
"""Get the identifying parameters."""
return {**{'model_name': self.model}, **self._default_params}
|
@property
def _identifying_params(self) ->Dict[str, Any]:
"""Get the identifying parameters."""
return {**{'model_name': self.model}, **self._default_params}
|
Get the identifying parameters.
|
_generate
|
if self.streaming:
stream_iter = self._stream(messages, stop=stop, run_manager=run_manager,
**kwargs)
return generate_from_stream(stream_iter)
prompt = self._convert_messages_to_prompt(messages)
params: Dict[str, Any] = {'prompt': prompt, **self._default_params, **kwargs}
if stop:
params['stop_sequences'] = stop
response = self.client.completions.create(**params)
completion = response.completion
message = AIMessage(content=completion)
return ChatResult(generations=[ChatGeneration(message=message)])
|
def _generate(self, messages: List[BaseMessage], stop: Optional[List[str]]=
None, run_manager: Optional[CallbackManagerForLLMRun]=None, **kwargs: Any
) ->ChatResult:
if self.streaming:
stream_iter = self._stream(messages, stop=stop, run_manager=
run_manager, **kwargs)
return generate_from_stream(stream_iter)
prompt = self._convert_messages_to_prompt(messages)
params: Dict[str, Any] = {'prompt': prompt, **self._default_params, **
kwargs}
if stop:
params['stop_sequences'] = stop
response = self.client.completions.create(**params)
completion = response.completion
message = AIMessage(content=completion)
return ChatResult(generations=[ChatGeneration(message=message)])
| null |
__init__
|
"""Initializes the loader.
Args:
config: The config to pass to the source connector.
stream_name: The name of the stream to load.
record_handler: A function that takes in a record and an optional id and
returns a Document. If None, the record will be used as the document.
Defaults to None.
state: The state to pass to the source connector. Defaults to None.
"""
source_class = guard_import('source_shopify', pip_name='airbyte-source-shopify'
).SourceShopify
super().__init__(config=config, source_class=source_class, stream_name=
stream_name, record_handler=record_handler, state=state)
|
def __init__(self, config: Mapping[str, Any], stream_name: str,
record_handler: Optional[RecordHandler]=None, state: Optional[Any]=None
) ->None:
"""Initializes the loader.
Args:
config: The config to pass to the source connector.
stream_name: The name of the stream to load.
record_handler: A function that takes in a record and an optional id and
returns a Document. If None, the record will be used as the document.
Defaults to None.
state: The state to pass to the source connector. Defaults to None.
"""
source_class = guard_import('source_shopify', pip_name=
'airbyte-source-shopify').SourceShopify
super().__init__(config=config, source_class=source_class, stream_name=
stream_name, record_handler=record_handler, state=state)
|
Initializes the loader.
Args:
config: The config to pass to the source connector.
stream_name: The name of the stream to load.
record_handler: A function that takes in a record and an optional id and
returns a Document. If None, the record will be used as the document.
Defaults to None.
state: The state to pass to the source connector. Defaults to None.
|
import_flytekit
|
"""Import flytekit and flytekitplugins-deck-standard."""
try:
import flytekit
from flytekitplugins.deck import renderer
except ImportError:
raise ImportError(
'To use the flyte callback manager you needto have the `flytekit` and `flytekitplugins-deck-standard`packages installed. Please install them with `pip install flytekit`and `pip install flytekitplugins-deck-standard`.'
)
return flytekit, renderer
|
def import_flytekit() ->Tuple[flytekit, renderer]:
"""Import flytekit and flytekitplugins-deck-standard."""
try:
import flytekit
from flytekitplugins.deck import renderer
except ImportError:
raise ImportError(
'To use the flyte callback manager you needto have the `flytekit` and `flytekitplugins-deck-standard`packages installed. Please install them with `pip install flytekit`and `pip install flytekitplugins-deck-standard`.'
)
return flytekit, renderer
|
Import flytekit and flytekitplugins-deck-standard.
|
requires_input
|
"""Whether this evaluator requires an input string."""
return False
|
@property
def requires_input(self) ->bool:
"""Whether this evaluator requires an input string."""
return False
|
Whether this evaluator requires an input string.
|
get_lc_namespace
|
"""Get the namespace of the langchain object."""
return ['langchain', 'schema', 'messages']
|
@classmethod
def get_lc_namespace(cls) ->List[str]:
"""Get the namespace of the langchain object."""
return ['langchain', 'schema', 'messages']
|
Get the namespace of the langchain object.
|
load
|
table = self.pq.read_table(self.persist_path)
df = table.to_pandas()
return {col: series.tolist() for col, series in df.items()}
|
def load(self) ->Any:
table = self.pq.read_table(self.persist_path)
df = table.to_pandas()
return {col: series.tolist() for col, series in df.items()}
| null |
test_mset
|
store = InMemoryStore()
store.mset([('key1', 'value1'), ('key2', 'value2')])
values = store.mget(['key1', 'key2'])
assert values == ['value1', 'value2']
|
def test_mset() ->None:
store = InMemoryStore()
store.mset([('key1', 'value1'), ('key2', 'value2')])
values = store.mget(['key1', 'key2'])
assert values == ['value1', 'value2']
| null |
_get_chat_messages
|
if len(prompts) > 1:
raise ValueError(
f'Anyscale currently only supports single prompt, got {prompts}')
messages = self.prefix_messages + [{'role': 'user', 'content': prompts[0]}]
params: Dict[str, Any] = self._invocation_params
if stop is not None:
if 'stop' in params:
raise ValueError('`stop` found in both the input and default params.')
params['stop'] = stop
if params.get('max_tokens') == -1:
del params['max_tokens']
return messages, params
|
def _get_chat_messages(self, prompts: List[str], stop: Optional[List[str]]=None
) ->Tuple:
if len(prompts) > 1:
raise ValueError(
f'Anyscale currently only supports single prompt, got {prompts}')
messages = self.prefix_messages + [{'role': 'user', 'content': prompts[0]}]
params: Dict[str, Any] = self._invocation_params
if stop is not None:
if 'stop' in params:
raise ValueError(
'`stop` found in both the input and default params.')
params['stop'] = stop
if params.get('max_tokens') == -1:
del params['max_tokens']
return messages, params
| null |
_import_bageldb
|
from langchain_community.vectorstores.bageldb import Bagel
return Bagel
|
def _import_bageldb() ->Any:
from langchain_community.vectorstores.bageldb import Bagel
return Bagel
| null |
validate_environment
|
values['llm'] = values.get('llm') or ChatOllama(**values, format='json')
values['tool_system_prompt_template'] = values.get(
'tool_system_prompt_template') or DEFAULT_SYSTEM_TEMPLATE
return values
|
@root_validator(pre=True)
def validate_environment(cls, values: Dict) ->Dict:
values['llm'] = values.get('llm') or ChatOllama(**values, format='json')
values['tool_system_prompt_template'] = values.get(
'tool_system_prompt_template') or DEFAULT_SYSTEM_TEMPLATE
return values
| null |
_generate
|
choices = []
token_usage: Dict[str, int] = {}
_keys = {'completion_tokens', 'prompt_tokens', 'total_tokens'}
for prompt in prompts:
if self.streaming:
generation: Optional[GenerationChunk] = None
for chunk in self._stream(prompt, stop, run_manager, **kwargs):
if generation is None:
generation = chunk
else:
generation += chunk
assert generation is not None
choices.append({'message': {'content': generation.text},
'finish_reason': generation.generation_info.get('finish_reason'
) if generation.generation_info else None, 'logprobs':
generation.generation_info.get('logprobs') if generation.
generation_info else None})
else:
messages, params = self._get_chat_messages([prompt], stop)
params = {**params, **kwargs}
response = completion_with_retry(self, messages=messages,
run_manager=run_manager, **params)
choices.extend(response['choices'])
update_token_usage(_keys, response, token_usage)
return create_llm_result(choices, prompts, token_usage, self.model_name)
|
def _generate(self, prompts: List[str], stop: Optional[List[str]]=None,
run_manager: Optional[CallbackManagerForLLMRun]=None, **kwargs: Any
) ->LLMResult:
choices = []
token_usage: Dict[str, int] = {}
_keys = {'completion_tokens', 'prompt_tokens', 'total_tokens'}
for prompt in prompts:
if self.streaming:
generation: Optional[GenerationChunk] = None
for chunk in self._stream(prompt, stop, run_manager, **kwargs):
if generation is None:
generation = chunk
else:
generation += chunk
assert generation is not None
choices.append({'message': {'content': generation.text},
'finish_reason': generation.generation_info.get(
'finish_reason') if generation.generation_info else None,
'logprobs': generation.generation_info.get('logprobs') if
generation.generation_info else None})
else:
messages, params = self._get_chat_messages([prompt], stop)
params = {**params, **kwargs}
response = completion_with_retry(self, messages=messages,
run_manager=run_manager, **params)
choices.extend(response['choices'])
update_token_usage(_keys, response, token_usage)
return create_llm_result(choices, prompts, token_usage, self.model_name)
| null |
test_namespace_is_used
|
"""Verify that namespace is taken into account for all operations."""
assert manager.namespace == 'kittens'
with manager._make_session() as session:
session.add(UpsertionRecord(key='key1', namespace='kittens'))
session.add(UpsertionRecord(key='key2', namespace='kittens'))
session.add(UpsertionRecord(key='key1', namespace='puppies'))
session.add(UpsertionRecord(key='key3', namespace='puppies'))
session.commit()
assert manager.list_keys() == ['key1', 'key2']
manager.delete_keys(['key1'])
assert manager.list_keys() == ['key2']
manager.update(['key3'], group_ids=['group3'])
with manager._make_session() as session:
results = session.query(UpsertionRecord).all()
assert sorted([(r.namespace, r.key, r.group_id) for r in results]) == [(
'kittens', 'key2', None), ('kittens', 'key3', 'group3'), ('puppies',
'key1', None), ('puppies', 'key3', None)]
|
def test_namespace_is_used(manager: SQLRecordManager) ->None:
"""Verify that namespace is taken into account for all operations."""
assert manager.namespace == 'kittens'
with manager._make_session() as session:
session.add(UpsertionRecord(key='key1', namespace='kittens'))
session.add(UpsertionRecord(key='key2', namespace='kittens'))
session.add(UpsertionRecord(key='key1', namespace='puppies'))
session.add(UpsertionRecord(key='key3', namespace='puppies'))
session.commit()
assert manager.list_keys() == ['key1', 'key2']
manager.delete_keys(['key1'])
assert manager.list_keys() == ['key2']
manager.update(['key3'], group_ids=['group3'])
with manager._make_session() as session:
results = session.query(UpsertionRecord).all()
assert sorted([(r.namespace, r.key, r.group_id) for r in results]) == [
('kittens', 'key2', None), ('kittens', 'key3', 'group3'), (
'puppies', 'key1', None), ('puppies', 'key3', None)]
|
Verify that namespace is taken into account for all operations.
|
test_success
|
"""Test that the tool runs successfully."""
tool = YahooFinanceNewsTool()
query = 'Microsoft'
result = tool.run(query)
assert result is not None
assert f'Company ticker {query} not found.' not in result
|
def test_success() ->None:
"""Test that the tool runs successfully."""
tool = YahooFinanceNewsTool()
query = 'Microsoft'
result = tool.run(query)
assert result is not None
assert f'Company ticker {query} not found.' not in result
|
Test that the tool runs successfully.
|
embed_with_retry
|
"""Use tenacity to retry the embedding call."""
retry_decorator = _create_retry_decorator(embeddings)
@retry_decorator
def _embed_with_retry(**kwargs: Any) ->Any:
response = embeddings.client.create(**kwargs)
return _check_response(response)
return _embed_with_retry(**kwargs)
|
def embed_with_retry(embeddings: LocalAIEmbeddings, **kwargs: Any) ->Any:
"""Use tenacity to retry the embedding call."""
retry_decorator = _create_retry_decorator(embeddings)
@retry_decorator
def _embed_with_retry(**kwargs: Any) ->Any:
response = embeddings.client.create(**kwargs)
return _check_response(response)
return _embed_with_retry(**kwargs)
|
Use tenacity to retry the embedding call.
|
on_llm_error
|
"""Do nothing when LLM outputs an error."""
pass
|
def on_llm_error(self, error: BaseException, **kwargs: Any) ->None:
"""Do nothing when LLM outputs an error."""
pass
|
Do nothing when LLM outputs an error.
|
test_json_spec_value_max_length
|
"""Test JsonSpec can return value of a dict at given path."""
spec = JsonSpec(dict_={'foo': 'bar', 'baz': {'test': {'foo': [1, 2, 3]}}},
max_value_length=5)
assert spec.value('data["foo"]') == 'bar'
assert spec.value('data["baz"]'
) == 'Value is a large dictionary, should explore its keys directly'
assert spec.value('data["baz"]["test"]'
) == 'Value is a large dictionary, should explore its keys directly'
assert spec.value('data["baz"]["test"]["foo"]') == '[1, 2...'
|
def test_json_spec_value_max_length() ->None:
"""Test JsonSpec can return value of a dict at given path."""
spec = JsonSpec(dict_={'foo': 'bar', 'baz': {'test': {'foo': [1, 2, 3]}
}}, max_value_length=5)
assert spec.value('data["foo"]') == 'bar'
assert spec.value('data["baz"]'
) == 'Value is a large dictionary, should explore its keys directly'
assert spec.value('data["baz"]["test"]'
) == 'Value is a large dictionary, should explore its keys directly'
assert spec.value('data["baz"]["test"]["foo"]') == '[1, 2...'
|
Test JsonSpec can return value of a dict at given path.
|
_import_ainetwork_transfer
|
from langchain_community.tools.ainetwork.transfer import AINTransfer
return AINTransfer
|
def _import_ainetwork_transfer() ->Any:
from langchain_community.tools.ainetwork.transfer import AINTransfer
return AINTransfer
| null |
_process_start_trace
|
if not run.parent_run_id:
chain_: 'Chain' = self._chain.Chain(inputs=run.inputs, metadata=None,
experiment_info=self._experiment_info.get())
self._chains_map[run.id] = chain_
else:
span: 'Span' = self._span.Span(inputs=run.inputs, category=
_get_run_type(run), metadata=run.extra, name=run.name)
span.__api__start__(self._chains_map[run.parent_run_id])
self._chains_map[run.id] = self._chains_map[run.parent_run_id]
self._span_map[run.id] = span
|
def _process_start_trace(self, run: 'Run') ->None:
if not run.parent_run_id:
chain_: 'Chain' = self._chain.Chain(inputs=run.inputs, metadata=
None, experiment_info=self._experiment_info.get())
self._chains_map[run.id] = chain_
else:
span: 'Span' = self._span.Span(inputs=run.inputs, category=
_get_run_type(run), metadata=run.extra, name=run.name)
span.__api__start__(self._chains_map[run.parent_run_id])
self._chains_map[run.id] = self._chains_map[run.parent_run_id]
self._span_map[run.id] = span
| null |
on_chain_error
|
"""Do nothing when LLM chain outputs an error."""
|
def on_chain_error(self, error: BaseException, **kwargs: Any) ->None:
"""Do nothing when LLM chain outputs an error."""
|
Do nothing when LLM chain outputs an error.
|
_clean_url
|
"""Strips quotes from the url."""
return url.strip('"\'')
|
def _clean_url(url: str) ->str:
"""Strips quotes from the url."""
return url.strip('"\'')
|
Strips quotes from the url.
|
_import_pinecone
|
from langchain_community.vectorstores.pinecone import Pinecone
return Pinecone
|
def _import_pinecone() ->Any:
from langchain_community.vectorstores.pinecone import Pinecone
return Pinecone
| null |
_infer_skip_keys
|
keys = []
if isinstance(obj, dict):
for k, v in obj.items():
if k == '$ref':
ref = _retrieve_ref(v, full_schema)
keys.append(v.split('/')[1])
keys += _infer_skip_keys(ref, full_schema)
elif isinstance(v, (list, dict)):
keys += _infer_skip_keys(v, full_schema)
elif isinstance(obj, list):
for el in obj:
keys += _infer_skip_keys(el, full_schema)
return keys
|
def _infer_skip_keys(obj: Any, full_schema: dict) ->List[str]:
keys = []
if isinstance(obj, dict):
for k, v in obj.items():
if k == '$ref':
ref = _retrieve_ref(v, full_schema)
keys.append(v.split('/')[1])
keys += _infer_skip_keys(ref, full_schema)
elif isinstance(v, (list, dict)):
keys += _infer_skip_keys(v, full_schema)
elif isinstance(obj, list):
for el in obj:
keys += _infer_skip_keys(el, full_schema)
return keys
| null |
test_bad_outputs
|
"""Test errors are raised if outputs keys are not found."""
chain = FakeChain(be_correct=False)
with pytest.raises(ValueError):
chain({'foo': 'baz'})
|
def test_bad_outputs() ->None:
"""Test errors are raised if outputs keys are not found."""
chain = FakeChain(be_correct=False)
with pytest.raises(ValueError):
chain({'foo': 'baz'})
|
Test errors are raised if outputs keys are not found.
|
format
|
"""Format the prompt with the inputs.
Args:
**kwargs: Any arguments to be passed to the prompt template.
Returns:
A formatted string.
Example:
.. code-block:: python
prompt.format(variable1="foo")
"""
kwargs = self._merge_partial_and_user_variables(**kwargs)
examples = self._get_examples(**kwargs)
examples = [{k: e[k] for k in self.example_prompt.input_variables} for e in
examples]
example_strings = [self.example_prompt.format(**example) for example in
examples]
pieces = [self.prefix, *example_strings, self.suffix]
template = self.example_separator.join([piece for piece in pieces if piece])
return DEFAULT_FORMATTER_MAPPING[self.template_format](template, **kwargs)
|
def format(self, **kwargs: Any) ->str:
"""Format the prompt with the inputs.
Args:
**kwargs: Any arguments to be passed to the prompt template.
Returns:
A formatted string.
Example:
.. code-block:: python
prompt.format(variable1="foo")
"""
kwargs = self._merge_partial_and_user_variables(**kwargs)
examples = self._get_examples(**kwargs)
examples = [{k: e[k] for k in self.example_prompt.input_variables} for
e in examples]
example_strings = [self.example_prompt.format(**example) for example in
examples]
pieces = [self.prefix, *example_strings, self.suffix]
template = self.example_separator.join([piece for piece in pieces if piece]
)
return DEFAULT_FORMATTER_MAPPING[self.template_format](template, **kwargs)
|
Format the prompt with the inputs.
Args:
**kwargs: Any arguments to be passed to the prompt template.
Returns:
A formatted string.
Example:
.. code-block:: python
prompt.format(variable1="foo")
|
on_chain_start
|
self.on_chain_start_common()
|
def on_chain_start(self, *args: Any, **kwargs: Any) ->Any:
self.on_chain_start_common()
| null |
_type
|
return 'xml'
|
@property
def _type(self) ->str:
return 'xml'
| null |
ignore_agent
|
"""Whether to ignore agent callbacks."""
return self.ignore_agent_
|
@property
def ignore_agent(self) ->bool:
"""Whether to ignore agent callbacks."""
return self.ignore_agent_
|
Whether to ignore agent callbacks.
|
load
|
"""Load documents."""
document_list = []
if self.channel_name:
document_list.extend(self._get_document_for_channel(self.channel_name))
elif self.video_ids:
document_list.extend([self._get_document_for_video_id(video_id) for
video_id in self.video_ids])
else:
raise ValueError('Must specify either channel_name or video_ids')
return document_list
|
def load(self) ->List[Document]:
"""Load documents."""
document_list = []
if self.channel_name:
document_list.extend(self._get_document_for_channel(self.channel_name))
elif self.video_ids:
document_list.extend([self._get_document_for_video_id(video_id) for
video_id in self.video_ids])
else:
raise ValueError('Must specify either channel_name or video_ids')
return document_list
|
Load documents.
|
test_replicate_streaming_call
|
"""Test streaming call to Replicate."""
callback_handler = FakeCallbackHandler()
callback_manager = CallbackManager([callback_handler])
llm = Replicate(streaming=True, callback_manager=callback_manager, model=
TEST_MODEL)
output = llm('What is LangChain')
assert output
assert isinstance(output, str)
|
def test_replicate_streaming_call() ->None:
"""Test streaming call to Replicate."""
callback_handler = FakeCallbackHandler()
callback_manager = CallbackManager([callback_handler])
llm = Replicate(streaming=True, callback_manager=callback_manager,
model=TEST_MODEL)
output = llm('What is LangChain')
assert output
assert isinstance(output, str)
|
Test streaming call to Replicate.
|
test_conversation_chain_works
|
"""Test that conversation chain works in basic setting."""
llm = FakeLLM()
prompt = PromptTemplate(input_variables=['foo', 'bar'], template='{foo} {bar}')
memory = ConversationBufferMemory(memory_key='foo')
chain = ConversationChain(llm=llm, prompt=prompt, memory=memory, input_key=
'bar')
chain.run('foo')
|
def test_conversation_chain_works() ->None:
"""Test that conversation chain works in basic setting."""
llm = FakeLLM()
prompt = PromptTemplate(input_variables=['foo', 'bar'], template=
'{foo} {bar}')
memory = ConversationBufferMemory(memory_key='foo')
chain = ConversationChain(llm=llm, prompt=prompt, memory=memory,
input_key='bar')
chain.run('foo')
|
Test that conversation chain works in basic setting.
|
extension
|
"""The file extension suggested by this serializer (without dot)."""
|
@classmethod
@abstractmethod
def extension(cls) ->str:
"""The file extension suggested by this serializer (without dot)."""
|
The file extension suggested by this serializer (without dot).
|
_get_root_referenced_schema
|
"""Get the root reference or err."""
from openapi_pydantic import Reference
schema = self.get_referenced_schema(ref)
while isinstance(schema, Reference):
schema = self.get_referenced_schema(schema)
return schema
|
def _get_root_referenced_schema(self, ref: Reference) ->Schema:
"""Get the root reference or err."""
from openapi_pydantic import Reference
schema = self.get_referenced_schema(ref)
while isinstance(schema, Reference):
schema = self.get_referenced_schema(schema)
return schema
|
Get the root reference or err.
|
validate_environment
|
"""
Validate whether qianfan_ak and qianfan_sk in the environment variables or
configuration file are available or not.
init qianfan embedding client with `ak`, `sk`, `model`, `endpoint`
Args:
values: a dictionary containing configuration information, must include the
fields of qianfan_ak and qianfan_sk
Returns:
a dictionary containing configuration information. If qianfan_ak and
qianfan_sk are not provided in the environment variables or configuration
file,the original values will be returned; otherwise, values containing
qianfan_ak and qianfan_sk will be returned.
Raises:
ValueError: qianfan package not found, please install it with `pip install
qianfan`
"""
values['qianfan_ak'] = convert_to_secret_str(get_from_dict_or_env(values,
'qianfan_ak', 'QIANFAN_AK', default=''))
values['qianfan_sk'] = convert_to_secret_str(get_from_dict_or_env(values,
'qianfan_sk', 'QIANFAN_SK', default=''))
try:
import qianfan
params = {**values.get('init_kwargs', {}), 'model': values['model']}
if values['qianfan_ak'].get_secret_value() != '':
params['ak'] = values['qianfan_ak'].get_secret_value()
if values['qianfan_sk'].get_secret_value() != '':
params['sk'] = values['qianfan_sk'].get_secret_value()
if values['endpoint'] is not None and values['endpoint'] != '':
params['endpoint'] = values['endpoint']
values['client'] = qianfan.Embedding(**params)
except ImportError:
raise ImportError(
'qianfan package not found, please install it with `pip install qianfan`'
)
return values
|
@root_validator()
def validate_environment(cls, values: Dict) ->Dict:
"""
Validate whether qianfan_ak and qianfan_sk in the environment variables or
configuration file are available or not.
init qianfan embedding client with `ak`, `sk`, `model`, `endpoint`
Args:
values: a dictionary containing configuration information, must include the
fields of qianfan_ak and qianfan_sk
Returns:
a dictionary containing configuration information. If qianfan_ak and
qianfan_sk are not provided in the environment variables or configuration
file,the original values will be returned; otherwise, values containing
qianfan_ak and qianfan_sk will be returned.
Raises:
ValueError: qianfan package not found, please install it with `pip install
qianfan`
"""
values['qianfan_ak'] = convert_to_secret_str(get_from_dict_or_env(
values, 'qianfan_ak', 'QIANFAN_AK', default=''))
values['qianfan_sk'] = convert_to_secret_str(get_from_dict_or_env(
values, 'qianfan_sk', 'QIANFAN_SK', default=''))
try:
import qianfan
params = {**values.get('init_kwargs', {}), 'model': values['model']}
if values['qianfan_ak'].get_secret_value() != '':
params['ak'] = values['qianfan_ak'].get_secret_value()
if values['qianfan_sk'].get_secret_value() != '':
params['sk'] = values['qianfan_sk'].get_secret_value()
if values['endpoint'] is not None and values['endpoint'] != '':
params['endpoint'] = values['endpoint']
values['client'] = qianfan.Embedding(**params)
except ImportError:
raise ImportError(
'qianfan package not found, please install it with `pip install qianfan`'
)
return values
|
Validate whether qianfan_ak and qianfan_sk in the environment variables or
configuration file are available or not.
init qianfan embedding client with `ak`, `sk`, `model`, `endpoint`
Args:
values: a dictionary containing configuration information, must include the
fields of qianfan_ak and qianfan_sk
Returns:
a dictionary containing configuration information. If qianfan_ak and
qianfan_sk are not provided in the environment variables or configuration
file,the original values will be returned; otherwise, values containing
qianfan_ak and qianfan_sk will be returned.
Raises:
ValueError: qianfan package not found, please install it with `pip install
qianfan`
|
test_vectara_with_summary
|
"""Test vectara summary."""
num_results = 10
output1 = vectara3.similarity_search(query='what is generative AI?', k=
num_results, summary_config=SummaryConfig(is_enabled=True, max_results=5))
assert len(output1) == num_results + 1
assert len(output1[num_results].page_content) > 0
|
def test_vectara_with_summary(vectara3) ->None:
"""Test vectara summary."""
num_results = 10
output1 = vectara3.similarity_search(query='what is generative AI?', k=
num_results, summary_config=SummaryConfig(is_enabled=True,
max_results=5))
assert len(output1) == num_results + 1
assert len(output1[num_results].page_content) > 0
|
Test vectara summary.
|
save_context
|
"""
Save context from this conversation history to the entity store.
Generates a summary for each entity in the entity cache by prompting
the model, and saves these summaries to the entity store.
"""
super().save_context(inputs, outputs)
if self.input_key is None:
prompt_input_key = get_prompt_input_key(inputs, self.memory_variables)
else:
prompt_input_key = self.input_key
buffer_string = get_buffer_string(self.buffer[-self.k * 2:], human_prefix=
self.human_prefix, ai_prefix=self.ai_prefix)
input_data = inputs[prompt_input_key]
chain = LLMChain(llm=self.llm, prompt=self.entity_summarization_prompt)
for entity in self.entity_cache:
existing_summary = self.entity_store.get(entity, '')
output = chain.predict(summary=existing_summary, entity=entity, history
=buffer_string, input=input_data)
self.entity_store.set(entity, output.strip())
|
def save_context(self, inputs: Dict[str, Any], outputs: Dict[str, str]) ->None:
    """
    Save context from this conversation history to the entity store.
    Generates a summary for each entity in the entity cache by prompting
    the model, and saves these summaries to the entity store.
    """
    # Record the exchange in the underlying buffer first.
    super().save_context(inputs, outputs)
    # Resolve which input key holds the user's utterance.
    if self.input_key is not None:
        prompt_input_key = self.input_key
    else:
        prompt_input_key = get_prompt_input_key(inputs, self.memory_variables)
    # Render the last k exchanges (two messages per exchange) as plain text.
    recent_history = get_buffer_string(self.buffer[-self.k * 2:],
        human_prefix=self.human_prefix, ai_prefix=self.ai_prefix)
    user_input = inputs[prompt_input_key]
    summarizer = LLMChain(llm=self.llm, prompt=self.entity_summarization_prompt)
    # Refresh the stored summary for every entity seen this turn.
    for entity in self.entity_cache:
        previous_summary = self.entity_store.get(entity, '')
        updated_summary = summarizer.predict(summary=previous_summary,
            entity=entity, history=recent_history, input=user_input)
        self.entity_store.set(entity, updated_summary.strip())
|
Save context from this conversation history to the entity store.
Generates a summary for each entity in the entity cache by prompting
the model, and saves these summaries to the entity store.
|
create_schema
|
"""Create the database schema."""
if isinstance(self.engine, AsyncEngine):
raise AssertionError('This method is not supported for async engines.')
Base.metadata.create_all(self.engine)
|
def create_schema(self) ->None:
    """Create the database schema.

    Creates all tables registered on the declarative ``Base`` metadata.

    Raises:
        AssertionError: If the store was configured with an async engine,
            which this synchronous method cannot drive.
    """
    if isinstance(self.engine, AsyncEngine):
        raise AssertionError('This method is not supported for async engines.')
    Base.metadata.create_all(self.engine)
|
Create the database schema.
|
call_func_with_variable_args
|
"""Call function that may optionally accept a run_manager and/or config.
Args:
func (Union[Callable[[Input], Output],
Callable[[Input, CallbackManagerForChainRun], Output],
Callable[[Input, CallbackManagerForChainRun, RunnableConfig], Output]]):
The function to call.
input (Input): The input to the function.
run_manager (CallbackManagerForChainRun): The run manager to
pass to the function.
config (RunnableConfig): The config to pass to the function.
**kwargs (Any): The keyword arguments to pass to the function.
Returns:
Output: The output of the function.
"""
if accepts_config(func):
if run_manager is not None:
kwargs['config'] = patch_config(config, callbacks=run_manager.
get_child())
else:
kwargs['config'] = config
if run_manager is not None and accepts_run_manager(func):
kwargs['run_manager'] = run_manager
return func(input, **kwargs)
|
def call_func_with_variable_args(func: Union[Callable[[Input], Output],
    Callable[[Input, RunnableConfig], Output], Callable[[Input,
    CallbackManagerForChainRun], Output], Callable[[Input,
    CallbackManagerForChainRun, RunnableConfig], Output]], input: Input,
    config: RunnableConfig, run_manager: Optional[
    CallbackManagerForChainRun]=None, **kwargs: Any) ->Output:
    """Call function that may optionally accept a run_manager and/or config.
    Args:
        func (Union[Callable[[Input], Output],
            Callable[[Input, CallbackManagerForChainRun], Output],
            Callable[[Input, CallbackManagerForChainRun, RunnableConfig], Output]]):
            The function to call.
        input (Input): The input to the function.
        run_manager (CallbackManagerForChainRun): The run manager to
            pass to the function.
        config (RunnableConfig): The config to pass to the function.
        **kwargs (Any): The keyword arguments to pass to the function.
    Returns:
        Output: The output of the function.
    """
    # Forward `config` only when the callee declares it; when a run manager
    # is active, attach its child callback manager to that config.
    if accepts_config(func):
        kwargs['config'] = (config if run_manager is None else
            patch_config(config, callbacks=run_manager.get_child()))
    # Likewise forward the run manager only if present AND accepted.
    if run_manager is not None and accepts_run_manager(func):
        kwargs['run_manager'] = run_manager
    return func(input, **kwargs)
|
Call function that may optionally accept a run_manager and/or config.
Args:
func (Union[Callable[[Input], Output],
Callable[[Input, CallbackManagerForChainRun], Output],
Callable[[Input, CallbackManagerForChainRun, RunnableConfig], Output]]):
The function to call.
input (Input): The input to the function.
run_manager (CallbackManagerForChainRun): The run manager to
pass to the function.
config (RunnableConfig): The config to pass to the function.
**kwargs (Any): The keyword arguments to pass to the function.
Returns:
Output: The output of the function.
|
similarity_search_with_score_id
|
embedding_vector = self.embedding.embed_query(query)
return self.similarity_search_with_score_id_by_vector(embedding=
embedding_vector, k=k, filter=filter)
|
def similarity_search_with_score_id(self, query: str, k: int=4, filter:
    Optional[Dict[str, str]]=None) ->List[Tuple[Document, float, str]]:
    """Embed the query text and delegate to the by-vector variant.

    Returns (document, score, id) triples for the ``k`` nearest matches.
    """
    query_vector = self.embedding.embed_query(query)
    return self.similarity_search_with_score_id_by_vector(
        embedding=query_vector, k=k, filter=filter)
| null |
load
|
"""Load given path as pages."""
return list(self.lazy_load())
|
def load(self) ->List[Document]:
    """Load given path as pages.

    Eagerly materializes every document produced by ``lazy_load``.
    """
    return list(self.lazy_load())
|
Load given path as pages.
|
_get_bing_search
|
return BingSearchRun(api_wrapper=BingSearchAPIWrapper(**kwargs))
|
def _get_bing_search(**kwargs: Any) ->BaseTool:
    """Factory: build a Bing search tool around a configured API wrapper."""
    return BingSearchRun(api_wrapper=BingSearchAPIWrapper(**kwargs))
| null |
func
|
assert isinstance(tool_input, str)
assert isinstance(other_arg, str)
return tool_input + other_arg
|
def func(tool_input: str, other_arg: str) ->str:
    """Concatenate the two arguments after asserting both are strings."""
    for value in (tool_input, other_arg):
        assert isinstance(value, str)
    return tool_input + other_arg
| null |
test_embeddings_property
|
index = mock_index(index_details)
vectorsearch = default_databricks_vector_search(index)
assert vectorsearch.embeddings == DEFAULT_EMBEDDING_MODEL
|
@pytest.mark.requires('databricks', 'databricks.vector_search')
@pytest.mark.parametrize('index_details', [
    DELTA_SYNC_INDEX_SELF_MANAGED_EMBEDDINGS, DIRECT_ACCESS_INDEX])
def test_embeddings_property(index_details: dict) ->None:
    """The vector store should expose the embedding model it was built with."""
    store = default_databricks_vector_search(mock_index(index_details))
    assert store.embeddings == DEFAULT_EMBEDDING_MODEL
| null |
_load_chat_prompt
|
"""Load chat prompt from config"""
messages = config.pop('messages')
template = messages[0]['prompt'].pop('template') if messages else None
config.pop('input_variables')
if not template:
raise ValueError("Can't load chat prompt without template")
return ChatPromptTemplate.from_template(template=template, **config)
|
def _load_chat_prompt(config: Dict) ->ChatPromptTemplate:
    """Load chat prompt from config.

    Args:
        config: Serialized prompt config with a ``messages`` list whose first
            entry carries the template, plus extra ChatPromptTemplate kwargs.

    Returns:
        The reconstructed ChatPromptTemplate.

    Raises:
        ValueError: If no template string is present in the config.
    """
    messages = config.pop('messages')
    template = messages[0]['prompt'].pop('template') if messages else None
    # input_variables are re-derived from the template itself, so discard any
    # serialized copy. Use a default so configs that omit the key entirely
    # don't raise KeyError (the previous unconditional pop did).
    config.pop('input_variables', None)
    if not template:
        raise ValueError("Can't load chat prompt without template")
    return ChatPromptTemplate.from_template(template=template, **config)
|
Load chat prompt from config
|
from_llm
|
"""Initialize from LLM."""
qa_chain = LLMChain(llm=llm, prompt=qa_prompt)
cypher_generation_chain = LLMChain(llm=llm, prompt=cypher_prompt)
return cls(qa_chain=qa_chain, cypher_generation_chain=
cypher_generation_chain, **kwargs)
|
@classmethod
def from_llm(cls, llm: BaseLanguageModel, *, qa_prompt: BasePromptTemplate=
    CYPHER_QA_PROMPT, cypher_prompt: BasePromptTemplate=
    CYPHER_GENERATION_PROMPT, **kwargs: Any) ->FalkorDBQAChain:
    """Initialize from LLM."""
    # Two chains share the same LLM: one generates Cypher, one answers.
    return cls(
        qa_chain=LLMChain(llm=llm, prompt=qa_prompt),
        cypher_generation_chain=LLMChain(llm=llm, prompt=cypher_prompt),
        **kwargs,
    )
|
Initialize from LLM.
|
validate_environment
|
"""Validate that api key and python package exists in environment."""
gitlab_url = get_from_dict_or_env(values, 'gitlab_url', 'GITLAB_URL',
default='https://gitlab.com')
gitlab_repository = get_from_dict_or_env(values, 'gitlab_repository',
'GITLAB_REPOSITORY')
gitlab_personal_access_token = get_from_dict_or_env(values,
'gitlab_personal_access_token', 'GITLAB_PERSONAL_ACCESS_TOKEN')
gitlab_branch = get_from_dict_or_env(values, 'gitlab_branch',
'GITLAB_BRANCH', default='main')
gitlab_base_branch = get_from_dict_or_env(values, 'gitlab_base_branch',
'GITLAB_BASE_BRANCH', default='main')
try:
import gitlab
except ImportError:
raise ImportError(
'python-gitlab is not installed. Please install it with `pip install python-gitlab`'
)
g = gitlab.Gitlab(url=gitlab_url, private_token=
gitlab_personal_access_token, keep_base_url=True)
g.auth()
values['gitlab'] = g
values['gitlab_repo_instance'] = g.projects.get(gitlab_repository)
values['gitlab_repository'] = gitlab_repository
values['gitlab_personal_access_token'] = gitlab_personal_access_token
values['gitlab_branch'] = gitlab_branch
values['gitlab_base_branch'] = gitlab_base_branch
return values
|
@root_validator()
def validate_environment(cls, values: Dict) ->Dict:
    """Validate that api key and python package exists in environment."""
    # Pull connection settings from `values` or the environment, defaulting
    # to the public GitLab host and `main` for both branches.
    url = get_from_dict_or_env(values, 'gitlab_url', 'GITLAB_URL',
        default='https://gitlab.com')
    repository = get_from_dict_or_env(values, 'gitlab_repository',
        'GITLAB_REPOSITORY')
    token = get_from_dict_or_env(values, 'gitlab_personal_access_token',
        'GITLAB_PERSONAL_ACCESS_TOKEN')
    branch = get_from_dict_or_env(values, 'gitlab_branch', 'GITLAB_BRANCH',
        default='main')
    base_branch = get_from_dict_or_env(values, 'gitlab_base_branch',
        'GITLAB_BASE_BRANCH', default='main')
    try:
        import gitlab
    except ImportError:
        raise ImportError(
            'python-gitlab is not installed. Please install it with `pip install python-gitlab`'
            )
    # Authenticate eagerly so misconfiguration fails at construction time.
    client = gitlab.Gitlab(url=url, private_token=token, keep_base_url=True)
    client.auth()
    values.update({
        'gitlab': client,
        'gitlab_repo_instance': client.projects.get(repository),
        'gitlab_repository': repository,
        'gitlab_personal_access_token': token,
        'gitlab_branch': branch,
        'gitlab_base_branch': base_branch,
    })
    return values
|
Validate that api key and python package exists in environment.
|
on_llm_end
|
self.on_llm_end_common()
|
def on_llm_end(self, *args: Any, **kwargs: Any) ->Any:
    # Delegate to the shared end-of-LLM-call bookkeeping; arguments are
    # accepted for callback-interface compatibility but not used here.
    self.on_llm_end_common()
| null |
conditional_decorator
|
"""Define conditional decorator.
Args:
condition: The condition.
decorator: The decorator.
Returns:
The decorated function.
"""
def actual_decorator(func: Callable[[Any], Any]) ->Callable[[Any], Any]:
if condition:
return decorator(func)
return func
return actual_decorator
|
def conditional_decorator(condition: bool, decorator: Callable[[Any], Any]
    ) ->Callable[[Any], Any]:
    """Define conditional decorator.

    Args:
        condition: When truthy, ``decorator`` is applied; otherwise the
            target function is returned untouched.
        decorator: The decorator to apply conditionally.

    Returns:
        The decorated function.
    """

    def actual_decorator(func: Callable[[Any], Any]) ->Callable[[Any], Any]:
        return decorator(func) if condition else func
    return actual_decorator
|
Define conditional decorator.
Args:
condition: The condition.
decorator: The decorator.
Returns:
The decorated function.
|
humanize_sql_error_msg
|
pattern = 'column\\s+(.*?)\\s+not found'
col_match = re.search(pattern, error)
if col_match:
return 'SQL error: ' + col_match.group(1
) + ' is not an attribute in your story!'
else:
return str(error)
|
def humanize_sql_error_msg(error: str) ->str:
    """Rewrite a "column ... not found" SQL error into a friendly message.

    Any other error text is returned unchanged (stringified).
    """
    match = re.search('column\\s+(.*?)\\s+not found', error)
    if match is None:
        return str(error)
    return ('SQL error: ' + match.group(1) +
        ' is not an attribute in your story!')
| null |
max_marginal_relevance_search_by_vector
|
"""Return docs selected using the maximal marginal relevance.
Maximal marginal relevance optimizes for similarity to query AND diversity
among selected documents.
Args:
embedding: Embedding to look up documents similar to.
k: Number of Documents to return. Defaults to 4.
fetch_k: Number of Documents to fetch to pass to MMR algorithm.
lambda_mult: Number between 0 and 1 that determines the degree
of diversity among the results with 0 corresponding
to maximum diversity and 1 to minimum diversity.
Defaults to 0.5.
filter: Doc fields filter conditions that meet the SQL where clause
specification.
Returns:
List of Documents selected by maximal marginal relevance.
"""
ret = self._collection.query(embedding, topk=fetch_k, filter=filter,
include_vector=True)
if not ret:
raise ValueError(
f'Fail to query docs by vector, error {self._collection.message}')
candidate_embeddings = [doc.vector for doc in ret]
mmr_selected = maximal_marginal_relevance(np.array(embedding),
candidate_embeddings, lambda_mult, k)
metadatas = [ret.output[i].fields for i in mmr_selected]
return [Document(page_content=metadata.pop(self._text_field), metadata=
metadata) for metadata in metadatas]
|
def max_marginal_relevance_search_by_vector(self, embedding: List[float], k:
    int=4, fetch_k: int=20, lambda_mult: float=0.5, filter: Optional[dict]=
    None, **kwargs: Any) ->List[Document]:
    """Return docs selected using the maximal marginal relevance.
    Maximal marginal relevance optimizes for similarity to query AND diversity
    among selected documents.
    Args:
        embedding: Embedding to look up documents similar to.
        k: Number of Documents to return. Defaults to 4.
        fetch_k: Number of Documents to fetch to pass to MMR algorithm.
        lambda_mult: Number between 0 and 1 that determines the degree
            of diversity among the results with 0 corresponding
            to maximum diversity and 1 to minimum diversity.
            Defaults to 0.5.
        filter: Doc fields filter conditions that meet the SQL where clause
            specification.
    Returns:
        List of Documents selected by maximal marginal relevance.
    """
    # Over-fetch fetch_k candidates (with vectors) so MMR can diversify.
    response = self._collection.query(embedding, topk=fetch_k, filter=filter,
        include_vector=True)
    if not response:
        raise ValueError(
            f'Fail to query docs by vector, error {self._collection.message}')
    candidate_vectors = [doc.vector for doc in response]
    selected = maximal_marginal_relevance(np.array(embedding),
        candidate_vectors, lambda_mult, k)
    chosen_fields = [response.output[idx].fields for idx in selected]
    # The configured text field becomes page_content; the rest is metadata.
    return [Document(page_content=fields.pop(self._text_field),
        metadata=fields) for fields in chosen_fields]
|
Return docs selected using the maximal marginal relevance.
Maximal marginal relevance optimizes for similarity to query AND diversity
among selected documents.
Args:
embedding: Embedding to look up documents similar to.
k: Number of Documents to return. Defaults to 4.
fetch_k: Number of Documents to fetch to pass to MMR algorithm.
lambda_mult: Number between 0 and 1 that determines the degree
of diversity among the results with 0 corresponding
to maximum diversity and 1 to minimum diversity.
Defaults to 0.5.
filter: Doc fields filter conditions that meet the SQL where clause
specification.
Returns:
List of Documents selected by maximal marginal relevance.
|
_import_watsonxllm
|
from langchain_community.llms.watsonxllm import WatsonxLLM
return WatsonxLLM
|
def _import_watsonxllm() ->Any:
    """Lazily import and return the WatsonxLLM class (deferred dependency)."""
    from langchain_community.llms.watsonxllm import WatsonxLLM
    return WatsonxLLM
| null |
_redis_cluster_client
|
from redis.cluster import RedisCluster
return RedisCluster.from_url(redis_url, **kwargs)
|
def _redis_cluster_client(redis_url: str, **kwargs: Any) ->RedisType:
    """Build a Redis *cluster* client from a connection URL."""
    from redis.cluster import RedisCluster
    return RedisCluster.from_url(redis_url, **kwargs)
| null |
similarity_search_with_score
|
"""Return docs most similar to query.
Args:
query: Text to look up documents similar to.
k: Number of Documents to return. Defaults to 4.
filter (Optional[Dict[str, str]]): Filter by metadata. Defaults to None.
Returns:
List of Documents most similar to the query and score for each.
"""
embedding = self.embedding_function.embed_query(query)
docs = self.similarity_search_with_score_by_vector(embedding=embedding, k=k,
filter=filter)
return docs
|
def similarity_search_with_score(self, query: str, k: int=4, filter:
    Optional[dict]=None) ->List[Tuple[Document, float]]:
    """Return docs most similar to query.
    Args:
        query: Text to look up documents similar to.
        k: Number of Documents to return. Defaults to 4.
        filter (Optional[Dict[str, str]]): Filter by metadata. Defaults to None.
    Returns:
        List of Documents most similar to the query and score for each.
    """
    query_vector = self.embedding_function.embed_query(query)
    return self.similarity_search_with_score_by_vector(
        embedding=query_vector, k=k, filter=filter)
|
Return docs most similar to query.
Args:
query: Text to look up documents similar to.
k: Number of Documents to return. Defaults to 4.
filter (Optional[Dict[str, str]]): Filter by metadata. Defaults to None.
Returns:
List of Documents most similar to the query and score for each.
|
__init__
|
if isinstance(exception, dict):
self.message = exception['message'
] if 'message' in exception else 'unknown'
self.details = exception['details'
] if 'details' in exception else 'unknown'
else:
self.message = exception
self.details = 'unknown'
|
def __init__(self, exception: Union[str, Dict]):
    """Normalize an error payload into ``message`` and ``details``.

    Args:
        exception: Either a plain error string, or a dict that may carry
            'message' and 'details' keys; missing keys fall back to
            'unknown'.
    """
    if isinstance(exception, dict):
        # dict.get replaces the `x['k'] if 'k' in x else default` pattern:
        # same semantics, one lookup, idiomatic.
        self.message = exception.get('message', 'unknown')
        self.details = exception.get('details', 'unknown')
    else:
        self.message = exception
        self.details = 'unknown'
| null |
_get_elements
|
return get_elements_from_api(file=self.file, api_key=self.api_key, api_url=
self.url, **self.unstructured_kwargs)
|
def _get_elements(self) ->List:
    """Fetch document elements from the Unstructured API for this file."""
    return get_elements_from_api(file=self.file, api_key=self.api_key,
        api_url=self.url, **self.unstructured_kwargs)
| null |
_import_openai
|
from langchain_community.llms.openai import OpenAI
return OpenAI
|
def _import_openai() ->Any:
    """Lazily import and return the OpenAI LLM class (deferred dependency)."""
    from langchain_community.llms.openai import OpenAI
    return OpenAI
| null |
get_parser
|
"""
Returns a parser for the query language.
Args:
allowed_comparators: Optional[Sequence[Comparator]]
allowed_operators: Optional[Sequence[Operator]]
Returns:
Lark parser for the query language.
"""
if QueryTransformer is None:
raise ImportError(
"Cannot import lark, please install it with 'pip install lark'.")
transformer = QueryTransformer(allowed_comparators=allowed_comparators,
allowed_operators=allowed_operators, allowed_attributes=allowed_attributes)
return Lark(GRAMMAR, parser='lalr', transformer=transformer, start='program')
|
def get_parser(allowed_comparators: Optional[Sequence[Comparator]]=None,
    allowed_operators: Optional[Sequence[Operator]]=None,
    allowed_attributes: Optional[Sequence[str]]=None) ->Lark:
    """
    Returns a parser for the query language.
    Args:
        allowed_comparators: Optional[Sequence[Comparator]]
        allowed_operators: Optional[Sequence[Operator]]
    Returns:
        Lark parser for the query language.
    """
    # `QueryTransformer` is None when the optional `lark` dependency could
    # not be imported; fail early with an actionable message.
    if QueryTransformer is None:
        raise ImportError(
            "Cannot import lark, please install it with 'pip install lark'.")
    ast_builder = QueryTransformer(
        allowed_comparators=allowed_comparators,
        allowed_operators=allowed_operators,
        allowed_attributes=allowed_attributes,
    )
    return Lark(GRAMMAR, parser='lalr', transformer=ast_builder,
        start='program')
|
Returns a parser for the query language.
Args:
allowed_comparators: Optional[Sequence[Comparator]]
allowed_operators: Optional[Sequence[Operator]]
Returns:
Lark parser for the query language.
|
test_chroma_large_batch
|
import chromadb
client = chromadb.HttpClient()
embedding_function = Fak(size=255)
col = client.get_or_create_collection('my_collection', embedding_function=
embedding_function.embed_documents)
docs = ['This is a test document'] * (client.max_batch_size + 100)
Chroma.from_texts(client=client, collection_name=col.name, texts=docs,
embedding=embedding_function, ids=[str(uuid.uuid4()) for _ in range(len
(docs))])
|
@pytest.mark.requires('chromadb')
@pytest.mark.skipif(not is_api_accessible(
    'http://localhost:8000/api/v1/heartbeat'), reason='API not accessible')
@pytest.mark.skipif(not batch_support_chroma_version(), reason=
    'ChromaDB version does not support batching')
def test_chroma_large_batch() ->None:
    """Ingesting more docs than max_batch_size should succeed via batching."""
    import chromadb
    client = chromadb.HttpClient()
    embedder = Fak(size=255)
    collection = client.get_or_create_collection('my_collection',
        embedding_function=embedder.embed_documents)
    # Exceed the client's batch limit so from_texts must split the upload.
    texts = ['This is a test document'] * (client.max_batch_size + 100)
    Chroma.from_texts(client=client, collection_name=collection.name,
        texts=texts, embedding=embedder,
        ids=[str(uuid.uuid4()) for _ in texts])
| null |
_is_geminiai
|
return self.model is not None and 'gemini' in self.model
|
@property
def _is_geminiai(self) ->bool:
    """Whether the configured model name refers to a Gemini model."""
    return self.model is not None and 'gemini' in self.model
| null |
_run
|
"""Get the schema for tables in a comma-separated list."""
return self.db.get_table_info_no_throw(table_names.split(', '))
|
def _run(self, table_names: str, run_manager: Optional[
    CallbackManagerForToolRun]=None) ->str:
    """Get the schema for tables in a comma-separated list.

    NOTE(review): the split is on ', ' (comma + space) exactly; names joined
    by a bare comma would not be separated — confirm callers always format
    the list with a trailing space.
    """
    return self.db.get_table_info_no_throw(table_names.split(', '))
|
Get the schema for tables in a comma-separated list.
|
test_chroma_add_documents_no_metadata
|
db = Chroma(embedding_function=FakeEmbeddings())
db.add_documents([Document(page_content='foo')])
|
def test_chroma_add_documents_no_metadata() ->None:
    """Documents without metadata should be accepted by add_documents."""
    db = Chroma(embedding_function=FakeEmbeddings())
    db.add_documents([Document(page_content='foo')])
| null |
_anonymize
|
"""Abstract method to anonymize text"""
|
@abstractmethod
def _anonymize(self, text: str, language: Optional[str], allow_list:
    Optional[List[str]]=None) ->str:
    """Abstract method to anonymize text.

    Args:
        text: The text to anonymize.
        language: Language code of the text, if known.
        allow_list: Terms that must be left un-anonymized.

    Returns:
        The anonymized text.
    """
|
Abstract method to anonymize text
|
_wait_processing
|
for _ in range(10):
time.sleep(1)
audio_analysis_result = self._get_edenai(url)
temp = audio_analysis_result.json()
if temp['status'] == 'finished':
if temp['results'][self.providers[0]]['error'] is not None:
raise Exception(
f"""EdenAI returned an unexpected response
{temp['results'][self.providers[0]]['error']}"""
)
else:
return audio_analysis_result
raise Exception('Edenai speech to text job id processing Timed out')
|
def _wait_processing(self, url: str) ->requests.Response:
    """Poll the EdenAI job endpoint once per second (max 10 tries).

    Returns the response once the job reports 'finished'; raises if the
    first provider reported an error or the job never finishes in time.
    """
    for _ in range(10):
        time.sleep(1)
        audio_analysis_result = self._get_edenai(url)
        temp = audio_analysis_result.json()
        if temp['status'] == 'finished':
            # A 'finished' job can still carry a provider-level error.
            if temp['results'][self.providers[0]]['error'] is not None:
                raise Exception(
                    f"""EdenAI returned an unexpected response
                    {temp['results'][self.providers[0]]['error']}"""
                    )
            else:
                return audio_analysis_result
    raise Exception('Edenai speech to text job id processing Timed out')
| null |
load_results
|
"""Load items from an HN page."""
items = soup.select("tr[class='athing']")
documents = []
for lineItem in items:
ranking = lineItem.select_one("span[class='rank']").text
link = lineItem.find('span', {'class': 'titleline'}).find('a').get('href')
title = lineItem.find('span', {'class': 'titleline'}).text.strip()
metadata = {'source': self.web_path, 'title': title, 'link': link,
'ranking': ranking}
documents.append(Document(page_content=title, link=link, ranking=
ranking, metadata=metadata))
return documents
|
def load_results(self, soup: Any) ->List[Document]:
    """Load items from an HN page.

    Args:
        soup: Parsed Hacker News front-page markup (BeautifulSoup object).

    Returns:
        One Document per story row, with the title as page_content and
        source/title/link/ranking in metadata.
    """
    documents = []
    for item in soup.select("tr[class='athing']"):
        ranking = item.select_one("span[class='rank']").text
        # Look up the title span once and reuse it for both link and title.
        title_span = item.find('span', {'class': 'titleline'})
        link = title_span.find('a').get('href')
        title = title_span.text.strip()
        metadata = {'source': self.web_path, 'title': title, 'link': link,
            'ranking': ranking}
        # Fix: `link`/`ranking` were also passed as extra Document kwargs,
        # duplicating the metadata entries; they belong in metadata only.
        documents.append(Document(page_content=title, metadata=metadata))
    return documents
|
Load items from an HN page.
|
post
|
return f'post {str(data)}'
|
@staticmethod
def post(url: str, data: Dict[str, Any], **kwargs: Any) ->str:
    # Test stub: echoes the payload instead of performing a real HTTP POST.
    return f'post {str(data)}'
| null |
test_add_messages
|
sql_history, other_history = sql_histories
sql_history.add_user_message('Hello!')
sql_history.add_ai_message('Hi there!')
messages = sql_history.messages
assert len(messages) == 2
assert isinstance(messages[0], HumanMessage)
assert isinstance(messages[1], AIMessage)
assert messages[0].content == 'Hello!'
assert messages[1].content == 'Hi there!'
|
def test_add_messages(sql_histories: Tuple[SQLChatMessageHistory,
    SQLChatMessageHistory]) ->None:
    """A human/AI exchange should round-trip through the SQL history."""
    history, _other = sql_histories
    history.add_user_message('Hello!')
    history.add_ai_message('Hi there!')
    stored = history.messages
    assert len(stored) == 2
    human, ai = stored
    assert isinstance(human, HumanMessage)
    assert isinstance(ai, AIMessage)
    assert human.content == 'Hello!'
    assert ai.content == 'Hi there!'
| null |
_get_relevant_documents
|
if self.search_type == 'similarity':
docs = self.vectorstore.similarity_search(query, **self.search_kwargs)
elif self.search_type == 'similarity_score_threshold':
docs_and_similarities = (self.vectorstore.
similarity_search_with_relevance_scores(query, **self.search_kwargs))
docs = [doc for doc, _ in docs_and_similarities]
elif self.search_type == 'mmr':
docs = self.vectorstore.max_marginal_relevance_search(query, **self.
search_kwargs)
else:
raise ValueError(f'search_type of {self.search_type} not allowed.')
return docs
|
def _get_relevant_documents(self, query: str, *, run_manager:
    CallbackManagerForRetrieverRun) ->List[Document]:
    """Dispatch the query to the vector store per the configured search type."""
    search_type = self.search_type
    if search_type == 'similarity':
        return self.vectorstore.similarity_search(query, **self.search_kwargs)
    if search_type == 'similarity_score_threshold':
        scored = self.vectorstore.similarity_search_with_relevance_scores(
            query, **self.search_kwargs)
        # Scores are only used for thresholding inside the store; drop them.
        return [doc for doc, _score in scored]
    if search_type == 'mmr':
        return self.vectorstore.max_marginal_relevance_search(query, **
            self.search_kwargs)
    raise ValueError(f'search_type of {search_type} not allowed.')
| null |
similarity_search_with_score
|
query_emb = []
if self._embedding_function is not None:
query_emb = self._embedding_function.embed_query(query)
return self.similarity_search_by_vector_with_score(query_emb, k, **kwargs)
|
def similarity_search_with_score(self, query: str, k: int=4, **kwargs: Any
    ) ->List[Tuple[Document, float]]:
    """Embed the query (when an embedding function is configured) and run a
    by-vector search, returning (document, score) pairs."""
    if self._embedding_function is None:
        # NOTE(review): falls through with an empty vector — presumably the
        # backend embeds server-side in that case; confirm.
        query_vector: List[float] = []
    else:
        query_vector = self._embedding_function.embed_query(query)
    return self.similarity_search_by_vector_with_score(query_vector, k,
        **kwargs)
| null |
test_getprojects
|
"""Test for getting projects on JIRA"""
jira = JiraAPIWrapper()
output = jira.run('get_projects', '')
assert 'projects' in output
|
def test_getprojects() ->None:
    """Test for getting projects on JIRA.

    Integration test: requires live JIRA credentials in the environment.
    """
    jira = JiraAPIWrapper()
    output = jira.run('get_projects', '')
    assert 'projects' in output
|
Test for getting projects on JIRA
|
parse
|
includes_answer = FINAL_ANSWER_ACTION in text
try:
found = self.pattern.search(text)
if not found:
raise ValueError('action not found')
action = found.group(1)
response = json.loads(action.strip())
includes_action = 'action' in response
if includes_answer and includes_action:
raise OutputParserException(
f'Parsing LLM output produced a final answer and a parse-able action: {text}'
)
return AgentAction(response['action'], response.get('action_input', {}),
text)
except Exception as exc:
if not includes_answer:
raise OutputParserException(f'Could not parse LLM output: {text}'
) from exc
output = text.split(FINAL_ANSWER_ACTION)[-1].strip()
return AgentFinish({'output': output}, text)
|
def parse(self, text: str) ->Union[AgentAction, AgentFinish]:
    """Parse LLM text into an AgentAction (tool call) or AgentFinish.

    Raises:
        OutputParserException: If the text contains neither a final answer
            nor a parseable JSON action.
    """
    includes_answer = FINAL_ANSWER_ACTION in text
    try:
        # Locate the JSON action blob via the instance's compiled pattern.
        found = self.pattern.search(text)
        if not found:
            raise ValueError('action not found')
        action = found.group(1)
        response = json.loads(action.strip())
        includes_action = 'action' in response
        # Raising here is deliberate: when BOTH a final answer and an action
        # are present, this exception is caught below and — because
        # includes_answer is True — the final answer wins via AgentFinish.
        if includes_answer and includes_action:
            raise OutputParserException(
                f'Parsing LLM output produced a final answer and a parse-able action: {text}'
                )
        return AgentAction(response['action'], response.get('action_input',
            {}), text)
    except Exception as exc:
        # Any parse failure is fatal only if no final answer is present.
        if not includes_answer:
            raise OutputParserException(f'Could not parse LLM output: {text}'
                ) from exc
        output = text.split(FINAL_ANSWER_ACTION)[-1].strip()
        return AgentFinish({'output': output}, text)
| null |
on_llm_new_token
|
self._require_current_thought().on_llm_new_token(token, **kwargs)
self._prune_old_thought_containers()
|
def on_llm_new_token(self, token: str, **kwargs: Any) ->None:
    """Stream a new LLM token into the current thought and prune stale ones."""
    self._require_current_thought().on_llm_new_token(token, **kwargs)
    self._prune_old_thought_containers()
| null |
memory_variables
|
"""Will always return list of memory variables.
:meta private:
"""
return [self.memory_key]
|
@property
def memory_variables(self) ->List[str]:
    """Will always return list of memory variables.

    :meta private:
    """
    return [self.memory_key]
|
Will always return list of memory variables.
:meta private:
|
on_chain_start
|
"""Print out that we are entering a chain."""
class_name = serialized.get('name', serialized.get('id', ['<unknown>'])[-1])
print_text(f"""
[1m> Entering new {class_name} chain...[0m""", end='\n',
file=self.file)
|
def on_chain_start(self, serialized: Dict[str, Any], inputs: Dict[str, Any],
    **kwargs: Any) ->None:
    """Print out that we are entering a chain."""
    # Prefer the serialized 'name'; otherwise fall back to the last segment
    # of the serialized 'id' path, or '<unknown>' if neither exists.
    class_name = serialized.get('name', serialized.get('id', ['<unknown>'])[-1]
        )
    # \x1b[1m / \x1b[0m toggle ANSI bold around the banner line.
    print_text(f'\n\n\x1b[1m> Entering new {class_name} chain...\x1b[0m',
        end='\n', file=self.file)
|
Print out that we are entering a chain.
|
__from
|
if ids is None:
ids = [str(uuid.uuid1()) for _ in texts]
if not metadatas:
metadatas = [{} for _ in texts]
if connection_string is None:
connection_string = cls.get_connection_string(kwargs)
store = cls(connection_string=connection_string, collection_name=
collection_name, embedding_function=embedding, distance_strategy=
distance_strategy, pre_delete_collection=pre_delete_collection, **kwargs)
store.add_embeddings(texts=texts, embeddings=embeddings, metadatas=
metadatas, ids=ids, **kwargs)
return store
|
@classmethod
def __from(cls, texts: List[str], embeddings: List[List[float]], embedding:
    Embeddings, metadatas: Optional[List[dict]]=None, ids: Optional[List[
    str]]=None, collection_name: str=_LANGCHAIN_DEFAULT_COLLECTION_NAME,
    distance_strategy: DistanceStrategy=DEFAULT_DISTANCE_STRATEGY,
    connection_string: Optional[str]=None, pre_delete_collection: bool=
    False, **kwargs: Any) ->PGVector:
    """Build a PGVector store and bulk-insert precomputed embeddings.

    Missing ids are generated (one per text) and missing metadatas default
    to empty dicts; the connection string falls back to the class default.
    """
    if ids is None:
        # uuid4 instead of uuid1: the ids are opaque, and uuid1 embeds the
        # host MAC address and timestamp (privacy leak, collision-prone
        # under concurrent generation).
        ids = [str(uuid.uuid4()) for _ in texts]
    if not metadatas:
        metadatas = [{} for _ in texts]
    if connection_string is None:
        connection_string = cls.get_connection_string(kwargs)
    store = cls(connection_string=connection_string, collection_name=
        collection_name, embedding_function=embedding, distance_strategy=
        distance_strategy, pre_delete_collection=pre_delete_collection,
        **kwargs)
    store.add_embeddings(texts=texts, embeddings=embeddings, metadatas=
        metadatas, ids=ids, **kwargs)
    return store
| null |
get_structured_schema
|
"""Returns the structured schema of the Graph"""
return self.structured_schema
|
@property
def get_structured_schema(self) ->Dict[str, Any]:
    """Returns the structured schema of the Graph"""
    return self.structured_schema
|
Returns the structured schema of the Graph
|
test_geometry_returned
|
mock_feature_layer.query.return_value = [MagicMock(as_dict={'attributes': {
'field': 'value'}, 'geometry': {'type': 'point', 'coordinates': [0, 0]}})]
loader = ArcGISLoader(layer=mock_feature_layer, gis=mock_gis,
return_geometry=True)
documents = list(loader.lazy_load())
assert 'geometry' in documents[0].metadata
|
def test_geometry_returned(arcgis_mocks, mock_feature_layer, mock_gis):
    """With return_geometry=True, feature geometry lands in doc metadata."""
    feature = MagicMock(as_dict={
        'attributes': {'field': 'value'},
        'geometry': {'type': 'point', 'coordinates': [0, 0]},
    })
    mock_feature_layer.query.return_value = [feature]
    loader = ArcGISLoader(layer=mock_feature_layer, gis=mock_gis,
        return_geometry=True)
    docs = list(loader.lazy_load())
    assert 'geometry' in docs[0].metadata
| null |
_try_raise
|
"""Try to raise an error from a response"""
try:
response.raise_for_status()
except requests.HTTPError as e:
try:
rd = response.json()
except json.JSONDecodeError:
rd = response.__dict__
rd = rd.get('_content', rd)
if isinstance(rd, bytes):
rd = rd.decode('utf-8')[5:]
try:
rd = json.loads(rd)
except Exception:
rd = {'detail': rd}
title = f"[{rd.get('status', '###')}] {rd.get('title', 'Unknown Error')}"
body = f"{rd.get('detail', rd.get('type', rd))}"
raise Exception(f'{title}\n{body}') from e
|
def _try_raise(self, response: Response) ->None:
    """Raise a descriptive ``Exception`` if *response* carries an HTTP error.

    On ``HTTPError``, builds the message from the JSON body when possible,
    otherwise falls back to the cached raw ``_content`` of the response,
    chaining the original error as the cause.
    """
    try:
        response.raise_for_status()
    except requests.HTTPError as e:
        # Build the most informative error payload we can.
        try:
            rd = response.json()
        except json.JSONDecodeError:
            # Body is not valid JSON; fall back to the response internals.
            rd = response.__dict__
            rd = rd.get('_content', rd)
            if isinstance(rd, bytes):
                # Drops the first 5 characters — presumably an
                # anti-JSON-hijacking prefix (e.g. ")]}'\n") prepended by
                # the service; TODO confirm against the API's responses.
                rd = rd.decode('utf-8')[5:]
            try:
                rd = json.loads(rd)
            except Exception:
                # Still not JSON: wrap the raw text so the .get() lookups
                # below keep working on a dict.
                rd = {'detail': rd}
        # Compose "[status] title" / detail from whichever fields exist,
        # with placeholder fallbacks for missing ones.
        title = (
            f"[{rd.get('status', '###')}] {rd.get('title', 'Unknown Error')}")
        body = f"{rd.get('detail', rd.get('type', rd))}"
        raise Exception(f'{title}\n{body}') from e
|
Try to raise an error from a response
|
_identifying_params
|
"""Get the identifying parameters."""
_model_kwargs = self.model_kwargs or {}
return {**{'endpoint_url': self.endpoint_url}, **{'model_kwargs':
_model_kwargs}}
|
@property
def _identifying_params(self) ->Mapping[str, Any]:
"""Get the identifying parameters."""
_model_kwargs = self.model_kwargs or {}
return {**{'endpoint_url': self.endpoint_url}, **{'model_kwargs':
_model_kwargs}}
|
Get the identifying parameters.
|
test_sklearn_mmr_by_vector
|
"""Test end to end construction and search."""
texts = ['foo', 'bar', 'baz']
embeddings = FakeEmbeddings()
docsearch = SKLearnVectorStore.from_texts(texts, embeddings)
embedded_query = embeddings.embed_query('foo')
output = docsearch.max_marginal_relevance_search_by_vector(embedded_query,
k=1, fetch_k=3)
assert len(output) == 1
assert output[0].page_content == 'foo'
|
@pytest.mark.requires('numpy', 'sklearn')
def test_sklearn_mmr_by_vector() ->None:
    """MMR search by vector returns the single best-matching document."""
    corpus = ['foo', 'bar', 'baz']
    fake_embeddings = FakeEmbeddings()
    store = SKLearnVectorStore.from_texts(corpus, fake_embeddings)
    query_vector = fake_embeddings.embed_query('foo')
    results = store.max_marginal_relevance_search_by_vector(query_vector,
        k=1, fetch_k=3)
    assert len(results) == 1
    assert results[0].page_content == 'foo'
|
Test end to end construction and search.
|
test_sync_recursive_url_loader
|
url = 'https://docs.python.org/3.9/'
loader = RecursiveUrlLoader(url, extractor=lambda _: 'placeholder',
use_async=False, max_depth=2)
docs = loader.load()
assert len(docs) == 25
assert docs[0].page_content == 'placeholder'
|
def test_sync_recursive_url_loader() ->None:
    """Synchronous recursive crawl of the Python 3.9 docs yields 25 documents."""
    docs_root = 'https://docs.python.org/3.9/'
    loader = RecursiveUrlLoader(docs_root, extractor=lambda _:
        'placeholder', use_async=False, max_depth=2)
    documents = loader.load()
    assert len(documents) == 25
    assert documents[0].page_content == 'placeholder'
| null |
invoke
|
if self.model == 'chatglm_turbo':
return self.zhipuai.model_api.invoke(model=self.model, prompt=prompt,
top_p=self.top_p, temperature=self.temperature, request_id=self.
request_id, return_type=self.return_type)
elif self.model == 'characterglm':
meta = self.meta.dict()
return self.zhipuai.model_api.invoke(model=self.model, meta=meta,
prompt=prompt, request_id=self.request_id, return_type=self.return_type
)
return None
|
def invoke(self, prompt):
    """Dispatch *prompt* to the ZhipuAI model API for the configured model.

    Supports 'chatglm_turbo' (with sampling parameters) and 'characterglm'
    (with character meta); returns None for any other model name.
    """
    model = self.model
    if model == 'chatglm_turbo':
        return self.zhipuai.model_api.invoke(model=model, prompt=prompt,
            top_p=self.top_p, temperature=self.temperature, request_id=
            self.request_id, return_type=self.return_type)
    if model == 'characterglm':
        return self.zhipuai.model_api.invoke(model=model, meta=self.meta
            .dict(), prompt=prompt, request_id=self.request_id,
            return_type=self.return_type)
    return None
| null |
retriever
|
from qdrant_client import QdrantClient, models
client = QdrantClient(location=':memory:')
collection_name = uuid.uuid4().hex
vector_name = uuid.uuid4().hex
client.recreate_collection(collection_name, vectors_config={},
sparse_vectors_config={vector_name: models.SparseVectorParams(index=
models.SparseIndexParams(on_disk=False))})
return QdrantSparseVectorRetriever(client=client, collection_name=
collection_name, sparse_vector_name=vector_name, sparse_encoder=
consistent_fake_sparse_encoder)
|
@pytest.fixture
def retriever() ->QdrantSparseVectorRetriever:
    """In-memory Qdrant retriever over a fresh collection with a random sparse vector name."""
    from qdrant_client import QdrantClient, models
    qdrant = QdrantClient(location=':memory:')
    collection = uuid.uuid4().hex
    sparse_name = uuid.uuid4().hex
    qdrant.recreate_collection(collection, vectors_config={},
        sparse_vectors_config={sparse_name: models.SparseVectorParams(
        index=models.SparseIndexParams(on_disk=False))})
    return QdrantSparseVectorRetriever(client=qdrant, collection_name=
        collection, sparse_vector_name=sparse_name, sparse_encoder=
        consistent_fake_sparse_encoder)
| null |
_transform_completions
|
return response['choices'][0]['text']
|
def _transform_completions(response: Dict[str, Any]) ->str:
return response['choices'][0]['text']
| null |
_signature
|
sorted_keys = sorted(payload.keys())
url_info = urlparse(url)
sign_str = url_info.netloc + url_info.path + '?'
for key in sorted_keys:
value = payload[key]
if isinstance(value, list) or isinstance(value, dict):
value = json.dumps(value, separators=(',', ':'))
elif isinstance(value, float):
value = '%g' % value
sign_str = sign_str + key + '=' + str(value) + '&'
sign_str = sign_str[:-1]
hmacstr = hmac.new(key=secret_key.get_secret_value().encode('utf-8'), msg=
sign_str.encode('utf-8'), digestmod=hashlib.sha1).digest()
return base64.b64encode(hmacstr).decode('utf-8')
|
def _signature(secret_key: SecretStr, url: str, payload: Dict[str, Any]) ->str:
    """Compute a base64-encoded HMAC-SHA1 signature for the request.

    The signed string is ``netloc + path`` followed (when the payload is
    non-empty) by ``?`` and ``key=value`` pairs in sorted key order, where
    list/dict values are compact JSON and floats use ``%g`` formatting.
    """
    parsed = urlparse(url)
    pairs = []
    for name in sorted(payload):
        value = payload[name]
        if isinstance(value, (list, dict)):
            value = json.dumps(value, separators=(',', ':'))
        elif isinstance(value, float):
            value = '%g' % value
        pairs.append('%s=%s' % (name, value))
    sign_str = parsed.netloc + parsed.path
    if pairs:
        sign_str += '?' + '&'.join(pairs)
    digest = hmac.new(key=secret_key.get_secret_value().encode('utf-8'),
        msg=sign_str.encode('utf-8'), digestmod=hashlib.sha1).digest()
    return base64.b64encode(digest).decode('utf-8')
| null |
test_labeled_pairwise_string_comparison_chain_missing_ref
|
llm = FakeLLM(queries={'a': """The values are the same.
[[C]]""", 'b':
"""A is clearly better than b.
[[A]]""", 'c':
"""B is clearly better than a.
[[B]]"""}, sequential_responses=True)
chain = LabeledPairwiseStringEvalChain.from_llm(llm=llm)
with pytest.raises(ValueError):
chain.evaluate_string_pairs(prediction='I like pie.', prediction_b=
'I love pie.', input='What is your favorite food?')
|
def test_labeled_pairwise_string_comparison_chain_missing_ref() ->None:
    """Evaluating without a reference must raise ValueError for the labeled chain."""
    responses = {'a': 'The values are the same.\n[[C]]', 'b':
        'A is clearly better than b.\n[[A]]', 'c':
        'B is clearly better than a.\n[[B]]'}
    fake_llm = FakeLLM(queries=responses, sequential_responses=True)
    evaluator = LabeledPairwiseStringEvalChain.from_llm(llm=fake_llm)
    with pytest.raises(ValueError):
        evaluator.evaluate_string_pairs(prediction='I like pie.',
            prediction_b='I love pie.', input='What is your favorite food?')
| null |
embed_query
|
"""Return consistent embeddings for the text, if seen before, or a constant
one if the text is unknown."""
return self.embed_documents([text])[0]
|
def embed_query(self, text: str) ->List[float]:
    """Embed a single query string by delegating to ``embed_documents``.

    Returns a consistent embedding for previously-seen text, or a constant
    one if the text is unknown.
    """
    embedding, = self.embed_documents([text])
    return embedding
|
Return consistent embeddings for the text, if seen before, or a constant
one if the text is unknown.
|
ignore_retriever
|
"""Whether to ignore retriever callbacks."""
return self.ignore_retriever_
|
@property
def ignore_retriever(self) ->bool:
    """Whether retriever callbacks are skipped (backed by ``ignore_retriever_``)."""
    flag = self.ignore_retriever_
    return flag
|
Whether to ignore retriever callbacks.
|
test_marqo_multimodal
|
import marqo
client = marqo.Client(url=DEFAULT_MARQO_URL, api_key=DEFAULT_MARQO_API_KEY)
try:
client.index(INDEX_NAME).delete()
except Exception:
pass
client.delete_index(INDEX_NAME)
settings = {'treat_urls_and_pointers_as_images': True, 'model': 'ViT-L/14'}
client.create_index(INDEX_NAME, **settings)
client.index(INDEX_NAME).add_documents([{'caption': 'Bus', 'image':
'https://raw.githubusercontent.com/marqo-ai/marqo/mainline/examples/ImageSearchGuide/data/image4.jpg'
}, {'caption': 'Plane', 'image':
'https://raw.githubusercontent.com/marqo-ai/marqo/mainline/examples/ImageSearchGuide/data/image2.jpg'
}], tensor_fields=['caption', 'image'])
def get_content(res: Dict[str, str]) ->str:
if 'text' in res:
return res['text']
return f"{res['caption']}: {res['image']}"
marqo_search = Marqo(client, INDEX_NAME, page_content_builder=get_content)
query = 'vehicles that fly'
docs = marqo_search.similarity_search(query)
assert docs[0].page_content.split(':')[0] == 'Plane'
raised_value_error = False
try:
marqo_search.add_texts(['text'])
except ValueError:
raised_value_error = True
assert raised_value_error
|
def test_marqo_multimodal() ->None:
    """End-to-end multimodal (image + caption) search against a live Marqo index."""
    import marqo
    client = marqo.Client(url=DEFAULT_MARQO_URL, api_key=DEFAULT_MARQO_API_KEY)
    # Best-effort cleanup of any leftover index from a previous run.
    try:
        client.index(INDEX_NAME).delete()
    except Exception:
        pass
    client.delete_index(INDEX_NAME)
    index_settings = {'treat_urls_and_pointers_as_images': True, 'model':
        'ViT-L/14'}
    client.create_index(INDEX_NAME, **index_settings)
    client.index(INDEX_NAME).add_documents([{'caption': 'Bus', 'image':
        'https://raw.githubusercontent.com/marqo-ai/marqo/mainline/examples/ImageSearchGuide/data/image4.jpg'
        }, {'caption': 'Plane', 'image':
        'https://raw.githubusercontent.com/marqo-ai/marqo/mainline/examples/ImageSearchGuide/data/image2.jpg'
        }], tensor_fields=['caption', 'image'])
    def build_content(res: Dict[str, str]) ->str:
        # Text hits carry a 'text' field; image hits render as "caption: url".
        if 'text' in res:
            return res['text']
        return f"{res['caption']}: {res['image']}"
    store = Marqo(client, INDEX_NAME, page_content_builder=build_content)
    results = store.similarity_search('vehicles that fly')
    assert results[0].page_content.split(':')[0] == 'Plane'
    # add_texts must be rejected on this multimodal index.
    saw_value_error = False
    try:
        store.add_texts(['text'])
    except ValueError:
        saw_value_error = True
    assert saw_value_error
| null |
has_mul_sub_str
|
"""
Check if a string has multiple substrings.
Args:
s: The string to check
*args: The substrings to check for in the string
Returns:
bool: True if all substrings are present in the string, False otherwise
"""
for a in args:
if a not in s:
return False
return True
|
def has_mul_sub_str(s: str, *args: Any) ->bool:
    """
    Check if a string has multiple substrings.

    Args:
        s: The string to check
        *args: The substrings to check for in the string

    Returns:
        bool: True if all substrings are present in the string, False otherwise
    """
    # all() short-circuits on the first missing substring, matching the
    # original early-return loop; vacuously True when no substrings given.
    return all(sub in s for sub in args)
|
Check if a string has multiple substrings.
Args:
s: The string to check
*args: The substrings to check for in the string
Returns:
bool: True if all substrings are present in the string, False otherwise
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.