| method_name (string, 1–78 chars) | method_body (string, 3–9.66k chars) | full_code (string, 31–10.7k chars) | docstring (string, 4–4.74k chars, nullable) |
|---|---|---|---|
_check_parser | """Check that parser is valid for bs4."""
valid_parsers = ['html.parser', 'lxml', 'xml', 'lxml-xml', 'html5lib']
if parser not in valid_parsers:
raise ValueError('`parser` must be one of ' + ', '.join(valid_parsers) +
'.') | @staticmethod
def _check_parser(parser: str) ->None:
"""Check that parser is valid for bs4."""
valid_parsers = ['html.parser', 'lxml', 'xml', 'lxml-xml', 'html5lib']
if parser not in valid_parsers:
raise ValueError('`parser` must be one of ' + ', '.join(
valid_parsers) + '.') | Check that parser is valid for bs4. |
test_given_engine_args_are_provided_then_they_should_be_used | """When engine arguments are provided then they must be used to create the underlying engine."""
engine_args = {'pool_size': 5, 'max_overflow': 10, 'pool_recycle': -1,
'pool_use_lifo': False, 'pool_pre_ping': False, 'pool_timeout': 30}
pgvector.PGVector(connection_string=_CONNECTION_STRING, embedding_function=
... | @pytest.mark.requires('pgvector')
@mock.patch('sqlalchemy.create_engine')
def test_given_engine_args_are_provided_then_they_should_be_used(create_engine:
Mock) ->None:
"""When engine arguments are provided then they must be used to create the underlying engine."""
engine_args = {'pool_size': 5, 'max_overflo... | When engine arguments are provided then they must be used to create the underlying engine. |
_import_gmail_GmailSearch | from langchain_community.tools.gmail import GmailSearch
return GmailSearch | def _import_gmail_GmailSearch() ->Any:
from langchain_community.tools.gmail import GmailSearch
return GmailSearch | null |
test_from_texts_with_metadatas_delete_multiple | texts = ['Dogs are tough.', 'Cats have fluff.', 'What is a sandwich?',
'The fence is purple.']
metadatas = [{'a': 1}, {'b': 1}, {'c': 1}, {'d': 1, 'e': 2}]
vectorstore = AzureCosmosDBVectorSearch.from_texts(texts,
azure_openai_embeddings, metadatas=metadatas, collection=collection,
index_name=INDEX_NAME)
ve... | def test_from_texts_with_metadatas_delete_multiple(self,
azure_openai_embeddings: OpenAIEmbeddings, collection: Any) ->None:
texts = ['Dogs are tough.', 'Cats have fluff.', 'What is a sandwich?',
'The fence is purple.']
metadatas = [{'a': 1}, {'b': 1}, {'c': 1}, {'d': 1, 'e': 2}]
vectorstore = A... | null |
format_response_payload | return json.loads(output)[0]['generated_text'] | def format_response_payload(self, output: bytes) ->str:
return json.loads(output)[0]['generated_text'] | null |
get_llm_cache | """Get the value of the `llm_cache` global setting."""
import langchain
with warnings.catch_warnings():
warnings.filterwarnings('ignore', message=
'Importing llm_cache from langchain root module is no longer supported'
)
old_llm_cache = langchain.llm_cache
global _llm_cache
return _llm_cache or ... | def get_llm_cache() ->'BaseCache':
"""Get the value of the `llm_cache` global setting."""
import langchain
with warnings.catch_warnings():
warnings.filterwarnings('ignore', message=
'Importing llm_cache from langchain root module is no longer supported'
)
old_llm_cach... | Get the value of the `llm_cache` global setting. |
add_texts | """Run more texts through the embeddings and add to the vectorstore.
Args:
texts: Iterable of strings to add to the vectorstore.
metadatas: Optional list of metadatas associated with the texts.
kwargs: vectorstore specific parameters
Returns:
List of ids... | def add_texts(self, texts: Iterable[str], metadatas: Optional[List[dict]]=
None, **kwargs: Any) ->List[str]:
"""Run more texts through the embeddings and add to the vectorstore.
Args:
texts: Iterable of strings to add to the vectorstore.
metadatas: Optional list of metadatas ass... | Run more texts through the embeddings and add to the vectorstore.
Args:
texts: Iterable of strings to add to the vectorstore.
metadatas: Optional list of metadatas associated with the texts.
kwargs: vectorstore specific parameters
Returns:
List of ids of the added texts. |
metadata_func | metadata['name'] = record.get('name')
metadata['summary'] = record.get('summary')
metadata['url'] = record.get('url')
metadata['category'] = record.get('category')
metadata['updated_at'] = record.get('updated_at')
return metadata | def metadata_func(record: dict, metadata: dict) ->dict:
metadata['name'] = record.get('name')
metadata['summary'] = record.get('summary')
metadata['url'] = record.get('url')
metadata['category'] = record.get('category')
metadata['updated_at'] = record.get('updated_at')
return metadata | null |
_import_ifttt | from langchain_community.tools.ifttt import IFTTTWebhook
return IFTTTWebhook | def _import_ifttt() ->Any:
from langchain_community.tools.ifttt import IFTTTWebhook
return IFTTTWebhook | null |
test_with_config_with_config | llm = FakeListLLM(responses=["i'm a textbot"])
assert dumpd(llm.with_config({'metadata': {'a': 'b'}}).with_config(tags=[
'a-tag'])) == dumpd(llm.with_config({'metadata': {'a': 'b'}, 'tags': [
'a-tag']})) | def test_with_config_with_config() ->None:
llm = FakeListLLM(responses=["i'm a textbot"])
assert dumpd(llm.with_config({'metadata': {'a': 'b'}}).with_config(tags
=['a-tag'])) == dumpd(llm.with_config({'metadata': {'a': 'b'},
'tags': ['a-tag']})) | null |
test_get_input_variables | prompt_a = PromptTemplate.from_template('{foo}')
prompt_b = PromptTemplate.from_template('{bar}')
pipeline_prompt = PipelinePromptTemplate(final_prompt=prompt_b,
pipeline_prompts=[('bar', prompt_a)])
assert pipeline_prompt.input_variables == ['foo'] | def test_get_input_variables() ->None:
prompt_a = PromptTemplate.from_template('{foo}')
prompt_b = PromptTemplate.from_template('{bar}')
pipeline_prompt = PipelinePromptTemplate(final_prompt=prompt_b,
pipeline_prompts=[('bar', prompt_a)])
assert pipeline_prompt.input_variables == ['foo'] | null |
_pull_queue | try:
from nucliadb_protos.writer_pb2 import BrokerMessage
except ImportError as e:
raise ImportError(
'nucliadb-protos is not installed. Run `pip install nucliadb-protos` to install.'
) from e
try:
from google.protobuf.json_format import MessageToJson
except ImportError as e:
raise Impor... | def _pull_queue(self) ->None:
try:
from nucliadb_protos.writer_pb2 import BrokerMessage
except ImportError as e:
raise ImportError(
'nucliadb-protos is not installed. Run `pip install nucliadb-protos` to install.'
) from e
try:
from google.protobuf.json_format... | null |
_check_response | if any(len(d['embedding']) == 1 for d in response['data']):
import openai
raise openai.error.APIError('LocalAI API returned an empty embedding')
return response | def _check_response(response: dict) ->dict:
if any(len(d['embedding']) == 1 for d in response['data']):
import openai
raise openai.error.APIError('LocalAI API returned an empty embedding')
return response | null |
__init__ | super().__init__(persist_path)
self.pd = guard_import('pandas')
self.pa = guard_import('pyarrow')
self.pq = guard_import('pyarrow.parquet') | def __init__(self, persist_path: str) ->None:
super().__init__(persist_path)
self.pd = guard_import('pandas')
self.pa = guard_import('pyarrow')
self.pq = guard_import('pyarrow.parquet') | null |
_call | plan = self.planner.plan(inputs, callbacks=run_manager.get_child() if
run_manager else None)
if run_manager:
run_manager.on_text(str(plan), verbose=self.verbose)
for step in plan.steps:
_new_inputs = {'previous_steps': self.step_container, 'current_step':
step, 'objective': inputs[self.input_key]}
... | def _call(self, inputs: Dict[str, Any], run_manager: Optional[
CallbackManagerForChainRun]=None) ->Dict[str, Any]:
plan = self.planner.plan(inputs, callbacks=run_manager.get_child() if
run_manager else None)
if run_manager:
run_manager.on_text(str(plan), verbose=self.verbose)
for step in... | null |
_dump_as_bytes | """Return a bytes representation of a document."""
return dumps(obj).encode('utf-8') | def _dump_as_bytes(obj: Serializable) ->bytes:
"""Return a bytes representation of a document."""
return dumps(obj).encode('utf-8') | Return a bytes representation of a document. |
_get_edenai | headers = {'accept': 'application/json', 'authorization':
f'Bearer {self.edenai_api_key}', 'User-Agent': self.get_user_agent()}
response = requests.get(url, headers=headers)
self._raise_on_error(response)
return response | def _get_edenai(self, url: str) ->requests.Response:
headers = {'accept': 'application/json', 'authorization':
f'Bearer {self.edenai_api_key}', 'User-Agent': self.get_user_agent()}
response = requests.get(url, headers=headers)
self._raise_on_error(response)
return response | null |
_import_manifest | from langchain_community.llms.manifest import ManifestWrapper
return ManifestWrapper | def _import_manifest() ->Any:
from langchain_community.llms.manifest import ManifestWrapper
return ManifestWrapper | null |
_get_relevant_documents | return self.load_docs(query=query) | def _get_relevant_documents(self, query: str, *, run_manager:
CallbackManagerForRetrieverRun) ->List[Document]:
return self.load_docs(query=query) | null |
get_lc_namespace | """Get the namespace of the langchain object."""
return ['langchain', 'prompts', 'chat'] | @classmethod
def get_lc_namespace(cls) ->List[str]:
"""Get the namespace of the langchain object."""
return ['langchain', 'prompts', 'chat'] | Get the namespace of the langchain object. |
get_title | return self.DocumentTitle.Text | def get_title(self) ->str:
return self.DocumentTitle.Text | null |
__init__ | """Initialize with empty cache."""
self._cache: Dict[Tuple[str, str], RETURN_VAL_TYPE] = {} | def __init__(self) ->None:
"""Initialize with empty cache."""
self._cache: Dict[Tuple[str, str], RETURN_VAL_TYPE] = {} | Initialize with empty cache. |
test_critique | response = 'Test Critique'
llm = FakeListLLM(responses=[response])
prompt = PromptTemplate(input_variables=['product'], template=
'What is a good name for a company that makes {product}?')
chain = SmartLLMChain(llm=llm, prompt=prompt, n_ideas=2)
prompt_value, _ = chain.prep_prompts({'product': 'socks'})
chain.histo... | def test_critique() ->None:
response = 'Test Critique'
llm = FakeListLLM(responses=[response])
prompt = PromptTemplate(input_variables=['product'], template=
'What is a good name for a company that makes {product}?')
chain = SmartLLMChain(llm=llm, prompt=prompt, n_ideas=2)
prompt_value, _ = ... | null |
embeddings | return self._embedding | @property
def embeddings(self) ->Embeddings:
return self._embedding | null |
config_schema | """The type of config this runnable accepts specified as a pydantic model.
To mark a field as configurable, see the `configurable_fields`
and `configurable_alternatives` methods.
Args:
include: A list of fields to include in the config schema.
Returns:
A pydant... | def config_schema(self, *, include: Optional[Sequence[str]]=None) ->Type[
BaseModel]:
"""The type of config this runnable accepts specified as a pydantic model.
To mark a field as configurable, see the `configurable_fields`
and `configurable_alternatives` methods.
Args:
inc... | The type of config this runnable accepts specified as a pydantic model.
To mark a field as configurable, see the `configurable_fields`
and `configurable_alternatives` methods.
Args:
include: A list of fields to include in the config schema.
Returns:
A pydantic model that can be used to validate config. |
validate_environment | """Validate that api key exists in environment."""
values['edenai_api_key'] = convert_to_secret_str(get_from_dict_or_env(
values, 'edenai_api_key', 'EDENAI_API_KEY'))
return values | @root_validator()
def validate_environment(cls, values: Dict) ->Dict:
"""Validate that api key exists in environment."""
values['edenai_api_key'] = convert_to_secret_str(get_from_dict_or_env(
values, 'edenai_api_key', 'EDENAI_API_KEY'))
return values | Validate that api key exists in environment. |
update | """Upsert records into the database.
Args:
keys: A list of record keys to upsert.
group_ids: A list of group IDs corresponding to the keys.
time_at_least: if provided, updates should only happen if the
updated_at field is at least this time.
Raises:
... | @abstractmethod
def update(self, keys: Sequence[str], *, group_ids: Optional[Sequence[
Optional[str]]]=None, time_at_least: Optional[float]=None) ->None:
"""Upsert records into the database.
Args:
keys: A list of record keys to upsert.
group_ids: A list of group IDs correspondin... | Upsert records into the database.
Args:
keys: A list of record keys to upsert.
group_ids: A list of group IDs corresponding to the keys.
time_at_least: if provided, updates should only happen if the
updated_at field is at least this time.
Raises:
ValueError: If the length of keys doesn't match t... |
get_buffer_string | """Convert sequence of Messages to strings and concatenate them into one string.
Args:
messages: Messages to be converted to strings.
human_prefix: The prefix to prepend to contents of HumanMessages.
ai_prefix: The prefix to prepend to contents of AIMessages.
Returns:
A single ... | def get_buffer_string(messages: Sequence[BaseMessage], human_prefix: str=
'Human', ai_prefix: str='AI') ->str:
"""Convert sequence of Messages to strings and concatenate them into one string.
Args:
messages: Messages to be converted to strings.
human_prefix: The prefix to prepend to content... | Convert sequence of Messages to strings and concatenate them into one string.
Args:
messages: Messages to be converted to strings.
human_prefix: The prefix to prepend to contents of HumanMessages.
ai_prefix: The prefix to prepend to contents of AIMessages.
Returns:
A single str... |
from_document | """Create a DocumentWithState from a Document."""
if isinstance(doc, cls):
return doc
return cls(page_content=doc.page_content, metadata=doc.metadata) | @classmethod
def from_document(cls, doc: Document) ->'_DocumentWithState':
"""Create a DocumentWithState from a Document."""
if isinstance(doc, cls):
return doc
return cls(page_content=doc.page_content, metadata=doc.metadata) | Create a DocumentWithState from a Document. |
get_default_output_parser | return XMLAgentOutputParser() | @staticmethod
def get_default_output_parser() ->XMLAgentOutputParser:
return XMLAgentOutputParser() | null |
check_rellm_installation | import_rellm()
return values | @root_validator
def check_rellm_installation(cls, values: dict) ->dict:
import_rellm()
return values | null |
_get_embeddings | embeddings: List[List[float]] = []
if batch_size is None:
batch_size = self.batch_size
if self.show_progress_bar:
try:
from tqdm.auto import tqdm
except ImportError as e:
raise ImportError(
'Must have tqdm installed if `show_progress_bar` is set to True. Please install with `pip ... | def _get_embeddings(self, texts: List[str], batch_size: Optional[int]=None,
input_type: Optional[str]=None) ->List[List[float]]:
embeddings: List[List[float]] = []
if batch_size is None:
batch_size = self.batch_size
if self.show_progress_bar:
try:
from tqdm.auto import tqdm
... | null |
generate_dialogue_response | """React to a given observation."""
call_to_action_template = """What would {agent_name} say? To end the conversation, write: GOODBYE: "what to say". Otherwise to continue the conversation, write: SAY: "what to say next"
"""
full_result = self._generate_reaction(observation, call_to_action_template,
now=now)
resul... | def generate_dialogue_response(self, observation: str, now: Optional[
datetime]=None) ->Tuple[bool, str]:
"""React to a given observation."""
call_to_action_template = """What would {agent_name} say? To end the conversation, write: GOODBYE: "what to say". Otherwise to continue the conversation, write: SAY: ... | React to a given observation. |
test_llm | llm = OpenAI(temperature=0)
eval_config = RunEvalConfig(evaluators=[EvaluatorType.QA, EvaluatorType.
CRITERIA])
with pytest.raises(ValueError, match='Must specify reference_key'):
run_on_dataset(dataset_name=kv_dataset_name, llm_or_chain_factory=llm,
evaluation=eval_config, client=client)
eval_config = ... | def test_llm(kv_dataset_name: str, eval_project_name: str, client: Client
) ->None:
llm = OpenAI(temperature=0)
eval_config = RunEvalConfig(evaluators=[EvaluatorType.QA, EvaluatorType
.CRITERIA])
with pytest.raises(ValueError, match='Must specify reference_key'):
run_on_dataset(dataset_n... | null |
_identifying_params | """Get the identifying parameters."""
if self._client is not None:
self.llm_kwargs.update(self._client._config())
model_name = self._client._metadata()['model_name']
model_id = self._client._metadata()['model_id']
else:
if self._runner is None:
raise ValueError('Runner must be initialized.')
... | @property
def _identifying_params(self) ->IdentifyingParams:
"""Get the identifying parameters."""
if self._client is not None:
self.llm_kwargs.update(self._client._config())
model_name = self._client._metadata()['model_name']
model_id = self._client._metadata()['model_id']
else:
... | Get the identifying parameters. |
_process_name | preprocessed = name.replace('_', '-').lower()
if preprocessed.startswith('langchain-'):
preprocessed = preprocessed[len('langchain-'):]
if not re.match('^[a-z][a-z0-9-]*$', preprocessed):
raise ValueError(
'Name should only contain lowercase letters (a-z), numbers, and hyphens, and start with a letter.'... | def _process_name(name: str):
preprocessed = name.replace('_', '-').lower()
if preprocessed.startswith('langchain-'):
preprocessed = preprocessed[len('langchain-'):]
if not re.match('^[a-z][a-z0-9-]*$', preprocessed):
raise ValueError(
'Name should only contain lowercase letters ... | null |
test_redis_new_vector | """Test adding a new document"""
docsearch = Redis.from_texts(texts, FakeEmbeddings(), redis_url=TEST_REDIS_URL)
docsearch.add_texts(['foo'])
output = docsearch.similarity_search('foo', k=2, return_metadata=False)
assert output == TEST_RESULT
assert drop(docsearch.index_name) | def test_redis_new_vector(texts: List[str]) ->None:
"""Test adding a new document"""
docsearch = Redis.from_texts(texts, FakeEmbeddings(), redis_url=
TEST_REDIS_URL)
docsearch.add_texts(['foo'])
output = docsearch.similarity_search('foo', k=2, return_metadata=False)
assert output == TEST_RES... | Test adding a new document |
_call | _run_manager = run_manager or CallbackManagerForChainRun.get_noop_manager()
event: TEvent = self._call_before_predict(inputs=inputs)
prediction = self.active_policy.predict(event=event)
if self.metrics:
self.metrics.on_decision()
next_chain_inputs, event = self._call_after_predict_before_llm(inputs=
inputs, eve... | def _call(self, inputs: Dict[str, Any], run_manager: Optional[
CallbackManagerForChainRun]=None) ->Dict[str, Any]:
_run_manager = run_manager or CallbackManagerForChainRun.get_noop_manager()
event: TEvent = self._call_before_predict(inputs=inputs)
prediction = self.active_policy.predict(event=event)
... | null |
_import_momento_vector_index | from langchain_community.vectorstores.momento_vector_index import MomentoVectorIndex
return MomentoVectorIndex | def _import_momento_vector_index() ->Any:
from langchain_community.vectorstores.momento_vector_index import MomentoVectorIndex
return MomentoVectorIndex | null |
patch_config | """Patch a config with new values.
Args:
config (Optional[RunnableConfig]): The config to patch.
copy_locals (bool, optional): Whether to copy locals. Defaults to False.
callbacks (Optional[BaseCallbackManager], optional): The callbacks to set.
Defaults to None.
recursion_... | def patch_config(config: Optional[RunnableConfig], *, callbacks: Optional[
BaseCallbackManager]=None, recursion_limit: Optional[int]=None,
max_concurrency: Optional[int]=None, run_name: Optional[str]=None,
configurable: Optional[Dict[str, Any]]=None) ->RunnableConfig:
"""Patch a config with new values.
... | Patch a config with new values.
Args:
config (Optional[RunnableConfig]): The config to patch.
copy_locals (bool, optional): Whether to copy locals. Defaults to False.
callbacks (Optional[BaseCallbackManager], optional): The callbacks to set.
Defaults to None.
recursion_limit (Optional[int], optio... |
split_documents | """Split documents."""
texts, metadatas = [], []
for doc in documents:
texts.append(doc.page_content)
metadatas.append(doc.metadata)
return self.create_documents(texts, metadatas=metadatas) | def split_documents(self, documents: Iterable[Document]) ->List[Document]:
"""Split documents."""
texts, metadatas = [], []
for doc in documents:
texts.append(doc.page_content)
metadatas.append(doc.metadata)
return self.create_documents(texts, metadatas=metadatas) | Split documents. |
_import_vllm | from langchain_community.llms.vllm import VLLM
return VLLM | def _import_vllm() ->Any:
from langchain_community.llms.vllm import VLLM
return VLLM | null |
test_init_fail_embedding_dim_mismatch | index = mock_index(index_details)
with pytest.raises(ValueError) as ex:
DatabricksVectorSearch(index, text_column=DEFAULT_TEXT_COLUMN,
embedding=FakeEmbeddingsWithDimension(DEFAULT_VECTOR_DIMENSION + 1))
assert f"embedding model's dimension '{DEFAULT_VECTOR_DIMENSION + 1}' does not match with the index's di... | @pytest.mark.requires('databricks', 'databricks.vector_search')
@pytest.mark.parametrize('index_details', [
DELTA_SYNC_INDEX_SELF_MANAGED_EMBEDDINGS, DIRECT_ACCESS_INDEX])
def test_init_fail_embedding_dim_mismatch(index_details: dict) ->None:
index = mock_index(index_details)
with pytest.raises(ValueError) ... | null |
clear | """Clear memory contents.""" | @abstractmethod
def clear(self) ->None:
"""Clear memory contents.""" | Clear memory contents. |
on_llm_start | """Run when LLM starts."""
self.metrics['step'] += 1
self.metrics['llm_starts'] += 1
self.metrics['starts'] += 1
llm_starts = self.metrics['llm_starts']
resp: Dict[str, Any] = {}
resp.update({'action': 'on_llm_start'})
resp.update(flatten_dict(serialized))
resp.update(self.metrics)
self.mlflg.metrics(self.metrics, step... | def on_llm_start(self, serialized: Dict[str, Any], prompts: List[str], **
kwargs: Any) ->None:
"""Run when LLM starts."""
self.metrics['step'] += 1
self.metrics['llm_starts'] += 1
self.metrics['starts'] += 1
llm_starts = self.metrics['llm_starts']
resp: Dict[str, Any] = {}
resp.update({'... | Run when LLM starts. |
available_models | """Map the available models that can be invoked."""
return self.client.available_models | @property
def available_models(self) ->dict:
"""Map the available models that can be invoked."""
return self.client.available_models | Map the available models that can be invoked. |
test_add_texts | """Test end to end construction and simple similarity search."""
docsearch = DocArrayInMemorySearch.from_params(FakeEmbeddings())
assert isinstance(docsearch, DocArrayInMemorySearch)
assert docsearch.doc_index.num_docs() == 0
docsearch.add_texts(texts=texts)
assert docsearch.doc_index.num_docs() == 3 | def test_add_texts(texts: List[str], tmp_path: Path) ->None:
"""Test end to end construction and simple similarity search."""
docsearch = DocArrayInMemorySearch.from_params(FakeEmbeddings())
assert isinstance(docsearch, DocArrayInMemorySearch)
assert docsearch.doc_index.num_docs() == 0
docsearch.add... | Test end to end construction and simple similarity search. |
_get_prompt_input_key | """Get the input key for the prompt."""
if self.input_key is None:
return get_prompt_input_key(inputs, self.memory_variables)
return self.input_key | def _get_prompt_input_key(self, inputs: Dict[str, Any]) ->str:
"""Get the input key for the prompt."""
if self.input_key is None:
return get_prompt_input_key(inputs, self.memory_variables)
return self.input_key | Get the input key for the prompt. |
import_google | """Import google libraries.
Returns:
Tuple[Request, Credentials]: Request and Credentials classes.
"""
try:
from google.auth.transport.requests import Request
from google.oauth2.credentials import Credentials
except ImportError:
raise ImportError(
'You need to install google-auth-ht... | def import_google() ->Tuple[Request, Credentials]:
"""Import google libraries.
Returns:
Tuple[Request, Credentials]: Request and Credentials classes.
"""
try:
from google.auth.transport.requests import Request
from google.oauth2.credentials import Credentials
except ImportEr... | Import google libraries.
Returns:
Tuple[Request, Credentials]: Request and Credentials classes. |
test_model_garden | """In order to run this test, you should provide endpoint names.
Example:
export FALCON_ENDPOINT_ID=...
export LLAMA_ENDPOINT_ID=...
export PROJECT=...
"""
endpoint_id = os.environ[endpoint_os_variable_name]
project = os.environ['PROJECT']
location = 'europe-west4'
llm = VertexAIModelGarden(endpoin... | @pytest.mark.parametrize('endpoint_os_variable_name,result_arg', [(
'FALCON_ENDPOINT_ID', 'generated_text'), ('LLAMA_ENDPOINT_ID', None)])
def test_model_garden(endpoint_os_variable_name: str, result_arg: Optional[str]
) ->None:
"""In order to run this test, you should provide endpoint names.
Example:
... | In order to run this test, you should provide endpoint names.
Example:
export FALCON_ENDPOINT_ID=...
export LLAMA_ENDPOINT_ID=...
export PROJECT=... |
_convert_schema | props = {k: {'title': k, **v} for k, v in schema['properties'].items()}
return {'type': 'object', 'properties': props, 'required': schema.get(
'required', [])} | def _convert_schema(schema: dict) ->dict:
props = {k: {'title': k, **v} for k, v in schema['properties'].items()}
return {'type': 'object', 'properties': props, 'required': schema.get(
'required', [])} | null |
from_filesystem | """Create a concurrent generic document loader using a filesystem blob loader.
Args:
path: The path to the directory to load documents from.
glob: The glob pattern to use to find documents.
suffixes: The suffixes to use to filter documents. If None, all files
... | @classmethod
def from_filesystem(cls, path: _PathLike, *, glob: str='**/[!.]*', exclude:
Sequence[str]=(), suffixes: Optional[Sequence[str]]=None, show_progress:
bool=False, parser: Union[DEFAULT, BaseBlobParser]='default',
num_workers: int=4, parser_kwargs: Optional[dict]=None) ->ConcurrentLoader:
"""C... | Create a concurrent generic document loader using a filesystem blob loader.
Args:
path: The path to the directory to load documents from.
glob: The glob pattern to use to find documents.
suffixes: The suffixes to use to filter documents. If None, all files
matching the glob will be loaded.
... |
test_against_pal_chain_doc | """
Test CPAL chain against the first example in the PAL chain notebook doc:
https://github.com/langchain-ai/langchain/blob/master/docs/extras/modules/chains/additional/pal.ipynb
"""
narrative_input = (
'Jan has three times the number of pets as Marcia. Marcia has two more pets than Cindy. ... | def test_against_pal_chain_doc(self) ->None:
"""
Test CPAL chain against the first example in the PAL chain notebook doc:
https://github.com/langchain-ai/langchain/blob/master/docs/extras/modules/chains/additional/pal.ipynb
"""
narrative_input = (
'Jan has three times the number... | Test CPAL chain against the first example in the PAL chain notebook doc:
https://github.com/langchain-ai/langchain/blob/master/docs/extras/modules/chains/additional/pal.ipynb |
load | return list(self.lazy_load()) | def load(self) ->List[Document]:
return list(self.lazy_load()) | null |
__getattr__ | if name == 'AlphaVantageAPIWrapper':
return _import_alpha_vantage()
elif name == 'ApifyWrapper':
return _import_apify()
elif name == 'ArceeWrapper':
return _import_arcee()
elif name == 'ArxivAPIWrapper':
return _import_arxiv()
elif name == 'LambdaWrapper':
return _import_awslambda()
elif name == 'Bi... | def __getattr__(name: str) ->Any:
if name == 'AlphaVantageAPIWrapper':
return _import_alpha_vantage()
elif name == 'ApifyWrapper':
return _import_apify()
elif name == 'ArceeWrapper':
return _import_arcee()
elif name == 'ArxivAPIWrapper':
return _import_arxiv()
elif na... | null |
test_sqlitevss | """Test end to end construction and search."""
docsearch = _sqlite_vss_from_texts()
output = docsearch.similarity_search('foo', k=1)
assert output == [Document(page_content='foo', metadata={})] | @pytest.mark.requires('sqlite-vss')
def test_sqlitevss() ->None:
"""Test end to end construction and search."""
docsearch = _sqlite_vss_from_texts()
output = docsearch.similarity_search('foo', k=1)
assert output == [Document(page_content='foo', metadata={})] | Test end to end construction and search. |
test_cassandra_no_drop | """Test end to end construction and re-opening the same index."""
texts = ['foo', 'bar', 'baz']
metadatas = [{'page': i} for i in range(len(texts))]
docsearch = _vectorstore_from_texts(texts, metadatas=metadatas)
del docsearch
texts2 = ['foo2', 'bar2', 'baz2']
docsearch = _vectorstore_from_texts(texts2, metadatas=metad... | def test_cassandra_no_drop() ->None:
"""Test end to end construction and re-opening the same index."""
texts = ['foo', 'bar', 'baz']
metadatas = [{'page': i} for i in range(len(texts))]
docsearch = _vectorstore_from_texts(texts, metadatas=metadatas)
del docsearch
texts2 = ['foo2', 'bar2', 'baz2'... | Test end to end construction and re-opening the same index. |
test_loadnotebook_eachnotehasexpectedcontentwithleadingandtrailingremoved | documents = EverNoteLoader(self.example_notebook_path(
'sample_notebook.enex'), False).load()
content_note1 = documents[0].page_content
assert content_note1 == 'abc'
content_note2 = documents[1].page_content
assert content_note2 == '**Jan - March 2022**' | def test_loadnotebook_eachnotehasexpectedcontentwithleadingandtrailingremoved(
self) ->None:
documents = EverNoteLoader(self.example_notebook_path(
'sample_notebook.enex'), False).load()
content_note1 = documents[0].page_content
assert content_note1 == 'abc'
content_note2 = documents[1].page... | null |
validate_environment | """Validate that we have all required info to access Clarifai
platform and python package exists in environment."""
values['pat'] = get_from_dict_or_env(values, 'pat', 'CLARIFAI_PAT')
user_id = values.get('user_id')
app_id = values.get('app_id')
model_id = values.get('model_id')
model_url = values.get('model_ur... | @root_validator()
def validate_environment(cls, values: Dict) ->Dict:
"""Validate that we have all required info to access Clarifai
platform and python package exists in environment."""
values['pat'] = get_from_dict_or_env(values, 'pat', 'CLARIFAI_PAT')
user_id = values.get('user_id')
app_id = v... | Validate that we have all required info to access Clarifai
platform and python package exists in environment. |
_kwargs_post_request | """Build the kwargs for the Post request, used by sync
Args:
prompt (str): prompt used in query
kwargs (dict): model kwargs in payload
Returns:
Dict[str, Union[str,dict]]: _description_
"""
_model_kwargs = self.model_kwargs or {}
_params = {**_model_kwargs, ... | def _kwargs_post_request(self, prompt: str, kwargs: Mapping[str, Any]
) ->Mapping[str, Any]:
"""Build the kwargs for the Post request, used by sync
Args:
prompt (str): prompt used in query
kwargs (dict): model kwargs in payload
Returns:
Dict[str, Union[str,d... | Build the kwargs for the Post request, used by sync
Args:
prompt (str): prompt used in query
kwargs (dict): model kwargs in payload
Returns:
Dict[str, Union[str,dict]]: _description_ |
test_whatsapp_chat_loader | chat_path = pathlib.Path(__file__).parent / 'data' / 'whatsapp_chat.txt'
loader = whatsapp.WhatsAppChatLoader(str(chat_path))
chat_sessions = list(utils.map_ai_messages(loader.lazy_load(), sender=
'Dr. Feather'))
assert chat_sessions, 'Chat sessions should not be empty'
assert chat_sessions[0]['messages'], 'Chat me... | def test_whatsapp_chat_loader() ->None:
chat_path = pathlib.Path(__file__).parent / 'data' / 'whatsapp_chat.txt'
loader = whatsapp.WhatsAppChatLoader(str(chat_path))
chat_sessions = list(utils.map_ai_messages(loader.lazy_load(), sender=
'Dr. Feather'))
assert chat_sessions, 'Chat sessions should... | null |
__init__ | super().__init__()
self.metadata_key = metadata_key | def __init__(self, metadata_key: str='metadata') ->None:
super().__init__()
self.metadata_key = metadata_key | null |
concatenate_cells | """Combine cells information in a readable format ready to be used.
Args:
cell: A dictionary
include_outputs: Whether to include the outputs of the cell.
max_output_length: Maximum length of the output to be displayed.
traceback: Whether to return a traceback of the error.
Retu... | def concatenate_cells(cell: dict, include_outputs: bool, max_output_length:
int, traceback: bool) ->str:
"""Combine cells information in a readable format ready to be used.
Args:
cell: A dictionary
include_outputs: Whether to include the outputs of the cell.
max_output_length: Maxim... | Combine cells information in a readable format ready to be used.
Args:
cell: A dictionary
include_outputs: Whether to include the outputs of the cell.
max_output_length: Maximum length of the output to be displayed.
traceback: Whether to return a traceback of the error.
Returns:
A string with the ... |
test_load_converts_dataframe_columns_to_document_metadata | import xorbits.pandas as pd
data = {'text': ['Hello', 'World'], 'author': ['Alice', 'Bob'], 'date': [
'2022-01-01', '2022-01-02']}
loader = XorbitsLoader(pd.DataFrame(data))
docs = loader.load()
expected = {'author': ['Alice', 'Bob'], 'date': ['2022-01-01', '2022-01-02']}
for i, doc in enumerate(docs):
assert d... | @pytest.mark.skipif(not xorbits_installed, reason='xorbits not installed')
def test_load_converts_dataframe_columns_to_document_metadata() ->None:
import xorbits.pandas as pd
data = {'text': ['Hello', 'World'], 'author': ['Alice', 'Bob'], 'date':
['2022-01-01', '2022-01-02']}
loader = XorbitsLoader(... | null |
wait_for_all_tracers | """Wait for all tracers to finish."""
global _TRACERS
for tracer in list(_TRACERS):
if tracer is not None:
tracer.wait_for_futures() | def wait_for_all_tracers() ->None:
"""Wait for all tracers to finish."""
global _TRACERS
for tracer in list(_TRACERS):
if tracer is not None:
tracer.wait_for_futures() | Wait for all tracers to finish. |
init_gptcache_map | i = getattr(init_gptcache_map, '_i', 0)
cache_path = f'data_map_{i}.txt'
if os.path.isfile(cache_path):
os.remove(cache_path)
cache_obj.init(pre_embedding_func=get_prompt, data_manager=get_data_manager
(data_path=cache_path))
init_gptcache_map._i = i + 1 | def init_gptcache_map(cache_obj: Any) ->None:
i = getattr(init_gptcache_map, '_i', 0)
cache_path = f'data_map_{i}.txt'
if os.path.isfile(cache_path):
os.remove(cache_path)
cache_obj.init(pre_embedding_func=get_prompt, data_manager=
get_data_manager(data_path=cache_path))
init_gptcach... | null |
serialize_inputs | if 'prompts' in inputs:
input_ = '\n\n'.join(inputs['prompts'])
elif 'prompt' in inputs:
input_ = inputs['prompt']
elif 'messages' in inputs:
input_ = self.serialize_chat_messages(inputs['messages'])
else:
raise ValueError('LLM Run must have either messages or prompts as inputs.')
return input_ | def serialize_inputs(self, inputs: Dict) ->str:
if 'prompts' in inputs:
input_ = '\n\n'.join(inputs['prompts'])
elif 'prompt' in inputs:
input_ = inputs['prompt']
elif 'messages' in inputs:
input_ = self.serialize_chat_messages(inputs['messages'])
else:
raise ValueError(
... | null |
test_parse_nested_operation | op = 'and(or(eq("a", "b"), eq("a", "c"), eq("a", "d")), not(eq("z", "foo")))'
eq1 = Comparison(comparator=Comparator.EQ, attribute='a', value='b')
eq2 = Comparison(comparator=Comparator.EQ, attribute='a', value='c')
eq3 = Comparison(comparator=Comparator.EQ, attribute='a', value='d')
eq4 = Comparison(comparator=Compara... | def test_parse_nested_operation() ->None:
op = (
'and(or(eq("a", "b"), eq("a", "c"), eq("a", "d")), not(eq("z", "foo")))'
)
eq1 = Comparison(comparator=Comparator.EQ, attribute='a', value='b')
eq2 = Comparison(comparator=Comparator.EQ, attribute='a', value='c')
eq3 = Comparison(comparato... | null |
_import_yandex_gpt | from langchain_community.llms.yandex import YandexGPT
return YandexGPT | def _import_yandex_gpt() ->Any:
from langchain_community.llms.yandex import YandexGPT
return YandexGPT | null |
test_cloudflare_workersai_stream | response_body = ['data: {"response": "Hello"}', 'data: [DONE]']
responses.add(responses.POST,
'https://api.cloudflare.com/client/v4/accounts/my_account_id/ai/run/@cf/meta/llama-2-7b-chat-int8'
, body='\n'.join(response_body), status=200)
llm = CloudflareWorkersAI(account_id='my_account_id', api_token=
'my_a... | @responses.activate
def test_cloudflare_workersai_stream() ->None:
response_body = ['data: {"response": "Hello"}', 'data: [DONE]']
responses.add(responses.POST,
'https://api.cloudflare.com/client/v4/accounts/my_account_id/ai/run/@cf/meta/llama-2-7b-chat-int8'
, body='\n'.join(response_body), sta... | null |
max_marginal_relevance_search_with_score | """Return docs selected using the maximal marginal relevance with score.
Maximal marginal relevance optimizes for similarity to query AND diversity
among selected documents.
Args:
query (str): Text to look up documents similar to.
k (int): Number of Documents to ret... | def max_marginal_relevance_search_with_score(self, query: str, k: int=4,
fetch_k: int=20, lambda_mult: float=0.5, filter: Optional[dict]=None,
**kwargs: Any) ->List[Tuple[Document, float]]:
"""Return docs selected using the maximal marginal relevance with score.
Maximal marginal relevance optimizes... | Return docs selected using the maximal marginal relevance with score.
Maximal marginal relevance optimizes for similarity to query AND diversity
among selected documents.
Args:
query (str): Text to look up documents similar to.
k (int): Number of Documents to return. Defaults to 4.
fetch_k (int): Numb... |
format_log_to_messages | """Construct the scratchpad that lets the agent continue its thought process."""
thoughts: List[BaseMessage] = []
for action, observation in intermediate_steps:
thoughts.append(AIMessage(content=action.log))
human_message = HumanMessage(content=template_tool_response.format(
observation=observation))
... | def format_log_to_messages(intermediate_steps: List[Tuple[AgentAction, str]
], template_tool_response: str='{observation}') ->List[BaseMessage]:
"""Construct the scratchpad that lets the agent continue its thought process."""
thoughts: List[BaseMessage] = []
for action, observation in intermediate_steps... | Construct the scratchpad that lets the agent continue its thought process. |
get_allowed_tools | return None | def get_allowed_tools(self) ->Optional[List[str]]:
return None | null |
test_jinachat_api_key_is_secret_string | llm = JinaChat(jinachat_api_key='secret-api-key')
assert isinstance(llm.jinachat_api_key, SecretStr) | def test_jinachat_api_key_is_secret_string() ->None:
llm = JinaChat(jinachat_api_key='secret-api-key')
assert isinstance(llm.jinachat_api_key, SecretStr) | null |
test_visit_comparison_eq | comp = Comparison(comparator=Comparator.EQ, attribute='qty', value=10)
expected = {'qty': {'$eq': 10}}
actual = DEFAULT_TRANSLATOR.visit_comparison(comp)
assert expected == actual | def test_visit_comparison_eq() ->None:
comp = Comparison(comparator=Comparator.EQ, attribute='qty', value=10)
expected = {'qty': {'$eq': 10}}
actual = DEFAULT_TRANSLATOR.visit_comparison(comp)
assert expected == actual | null |
_identifying_params | return {} | @property
def _identifying_params(self) ->Dict[str, Any]:
return {} | null |
ngram_fuzzy_matching_strategy | """
N-gram fuzzy matching strategy for deanonymization.
It replaces all the anonymized entities with the original ones.
It uses fuzzy matching to find the position of the anonymized entity in the text.
It generates n-grams of the same length as the anonymized entity from the text and
uses fuzzy matc... | def ngram_fuzzy_matching_strategy(text: str, deanonymizer_mapping:
MappingDataType, fuzzy_threshold: int=85, use_variable_length: bool=True
) ->str:
"""
N-gram fuzzy matching strategy for deanonymization.
It replaces all the anonymized entities with the original ones.
It uses fuzzy matching to f... | N-gram fuzzy matching strategy for deanonymization.
It replaces all the anonymized entities with the original ones.
It uses fuzzy matching to find the position of the anonymized entity in the text.
It generates n-grams of the same length as the anonymized entity from the text and
uses fuzzy matching to find the positio... |
create_table_if_not_exists | """
Helper function: create table if not exists
"""
from psycopg2 import sql
cursor = self._connection.cursor()
cursor.execute(sql.SQL(
'CREATE TABLE IF NOT EXISTS {} ( id UUID, embedding_id INTEGER, text VARCHAR(60000), metadata VARCHAR(1024), ... | def create_table_if_not_exists(self) ->None:
"""
Helper function: create table if not exists
"""
from psycopg2 import sql
cursor = self._connection.cursor()
cursor.execute(sql.SQL(
'CREATE TABLE IF NOT EXISTS {} ( id UUID, embedding_id INTEGER, ... | Helper function: create table if not exists |
list_as_str | """Same as list, but returns a stringified version of the JSON for
insertting back into an LLM."""
actions = self.list()
return json.dumps(actions) | def list_as_str(self) ->str:
"""Same as list, but returns a stringified version of the JSON for
insertting back into an LLM."""
actions = self.list()
return json.dumps(actions) | Same as list, but returns a stringified version of the JSON for
inserting back into an LLM. |
check_queries_required | if values.get('sequential_response') and not queries:
raise ValueError(
'queries is required when sequential_response is set to True')
return queries | @validator('queries', always=True)
def check_queries_required(cls, queries: Optional[Mapping], values: Mapping
[str, Any]) ->Optional[Mapping]:
if values.get('sequential_response') and not queries:
raise ValueError(
'queries is required when sequential_response is set to True')
return qu... | null |
_get_relevant_documents | assert isinstance(self, FakeRetrieverV2)
assert run_manager is not None
assert isinstance(run_manager, CallbackManagerForRetrieverRun)
if self.throw_error:
raise ValueError('Test error')
return [Document(page_content=query)] | def _get_relevant_documents(self, query: str, *, run_manager: Optional[
CallbackManagerForRetrieverRun]=None) ->List[Document]:
assert isinstance(self, FakeRetrieverV2)
assert run_manager is not None
assert isinstance(run_manager, CallbackManagerForRetrieverRun)
if self.throw_error:
raise Va... | null |
reciprocal_rank_fusion | fused_scores = {}
for docs in results:
for rank, doc in enumerate(docs):
doc_str = dumps(doc)
if doc_str not in fused_scores:
fused_scores[doc_str] = 0
fused_scores[doc_str] += 1 / (rank + k)
reranked_results = [(loads(doc), score) for doc, score in sorted(
fused_scores.items... | def reciprocal_rank_fusion(results: list[list], k=60):
fused_scores = {}
for docs in results:
for rank, doc in enumerate(docs):
doc_str = dumps(doc)
if doc_str not in fused_scores:
fused_scores[doc_str] = 0
fused_scores[doc_str] += 1 / (rank + k)
r... | null |
on_chat_model_start | """Run when LLM starts running.
Args:
serialized (Dict[str, Any]): The serialized LLM.
messages (List[List[BaseMessage]]): The list of messages.
run_id (UUID, optional): The ID of the run. Defaults to None.
Returns:
List[CallbackManagerForLLMRun]: A call... | def on_chat_model_start(self, serialized: Dict[str, Any], messages: List[
List[BaseMessage]], **kwargs: Any) ->List[CallbackManagerForLLMRun]:
"""Run when LLM starts running.
Args:
serialized (Dict[str, Any]): The serialized LLM.
messages (List[List[BaseMessage]]): The list of m... | Run when LLM starts running.
Args:
serialized (Dict[str, Any]): The serialized LLM.
messages (List[List[BaseMessage]]): The list of messages.
run_id (UUID, optional): The ID of the run. Defaults to None.
Returns:
List[CallbackManagerForLLMRun]: A callback manager for each
list of messages as a... |
get_lc_namespace | """Get the namespace of the langchain object.
For example, if the class is `langchain.llms.openai.OpenAI`, then the
namespace is ["langchain", "llms", "openai"]
"""
return cls.__module__.split('.') | @classmethod
def get_lc_namespace(cls) ->List[str]:
"""Get the namespace of the langchain object.
For example, if the class is `langchain.llms.openai.OpenAI`, then the
namespace is ["langchain", "llms", "openai"]
"""
return cls.__module__.split('.') | Get the namespace of the langchain object.
For example, if the class is `langchain.llms.openai.OpenAI`, then the
namespace is ["langchain", "llms", "openai"] |
flush_tracker | """Flush the tracker and setup the session.
Everything after this will be a new table.
Args:
name: Name of the performed session so far so it is identifiable
langchain_asset: The langchain asset to save.
finish: Whether to finish the run.
Returns:
... | def flush_tracker(self, name: Optional[str]=None, langchain_asset: Any=None,
finish: bool=False) ->None:
"""Flush the tracker and setup the session.
Everything after this will be a new table.
Args:
name: Name of the performed session so far so it is identifiable
langcha... | Flush the tracker and setup the session.
Everything after this will be a new table.
Args:
name: Name of the performed session so far so it is identifiable
langchain_asset: The langchain asset to save.
finish: Whether to finish the run.
Returns:
None |
_import_bittensor | from langchain_community.llms.bittensor import NIBittensorLLM
return NIBittensorLLM | def _import_bittensor() ->Any:
from langchain_community.llms.bittensor import NIBittensorLLM
return NIBittensorLLM | null |
test_simple_context_str_w_emb | str1 = 'test'
encoded_str1 = base.stringify_embedding(list(encoded_keyword + str1))
expected = [{'a_namespace': encoded_str1}]
assert base.embed(base.Embed(str1), MockEncoder(), 'a_namespace') == expected
expected_embed_and_keep = [{'a_namespace': str1 + ' ' + encoded_str1}]
assert base.embed(base.EmbedAndKeep(str1), M... | @pytest.mark.requires('vowpal_wabbit_next')
def test_simple_context_str_w_emb() ->None:
str1 = 'test'
encoded_str1 = base.stringify_embedding(list(encoded_keyword + str1))
expected = [{'a_namespace': encoded_str1}]
assert base.embed(base.Embed(str1), MockEncoder(), 'a_namespace'
) == expected
... | null |
_create_retry_decorator | import openai
min_seconds = 1
max_seconds = 60
return retry(reraise=True, stop=stop_after_attempt(self.max_retries), wait=
wait_exponential(multiplier=1, min=min_seconds, max=max_seconds), retry
=retry_if_exception_type(openai.error.Timeout) |
retry_if_exception_type(openai.error.APIError) |
retry_if_ex... | def _create_retry_decorator(self) ->Callable[[Any], Any]:
import openai
min_seconds = 1
max_seconds = 60
return retry(reraise=True, stop=stop_after_attempt(self.max_retries),
wait=wait_exponential(multiplier=1, min=min_seconds, max=
max_seconds), retry=retry_if_exception_type(openai.erro... | null |
test_visit_operation | op = Operation(operator=Operator.AND, arguments=[Comparison(comparator=
Comparator.LT, attribute='foo', value=2), Comparison(comparator=
Comparator.EQ, attribute='bar', value='baz'), Comparison(comparator=
Comparator.LT, attribute='abc', value=1)])
expected = "( ( doc.foo < 2 ) and ( doc.bar = 'baz' ) and (... | def test_visit_operation() ->None:
op = Operation(operator=Operator.AND, arguments=[Comparison(comparator=
Comparator.LT, attribute='foo', value=2), Comparison(comparator=
Comparator.EQ, attribute='bar', value='baz'), Comparison(comparator
=Comparator.LT, attribute='abc', value=1)])
expe... | null |
_build_rst_file | """Create a rst file for building of documentation.
Args:
package_name: Can be either "langchain" or "core" or "experimental".
"""
package_dir = _package_dir(package_name)
package_members = _load_package_modules(package_dir)
package_version = _get_package_version(package_dir)
with open(_out_file_path(p... | def _build_rst_file(package_name: str='langchain') ->None:
"""Create a rst file for building of documentation.
Args:
package_name: Can be either "langchain" or "core" or "experimental".
"""
package_dir = _package_dir(package_name)
package_members = _load_package_modules(package_dir)
pac... | Create a rst file for building of documentation.
Args:
package_name: Can be either "langchain" or "core" or "experimental". |
_build_condition | from qdrant_client.http import models as rest
out = []
if isinstance(value, dict):
for _key, value in value.items():
out.extend(self._build_condition(f'{key}.{_key}', value))
elif isinstance(value, list):
for _value in value:
if isinstance(_value, dict):
out.extend(self._build_condit... | def _build_condition(self, key: str, value: Any) ->List[rest.FieldCondition]:
from qdrant_client.http import models as rest
out = []
if isinstance(value, dict):
for _key, value in value.items():
out.extend(self._build_condition(f'{key}.{_key}', value))
elif isinstance(value, list):
... | null |
retriever | return PubMedRetriever() | @pytest.fixture
def retriever() ->PubMedRetriever:
return PubMedRetriever() | null |
__init__ | super().__init__() | def __init__(self, **kwargs: Any) ->None:
super().__init__() | null |
visit_structured_query | if structured_query.filter is None:
kwargs = {}
else:
kwargs = {'filter': structured_query.filter.accept(self)}
return structured_query.query, kwargs | def visit_structured_query(self, structured_query: StructuredQuery) ->Tuple[
str, dict]:
if structured_query.filter is None:
kwargs = {}
else:
kwargs = {'filter': structured_query.filter.accept(self)}
return structured_query.query, kwargs | null |
get_input_schema | return self.runnable.get_input_schema(config) | def get_input_schema(self, config: Optional[RunnableConfig]=None) ->Type[
BaseModel]:
return self.runnable.get_input_schema(config) | null |
test_aviary_call | """Test valid call to Anyscale."""
llm = Aviary()
output = llm('Say bar:')
print(f"""llm answer:
{output}""")
assert isinstance(output, str) | def test_aviary_call() ->None:
"""Test valid call to Anyscale."""
llm = Aviary()
output = llm('Say bar:')
print(f'llm answer:\n{output}')
assert isinstance(output, str) | Test valid call to Anyscale. |
_Return | self.fill('return')
if t.value:
self.write(' ')
self.dispatch(t.value) | def _Return(self, t):
self.fill('return')
if t.value:
self.write(' ')
self.dispatch(t.value) | null |
_import_edenai_EdenAiParsingInvoiceTool | from langchain_community.tools.edenai import EdenAiParsingInvoiceTool
return EdenAiParsingInvoiceTool | def _import_edenai_EdenAiParsingInvoiceTool() ->Any:
from langchain_community.tools.edenai import EdenAiParsingInvoiceTool
return EdenAiParsingInvoiceTool | null |
similarity_search | """Search for similar documents to the query string.
Args:
query (str): The query string to search for.
k (int, optional): The number of results to return. Defaults to 4.
Returns:
List[Document]: A list of documents that are similar to the query.
"""
res = s... | def similarity_search(self, query: str, k: int=4, **kwargs: Any) ->List[
Document]:
"""Search for similar documents to the query string.
Args:
query (str): The query string to search for.
k (int, optional): The number of results to return. Defaults to 4.
Returns:
... | Search for similar documents to the query string.
Args:
query (str): The query string to search for.
k (int, optional): The number of results to return. Defaults to 4.
Returns:
List[Document]: A list of documents that are similar to the query. |
_generate_docs_object | page_offset = []
for page in result.pages:
page_offset.append(page.spans[0]['offset'])
for para in result.paragraphs:
yield Document(page_content=para.content, metadata={'role': para.role,
'page': para.bounding_regions[0].page_number, 'bounding_box': para.
bounding_regions[0].polygon, 'type': 'p... | def _generate_docs_object(self, result: Any) ->Iterator[Document]:
page_offset = []
for page in result.pages:
page_offset.append(page.spans[0]['offset'])
for para in result.paragraphs:
yield Document(page_content=para.content, metadata={'role': para.
role, 'page': para.bounding_r... | null |
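
The rows above pair each `method_name` with its `method_body`, `full_code`, and nullable `docstring`. As a minimal sketch of how a split with this schema could be consumed via the `datasets` library — assuming the dataset is published on the Hugging Face Hub under the placeholder id `owner/method-docstrings` (substitute the real repository id):

```python
# Minimal sketch: load the split and keep only rows that carry a docstring.
# "owner/method-docstrings" is a placeholder id, not this dataset's real name.
from datasets import load_dataset

dataset = load_dataset("owner/method-docstrings", split="train")

# The `docstring` column is nullable (marked as such in the header above),
# so filter out null rows before pairing code with documentation.
documented = dataset.filter(lambda row: row["docstring"] is not None)

# Inspect a few examples of method-name / docstring pairs.
for row in documented.select(range(3)):
    print(row["method_name"])
    print(row["docstring"])
    print("-" * 40)
```

Filtering before iteration keeps downstream code-to-docstring pairing simple, since every remaining row is guaranteed to have both fields.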