| method_name | method_body | full_code | docstring (may be null) |
|---|---|---|---|
_check_parser
|
"""Check that parser is valid for bs4."""
valid_parsers = ['html.parser', 'lxml', 'xml', 'lxml-xml', 'html5lib']
if parser not in valid_parsers:
raise ValueError('`parser` must be one of ' + ', '.join(valid_parsers) +
'.')
|
@staticmethod
def _check_parser(parser: str) ->None:
"""Check that parser is valid for bs4."""
valid_parsers = ['html.parser', 'lxml', 'xml', 'lxml-xml', 'html5lib']
if parser not in valid_parsers:
raise ValueError('`parser` must be one of ' + ', '.join(
valid_parsers) + '.')
|
Check that parser is valid for bs4.
|
input_keys
|
"""Expect input key.
:meta private:
"""
return [self.input_key]
|
@property
def input_keys(self) ->List[str]:
"""Expect input key.
:meta private:
"""
return [self.input_key]
|
Expect input key.
:meta private:
|
case_insensitive_matching_strategy
|
"""
Case insensitive matching strategy for deanonymization.
It replaces all the anonymized entities with the original ones
irrespective of their letter case.
Args:
text: text to deanonymize
deanonymizer_mapping: mapping between anonymized entities and original ones
Examples of matching:
keanu reeves -> Keanu Reeves
JOHN F. KENNEDY -> John F. Kennedy
"""
for entity_type in deanonymizer_mapping:
for anonymized, original in deanonymizer_mapping[entity_type].items():
text = re.sub(anonymized, original, text, flags=re.IGNORECASE)
return text
|
def case_insensitive_matching_strategy(text: str, deanonymizer_mapping:
MappingDataType) ->str:
"""
Case insensitive matching strategy for deanonymization.
It replaces all the anonymized entities with the original ones
irrespective of their letter case.
Args:
text: text to deanonymize
deanonymizer_mapping: mapping between anonymized entities and original ones
Examples of matching:
keanu reeves -> Keanu Reeves
JOHN F. KENNEDY -> John F. Kennedy
"""
for entity_type in deanonymizer_mapping:
for anonymized, original in deanonymizer_mapping[entity_type].items():
text = re.sub(anonymized, original, text, flags=re.IGNORECASE)
return text
|
Case insensitive matching strategy for deanonymization.
It replaces all the anonymized entities with the original ones
irrespective of their letter case.
Args:
text: text to deanonymize
deanonymizer_mapping: mapping between anonymized entities and original ones
Examples of matching:
keanu reeves -> Keanu Reeves
JOHN F. KENNEDY -> John F. Kennedy
|
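A minimal, self-contained sketch of this strategy (the mapping and text below are illustrative, not from the dataset):

import re

deanonymizer_mapping = {'PERSON': {'keanu reeves': 'Keanu Reeves'}}
text = 'I met KEANU REEVES yesterday.'
# Same loop as the method body above: replace each anonymized entity,
# ignoring letter case.
for entity_type in deanonymizer_mapping:
    for anonymized, original in deanonymizer_mapping[entity_type].items():
        text = re.sub(anonymized, original, text, flags=re.IGNORECASE)
print(text)  # I met Keanu Reeves yesterday.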
_completion_with_retry
|
return self.client.create(**kwargs)
|
def _completion_with_retry(**kwargs: Any) ->Any:
return self.client.create(**kwargs)
| null |
completed
|
return self.status == 'completed'
|
def completed(self) ->bool:
return self.status == 'completed'
| null |
_embedding_source_column
|
"""Return the embedding source column configs as a dictionary.
Empty if the index is not a Databricks-managed embedding index.
"""
index_spec = self._delta_sync_index_spec
return next(iter(index_spec.get('embedding_source_columns') or list()), dict())
|
def _embedding_source_column(self) ->dict:
"""Return the embedding source column configs as a dictionary.
Empty if the index is not a Databricks-managed embedding index.
"""
index_spec = self._delta_sync_index_spec
return next(iter(index_spec.get('embedding_source_columns') or list()),
dict())
|
Return the embedding source column configs as a dictionary.
Empty if the index is not a Databricks-managed embedding index.
|
test_momento_cache_hit
|
llm = FakeLLM()
params = llm.dict()
params['stop'] = None
llm_string = str(sorted([(k, v) for k, v in params.items()]))
llm_generations = [[Generation(text=generation, generation_info=params) for
generation in prompt_i_generations] for prompt_i_generations in generations
]
for prompt_i, llm_generations_i in zip(prompts, llm_generations):
momento_cache.update(prompt_i, llm_string, llm_generations_i)
assert llm.generate(prompts) == LLMResult(generations=llm_generations,
llm_output={})
|
@pytest.mark.parametrize('prompts, generations', [([random_string()], [[
random_string()]]), ([random_string()], [[random_string(),
random_string()]]), ([random_string()], [[random_string(),
random_string(), random_string()]]), ([random_string(), random_string()
], [[random_string()], [random_string(), random_string()]])])
def test_momento_cache_hit(momento_cache: MomentoCache, prompts: list[str],
generations: list[list[str]]) ->None:
llm = FakeLLM()
params = llm.dict()
params['stop'] = None
llm_string = str(sorted([(k, v) for k, v in params.items()]))
llm_generations = [[Generation(text=generation, generation_info=params) for
generation in prompt_i_generations] for prompt_i_generations in
generations]
for prompt_i, llm_generations_i in zip(prompts, llm_generations):
momento_cache.update(prompt_i, llm_string, llm_generations_i)
assert llm.generate(prompts) == LLMResult(generations=llm_generations,
llm_output={})
| null |
_import_bing_search
|
from langchain_community.utilities.bing_search import BingSearchAPIWrapper
return BingSearchAPIWrapper
|
def _import_bing_search() ->Any:
from langchain_community.utilities.bing_search import BingSearchAPIWrapper
return BingSearchAPIWrapper
| null |
format_response_payload
|
return json.loads(output)[0]
|
def format_response_payload(self, output: bytes) ->str:
return json.loads(output)[0]
| null |
from_texts
|
"""Construct FAISS wrapper from raw documents.
This is a user friendly interface that:
1. Embeds documents.
2. Creates an in memory docstore
3. Initializes the FAISS database
This is intended to be a quick way to get started.
Example:
.. code-block:: python
from langchain_community.vectorstores import FAISS
from langchain_community.embeddings import OpenAIEmbeddings
embeddings = OpenAIEmbeddings()
faiss = FAISS.from_texts(texts, embeddings)
"""
embeddings = embedding.embed_documents(texts)
return cls.__from(texts, embeddings, embedding, metadatas=metadatas, ids=
ids, **kwargs)
|
@classmethod
def from_texts(cls, texts: List[str], embedding: Embeddings, metadatas:
Optional[List[dict]]=None, ids: Optional[List[str]]=None, **kwargs: Any
) ->FAISS:
"""Construct FAISS wrapper from raw documents.
This is a user friendly interface that:
1. Embeds documents.
2. Creates an in memory docstore
3. Initializes the FAISS database
This is intended to be a quick way to get started.
Example:
.. code-block:: python
from langchain_community.vectorstores import FAISS
from langchain_community.embeddings import OpenAIEmbeddings
embeddings = OpenAIEmbeddings()
faiss = FAISS.from_texts(texts, embeddings)
"""
embeddings = embedding.embed_documents(texts)
return cls.__from(texts, embeddings, embedding, metadatas=metadatas,
ids=ids, **kwargs)
|
Construct FAISS wrapper from raw documents.
This is a user friendly interface that:
1. Embeds documents.
2. Creates an in memory docstore
3. Initializes the FAISS database
This is intended to be a quick way to get started.
Example:
.. code-block:: python
from langchain_community.vectorstores import FAISS
from langchain_community.embeddings import OpenAIEmbeddings
embeddings = OpenAIEmbeddings()
faiss = FAISS.from_texts(texts, embeddings)
|
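Building on the docstring's own example, a hedged sketch of the optional metadatas/ids parameters (the texts, metadata values, and ids are illustrative; assumes faiss and an OpenAI key are available):

from langchain_community.vectorstores import FAISS
from langchain_community.embeddings import OpenAIEmbeddings

texts = ['hello world', 'goodbye world']
metadatas = [{'source': 'a'}, {'source': 'b'}]
# ids lets the caller control docstore keys instead of generated ones.
faiss = FAISS.from_texts(texts, OpenAIEmbeddings(), metadatas=metadatas, ids=['1', '2'])
docs = faiss.similarity_search('hello', k=1)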
results
|
"""Use praw to search Reddit and return a list of dictionaries,
one for each post.
"""
subredditObject = self.reddit_client.subreddit(subreddit)
search_results = subredditObject.search(query=query, sort=sort, time_filter
=time_filter, limit=limit)
search_results = [r for r in search_results]
results_object = []
for submission in search_results:
    results_object.append({
        'post_subreddit': submission.subreddit_name_prefixed,
        'post_category': submission.category,
        'post_title': submission.title,
        'post_text': submission.selftext,
        'post_score': submission.score,
        'post_id': submission.id,
        'post_url': submission.url,
        'post_author': submission.author,
    })
return results_object
|
def results(self, query: str, sort: str, time_filter: str, subreddit: str,
limit: int) ->List[Dict]:
"""Use praw to search Reddit and return a list of dictionaries,
one for each post.
"""
subredditObject = self.reddit_client.subreddit(subreddit)
search_results = subredditObject.search(query=query, sort=sort,
time_filter=time_filter, limit=limit)
search_results = [r for r in search_results]
results_object = []
for submission in search_results:
        results_object.append({
            'post_subreddit': submission.subreddit_name_prefixed,
            'post_category': submission.category,
            'post_title': submission.title,
            'post_text': submission.selftext,
            'post_score': submission.score,
            'post_id': submission.id,
            'post_url': submission.url,
            'post_author': submission.author,
        })
return results_object
|
Use praw to search Reddit and return a list of dictionaries,
one for each post.
|
__init__
|
user_ctx.set(user_id)
user_props_ctx.set(user_props)
|
def __init__(self, user_id: str, user_props: Any=None) ->None:
user_ctx.set(user_id)
user_props_ctx.set(user_props)
| null |
_search_tql
|
"""Function for performing tql_search.
Args:
tql (str): TQL Query string for direct evaluation.
Available only for `compute_engine` and `tensor_db`.
exec_option (str, optional): Supports 3 ways to search.
Could be "python", "compute_engine" or "tensor_db". Default is "python".
- ``python`` - Pure-python implementation for the client.
WARNING: not recommended for big datasets due to potential memory
issues.
- ``compute_engine`` - C++ implementation of Deep Lake Compute
Engine for the client. Not for in-memory or local datasets.
- ``tensor_db`` - Hosted Managed Tensor Database for storage
and query execution. Only for data in Deep Lake Managed Database.
Use runtime = {"db_engine": True} during dataset creation.
return_score (bool): Return score with document. Default is False.
Returns:
Tuple[List[Document], List[Tuple[Document, float]]] - A tuple of two lists.
The first list contains Documents, and the second list contains
tuples of Document and float score.
Raises:
ValueError: If return_score is True but some condition is not met.
"""
result = self.vectorstore.search(query=tql, exec_option=exec_option)
metadatas = result['metadata']
texts = result['text']
docs = [Document(page_content=text, metadata=metadata) for text, metadata in
zip(texts, metadatas)]
if kwargs:
unsupported_argument = next(iter(kwargs))
if kwargs[unsupported_argument] is not False:
raise ValueError(
f'specifying {unsupported_argument} is not supported with tql search.'
)
return docs
|
def _search_tql(self, tql: Optional[str], exec_option: Optional[str]=None,
**kwargs: Any) ->List[Document]:
"""Function for performing tql_search.
Args:
tql (str): TQL Query string for direct evaluation.
Available only for `compute_engine` and `tensor_db`.
exec_option (str, optional): Supports 3 ways to search.
Could be "python", "compute_engine" or "tensor_db". Default is "python".
- ``python`` - Pure-python implementation for the client.
WARNING: not recommended for big datasets due to potential memory
issues.
- ``compute_engine`` - C++ implementation of Deep Lake Compute
Engine for the client. Not for in-memory or local datasets.
- ``tensor_db`` - Hosted Managed Tensor Database for storage
and query execution. Only for data in Deep Lake Managed Database.
Use runtime = {"db_engine": True} during dataset creation.
return_score (bool): Return score with document. Default is False.
Returns:
Tuple[List[Document], List[Tuple[Document, float]]] - A tuple of two lists.
The first list contains Documents, and the second list contains
tuples of Document and float score.
Raises:
ValueError: If return_score is True but some condition is not met.
"""
result = self.vectorstore.search(query=tql, exec_option=exec_option)
metadatas = result['metadata']
texts = result['text']
docs = [Document(page_content=text, metadata=metadata) for text,
metadata in zip(texts, metadatas)]
if kwargs:
unsupported_argument = next(iter(kwargs))
if kwargs[unsupported_argument] is not False:
raise ValueError(
f'specifying {unsupported_argument} is not supported with tql search.'
)
return docs
|
Function for performing tql_search.
Args:
tql (str): TQL Query string for direct evaluation.
Available only for `compute_engine` and `tensor_db`.
exec_option (str, optional): Supports 3 ways to search.
Could be "python", "compute_engine" or "tensor_db". Default is "python".
- ``python`` - Pure-python implementation for the client.
WARNING: not recommended for big datasets due to potential memory
issues.
- ``compute_engine`` - C++ implementation of Deep Lake Compute
Engine for the client. Not for in-memory or local datasets.
- ``tensor_db`` - Hosted Managed Tensor Database for storage
and query execution. Only for data in Deep Lake Managed Database.
Use runtime = {"db_engine": True} during dataset creation.
return_score (bool): Return score with document. Default is False.
Returns:
Tuple[List[Document], List[Tuple[Document, float]]] - A tuple of two lists.
The first list contains Documents, and the second list contains
tuples of Document and float score.
Raises:
ValueError: If return_score is True but some condition is not met.
|
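A heavily hedged call sketch for this private helper; `db` is assumed to be an existing DeepLake vector store, and the TQL string is a hypothetical placeholder (real TQL syntax is defined by Deep Lake, not by this snippet):

# `db` is an existing langchain_community.vectorstores.DeepLake instance.
docs = db._search_tql(tql='select * limit 4', exec_option='compute_engine')  # hypothetical TQL
for doc in docs:
    print(doc.page_content[:80])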
__init__
|
"""Initialize the progress bar.
Args:
total: int, the total number of items to be processed.
ncols: int, the character width of the progress bar.
"""
self.total = total
self.ncols = ncols
self.counter = 0
self.lock = threading.Lock()
self._print_bar()
|
def __init__(self, total: int, ncols: int=50, **kwargs: Any):
"""Initialize the progress bar.
Args:
total: int, the total number of items to be processed.
ncols: int, the character width of the progress bar.
"""
self.total = total
self.ncols = ncols
self.counter = 0
self.lock = threading.Lock()
self._print_bar()
|
Initialize the progress bar.
Args:
total: int, the total number of items to be processed.
ncols: int, the character width of the progress bar.
|
test_update_with_group_ids
|
"""Test updating records in the database."""
read_keys = manager.list_keys()
assert read_keys == []
keys = ['key1', 'key2', 'key3']
manager.update(keys)
read_keys = manager.list_keys()
assert read_keys == ['key1', 'key2', 'key3']
|
def test_update_with_group_ids(manager: SQLRecordManager) ->None:
"""Test updating records in the database."""
read_keys = manager.list_keys()
assert read_keys == []
keys = ['key1', 'key2', 'key3']
manager.update(keys)
read_keys = manager.list_keys()
assert read_keys == ['key1', 'key2', 'key3']
|
Test updating records in the database.
|
from_llm_and_tools
|
"""Construct an agent from an LLM and tools."""
cls._validate_tools(tools)
_output_parser = output_parser or cls._get_default_output_parser()
prompt = cls.create_prompt(tools, system_message=system_message,
human_message=human_message, input_variables=input_variables,
output_parser=_output_parser)
llm_chain = LLMChain(llm=llm, prompt=prompt, callback_manager=callback_manager)
tool_names = [tool.name for tool in tools]
return cls(llm_chain=llm_chain, allowed_tools=tool_names,
           output_parser=_output_parser, **kwargs)
|
@classmethod
def from_llm_and_tools(cls, llm: BaseLanguageModel, tools: Sequence[
BaseTool], callback_manager: Optional[BaseCallbackManager]=None,
output_parser: Optional[AgentOutputParser]=None, system_message: str=
PREFIX, human_message: str=SUFFIX, input_variables: Optional[List[str]]
=None, **kwargs: Any) ->Agent:
"""Construct an agent from an LLM and tools."""
cls._validate_tools(tools)
_output_parser = output_parser or cls._get_default_output_parser()
prompt = cls.create_prompt(tools, system_message=system_message,
human_message=human_message, input_variables=input_variables,
output_parser=_output_parser)
llm_chain = LLMChain(llm=llm, prompt=prompt, callback_manager=
callback_manager)
tool_names = [tool.name for tool in tools]
    return cls(llm_chain=llm_chain, allowed_tools=tool_names,
               output_parser=_output_parser, **kwargs)
|
Construct an agent from an LLM and tools.
|
test_float_metadata
|
"""Verify float metadata is loaded correctly"""
doc = next(doc for doc in docs if doc.metadata['source'] ==
'tags_and_frontmatter.md')
assert doc.metadata['aFloat'] == 13.12345
|
def test_float_metadata() ->None:
"""Verify float metadata is loaded correctly"""
doc = next(doc for doc in docs if doc.metadata['source'] ==
'tags_and_frontmatter.md')
assert doc.metadata['aFloat'] == 13.12345
|
Verify float metadata is loaded correctly
|
save_context
|
input_str, output_str = self._get_input_output(inputs, outputs)
requests.post(
    f'{self.url}/sessions/{self.session_id}/memory',
    timeout=self.timeout,
    json={'messages': [
        {'role': 'Human', 'content': f'{input_str}'},
        {'role': 'AI', 'content': f'{output_str}'},
    ]},
    headers=self.__get_headers())
super().save_context(inputs, outputs)
|
def save_context(self, inputs: Dict[str, Any], outputs: Dict[str, str]) ->None:
input_str, output_str = self._get_input_output(inputs, outputs)
    requests.post(
        f'{self.url}/sessions/{self.session_id}/memory',
        timeout=self.timeout,
        json={'messages': [
            {'role': 'Human', 'content': f'{input_str}'},
            {'role': 'AI', 'content': f'{output_str}'},
        ]},
        headers=self.__get_headers())
super().save_context(inputs, outputs)
| null |
conditional_str_parser
|
if input == 'a':
return str_parser
else:
return xml_parser
|
def conditional_str_parser(input: str) ->Runnable:
if input == 'a':
return str_parser
else:
return xml_parser
| null |
test_blob_from_str_path
|
"""Test reading blob from a file path."""
content = b'Hello, World!'
with get_temp_file(content) as temp_path:
str_path = str(temp_path)
assert isinstance(str_path, str)
blob = Blob.from_path(str_path)
assert blob.encoding == 'utf-8'
assert blob.path == str(temp_path)
assert blob.source == str(temp_path)
assert blob.data is None
assert blob.as_bytes() == content
assert blob.as_string() == 'Hello, World!'
with blob.as_bytes_io() as bytes_io:
assert bytes_io.read() == content
|
def test_blob_from_str_path() ->None:
"""Test reading blob from a file path."""
content = b'Hello, World!'
with get_temp_file(content) as temp_path:
str_path = str(temp_path)
assert isinstance(str_path, str)
blob = Blob.from_path(str_path)
assert blob.encoding == 'utf-8'
assert blob.path == str(temp_path)
assert blob.source == str(temp_path)
assert blob.data is None
assert blob.as_bytes() == content
assert blob.as_string() == 'Hello, World!'
with blob.as_bytes_io() as bytes_io:
assert bytes_io.read() == content
|
Test reading blob from a file path.
|
test_redis_cache_ttl
|
from upstash_redis import Redis
langchain.llm_cache = UpstashRedisCache(redis_=Redis(url=URL, token=TOKEN),
ttl=1)
langchain.llm_cache.update('foo', 'bar', [Generation(text='fizz')])
key = langchain.llm_cache._key('foo', 'bar')
assert langchain.llm_cache.redis.pttl(key) > 0
|
@pytest.mark.requires('upstash_redis')
def test_redis_cache_ttl() ->None:
from upstash_redis import Redis
langchain.llm_cache = UpstashRedisCache(redis_=Redis(url=URL, token=
TOKEN), ttl=1)
langchain.llm_cache.update('foo', 'bar', [Generation(text='fizz')])
key = langchain.llm_cache._key('foo', 'bar')
assert langchain.llm_cache.redis.pttl(key) > 0
| null |
_is_url
|
try:
result = urlparse(s)
return all([result.scheme, result.netloc])
except Exception as e:
logger.debug(f'Unable to parse URL: {e}')
return False
|
def _is_url(s: str) ->bool:
try:
result = urlparse(s)
return all([result.scheme, result.netloc])
except Exception as e:
logger.debug(f'Unable to parse URL: {e}')
return False
| null |
test_write_file_errs_outside_root_dir
|
"""Test the WriteFile tool when a root dir is specified."""
with TemporaryDirectory() as temp_dir:
tool = WriteFileTool(root_dir=temp_dir)
result = tool.run({'file_path': '../file.txt', 'text': 'Hello, world!'})
assert result == INVALID_PATH_TEMPLATE.format(arg_name='file_path',
value='../file.txt')
|
def test_write_file_errs_outside_root_dir() ->None:
"""Test the WriteFile tool when a root dir is specified."""
with TemporaryDirectory() as temp_dir:
tool = WriteFileTool(root_dir=temp_dir)
result = tool.run({'file_path': '../file.txt', 'text': 'Hello, world!'}
)
assert result == INVALID_PATH_TEMPLATE.format(arg_name='file_path',
value='../file.txt')
|
Test the WriteFile tool when a root dir is specified.
|
_import_openllm
|
from langchain_community.llms.openllm import OpenLLM
return OpenLLM
|
def _import_openllm() ->Any:
from langchain_community.llms.openllm import OpenLLM
return OpenLLM
| null |
on_tool_start
|
self._state = LLMThoughtState.RUNNING_TOOL
tool_name = serialized['name']
self._last_tool = ToolRecord(name=tool_name, input_str=input_str)
self._container.update(new_label=self._labeler.get_tool_label(
    self._last_tool, is_complete=False))
|
def on_tool_start(self, serialized: Dict[str, Any], input_str: str, **
kwargs: Any) ->None:
self._state = LLMThoughtState.RUNNING_TOOL
tool_name = serialized['name']
self._last_tool = ToolRecord(name=tool_name, input_str=input_str)
    self._container.update(new_label=self._labeler.get_tool_label(
        self._last_tool, is_complete=False))
| null |
analyze_text
|
"""Analyze text using textstat and spacy.
Parameters:
text (str): The text to analyze.
nlp (spacy.lang): The spacy language model to use for visualization.
Returns:
(dict): A dictionary containing the complexity metrics and visualization
files serialized to HTML string.
"""
resp: Dict[str, Any] = {}
textstat = import_textstat()
spacy = import_spacy()
text_complexity_metrics = {
    'flesch_reading_ease': textstat.flesch_reading_ease(text),
    'flesch_kincaid_grade': textstat.flesch_kincaid_grade(text),
    'smog_index': textstat.smog_index(text),
    'coleman_liau_index': textstat.coleman_liau_index(text),
    'automated_readability_index': textstat.automated_readability_index(text),
    'dale_chall_readability_score': textstat.dale_chall_readability_score(text),
    'difficult_words': textstat.difficult_words(text),
    'linsear_write_formula': textstat.linsear_write_formula(text),
    'gunning_fog': textstat.gunning_fog(text),
    'fernandez_huerta': textstat.fernandez_huerta(text),
    'szigriszt_pazos': textstat.szigriszt_pazos(text),
    'gutierrez_polini': textstat.gutierrez_polini(text),
    'crawford': textstat.crawford(text),
    'gulpease_index': textstat.gulpease_index(text),
    'osman': textstat.osman(text),
}
resp.update({'text_complexity_metrics': text_complexity_metrics})
resp.update(text_complexity_metrics)
if nlp is not None:
doc = nlp(text)
dep_out = spacy.displacy.render(doc, style='dep', jupyter=False, page=True)
ent_out = spacy.displacy.render(doc, style='ent', jupyter=False, page=True)
text_visualizations = {'dependency_tree': dep_out, 'entities': ent_out}
resp.update(text_visualizations)
return resp
|
def analyze_text(text: str, nlp: Any=None) ->dict:
"""Analyze text using textstat and spacy.
Parameters:
text (str): The text to analyze.
nlp (spacy.lang): The spacy language model to use for visualization.
Returns:
(dict): A dictionary containing the complexity metrics and visualization
files serialized to HTML string.
"""
resp: Dict[str, Any] = {}
textstat = import_textstat()
spacy = import_spacy()
    text_complexity_metrics = {
        'flesch_reading_ease': textstat.flesch_reading_ease(text),
        'flesch_kincaid_grade': textstat.flesch_kincaid_grade(text),
        'smog_index': textstat.smog_index(text),
        'coleman_liau_index': textstat.coleman_liau_index(text),
        'automated_readability_index': textstat.automated_readability_index(text),
        'dale_chall_readability_score': textstat.dale_chall_readability_score(text),
        'difficult_words': textstat.difficult_words(text),
        'linsear_write_formula': textstat.linsear_write_formula(text),
        'gunning_fog': textstat.gunning_fog(text),
        'fernandez_huerta': textstat.fernandez_huerta(text),
        'szigriszt_pazos': textstat.szigriszt_pazos(text),
        'gutierrez_polini': textstat.gutierrez_polini(text),
        'crawford': textstat.crawford(text),
        'gulpease_index': textstat.gulpease_index(text),
        'osman': textstat.osman(text),
    }
resp.update({'text_complexity_metrics': text_complexity_metrics})
resp.update(text_complexity_metrics)
if nlp is not None:
doc = nlp(text)
dep_out = spacy.displacy.render(doc, style='dep', jupyter=False,
page=True)
ent_out = spacy.displacy.render(doc, style='ent', jupyter=False,
page=True)
text_visualizations = {'dependency_tree': dep_out, 'entities': ent_out}
resp.update(text_visualizations)
return resp
|
Analyze text using textstat and spacy.
Parameters:
text (str): The text to analyze.
nlp (spacy.lang): The spacy language model to use for visualization.
Returns:
(dict): A dictionary containing the complexity metrics and visualization
files serialized to HTML string.
|
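A usage sketch, assuming textstat and spacy are installed and an English pipeline such as en_core_web_sm has been downloaded (the sample sentence is illustrative):

import spacy

nlp = spacy.load('en_core_web_sm')
resp = analyze_text('LangChain makes building LLM apps easier.', nlp=nlp)
print(resp['text_complexity_metrics']['flesch_reading_ease'])
print(resp['dependency_tree'][:60])  # HTML string rendered by displacy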
coerce_to_runnable
|
"""Coerce a runnable-like object into a Runnable.
Args:
thing: A runnable-like object.
Returns:
A Runnable.
"""
if isinstance(thing, Runnable):
return thing
elif inspect.isasyncgenfunction(thing) or inspect.isgeneratorfunction(thing):
return RunnableGenerator(thing)
elif callable(thing):
return RunnableLambda(cast(Callable[[Input], Output], thing))
elif isinstance(thing, dict):
return cast(Runnable[Input, Output], RunnableParallel(thing))
else:
raise TypeError(
f'Expected a Runnable, callable or dict. Instead got an unsupported type: {type(thing)}'
)
|
def coerce_to_runnable(thing: RunnableLike) ->Runnable[Input, Output]:
"""Coerce a runnable-like object into a Runnable.
Args:
thing: A runnable-like object.
Returns:
A Runnable.
"""
if isinstance(thing, Runnable):
return thing
elif inspect.isasyncgenfunction(thing) or inspect.isgeneratorfunction(thing
):
return RunnableGenerator(thing)
elif callable(thing):
return RunnableLambda(cast(Callable[[Input], Output], thing))
elif isinstance(thing, dict):
return cast(Runnable[Input, Output], RunnableParallel(thing))
else:
raise TypeError(
f'Expected a Runnable, callable or dict. Instead got an unsupported type: {type(thing)}'
)
|
Coerce a runnable-like object into a Runnable.
Args:
thing: A runnable-like object.
Returns:
A Runnable.
|
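A sketch of the coercion in action via runnable composition, which applies coerce_to_runnable to each piece (the values below are illustrative):

from langchain_core.runnables import RunnableLambda

double = RunnableLambda(lambda x: x * 2)  # a plain callable becomes a RunnableLambda
# A dict composed into a chain is coerced to a RunnableParallel whose
# branches all receive the same input.
chain = double | {'plus_one': lambda x: x + 1, 'minus_one': lambda x: x - 1}
print(chain.invoke(3))  # {'plus_one': 7, 'minus_one': 5}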
_get_elements
|
from unstructured.partition.rtf import partition_rtf
return partition_rtf(filename=self.file_path, **self.unstructured_kwargs)
|
def _get_elements(self) ->List:
from unstructured.partition.rtf import partition_rtf
return partition_rtf(filename=self.file_path, **self.unstructured_kwargs)
| null |
similarity_search_with_score_by_vector
|
"""
Perform a similarity search in the Neo4j database using a
given vector and return the top k similar documents with their scores.
This method uses a Cypher query to find the top k documents that
are most similar to a given embedding. The similarity is measured
using a vector index in the Neo4j database. The results are returned
as a list of tuples, each containing a Document object and
its similarity score.
Args:
embedding (List[float]): The embedding vector to compare against.
k (int, optional): The number of top similar documents to retrieve.
Returns:
List[Tuple[Document, float]]: A list of tuples, each containing
a Document object and its similarity score.
"""
default_retrieval = (
f'RETURN node.`{self.text_node_property}` AS text, score, node {{.*, `{self.text_node_property}`: Null, `{self.embedding_node_property}`: Null, id: Null }} AS metadata'
)
retrieval_query = (self.retrieval_query if self.retrieval_query else
default_retrieval)
read_query = _get_search_index_query(self.search_type) + retrieval_query
parameters = {'index': self.index_name, 'k': k, 'embedding': embedding,
'keyword_index': self.keyword_index_name, 'query': remove_lucene_chars(
kwargs['query'])}
results = self.query(read_query, params=parameters)
docs = [(Document(page_content=result['text'], metadata={k: v for k, v in
result['metadata'].items() if v is not None}), result['score']) for
result in results]
return docs
|
def similarity_search_with_score_by_vector(self, embedding: List[float], k:
int=4, **kwargs: Any) ->List[Tuple[Document, float]]:
"""
Perform a similarity search in the Neo4j database using a
given vector and return the top k similar documents with their scores.
This method uses a Cypher query to find the top k documents that
are most similar to a given embedding. The similarity is measured
using a vector index in the Neo4j database. The results are returned
as a list of tuples, each containing a Document object and
its similarity score.
Args:
embedding (List[float]): The embedding vector to compare against.
k (int, optional): The number of top similar documents to retrieve.
Returns:
List[Tuple[Document, float]]: A list of tuples, each containing
a Document object and its similarity score.
"""
default_retrieval = (
f'RETURN node.`{self.text_node_property}` AS text, score, node {{.*, `{self.text_node_property}`: Null, `{self.embedding_node_property}`: Null, id: Null }} AS metadata'
)
retrieval_query = (self.retrieval_query if self.retrieval_query else
default_retrieval)
read_query = _get_search_index_query(self.search_type) + retrieval_query
parameters = {'index': self.index_name, 'k': k, 'embedding': embedding,
'keyword_index': self.keyword_index_name, 'query':
remove_lucene_chars(kwargs['query'])}
results = self.query(read_query, params=parameters)
docs = [(Document(page_content=result['text'], metadata={k: v for k, v in
result['metadata'].items() if v is not None}), result['score']) for
result in results]
return docs
|
Perform a similarity search in the Neo4j database using a
given vector and return the top k similar documents with their scores.
This method uses a Cypher query to find the top k documents that
are most similar to a given embedding. The similarity is measured
using a vector index in the Neo4j database. The results are returned
as a list of tuples, each containing a Document object and
its similarity score.
Args:
embedding (List[float]): The embedding vector to compare against.
k (int, optional): The number of top similar documents to retrieve.
Returns:
List[Tuple[Document, float]]: A list of tuples, each containing
a Document object and its similarity score.
|
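A hedged call sketch; note the body reads kwargs['query'] unconditionally, so a raw query string is passed alongside the embedding (`store` and `embeddings` are assumed to be existing Neo4jVector and Embeddings instances):

query = 'what is a vector index?'
vector = embeddings.embed_query(query)  # `embeddings` is an assumption
results = store.similarity_search_with_score_by_vector(
    embedding=vector, k=4, query=query)
for doc, score in results:
    print(round(score, 3), doc.page_content[:60])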
_run
|
try:
read_path = self.get_relative_path(file_path)
except FileValidationError:
return INVALID_PATH_TEMPLATE.format(arg_name='file_path', value=file_path)
if not read_path.exists():
return f'Error: no such file or directory: {file_path}'
try:
with read_path.open('r', encoding='utf-8') as f:
content = f.read()
return content
except Exception as e:
return 'Error: ' + str(e)
|
def _run(self, file_path: str, run_manager: Optional[
CallbackManagerForToolRun]=None) ->str:
try:
read_path = self.get_relative_path(file_path)
except FileValidationError:
return INVALID_PATH_TEMPLATE.format(arg_name='file_path', value=
file_path)
if not read_path.exists():
return f'Error: no such file or directory: {file_path}'
try:
with read_path.open('r', encoding='utf-8') as f:
content = f.read()
return content
except Exception as e:
return 'Error: ' + str(e)
| null |
out_gen
|
for line in response.iter_lines():
if line and line.strip() != b'data: [DONE]':
line = line.decode('utf-8')
msg, final_line = call.postprocess(line, stop=stop)
yield msg
if final_line:
break
self._try_raise(response)
|
def out_gen() ->Generator[dict, Any, Any]:
for line in response.iter_lines():
if line and line.strip() != b'data: [DONE]':
line = line.decode('utf-8')
msg, final_line = call.postprocess(line, stop=stop)
yield msg
if final_line:
break
self._try_raise(response)
| null |
max_marginal_relevance_search
|
"""Return docs selected using the maximal marginal relevance.
Maximal marginal relevance optimizes for similarity to query AND diversity
among selected documents.
Args:
query: Text to look up documents similar to.
k: Number of Documents to return. Defaults to 4.
fetch_k: Number of Documents to fetch to pass to MMR algorithm.
Defaults to 20.
lambda_mult: Number between 0 and 1 that determines the degree
of diversity among the results with 0 corresponding
to maximum diversity and 1 to minimum diversity.
Defaults to 0.5.
filter: Filter by metadata. Defaults to None.
search_params: Additional search params
score_threshold:
Define a minimal score threshold for the result.
If defined, less similar results will not be returned.
Score of the returned result might be higher or smaller than the
threshold depending on the Distance function used.
E.g. for cosine similarity only higher scores will be returned.
consistency:
Read consistency of the search. Defines how many replicas should be
queried before returning the result.
Values:
- int - number of replicas to query, values should present in all
queried replicas
- 'majority' - query all replicas, but return values present in the
majority of replicas
- 'quorum' - query the majority of replicas, return values present in
all of them
- 'all' - query all replicas, and return values present in all replicas
**kwargs:
Any other named arguments to pass through to QdrantClient.search()
Returns:
List of Documents selected by maximal marginal relevance.
"""
query_embedding = self._embed_query(query)
return self.max_marginal_relevance_search_by_vector(query_embedding, k=k,
fetch_k=fetch_k, lambda_mult=lambda_mult, filter=filter, search_params=
search_params, score_threshold=score_threshold, consistency=consistency,
**kwargs)
|
def max_marginal_relevance_search(self, query: str, k: int=4, fetch_k: int=
20, lambda_mult: float=0.5, filter: Optional[MetadataFilter]=None,
search_params: Optional[common_types.SearchParams]=None,
score_threshold: Optional[float]=None, consistency: Optional[
common_types.ReadConsistency]=None, **kwargs: Any) ->List[Document]:
"""Return docs selected using the maximal marginal relevance.
Maximal marginal relevance optimizes for similarity to query AND diversity
among selected documents.
Args:
query: Text to look up documents similar to.
k: Number of Documents to return. Defaults to 4.
fetch_k: Number of Documents to fetch to pass to MMR algorithm.
Defaults to 20.
lambda_mult: Number between 0 and 1 that determines the degree
of diversity among the results with 0 corresponding
to maximum diversity and 1 to minimum diversity.
Defaults to 0.5.
filter: Filter by metadata. Defaults to None.
search_params: Additional search params
score_threshold:
Define a minimal score threshold for the result.
If defined, less similar results will not be returned.
Score of the returned result might be higher or smaller than the
threshold depending on the Distance function used.
E.g. for cosine similarity only higher scores will be returned.
consistency:
Read consistency of the search. Defines how many replicas should be
queried before returning the result.
Values:
- int - number of replicas to query, values should present in all
queried replicas
- 'majority' - query all replicas, but return values present in the
majority of replicas
- 'quorum' - query the majority of replicas, return values present in
all of them
- 'all' - query all replicas, and return values present in all replicas
**kwargs:
Any other named arguments to pass through to QdrantClient.search()
Returns:
List of Documents selected by maximal marginal relevance.
"""
query_embedding = self._embed_query(query)
return self.max_marginal_relevance_search_by_vector(query_embedding, k=
k, fetch_k=fetch_k, lambda_mult=lambda_mult, filter=filter,
search_params=search_params, score_threshold=score_threshold,
consistency=consistency, **kwargs)
|
Return docs selected using the maximal marginal relevance.
Maximal marginal relevance optimizes for similarity to query AND diversity
among selected documents.
Args:
query: Text to look up documents similar to.
k: Number of Documents to return. Defaults to 4.
fetch_k: Number of Documents to fetch to pass to MMR algorithm.
Defaults to 20.
lambda_mult: Number between 0 and 1 that determines the degree
of diversity among the results with 0 corresponding
to maximum diversity and 1 to minimum diversity.
Defaults to 0.5.
filter: Filter by metadata. Defaults to None.
search_params: Additional search params
score_threshold:
Define a minimal score threshold for the result.
If defined, less similar results will not be returned.
Score of the returned result might be higher or smaller than the
threshold depending on the Distance function used.
E.g. for cosine similarity only higher scores will be returned.
consistency:
Read consistency of the search. Defines how many replicas should be
queried before returning the result.
Values:
- int - number of replicas to query, values should present in all
queried replicas
- 'majority' - query all replicas, but return values present in the
majority of replicas
- 'quorum' - query the majority of replicas, return values present in
all of them
- 'all' - query all replicas, and return values present in all replicas
**kwargs:
Any other named arguments to pass through to QdrantClient.search()
Returns:
List of Documents selected by maximal marginal relevance.
|
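A usage sketch against an existing Qdrant vector store (`qdrant` is an assumption): fetch_k candidates are retrieved first, then k results are kept according to the relevance/diversity trade-off lambda_mult:

docs = qdrant.max_marginal_relevance_search(
    'climate policy', k=4, fetch_k=20, lambda_mult=0.5)
for doc in docs:
    print(doc.metadata.get('source'), doc.page_content[:60])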
_generate_embeddings
|
"""Compute embeddings using EdenAi api."""
url = 'https://api.edenai.run/v2/text/embeddings'
headers = {
    'accept': 'application/json',
    'content-type': 'application/json',
    'authorization': f'Bearer {self.edenai_api_key.get_secret_value()}',
    'User-Agent': self.get_user_agent(),
}
payload: Dict[str, Any] = {'texts': texts, 'providers': self.provider}
if self.model is not None:
payload['settings'] = {self.provider: self.model}
request = Requests(headers=headers)
response = request.post(url=url, data=payload)
if response.status_code >= 500:
raise Exception(f'EdenAI Server: Error {response.status_code}')
elif response.status_code >= 400:
raise ValueError(f'EdenAI received an invalid payload: {response.text}')
elif response.status_code != 200:
raise Exception(
f'EdenAI returned an unexpected response with status {response.status_code}: {response.text}'
)
temp = response.json()
provider_response = temp[self.provider]
if provider_response.get('status') == 'fail':
err_msg = provider_response.get('error', {}).get('message')
raise Exception(err_msg)
embeddings = []
for embed_item in temp[self.provider]['items']:
embedding = embed_item['embedding']
embeddings.append(embedding)
return embeddings
|
def _generate_embeddings(self, texts: List[str]) ->List[List[float]]:
"""Compute embeddings using EdenAi api."""
url = 'https://api.edenai.run/v2/text/embeddings'
    headers = {
        'accept': 'application/json',
        'content-type': 'application/json',
        'authorization': f'Bearer {self.edenai_api_key.get_secret_value()}',
        'User-Agent': self.get_user_agent(),
    }
payload: Dict[str, Any] = {'texts': texts, 'providers': self.provider}
if self.model is not None:
payload['settings'] = {self.provider: self.model}
request = Requests(headers=headers)
response = request.post(url=url, data=payload)
if response.status_code >= 500:
raise Exception(f'EdenAI Server: Error {response.status_code}')
elif response.status_code >= 400:
raise ValueError(f'EdenAI received an invalid payload: {response.text}'
)
elif response.status_code != 200:
raise Exception(
f'EdenAI returned an unexpected response with status {response.status_code}: {response.text}'
)
temp = response.json()
provider_response = temp[self.provider]
if provider_response.get('status') == 'fail':
err_msg = provider_response.get('error', {}).get('message')
raise Exception(err_msg)
embeddings = []
for embed_item in temp[self.provider]['items']:
embedding = embed_item['embedding']
embeddings.append(embedding)
return embeddings
|
Compute embeddings using EdenAi api.
|
update_repo
|
repo_path = _get_repo_path(gitstring, ref, repo_dir)
if repo_path.exists():
try:
repo = Repo(repo_path)
if repo.active_branch.name != ref:
raise ValueError()
repo.remotes.origin.pull()
except Exception:
shutil.rmtree(repo_path)
Repo.clone_from(gitstring, repo_path, branch=ref, depth=1)
else:
Repo.clone_from(gitstring, repo_path, branch=ref, depth=1)
return repo_path
|
def update_repo(gitstring: str, ref: Optional[str], repo_dir: Path) ->Path:
repo_path = _get_repo_path(gitstring, ref, repo_dir)
if repo_path.exists():
try:
repo = Repo(repo_path)
if repo.active_branch.name != ref:
raise ValueError()
repo.remotes.origin.pull()
except Exception:
shutil.rmtree(repo_path)
Repo.clone_from(gitstring, repo_path, branch=ref, depth=1)
else:
Repo.clone_from(gitstring, repo_path, branch=ref, depth=1)
return repo_path
| null |
create_prompt
|
"""Create prompt in the style of the zero-shot agent.
Args:
tools: List of tools the agent will have access to, used to format the
prompt.
prefix: String to put before the list of tools.
suffix: String to put after the list of tools.
ai_prefix: String to use before AI output.
human_prefix: String to use before human output.
input_variables: List of input variables the final prompt will expect.
Returns:
A PromptTemplate with the template assembled from the pieces here.
"""
tool_strings = '\n'.join([f'> {tool.name}: {tool.description}' for tool in
tools])
tool_names = ', '.join([tool.name for tool in tools])
format_instructions = format_instructions.format(tool_names=tool_names,
ai_prefix=ai_prefix, human_prefix=human_prefix)
template = '\n\n'.join([prefix, tool_strings, format_instructions, suffix])
if input_variables is None:
input_variables = ['input', 'chat_history', 'agent_scratchpad']
return PromptTemplate(template=template, input_variables=input_variables)
|
@classmethod
def create_prompt(cls, tools: Sequence[BaseTool], prefix: str=PREFIX,
suffix: str=SUFFIX, format_instructions: str=FORMAT_INSTRUCTIONS,
ai_prefix: str='AI', human_prefix: str='Human', input_variables:
Optional[List[str]]=None) ->PromptTemplate:
"""Create prompt in the style of the zero-shot agent.
Args:
tools: List of tools the agent will have access to, used to format the
prompt.
prefix: String to put before the list of tools.
suffix: String to put after the list of tools.
ai_prefix: String to use before AI output.
human_prefix: String to use before human output.
input_variables: List of input variables the final prompt will expect.
Returns:
A PromptTemplate with the template assembled from the pieces here.
"""
tool_strings = '\n'.join([f'> {tool.name}: {tool.description}' for tool in
tools])
tool_names = ', '.join([tool.name for tool in tools])
format_instructions = format_instructions.format(tool_names=tool_names,
ai_prefix=ai_prefix, human_prefix=human_prefix)
template = '\n\n'.join([prefix, tool_strings, format_instructions, suffix])
if input_variables is None:
input_variables = ['input', 'chat_history', 'agent_scratchpad']
return PromptTemplate(template=template, input_variables=input_variables)
|
Create prompt in the style of the zero-shot agent.
Args:
tools: List of tools the agent will have access to, used to format the
prompt.
prefix: String to put before the list of tools.
suffix: String to put after the list of tools.
ai_prefix: String to use before AI output.
human_prefix: String to use before human output.
input_variables: List of input variables the final prompt will expect.
Returns:
A PromptTemplate with the template assembled from the pieces here.
|
output_keys
|
"""Return command.
:meta private:
"""
return [self.output_key]
|
@property
def output_keys(self) ->List[str]:
"""Return command.
:meta private:
"""
return [self.output_key]
|
Return command.
:meta private:
|
get_history_label
|
"""Return a markdown label for the special 'history' container
that contains overflow thoughts.
"""
return f'{HISTORY_EMOJI} **History**'
|
def get_history_label(self) ->str:
"""Return a markdown label for the special 'history' container
that contains overflow thoughts.
"""
return f'{HISTORY_EMOJI} **History**'
|
Return a markdown label for the special 'history' container
that contains overflow thoughts.
|
test_pandas_output_parser_col_multi_elem
|
expected_output = {'chicken': pd.Series([1, 2], name='chicken', dtype='int64')}
actual_output = parser.parse('column:chicken[0, 1]')
for key in actual_output.keys():
assert expected_output['chicken'].equals(actual_output[key])
|
def test_pandas_output_parser_col_multi_elem() ->None:
    expected_output = {'chicken': pd.Series([1, 2], name='chicken', dtype='int64')}
    actual_output = parser.parse('column:chicken[0, 1]')
for key in actual_output.keys():
assert expected_output['chicken'].equals(actual_output[key])
| null |
combine_docs
|
"""Stuff all documents into one prompt and pass to LLM.
Args:
docs: List of documents to join together into one variable
callbacks: Optional callbacks to pass along
**kwargs: additional parameters to use to get inputs to LLMChain.
Returns:
The first element returned is the single string output. The second
element returned is a dictionary of other keys to return.
"""
inputs = self._get_inputs(docs, **kwargs)
return self.llm_chain.predict(callbacks=callbacks, **inputs), {}
|
def combine_docs(self, docs: List[Document], callbacks: Callbacks=None, **
kwargs: Any) ->Tuple[str, dict]:
"""Stuff all documents into one prompt and pass to LLM.
Args:
docs: List of documents to join together into one variable
callbacks: Optional callbacks to pass along
**kwargs: additional parameters to use to get inputs to LLMChain.
Returns:
The first element returned is the single string output. The second
element returned is a dictionary of other keys to return.
"""
inputs = self._get_inputs(docs, **kwargs)
return self.llm_chain.predict(callbacks=callbacks, **inputs), {}
|
Stuff all documents into one prompt and pass to LLM.
Args:
docs: List of documents to join together into one variable
callbacks: Optional callbacks to pass along
**kwargs: additional parameters to use to get inputs to LLMChain.
Returns:
The first element returned is the single string output. The second
element returned is a dictionary of other keys to return.
|
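A call sketch, assuming `chain` is an existing stuff-style documents chain exposing this method; the second tuple element is the (empty) dict of extra outputs:

from langchain_core.documents import Document

docs = [Document(page_content='foo'), Document(page_content='bar')]
output, extra = chain.combine_docs(docs)
print(output, extra)  # extra == {}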
_import_edenai_EdenAiParsingIDTool
|
from langchain_community.tools.edenai import EdenAiParsingIDTool
return EdenAiParsingIDTool
|
def _import_edenai_EdenAiParsingIDTool() ->Any:
from langchain_community.tools.edenai import EdenAiParsingIDTool
return EdenAiParsingIDTool
| null |
_generate_docs_page
|
for p in result.pages:
content = ' '.join([line.content for line in p.lines])
d = Document(page_content=content, metadata={'page': p.page_number})
yield d
|
def _generate_docs_page(self, result: Any) ->Iterator[Document]:
for p in result.pages:
content = ' '.join([line.content for line in p.lines])
d = Document(page_content=content, metadata={'page': p.page_number})
yield d
| null |
partial
|
"""Return a partial of the prompt template."""
prompt_dict = self.__dict__.copy()
prompt_dict['input_variables'] = list(set(self.input_variables).difference(
kwargs))
prompt_dict['partial_variables'] = {**self.partial_variables, **kwargs}
return type(self)(**prompt_dict)
|
def partial(self, **kwargs: Union[str, Callable[[], str]]
) ->BasePromptTemplate:
"""Return a partial of the prompt template."""
prompt_dict = self.__dict__.copy()
prompt_dict['input_variables'] = list(set(self.input_variables).
difference(kwargs))
prompt_dict['partial_variables'] = {**self.partial_variables, **kwargs}
return type(self)(**prompt_dict)
|
Return a partial of the prompt template.
|
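A concrete example using the public PromptTemplate API: partialing removes the bound names from input_variables while keeping them available at format time:

from langchain_core.prompts import PromptTemplate

prompt = PromptTemplate.from_template('{product} made by {company}')
partial_prompt = prompt.partial(company='Acme')
print(partial_prompt.input_variables)  # ['product']
print(partial_prompt.format(product='anvils'))  # anvils made by Acme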
test_google_generativeai_generate
|
n = 1 if model_name == 'gemini-pro' else 2
if model_name:
llm = GooglePalm(temperature=0.3, n=n, model_name=model_name)
else:
llm = GooglePalm(temperature=0.3, n=n)
output = llm.generate(['Say foo:'])
assert isinstance(output, LLMResult)
assert len(output.generations) == 1
assert len(output.generations[0]) == n
|
@pytest.mark.parametrize('model_name', model_names)
def test_google_generativeai_generate(model_name: str) ->None:
n = 1 if model_name == 'gemini-pro' else 2
if model_name:
llm = GooglePalm(temperature=0.3, n=n, model_name=model_name)
else:
llm = GooglePalm(temperature=0.3, n=n)
output = llm.generate(['Say foo:'])
assert isinstance(output, LLMResult)
assert len(output.generations) == 1
assert len(output.generations[0]) == n
| null |
test_predict_and_parse
|
"""Test parsing ability."""
prompt = PromptTemplate(input_variables=['foo'], template='{foo}',
output_parser=FakeOutputParser())
llm = FakeLLM(queries={'foo': 'foo bar'})
chain = LLMChain(prompt=prompt, llm=llm)
output = chain.predict_and_parse(foo='foo')
assert output == ['foo', 'bar']
|
def test_predict_and_parse() ->None:
"""Test parsing ability."""
prompt = PromptTemplate(input_variables=['foo'], template='{foo}',
output_parser=FakeOutputParser())
llm = FakeLLM(queries={'foo': 'foo bar'})
chain = LLMChain(prompt=prompt, llm=llm)
output = chain.predict_and_parse(foo='foo')
assert output == ['foo', 'bar']
|
Test parsing ability.
|
_type
|
return 'structured'
|
@property
def _type(self) ->str:
return 'structured'
| null |
test_timescalevector_with_metadatas_with_scores
|
"""Test end to end construction and search."""
texts = ['foo', 'bar', 'baz']
metadatas = [{'page': str(i)} for i in range(len(texts))]
docsearch = TimescaleVector.from_texts(
    texts=texts,
    collection_name='test_collection',
    embedding=FakeEmbeddingsWithAdaDimension(),
    metadatas=metadatas,
    service_url=SERVICE_URL,
    pre_delete_collection=True)
output = docsearch.similarity_search_with_score('foo', k=1)
assert output == [(Document(page_content='foo', metadata={'page': '0'}), 0.0)]
|
def test_timescalevector_with_metadatas_with_scores() ->None:
"""Test end to end construction and search."""
texts = ['foo', 'bar', 'baz']
metadatas = [{'page': str(i)} for i in range(len(texts))]
    docsearch = TimescaleVector.from_texts(
        texts=texts,
        collection_name='test_collection',
        embedding=FakeEmbeddingsWithAdaDimension(),
        metadatas=metadatas,
        service_url=SERVICE_URL,
        pre_delete_collection=True)
output = docsearch.similarity_search_with_score('foo', k=1)
assert output == [(Document(page_content='foo', metadata={'page': '0'}),
0.0)]
|
Test end to end construction and search.
|
parse
|
includes_answer = FINAL_ANSWER_ACTION in text
regex = (
'Action\\s*\\d*\\s*:[\\s]*(.*?)[\\s]*Action\\s*\\d*\\s*Input\\s*\\d*\\s*:[\\s]*(.*)'
)
action_match = re.search(regex, text, re.DOTALL)
if action_match:
if includes_answer:
raise OutputParserException(
f'{FINAL_ANSWER_AND_PARSABLE_ACTION_ERROR_MESSAGE}: {text}')
action = action_match.group(1).strip()
action_input = action_match.group(2)
tool_input = action_input.strip(' ')
tool_input = tool_input.strip('"')
return AgentAction(action, tool_input, text)
elif includes_answer:
return AgentFinish({'output': text.split(FINAL_ANSWER_ACTION)[-1].strip()}, text)
if not re.search('Action\\s*\\d*\\s*:[\\s]*(.*?)', text, re.DOTALL):
raise OutputParserException(f'Could not parse LLM output: `{text}`',
observation=MISSING_ACTION_AFTER_THOUGHT_ERROR_MESSAGE, llm_output=
text, send_to_llm=True)
elif not re.search('[\\s]*Action\\s*\\d*\\s*Input\\s*\\d*\\s*:[\\s]*(.*)',
text, re.DOTALL):
raise OutputParserException(f'Could not parse LLM output: `{text}`',
observation=MISSING_ACTION_INPUT_AFTER_ACTION_ERROR_MESSAGE,
llm_output=text, send_to_llm=True)
else:
raise OutputParserException(f'Could not parse LLM output: `{text}`')
|
def parse(self, text: str) ->Union[AgentAction, AgentFinish]:
includes_answer = FINAL_ANSWER_ACTION in text
regex = (
'Action\\s*\\d*\\s*:[\\s]*(.*?)[\\s]*Action\\s*\\d*\\s*Input\\s*\\d*\\s*:[\\s]*(.*)'
)
action_match = re.search(regex, text, re.DOTALL)
if action_match:
if includes_answer:
raise OutputParserException(
f'{FINAL_ANSWER_AND_PARSABLE_ACTION_ERROR_MESSAGE}: {text}')
action = action_match.group(1).strip()
action_input = action_match.group(2)
tool_input = action_input.strip(' ')
tool_input = tool_input.strip('"')
return AgentAction(action, tool_input, text)
elif includes_answer:
        return AgentFinish({'output': text.split(FINAL_ANSWER_ACTION)[-1].strip()}, text)
if not re.search('Action\\s*\\d*\\s*:[\\s]*(.*?)', text, re.DOTALL):
raise OutputParserException(f'Could not parse LLM output: `{text}`',
observation=MISSING_ACTION_AFTER_THOUGHT_ERROR_MESSAGE,
llm_output=text, send_to_llm=True)
elif not re.search('[\\s]*Action\\s*\\d*\\s*Input\\s*\\d*\\s*:[\\s]*(.*)',
text, re.DOTALL):
raise OutputParserException(f'Could not parse LLM output: `{text}`',
observation=MISSING_ACTION_INPUT_AFTER_ACTION_ERROR_MESSAGE,
llm_output=text, send_to_llm=True)
else:
raise OutputParserException(f'Could not parse LLM output: `{text}`')
| null |
yield_keys
|
"""Get an iterator over keys that match the given prefix.
Args:
prefix (Optional[str]): The prefix to match.
Returns:
Iterator[str]: An iterator over keys that match the given prefix.
"""
prefix_path = self._get_full_path(prefix) if prefix else self.root_path
for file in prefix_path.rglob('*'):
if file.is_file():
relative_path = file.relative_to(self.root_path)
yield str(relative_path)
|
def yield_keys(self, prefix: Optional[str]=None) ->Iterator[str]:
"""Get an iterator over keys that match the given prefix.
Args:
prefix (Optional[str]): The prefix to match.
Returns:
Iterator[str]: An iterator over keys that match the given prefix.
"""
prefix_path = self._get_full_path(prefix) if prefix else self.root_path
for file in prefix_path.rglob('*'):
if file.is_file():
relative_path = file.relative_to(self.root_path)
yield str(relative_path)
|
Get an iterator over keys that match the given prefix.
Args:
prefix (Optional[str]): The prefix to match.
Returns:
Iterator[str]: An iterator over keys that match the given prefix.
|
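A usage sketch with LocalFileStore, which implements this method; the root directory and keys are illustrative:

from langchain.storage import LocalFileStore

store = LocalFileStore('./my_store')  # illustrative root directory
store.mset([('alpha', b'1'), ('beta', b'2')])
print(sorted(store.yield_keys()))  # ['alpha', 'beta']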
embeddings
|
return self._embedding
|
@property
def embeddings(self) ->Optional[Embeddings]:
return self._embedding
| null |
from_documents
|
"""Create a Chroma vectorstore from a list of documents.
If a persist_directory is specified, the collection will be persisted there.
Otherwise, the data will be ephemeral in-memory.
Args:
collection_name (str): Name of the collection to create.
persist_directory (Optional[str]): Directory to persist the collection.
ids (Optional[List[str]]): List of document IDs. Defaults to None.
documents (List[Document]): List of documents to add to the vectorstore.
embedding (Optional[Embeddings]): Embedding function. Defaults to None.
client_settings (Optional[chromadb.config.Settings]): Chroma client settings
collection_metadata (Optional[Dict]): Collection configurations.
Defaults to None.
Returns:
Chroma: Chroma vectorstore.
"""
texts = [doc.page_content for doc in documents]
metadatas = [doc.metadata for doc in documents]
return cls.from_texts(texts=texts, embedding=embedding, metadatas=metadatas,
                      ids=ids, collection_name=collection_name,
                      persist_directory=persist_directory,
                      client_settings=client_settings, client=client,
                      collection_metadata=collection_metadata, **kwargs)
|
@classmethod
def from_documents(cls: Type[Chroma], documents: List[Document], embedding:
Optional[Embeddings]=None, ids: Optional[List[str]]=None,
collection_name: str=_LANGCHAIN_DEFAULT_COLLECTION_NAME,
persist_directory: Optional[str]=None, client_settings: Optional[
chromadb.config.Settings]=None, client: Optional[chromadb.Client]=None,
collection_metadata: Optional[Dict]=None, **kwargs: Any) ->Chroma:
"""Create a Chroma vectorstore from a list of documents.
If a persist_directory is specified, the collection will be persisted there.
Otherwise, the data will be ephemeral in-memory.
Args:
collection_name (str): Name of the collection to create.
persist_directory (Optional[str]): Directory to persist the collection.
ids (Optional[List[str]]): List of document IDs. Defaults to None.
documents (List[Document]): List of documents to add to the vectorstore.
embedding (Optional[Embeddings]): Embedding function. Defaults to None.
client_settings (Optional[chromadb.config.Settings]): Chroma client settings
collection_metadata (Optional[Dict]): Collection configurations.
Defaults to None.
Returns:
Chroma: Chroma vectorstore.
"""
texts = [doc.page_content for doc in documents]
metadatas = [doc.metadata for doc in documents]
    return cls.from_texts(texts=texts, embedding=embedding, metadatas=metadatas,
                          ids=ids, collection_name=collection_name,
                          persist_directory=persist_directory,
                          client_settings=client_settings, client=client,
                          collection_metadata=collection_metadata, **kwargs)
|
Create a Chroma vectorstore from a list of documents.
If a persist_directory is specified, the collection will be persisted there.
Otherwise, the data will be ephemeral in-memory.
Args:
collection_name (str): Name of the collection to create.
persist_directory (Optional[str]): Directory to persist the collection.
ids (Optional[List[str]]): List of document IDs. Defaults to None.
documents (List[Document]): List of documents to add to the vectorstore.
embedding (Optional[Embeddings]): Embedding function. Defaults to None.
client_settings (Optional[chromadb.config.Settings]): Chroma client settings
collection_metadata (Optional[Dict]): Collection configurations.
Defaults to None.
Returns:
Chroma: Chroma vectorstore.
|
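A usage sketch assuming chromadb is installed; the document content and persist directory are illustrative:

from langchain_community.vectorstores import Chroma
from langchain_community.embeddings import OpenAIEmbeddings
from langchain_core.documents import Document

docs = [Document(page_content='hello', metadata={'source': 'greeting'})]
db = Chroma.from_documents(docs, OpenAIEmbeddings(), persist_directory='./chroma_db')
hits = db.similarity_search('hello', k=1)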
_import_azure_cosmos_db
|
from langchain_community.vectorstores.azure_cosmos_db import AzureCosmosDBVectorSearch
return AzureCosmosDBVectorSearch
|
def _import_azure_cosmos_db() ->Any:
from langchain_community.vectorstores.azure_cosmos_db import AzureCosmosDBVectorSearch
return AzureCosmosDBVectorSearch
| null |
_split_paragraph
|
"""
Split a paragraph into chunks of sentences, respecting the maximum size limit.
Args:
prompt_value (str): The input paragraph to be split into chunks.
max_size (int, optional): The maximum size limit in bytes for
each chunk. Defaults to 4096 (1024 * 4).
Returns:
List[List[str]]: A list of chunks, where each chunk is a list
of sentences.
Note:
This function validates the maximum sentence size based on service
limits using the '_toxicity_init_validate' method. It uses the NLTK
sentence tokenizer to split the paragraph into sentences.
Example:
paragraph = "This is a sample paragraph. It
contains multiple sentences. ..."
chunks = self._split_paragraph(paragraph, max_size=2048)
"""
nltk = self._toxicity_init_validate(max_size)
sentences = nltk.sent_tokenize(prompt_value)
chunks = list()
current_chunk = list()
current_size = 0
for sentence in sentences:
sentence_size = len(sentence.encode('utf-8'))
if current_size + sentence_size > max_size or len(current_chunk) >= 10:
if current_chunk:
chunks.append(current_chunk)
current_chunk = []
current_size = 0
current_chunk.append(sentence)
current_size += sentence_size
if current_chunk:
chunks.append(current_chunk)
return chunks
|
def _split_paragraph(self, prompt_value: str, max_size: int=1024 * 4) ->List[
List[str]]:
"""
Split a paragraph into chunks of sentences, respecting the maximum size limit.
Args:
prompt_value (str): The input paragraph to be split into chunks.
max_size (int, optional): The maximum size limit in bytes for
each chunk. Defaults to 4096 (1024 * 4).
Returns:
List[List[str]]: A list of chunks, where each chunk is a list
of sentences.
Note:
This function validates the maximum sentence size based on service
limits using the '_toxicity_init_validate' method. It uses the NLTK
sentence tokenizer to split the paragraph into sentences.
Example:
paragraph = "This is a sample paragraph. It
contains multiple sentences. ..."
chunks = self._split_paragraph(paragraph, max_size=2048)
"""
nltk = self._toxicity_init_validate(max_size)
sentences = nltk.sent_tokenize(prompt_value)
chunks = list()
current_chunk = list()
current_size = 0
for sentence in sentences:
sentence_size = len(sentence.encode('utf-8'))
if current_size + sentence_size > max_size or len(current_chunk) >= 10:
if current_chunk:
chunks.append(current_chunk)
current_chunk = []
current_size = 0
current_chunk.append(sentence)
current_size += sentence_size
if current_chunk:
chunks.append(current_chunk)
return chunks
|
Split a paragraph into chunks of sentences, respecting the maximum size limit.
Args:
prompt_value (str): The input paragraph to be split into chunks.
max_size (int, optional): The maximum size limit in bytes for
each chunk. Defaults to 4096 (1024 * 4).
Returns:
List[List[str]]: A list of chunks, where each chunk is a list
of sentences.
Note:
This function validates the maximum sentence size based on service
limits using the '_toxicity_init_validate' method. It uses the NLTK
sentence tokenizer to split the paragraph into sentences.
Example:
paragraph = "This is a sample paragraph. It
contains multiple sentences. ..."
chunks = self._split_paragraph(paragraph, max_size=2048)
|
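A standalone sketch of the same greedy packing logic, with the AWS service-limit validation swapped for a direct NLTK call; assumes nltk and its 'punkt' tokenizer data are installed:
from typing import List
import nltk  # requires: nltk.download('punkt')

def split_paragraph(prompt_value: str, max_size: int=1024 * 4) ->List[List[str]]:
    sentences = nltk.sent_tokenize(prompt_value)
    chunks: List[List[str]] = []
    current_chunk: List[str] = []
    current_size = 0
    for sentence in sentences:
        sentence_size = len(sentence.encode('utf-8'))
        # Close the chunk when the byte budget or the 10-sentence cap
        # would be exceeded.
        if current_size + sentence_size > max_size or len(current_chunk) >= 10:
            if current_chunk:
                chunks.append(current_chunk)
            current_chunk, current_size = [], 0
        current_chunk.append(sentence)
        current_size += sentence_size
    if current_chunk:
        chunks.append(current_chunk)
    return chunks

chunks = split_paragraph('First sentence. Second sentence.', max_size=64)
|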
test_konko_additional_args_test
|
"""Evaluate extra arguments for ChatKonko."""
chat_instance = ChatKonko(extra=3, max_tokens=10)
assert chat_instance.max_tokens == 10
assert chat_instance.model_kwargs == {'extra': 3}
chat_instance = ChatKonko(extra=3, model_kwargs={'addition': 2})
assert chat_instance.model_kwargs == {'extra': 3, 'addition': 2}
with pytest.raises(ValueError):
ChatKonko(extra=3, model_kwargs={'extra': 2})
with pytest.raises(ValueError):
ChatKonko(model_kwargs={'temperature': 0.2})
with pytest.raises(ValueError):
ChatKonko(model_kwargs={'model': 'gpt-3.5-turbo-instruct'})
|
def test_konko_additional_args_test() ->None:
"""Evaluate extra arguments for ChatKonko."""
chat_instance = ChatKonko(extra=3, max_tokens=10)
assert chat_instance.max_tokens == 10
assert chat_instance.model_kwargs == {'extra': 3}
chat_instance = ChatKonko(extra=3, model_kwargs={'addition': 2})
assert chat_instance.model_kwargs == {'extra': 3, 'addition': 2}
with pytest.raises(ValueError):
ChatKonko(extra=3, model_kwargs={'extra': 2})
with pytest.raises(ValueError):
ChatKonko(model_kwargs={'temperature': 0.2})
with pytest.raises(ValueError):
ChatKonko(model_kwargs={'model': 'gpt-3.5-turbo-instruct'})
|
Evaluate extra arguments for ChatKonko.
|
save
|
self.model_repo.save(self.workspace)
|
def save(self) ->None:
self.model_repo.save(self.workspace)
| null |
_parse_chat_history
|
"""Parse a sequence of messages into history.
Args:
history: The list of messages to re-create the history of the chat.
Returns:
A parsed chat history.
Raises:
ValueError: If the sequence of messages contains a SystemMessage
anywhere other than the first position.
"""
from vertexai.language_models import ChatMessage
vertex_messages, context = [], None
for i, message in enumerate(history):
content = cast(str, message.content)
if i == 0 and isinstance(message, SystemMessage):
context = content
elif isinstance(message, AIMessage):
vertex_message = ChatMessage(content=message.content, author='bot')
vertex_messages.append(vertex_message)
elif isinstance(message, HumanMessage):
vertex_message = ChatMessage(content=message.content, author='user')
vertex_messages.append(vertex_message)
else:
raise ValueError(
f'Unexpected message with type {type(message)} at the position {i}.'
)
chat_history = _ChatHistory(context=context, history=vertex_messages)
return chat_history
|
def _parse_chat_history(history: List[BaseMessage]) ->_ChatHistory:
"""Parse a sequence of messages into history.
Args:
history: The list of messages to re-create the history of the chat.
Returns:
A parsed chat history.
Raises:
ValueError: If the sequence of messages contains a SystemMessage
anywhere other than the first position.
"""
from vertexai.language_models import ChatMessage
vertex_messages, context = [], None
for i, message in enumerate(history):
content = cast(str, message.content)
if i == 0 and isinstance(message, SystemMessage):
context = content
elif isinstance(message, AIMessage):
vertex_message = ChatMessage(content=message.content, author='bot')
vertex_messages.append(vertex_message)
elif isinstance(message, HumanMessage):
vertex_message = ChatMessage(content=message.content, author='user'
)
vertex_messages.append(vertex_message)
else:
raise ValueError(
f'Unexpected message with type {type(message)} at the position {i}.'
)
chat_history = _ChatHistory(context=context, history=vertex_messages)
return chat_history
|
Parse a sequence of messages into history.
Args:
history: The list of messages to re-create the history of the chat.
Returns:
A parsed chat history.
Raises:
ValueError: If the sequence of messages contains a SystemMessage
anywhere other than the first position.
|
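A hedged usage sketch: a leading SystemMessage becomes the context and the remaining turns map to vertexai ChatMessage objects (the message import paths are an assumption and vary by version):
from langchain_core.messages import AIMessage, HumanMessage, SystemMessage

history = [SystemMessage(content='You are a helpful bot.'), HumanMessage(
    content='Hi!'), AIMessage(content='Hello! How can I help?')]
parsed = _parse_chat_history(history)
# parsed.context == 'You are a helpful bot.'
# parsed.history holds two ChatMessage objects, authored 'user' and 'bot'.
|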
__init__
|
"""Initialize with a docstore, and set initial document to None."""
self.docstore = docstore
self.document: Optional[Document] = None
self.lookup_str = ''
self.lookup_index = 0
|
def __init__(self, docstore: Docstore):
"""Initialize with a docstore, and set initial document to None."""
self.docstore = docstore
self.document: Optional[Document] = None
self.lookup_str = ''
self.lookup_index = 0
|
Initialize with a docstore, and set initial document to None.
|
_prepare_input
|
"""Prepare the input for the chain.
Args:
prediction (str): The output string from the first model.
prediction_b (str): The output string from the second model.
input (str, optional): The input or task string.
reference (str, optional): The reference string, if any.
Returns:
dict: The prepared input for the chain.
"""
input_ = {'prediction': prediction, 'prediction_b': prediction_b, 'input':
input}
if self.requires_reference:
input_['reference'] = reference
return input_
|
def _prepare_input(self, prediction: str, prediction_b: str, input:
Optional[str], reference: Optional[str]) ->dict:
"""Prepare the input for the chain.
Args:
prediction (str): The output string from the first model.
prediction_b (str): The output string from the second model.
input (str, optional): The input or task string.
reference (str, optional): The reference string, if any.
Returns:
dict: The prepared input for the chain.
"""
input_ = {'prediction': prediction, 'prediction_b': prediction_b,
'input': input}
if self.requires_reference:
input_['reference'] = reference
return input_
|
Prepare the input for the chain.
Args:
prediction (str): The output string from the first model.
prediction_b (str): The output string from the second model.
input (str, optional): The input or task string.
reference (str, optional): The reference string, if any.
Returns:
dict: The prepared input for the chain.
|
input_variables
|
"""Input variables for this prompt template.
Returns:
List of input variable names.
"""
return [self.variable_name] if not self.optional else []
|
@property
def input_variables(self) ->List[str]:
"""Input variables for this prompt template.
Returns:
List of input variable names.
"""
return [self.variable_name] if not self.optional else []
|
Input variables for this prompt template.
Returns:
List of input variable names.
|
test_sql_database_run_update
|
"""Test commands which return no rows return an empty string."""
engine = create_engine('sqlite:///:memory:')
metadata_obj.create_all(engine)
stmt = insert(user).values(user_id=13, user_name='Harrison')
with engine.begin() as conn:
conn.execute(stmt)
db = SQLDatabase(engine)
command = "update user set user_name='Updated' where user_id = 13"
output = db.run(command)
expected_output = ''
assert output == expected_output
|
def test_sql_database_run_update() ->None:
"""Test commands which return no rows return an empty string."""
engine = create_engine('sqlite:///:memory:')
metadata_obj.create_all(engine)
stmt = insert(user).values(user_id=13, user_name='Harrison')
with engine.begin() as conn:
conn.execute(stmt)
db = SQLDatabase(engine)
command = "update user set user_name='Updated' where user_id = 13"
output = db.run(command)
expected_output = ''
assert output == expected_output
|
Test commands which return no rows return an empty string.
|
_load_hyde_chain
|
"""Load hypothetical document embedder chain from config dict."""
if 'llm_chain' in config:
llm_chain_config = config.pop('llm_chain')
llm_chain = load_chain_from_config(llm_chain_config)
elif 'llm_chain_path' in config:
llm_chain = load_chain(config.pop('llm_chain_path'))
else:
raise ValueError('One of `llm_chain` or `llm_chain_path` must be present.')
if 'embeddings' in kwargs:
embeddings = kwargs.pop('embeddings')
else:
raise ValueError('`embeddings` must be present.')
return HypotheticalDocumentEmbedder(llm_chain=llm_chain, base_embeddings=
embeddings, **config)
|
def _load_hyde_chain(config: dict, **kwargs: Any
) ->HypotheticalDocumentEmbedder:
"""Load hypothetical document embedder chain from config dict."""
if 'llm_chain' in config:
llm_chain_config = config.pop('llm_chain')
llm_chain = load_chain_from_config(llm_chain_config)
elif 'llm_chain_path' in config:
llm_chain = load_chain(config.pop('llm_chain_path'))
else:
raise ValueError(
'One of `llm_chain` or `llm_chain_path` must be present.')
if 'embeddings' in kwargs:
embeddings = kwargs.pop('embeddings')
else:
raise ValueError('`embeddings` must be present.')
return HypotheticalDocumentEmbedder(llm_chain=llm_chain,
base_embeddings=embeddings, **config)
|
Load hypothetical document embedder chain from config dict.
|
_get_agent
|
"""Get agent for testing."""
bad_action_name = 'BadAction'
responses = [
f"""I'm turning evil
Action: {bad_action_name}
Action Input: misalignment"""
, """Oh well
Final Answer: curses foiled again"""]
fake_llm = FakeListLLM(cache=False, responses=responses)
tools = [Tool(name='Search', func=lambda x: x, description=
'Useful for searching'), Tool(name='Lookup', func=lambda x: x,
description='Useful for looking up things in a table')]
agent = initialize_agent(tools, fake_llm, agent=AgentType.
ZERO_SHOT_REACT_DESCRIPTION, verbose=True, **kwargs)
return agent
|
def _get_agent(**kwargs: Any) ->AgentExecutor:
"""Get agent for testing."""
bad_action_name = 'BadAction'
responses = [
f"I'm turning evil\nAction: {bad_action_name}\nAction Input: misalignment"
, """Oh well
Final Answer: curses foiled again"""]
fake_llm = FakeListLLM(cache=False, responses=responses)
tools = [Tool(name='Search', func=lambda x: x, description=
'Useful for searching'), Tool(name='Lookup', func=lambda x: x,
description='Useful for looking up things in a table')]
agent = initialize_agent(tools, fake_llm, agent=AgentType.
ZERO_SHOT_REACT_DESCRIPTION, verbose=True, **kwargs)
return agent
|
Get agent for testing.
|
delete
|
query = f"""
DELETE FROM {self.full_table_name}
WHERE key = ?
"""
with self.conn:
self.conn.execute(query, (key,))
|
def delete(self, key: str) ->None:
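    # Note: the table name is interpolated because SQL identifiers cannot be
    # bound parameters; the key value goes through `?` placeholder binding.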
query = f"""
DELETE FROM {self.full_table_name}
WHERE key = ?
"""
with self.conn:
self.conn.execute(query, (key,))
| null |
get_lc_namespace
|
"""Get the namespace of the langchain object."""
return ['langchain', 'schema', 'messages']
|
@classmethod
def get_lc_namespace(cls) ->List[str]:
"""Get the namespace of the langchain object."""
return ['langchain', 'schema', 'messages']
|
Get the namespace of the langchain object.
|
_create_index_if_not_exist
|
try:
from redis.commands.search.indexDefinition import IndexDefinition, IndexType
except ImportError:
raise ImportError(
'Could not import redis python package. Please install it with `pip install redis`.'
)
self._schema.content_vector.dims = dim
if not check_index_exists(self.client, self.index_name):
self.client.ft(self.index_name).create_index(fields=self._schema.
get_fields(), definition=IndexDefinition(prefix=[self.key_prefix],
index_type=IndexType.HASH))
|
def _create_index_if_not_exist(self, dim: int=1536) ->None:
try:
from redis.commands.search.indexDefinition import IndexDefinition, IndexType
except ImportError:
raise ImportError(
'Could not import redis python package. Please install it with `pip install redis`.'
)
self._schema.content_vector.dims = dim
if not check_index_exists(self.client, self.index_name):
self.client.ft(self.index_name).create_index(fields=self._schema.
get_fields(), definition=IndexDefinition(prefix=[self.
key_prefix], index_type=IndexType.HASH))
| null |
InputType
|
return self.default.InputType
|
@property
def InputType(self) ->Type[Input]:
return self.default.InputType
| null |
embeddings
|
return self.embedding_function
|
@property
def embeddings(self) ->Embeddings:
return self.embedding_function
| null |
raise_callback_manager_deprecation
|
"""Raise deprecation warning if callback_manager is used."""
if values.get('callback_manager') is not None:
if values.get('callbacks') is not None:
raise ValueError(
'Cannot specify both callback_manager and callbacks. callback_manager is deprecated, callbacks is the preferred parameter to pass in.'
)
warnings.warn(
'callback_manager is deprecated. Please use callbacks instead.',
DeprecationWarning)
values['callbacks'] = values.pop('callback_manager', None)
return values
|
@root_validator()
def raise_callback_manager_deprecation(cls, values: Dict) ->Dict:
"""Raise deprecation warning if callback_manager is used."""
if values.get('callback_manager') is not None:
if values.get('callbacks') is not None:
raise ValueError(
'Cannot specify both callback_manager and callbacks. callback_manager is deprecated, callbacks is the preferred parameter to pass in.'
)
warnings.warn(
'callback_manager is deprecated. Please use callbacks instead.',
DeprecationWarning)
values['callbacks'] = values.pop('callback_manager', None)
return values
|
Raise deprecation warning if callback_manager is used.
|
__init__
|
super().__init__()
self.config = config
self._arg_on_start = on_start
self._arg_on_end = on_end
self._arg_on_error = on_error
self.root_id: Optional[UUID] = None
|
def __init__(self, *, config: RunnableConfig, on_start: Optional[Listener],
on_end: Optional[Listener], on_error: Optional[Listener]) ->None:
super().__init__()
self.config = config
self._arg_on_start = on_start
self._arg_on_end = on_end
self._arg_on_error = on_error
self.root_id: Optional[UUID] = None
| null |
test_visit_comparison
|
comp = Comparison(comparator=Comparator.LT, attribute='foo', value='1')
expected = "( doc.foo < '1' )"
actual = DEFAULT_TRANSLATOR.visit_comparison(comp)
assert expected == actual
|
def test_visit_comparison() ->None:
comp = Comparison(comparator=Comparator.LT, attribute='foo', value='1')
expected = "( doc.foo < '1' )"
actual = DEFAULT_TRANSLATOR.visit_comparison(comp)
assert expected == actual
| null |
test_zilliz_no_drop
|
"""Test end to end construction and MRR search."""
texts = ['foo', 'bar', 'baz']
metadatas = [{'page': i} for i in range(len(texts))]
docsearch = _zilliz_from_texts(metadatas=metadatas)
del docsearch
docsearch = _zilliz_from_texts(metadatas=metadatas, drop=False)
output = docsearch.similarity_search('foo', k=10)
assert len(output) == 6
|
def test_zilliz_no_drop() ->None:
"""Test end to end construction and MRR search."""
texts = ['foo', 'bar', 'baz']
metadatas = [{'page': i} for i in range(len(texts))]
docsearch = _zilliz_from_texts(metadatas=metadatas)
del docsearch
docsearch = _zilliz_from_texts(metadatas=metadatas, drop=False)
output = docsearch.similarity_search('foo', k=10)
assert len(output) == 6
|
Test end to end construction and search without dropping old data.
|
is_single_input
|
"""Whether the tool only accepts a single input."""
keys = {k for k in self.args if k != 'kwargs'}
return len(keys) == 1
|
@property
def is_single_input(self) ->bool:
"""Whether the tool only accepts a single input."""
keys = {k for k in self.args if k != 'kwargs'}
return len(keys) == 1
|
Whether the tool only accepts a single input.
|
test_saving_loading_llm
|
"""Test saving/loading an NLPCloud LLM."""
llm = NLPCloud(max_length=10)
llm.save(file_path=tmp_path / 'nlpcloud.yaml')
loaded_llm = load_llm(tmp_path / 'nlpcloud.yaml')
assert_llm_equality(llm, loaded_llm)
|
def test_saving_loading_llm(tmp_path: Path) ->None:
"""Test saving/loading an NLPCloud LLM."""
llm = NLPCloud(max_length=10)
llm.save(file_path=tmp_path / 'nlpcloud.yaml')
loaded_llm = load_llm(tmp_path / 'nlpcloud.yaml')
assert_llm_equality(llm, loaded_llm)
|
Test saving/loading an NLPCloud LLM.
|
check_index_exists
|
"""Check if Redis index exists."""
try:
client.ft(index_name).info()
except Exception:
logger.debug('Index does not exist')
return False
logger.debug('Index already exists')
return True
|
def check_index_exists(client: RedisType, index_name: str) ->bool:
"""Check if Redis index exists."""
try:
client.ft(index_name).info()
except Exception:
logger.debug('Index does not exist')
return False
logger.debug('Index already exists')
return True
|
Check if Redis index exists.
|
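A small usage sketch, assuming a local Redis instance with the RediSearch module loaded; the host, port, and index name are placeholders:
import redis

client = redis.Redis(host='localhost', port=6379)
if not check_index_exists(client, 'my_index'):
    print('Index missing; create it before searching.')
|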
__aiter__
|
return self
|
def __aiter__(self) ->AsyncIterator[Any]:
return self
| null |
flush
|
"""Explicitly write current profile if using a rolling logger."""
if self._logger and hasattr(self._logger, '_do_rollover'):
self._logger._do_rollover()
diagnostic_logger.info('Flushing WhyLabs logger, writing profile...')
|
def flush(self) ->None:
"""Explicitly write current profile if using a rolling logger."""
if self._logger and hasattr(self._logger, '_do_rollover'):
self._logger._do_rollover()
diagnostic_logger.info('Flushing WhyLabs logger, writing profile...')
|
Explicitly write current profile if using a rolling logger.
|
similarity_search_with_score
|
"""Run similarity search with distance."""
query_embedding = self._embedding.embed_query(query)
points = self._search_points(query_embedding, k=k)
return [(Document(page_content=p['metadata']['text'], metadata=p['metadata'
]), p['distance']) for p in points]
|
def similarity_search_with_score(self, query: str, k: int=4, **kwargs: Any
) ->List[Tuple[Document, float]]:
"""Run similarity search with distance."""
query_embedding = self._embedding.embed_query(query)
points = self._search_points(query_embedding, k=k)
return [(Document(page_content=p['metadata']['text'], metadata=p[
'metadata']), p['distance']) for p in points]
|
Run similarity search with distance.
|
setUp
|
self.controller = ToTController(c=3)
|
def setUp(self) ->None:
self.controller = ToTController(c=3)
| null |
invoke
|
bound, config = prepared
if return_exceptions:
try:
return bound.invoke(input, config, **kwargs)
except Exception as e:
return e
else:
return bound.invoke(input, config, **kwargs)
|
def invoke(prepared: Tuple[Runnable[Input, Output], RunnableConfig], input:
Input) ->Union[Output, Exception]:
bound, config = prepared
if return_exceptions:
try:
return bound.invoke(input, config, **kwargs)
except Exception as e:
return e
else:
return bound.invoke(input, config, **kwargs)
| null |
_get_relevant_documents
|
results = self.client.search({'text': query}, **self.params)
final_results = []
for r in results['data']:
metadata = {k: v for k, v in r.items() if k != 'text'}
final_results.append(Document(page_content=r['text'], metadata=metadata))
return final_results
|
def _get_relevant_documents(self, query: str, *, run_manager:
CallbackManagerForRetrieverRun) ->List[Document]:
results = self.client.search({'text': query}, **self.params)
final_results = []
for r in results['data']:
metadata = {k: v for k, v in r.items() if k != 'text'}
final_results.append(Document(page_content=r['text'], metadata=
metadata))
return final_results
| null |
_AugAssign
|
self.fill()
self.dispatch(t.target)
self.write(' ' + self.binop[t.op.__class__.__name__] + '= ')
self.dispatch(t.value)
|
def _AugAssign(self, t):
self.fill()
self.dispatch(t.target)
self.write(' ' + self.binop[t.op.__class__.__name__] + '= ')
self.dispatch(t.value)
| null |
test_fallacy_critique_parsing
|
"""Test parsing of critique text."""
for text in [TEXT_ONE, TEXT_TWO, TEXT_THREE]:
fallacy_critique = FallacyChain._parse_critique(text)
assert fallacy_critique.strip(
) == 'This text is bad.', f'Failed on {text} with {fallacy_critique}'
|
def test_fallacy_critique_parsing() ->None:
"""Test parsing of critique text."""
for text in [TEXT_ONE, TEXT_TWO, TEXT_THREE]:
fallacy_critique = FallacyChain._parse_critique(text)
assert fallacy_critique.strip(
) == 'This text is bad.', f'Failed on {text} with {fallacy_critique}'
|
Test parsing of critique text.
|
similarity_search_by_vector
|
docs_and_scores = self.similarity_search_with_score_by_vector(embedding=
embedding, k=k, filter=filter)
return [doc for doc, _ in docs_and_scores]
|
def similarity_search_by_vector(self, embedding: List[float], k: int=4,
filter: Optional[dict]=None, **kwargs: Any) ->List[Document]:
docs_and_scores = self.similarity_search_with_score_by_vector(embedding
=embedding, k=k, filter=filter)
return [doc for doc, _ in docs_and_scores]
| null |
clear
|
"""Clear session memory in Elasticsearch"""
try:
from elasticsearch import ApiError
self.client.delete_by_query(index=self.index, query={'term': {
'session_id': self.session_id}}, refresh=True)
except ApiError as err:
logger.error(f'Could not clear session memory in Elasticsearch: {err}')
raise err
|
def clear(self) ->None:
"""Clear session memory in Elasticsearch"""
try:
from elasticsearch import ApiError
self.client.delete_by_query(index=self.index, query={'term': {
'session_id': self.session_id}}, refresh=True)
except ApiError as err:
logger.error(f'Could not clear session memory in Elasticsearch: {err}')
raise err
|
Clear session memory in Elasticsearch.
|
_make_request
|
try:
import grpc
from google.protobuf.wrappers_pb2 import DoubleValue, Int64Value
from yandex.cloud.ai.foundation_models.v1.foundation_models_pb2 import CompletionOptions, Message
from yandex.cloud.ai.foundation_models.v1.foundation_models_service_pb2 import CompletionRequest
from yandex.cloud.ai.foundation_models.v1.foundation_models_service_pb2_grpc import TextGenerationServiceStub
except ImportError as e:
raise ImportError(
'Please install YandexCloud SDK with `pip install yandexcloud`.'
) from e
if not messages:
raise ValueError(
'You should provide at least one message to start the chat!')
message_history = _parse_chat_history(messages)
channel_credentials = grpc.ssl_channel_credentials()
channel = grpc.secure_channel(self.url, channel_credentials)
request = CompletionRequest(model_uri=self.model_uri, completion_options=
CompletionOptions(temperature=DoubleValue(value=self.temperature),
max_tokens=Int64Value(value=self.max_tokens)), messages=[Message(**
message) for message in message_history])
stub = TextGenerationServiceStub(channel)
res = stub.Completion(request, metadata=self._grpc_metadata)
return list(res)[0].alternatives[0].message.text
|
def _make_request(self: ChatYandexGPT, messages: List[BaseMessage]) ->str:
try:
import grpc
from google.protobuf.wrappers_pb2 import DoubleValue, Int64Value
from yandex.cloud.ai.foundation_models.v1.foundation_models_pb2 import CompletionOptions, Message
from yandex.cloud.ai.foundation_models.v1.foundation_models_service_pb2 import CompletionRequest
from yandex.cloud.ai.foundation_models.v1.foundation_models_service_pb2_grpc import TextGenerationServiceStub
except ImportError as e:
raise ImportError(
'Please install YandexCloud SDK with `pip install yandexcloud`.'
) from e
if not messages:
raise ValueError(
'You should provide at least one message to start the chat!')
message_history = _parse_chat_history(messages)
channel_credentials = grpc.ssl_channel_credentials()
channel = grpc.secure_channel(self.url, channel_credentials)
request = CompletionRequest(model_uri=self.model_uri,
completion_options=CompletionOptions(temperature=DoubleValue(value=
self.temperature), max_tokens=Int64Value(value=self.max_tokens)),
messages=[Message(**message) for message in message_history])
stub = TextGenerationServiceStub(channel)
res = stub.Completion(request, metadata=self._grpc_metadata)
return list(res)[0].alternatives[0].message.text
| null |
_import_self_hosted_hugging_face
|
from langchain_community.llms.self_hosted_hugging_face import SelfHostedHuggingFaceLLM
return SelfHostedHuggingFaceLLM
|
def _import_self_hosted_hugging_face() ->Any:
from langchain_community.llms.self_hosted_hugging_face import SelfHostedHuggingFaceLLM
return SelfHostedHuggingFaceLLM
| null |
test_neo4jvector_catch_wrong_index_name
|
"""Test if index name is misspelled, but node label and property are correct."""
text_embeddings = FakeEmbeddingsWithOsDimension().embed_documents(texts)
text_embedding_pairs = list(zip(texts, text_embeddings))
Neo4jVector.from_embeddings(text_embeddings=text_embedding_pairs, embedding
=FakeEmbeddingsWithOsDimension(), url=url, username=username, password=
password, pre_delete_collection=True)
existing = Neo4jVector.from_existing_index(embedding=
FakeEmbeddingsWithOsDimension(), url=url, username=username, password=
password, index_name='test')
output = existing.similarity_search('foo', k=1)
assert output == [Document(page_content='foo')]
drop_vector_indexes(existing)
|
def test_neo4jvector_catch_wrong_index_name() ->None:
"""Test if index name is misspelled, but node label and property are correct."""
text_embeddings = FakeEmbeddingsWithOsDimension().embed_documents(texts)
text_embedding_pairs = list(zip(texts, text_embeddings))
Neo4jVector.from_embeddings(text_embeddings=text_embedding_pairs,
embedding=FakeEmbeddingsWithOsDimension(), url=url, username=
username, password=password, pre_delete_collection=True)
existing = Neo4jVector.from_existing_index(embedding=
FakeEmbeddingsWithOsDimension(), url=url, username=username,
password=password, index_name='test')
output = existing.similarity_search('foo', k=1)
assert output == [Document(page_content='foo')]
drop_vector_indexes(existing)
|
Test if index name is misspelled, but node label and property are correct.
|
__init__
|
"""Initialize the SQLRecordManager.
This class is a persistence layer that uses an SQL backend to
track upserted records. Specify either a db_url to create an
engine or provide an existing engine.
Args:
namespace: The namespace associated with this record manager.
engine: An already existing SQLAlchemy engine.
Default is None.
db_url: A database connection string used to create
an SQLAlchemy engine. Default is None.
engine_kwargs: Additional keyword arguments
to be passed when creating the engine. Default is an empty dictionary.
async_mode: Whether to create an async engine.
Driver should support async operations.
It only applies if db_url is provided.
Default is False.
Raises:
ValueError: If both db_url and engine are provided or neither.
AssertionError: If something unexpected happens during engine configuration.
"""
super().__init__(namespace=namespace)
if db_url is None and engine is None:
raise ValueError('Must specify either db_url or engine')
if db_url is not None and engine is not None:
raise ValueError('Must specify either db_url or engine, not both')
_engine: Union[Engine, AsyncEngine]
if db_url:
if async_mode:
_engine = create_async_engine(db_url, **engine_kwargs or {})
else:
_engine = create_engine(db_url, **engine_kwargs or {})
elif engine:
_engine = engine
else:
raise AssertionError('Something went wrong with configuration of engine.')
_session_factory: Union[sessionmaker[Session], async_sessionmaker[AsyncSession]
]
if isinstance(_engine, AsyncEngine):
_session_factory = async_sessionmaker(bind=_engine)
else:
_session_factory = sessionmaker(bind=_engine)
self.engine = _engine
self.dialect = _engine.dialect.name
self.session_factory = _session_factory
|
def __init__(self, namespace: str, *, engine: Optional[Union[Engine,
AsyncEngine]]=None, db_url: Union[None, str, URL]=None, engine_kwargs:
Optional[Dict[str, Any]]=None, async_mode: bool=False) ->None:
"""Initialize the SQLRecordManager.
This class is a persistence layer that uses an SQL backend to
track upserted records. Specify either a db_url to create an
engine or provide an existing engine.
Args:
namespace: The namespace associated with this record manager.
engine: An already existing SQLAlchemy engine.
Default is None.
db_url: A database connection string used to create
an SQLAlchemy engine. Default is None.
engine_kwargs: Additional keyword arguments
to be passed when creating the engine. Default is an empty dictionary.
async_mode: Whether to create an async engine.
Driver should support async operations.
It only applies if db_url is provided.
Default is False.
Raises:
ValueError: If both db_url and engine are provided or neither.
AssertionError: If something unexpected happens during engine configuration.
"""
super().__init__(namespace=namespace)
if db_url is None and engine is None:
raise ValueError('Must specify either db_url or engine')
if db_url is not None and engine is not None:
raise ValueError('Must specify either db_url or engine, not both')
_engine: Union[Engine, AsyncEngine]
if db_url:
if async_mode:
_engine = create_async_engine(db_url, **engine_kwargs or {})
else:
_engine = create_engine(db_url, **engine_kwargs or {})
elif engine:
_engine = engine
else:
raise AssertionError(
'Something went wrong with configuration of engine.')
_session_factory: Union[sessionmaker[Session], async_sessionmaker[
AsyncSession]]
if isinstance(_engine, AsyncEngine):
_session_factory = async_sessionmaker(bind=_engine)
else:
_session_factory = sessionmaker(bind=_engine)
self.engine = _engine
self.dialect = _engine.dialect.name
self.session_factory = _session_factory
|
Initialize the SQLRecordManager.
This class is a persistence layer that uses an SQL backend to
track upserted records. Specify either a db_url to create an
engine or provide an existing engine.
Args:
namespace: The namespace associated with this record manager.
engine: An already existing SQLAlchemy engine.
Default is None.
db_url: A database connection string used to create
an SQLAlchemy engine. Default is None.
engine_kwargs: Additional keyword arguments
to be passed when creating the engine. Default is an empty dictionary.
async_mode: Whether to create an async engine.
Driver should support async operations.
It only applies if db_url is provided.
Default is False.
Raises:
ValueError: If both db_url and engine are provided or neither.
AssertionError: If something unexpected happens during engine configuration.
|
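A minimal sketch, assuming an in-memory SQLite database; the namespace is illustrative, and calling create_schema() afterwards is an assumption based on the class's usual API:
manager = SQLRecordManager(namespace='kb/docs', db_url='sqlite:///:memory:')
manager.create_schema()  # assumed setup step that creates the tracking table
|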
validate_environment
|
"""Validate that api key exists in environment."""
values['edenai_api_key'] = get_from_dict_or_env(values, 'edenai_api_key',
'EDENAI_API_KEY')
return values
|
@root_validator()
def validate_environment(cls, values: Dict) ->Dict:
"""Validate that api key exists in environment."""
values['edenai_api_key'] = get_from_dict_or_env(values,
'edenai_api_key', 'EDENAI_API_KEY')
return values
|
Validate that api key exists in environment.
|
_skip_reference_warning
|
"""Warning to show when reference is ignored."""
return f'Ignoring reference in {self.__class__.__name__}, as it is not expected.'
|
@property
def _skip_reference_warning(self) ->str:
"""Warning to show when reference is ignored."""
return (
f'Ignoring reference in {self.__class__.__name__}, as it is not expected.'
)
|
Warning to show when reference is ignored.
|
test_scann_with_config
|
"""Test ScaNN with approximate search config."""
texts = [str(i) for i in range(10000)]
scann_config = dependable_scann_import().scann_ops_pybind.builder(np.zeros(
shape=(0, 10)), 10, 'squared_l2').tree(num_leaves=100,
num_leaves_to_search=10).score_ah(2).reorder(100).create_config()
mips_search = ScaNN.from_texts(texts, ConsistentFakeEmbeddings(),
scann_config=scann_config, distance_strategy=DistanceStrategy.
MAX_INNER_PRODUCT, normalize_L2=True)
output = mips_search.similarity_search_with_score('42', k=1)
expected = [(Document(page_content='42', metadata={}), 0.0)]
assert output == expected
|
def test_scann_with_config() ->None:
"""Test ScaNN with approximate search config."""
texts = [str(i) for i in range(10000)]
scann_config = dependable_scann_import().scann_ops_pybind.builder(np.
zeros(shape=(0, 10)), 10, 'squared_l2').tree(num_leaves=100,
num_leaves_to_search=10).score_ah(2).reorder(100).create_config()
mips_search = ScaNN.from_texts(texts, ConsistentFakeEmbeddings(),
scann_config=scann_config, distance_strategy=DistanceStrategy.
MAX_INNER_PRODUCT, normalize_L2=True)
output = mips_search.similarity_search_with_score('42', k=1)
expected = [(Document(page_content='42', metadata={}), 0.0)]
assert output == expected
|
Test ScaNN with approximate search config.
|
_call
|
"""Call base LLM with sanitization before and de-sanitization after.
Args:
prompt: The prompt to pass into the model.
Returns:
The string generated by the model.
Example:
.. code-block:: python
response = op_llm("Tell me a joke.")
"""
import opaqueprompts as op
_run_manager = run_manager or CallbackManagerForLLMRun.get_noop_manager()
sanitize_response: op.SanitizeResponse = op.sanitize([prompt])
sanitized_prompt_value_str = sanitize_response.sanitized_texts[0]
llm_response = self.base_llm.predict(sanitized_prompt_value_str, stop=stop)
desanitize_response: op.DesanitizeResponse = op.desanitize(llm_response,
secure_context=sanitize_response.secure_context)
return desanitize_response.desanitized_text
|
def _call(self, prompt: str, stop: Optional[List[str]]=None, run_manager:
Optional[CallbackManagerForLLMRun]=None, **kwargs: Any) ->str:
"""Call base LLM with sanitization before and de-sanitization after.
Args:
prompt: The prompt to pass into the model.
Returns:
The string generated by the model.
Example:
.. code-block:: python
response = op_llm("Tell me a joke.")
"""
import opaqueprompts as op
_run_manager = run_manager or CallbackManagerForLLMRun.get_noop_manager()
sanitize_response: op.SanitizeResponse = op.sanitize([prompt])
sanitized_prompt_value_str = sanitize_response.sanitized_texts[0]
llm_response = self.base_llm.predict(sanitized_prompt_value_str, stop=stop)
desanitize_response: op.DesanitizeResponse = op.desanitize(llm_response,
secure_context=sanitize_response.secure_context)
return desanitize_response.desanitized_text
|
Call base LLM with sanitization before and de-sanitization after.
Args:
prompt: The prompt to pass into the model.
Returns:
The string generated by the model.
Example:
.. code-block:: python
response = op_llm("Tell me a joke.")
|
input_mapper
|
assert 'the wrong input' in inputs
return {'the right input': inputs['the wrong input']}
|
def input_mapper(inputs: dict) ->dict:
assert 'the wrong input' in inputs
return {'the right input': inputs['the wrong input']}
| null |
test_partial
|
"""Test prompt can be partialed."""
prefix = 'This is a test about {content}.'
suffix = 'Now you try to talk about {new_content}.'
examples = [{'question': 'foo', 'answer': 'bar'}, {'question': 'baz',
'answer': 'foo'}]
prompt = FewShotPromptTemplate(suffix=suffix, prefix=prefix,
input_variables=['content', 'new_content'], examples=examples,
example_prompt=EXAMPLE_PROMPT, example_separator='\n')
new_prompt = prompt.partial(content='foo')
new_output = new_prompt.format(new_content='party')
expected_output = """This is a test about foo.
foo: bar
baz: foo
Now you try to talk about party."""
assert new_output == expected_output
output = prompt.format(new_content='party', content='bar')
expected_output = """This is a test about bar.
foo: bar
baz: foo
Now you try to talk about party."""
assert output == expected_output
|
def test_partial() ->None:
"""Test prompt can be partialed."""
prefix = 'This is a test about {content}.'
suffix = 'Now you try to talk about {new_content}.'
examples = [{'question': 'foo', 'answer': 'bar'}, {'question': 'baz',
'answer': 'foo'}]
prompt = FewShotPromptTemplate(suffix=suffix, prefix=prefix,
input_variables=['content', 'new_content'], examples=examples,
example_prompt=EXAMPLE_PROMPT, example_separator='\n')
new_prompt = prompt.partial(content='foo')
new_output = new_prompt.format(new_content='party')
expected_output = """This is a test about foo.
foo: bar
baz: foo
Now you try to talk about party."""
assert new_output == expected_output
output = prompt.format(new_content='party', content='bar')
expected_output = """This is a test about bar.
foo: bar
baz: foo
Now you try to talk about party."""
assert output == expected_output
|
Test prompt can be partialed.
|
get_lc_namespace
|
"""Get the namespace of the langchain object."""
return ['langchain', 'schema', 'agent']
|
@classmethod
def get_lc_namespace(cls) ->List[str]:
"""Get the namespace of the langchain object."""
return ['langchain', 'schema', 'agent']
|
Get the namespace of the langchain object.
|
_default_schema
|
return {'class': index_name, 'properties': [{'name': 'text', 'dataType': [
'text']}]}
|
def _default_schema(index_name: str) ->Dict:
return {'class': index_name, 'properties': [{'name': 'text', 'dataType':
['text']}]}
| null |
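For instance, a quick illustration of the returned shape:
schema = _default_schema('LangChain')
# {'class': 'LangChain', 'properties': [{'name': 'text', 'dataType': ['text']}]}
|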
set_debug
|
"""Set a new value for the `debug` global setting."""
import langchain
with warnings.catch_warnings():
warnings.filterwarnings('ignore', message=
'Importing debug from langchain root module is no longer supported')
langchain.debug = value
global _debug
_debug = value
|
def set_debug(value: bool) ->None:
"""Set a new value for the `debug` global setting."""
import langchain
with warnings.catch_warnings():
warnings.filterwarnings('ignore', message=
'Importing debug from langchain root module is no longer supported'
)
langchain.debug = value
global _debug
_debug = value
|
Set a new value for the `debug` global setting.
|
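Typical use is a temporary toggle around a noisy call:
set_debug(True)
# ... run chains; component inputs/outputs are logged while enabled ...
set_debug(False)
|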
test_json_distance_evaluator_requires_input
|
assert json_distance_evaluator.requires_input is False
|
@pytest.mark.requires('rapidfuzz')
def test_json_distance_evaluator_requires_input(json_distance_evaluator:
JsonEditDistanceEvaluator) ->None:
assert json_distance_evaluator.requires_input is False
| null |
load
|
"""Load documents."""
try:
import boto3
except ImportError:
raise ImportError(
'Could not import boto3 python package. Please install it with `pip install boto3`.'
)
s3 = boto3.resource('s3', region_name=self.region_name, api_version=self.
api_version, use_ssl=self.use_ssl, verify=self.verify, endpoint_url=
self.endpoint_url, aws_access_key_id=self.aws_access_key_id,
aws_secret_access_key=self.aws_secret_access_key, aws_session_token=
self.aws_session_token, config=self.boto_config)
bucket = s3.Bucket(self.bucket)
docs = []
for obj in bucket.objects.filter(Prefix=self.prefix):
loader = S3FileLoader(self.bucket, obj.key, region_name=self.
region_name, api_version=self.api_version, use_ssl=self.use_ssl,
verify=self.verify, endpoint_url=self.endpoint_url,
aws_access_key_id=self.aws_access_key_id, aws_secret_access_key=
self.aws_secret_access_key, aws_session_token=self.
aws_session_token, boto_config=self.boto_config)
docs.extend(loader.load())
return docs
|
def load(self) ->List[Document]:
"""Load documents."""
try:
import boto3
except ImportError:
raise ImportError(
'Could not import boto3 python package. Please install it with `pip install boto3`.'
)
s3 = boto3.resource('s3', region_name=self.region_name, api_version=
self.api_version, use_ssl=self.use_ssl, verify=self.verify,
endpoint_url=self.endpoint_url, aws_access_key_id=self.
aws_access_key_id, aws_secret_access_key=self.aws_secret_access_key,
aws_session_token=self.aws_session_token, config=self.boto_config)
bucket = s3.Bucket(self.bucket)
docs = []
for obj in bucket.objects.filter(Prefix=self.prefix):
loader = S3FileLoader(self.bucket, obj.key, region_name=self.
region_name, api_version=self.api_version, use_ssl=self.use_ssl,
verify=self.verify, endpoint_url=self.endpoint_url,
aws_access_key_id=self.aws_access_key_id, aws_secret_access_key
=self.aws_secret_access_key, aws_session_token=self.
aws_session_token, boto_config=self.boto_config)
docs.extend(loader.load())
return docs
|
Load documents.
|
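A hedged usage sketch; the bucket name and prefix are placeholders, and the loader class path follows langchain_community conventions:
from langchain_community.document_loaders import S3DirectoryLoader

loader = S3DirectoryLoader('my-bucket', prefix='reports/')
docs = loader.load()  # Documents parsed from every object under the prefix
|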
_map_comparator
|
"""
Maps a LangChain comparator to its PostgREST counterpart:
https://postgrest.org/en/stable/references/api/tables_views.html#operators
"""
postgrest_comparator = {Comparator.EQ: 'eq', Comparator.NE: 'neq',
Comparator.GT: 'gt', Comparator.GTE: 'gte', Comparator.LT: 'lt',
Comparator.LTE: 'lte', Comparator.LIKE: 'like'}.get(comparator)
if postgrest_comparator is None:
raise Exception(
f"Comparator '{comparator}' is not currently supported in Supabase Vector"
)
return postgrest_comparator
|
def _map_comparator(self, comparator: Comparator) ->str:
"""
Maps a LangChain comparator to its PostgREST counterpart:
https://postgrest.org/en/stable/references/api/tables_views.html#operators
"""
postgrest_comparator = {Comparator.EQ: 'eq', Comparator.NE: 'neq',
Comparator.GT: 'gt', Comparator.GTE: 'gte', Comparator.LT: 'lt',
Comparator.LTE: 'lte', Comparator.LIKE: 'like'}.get(comparator)
if postgrest_comparator is None:
raise Exception(
f"Comparator '{comparator}' is not currently supported in Supabase Vector"
)
return postgrest_comparator
|
Maps a LangChain comparator to its PostgREST counterpart:
https://postgrest.org/en/stable/references/api/tables_views.html#operators
|
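A quick illustration; the translator class name and import paths are assumptions based on where this method lives:
from langchain.chains.query_constructor.ir import Comparator
from langchain.retrievers.self_query.supabase import SupabaseVectorTranslator

translator = SupabaseVectorTranslator()
assert translator._map_comparator(Comparator.LTE) == 'lte'
|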
_on_chain_error
|
"""Process the Chain Run upon error."""
self._submit(self._update_run_single, _copy(run))
|
def _on_chain_error(self, run: Run) ->None:
"""Process the Chain Run upon error."""
self._submit(self._update_run_single, _copy(run))
|
Process the Chain Run upon error.
|