method_name
stringlengths 1
78
| method_body
stringlengths 3
9.66k
| full_code
stringlengths 31
10.7k
| docstring
stringlengths 4
4.74k
⌀ |
|---|---|---|---|
_get_elements
|
from unstructured.partition.odt import partition_odt
return partition_odt(filename=self.file_path, **self.unstructured_kwargs)
|
def _get_elements(self) ->List:
    """Partition the ODT file at ``self.file_path`` into unstructured elements."""
    # Imported lazily so `unstructured` is only required when actually loading.
    from unstructured.partition.odt import partition_odt
    return partition_odt(filename=self.file_path, **self.unstructured_kwargs)
| null |
ignore_chain
|
"""Whether to ignore chain callbacks."""
return self.ignore_chain_
|
@property
def ignore_chain(self) ->bool:
    """Whether to ignore chain callbacks."""
    # Delegates to the stored flag attribute.
    return self.ignore_chain_
|
Whether to ignore chain callbacks.
|
convert_dict_to_message
|
"""Convert a dictionary to a LangChain message.
Args:
_dict: The dictionary.
Returns:
The LangChain message.
"""
role = _dict.get('role')
if role == 'user':
return HumanMessage(content=_dict.get('content', ''))
elif role == 'assistant':
content = _dict.get('content', '') or ''
additional_kwargs: Dict = {}
if (function_call := _dict.get('function_call')):
additional_kwargs['function_call'] = dict(function_call)
if (tool_calls := _dict.get('tool_calls')):
additional_kwargs['tool_calls'] = tool_calls
return AIMessage(content=content, additional_kwargs=additional_kwargs)
elif role == 'system':
return SystemMessage(content=_dict.get('content', ''))
elif role == 'function':
return FunctionMessage(content=_dict.get('content', ''), name=_dict.get
('name'))
elif role == 'tool':
additional_kwargs = {}
if 'name' in _dict:
additional_kwargs['name'] = _dict['name']
return ToolMessage(content=_dict.get('content', ''), tool_call_id=_dict
.get('tool_call_id'), additional_kwargs=additional_kwargs)
else:
return ChatMessage(content=_dict.get('content', ''), role=role)
|
def convert_dict_to_message(_dict: Mapping[str, Any]) ->BaseMessage:
    """Convert a dictionary to a LangChain message.

    Args:
        _dict: The dictionary.
    Returns:
        The LangChain message.
    """
    role = _dict.get('role')
    content = _dict.get('content', '')
    if role == 'user':
        return HumanMessage(content=content)
    if role == 'system':
        return SystemMessage(content=content)
    if role == 'assistant':
        extra: Dict = {}
        function_call = _dict.get('function_call')
        if function_call:
            extra['function_call'] = dict(function_call)
        tool_calls = _dict.get('tool_calls')
        if tool_calls:
            extra['tool_calls'] = tool_calls
        # Content may be explicitly None for pure tool-call messages.
        return AIMessage(content=content or '', additional_kwargs=extra)
    if role == 'function':
        return FunctionMessage(content=content, name=_dict.get('name'))
    if role == 'tool':
        extra = {}
        if 'name' in _dict:
            extra['name'] = _dict['name']
        return ToolMessage(content=content, tool_call_id=_dict.get(
            'tool_call_id'), additional_kwargs=extra)
    # Unknown roles are preserved verbatim as generic chat messages.
    return ChatMessage(content=content, role=role)
|
Convert a dictionary to a LangChain message.
Args:
_dict: The dictionary.
Returns:
The LangChain message.
|
__init__
|
"""Create a RedisTag FilterField.
Args:
field (str): The name of the RedisTag field in the index to be queried
against.
"""
super().__init__(field)
|
def __init__(self, field: str):
    """Create a RedisTag FilterField.

    Args:
        field (str): The name of the RedisTag field in the index to be queried
            against.
    """
    # Storage of the field name is delegated to the base FilterField class.
    super().__init__(field)
|
Create a RedisTag FilterField.
Args:
field (str): The name of the RedisTag field in the index to be queried
against.
|
test_load_success_load_max_docs
|
"""Test that returns the correct answer"""
api_client = PubMedLoader(query='chatgpt', load_max_docs=2)
docs = api_client.load()
print(docs)
assert len(docs) == api_client.load_max_docs == 2
assert_docs(docs)
|
def test_load_success_load_max_docs() ->None:
    """Test that returns the correct answer"""
    loader = PubMedLoader(query='chatgpt', load_max_docs=2)
    documents = loader.load()
    print(documents)
    # The loader must honor the requested cap exactly.
    assert len(documents) == loader.load_max_docs == 2
    assert_docs(documents)
|
Test that returns the correct answer
|
test_sim_search
|
"""Test end to end construction and simple similarity search."""
hnsw_vec_store = DocArrayHnswSearch.from_texts(texts, FakeEmbeddings(),
work_dir=str(tmp_path), n_dim=10, dist_metric=metric, index=True)
output = hnsw_vec_store.similarity_search('foo', k=1)
assert output == [Document(page_content='foo')]
|
@pytest.mark.parametrize('metric', ['cosine', 'l2'])
def test_sim_search(metric: str, texts: List[str], tmp_path: Path) ->None:
    """Test end to end construction and simple similarity search."""
    store = DocArrayHnswSearch.from_texts(texts, FakeEmbeddings(), work_dir
        =str(tmp_path), n_dim=10, dist_metric=metric, index=True)
    results = store.similarity_search('foo', k=1)
    # The exact-match document should come back first.
    assert results == [Document(page_content='foo')]
|
Test end to end construction and simple similarity search.
|
input_keys
|
"""Return the singular input key.
:meta private:
"""
return [self.input_key]
|
@property
def input_keys(self) ->List[str]:
    """Return the singular input key.

    :meta private:
    """
    # This chain accepts exactly one input, named by `self.input_key`.
    return [self.input_key]
|
Return the singular input key.
:meta private:
|
_import_azure_openai
|
from langchain_community.llms.openai import AzureOpenAI
return AzureOpenAI
|
def _import_azure_openai() ->Any:
    """Lazily import and return the ``AzureOpenAI`` LLM class."""
    from langchain_community.llms.openai import AzureOpenAI
    return AzureOpenAI
| null |
get_validated_relative_path
|
"""Resolve a relative path, raising an error if not within the root directory."""
root = root.resolve()
full_path = (root / user_path).resolve()
if not is_relative_to(full_path, root):
raise FileValidationError(
f'Path {user_path} is outside of the allowed directory {root}')
return full_path
|
def get_validated_relative_path(root: Path, user_path: str) ->Path:
    """Resolve a relative path, raising an error if not within the root directory."""
    resolved_root = root.resolve()
    # Resolving collapses any `..` segments, so escapes are detectable.
    candidate = (resolved_root / user_path).resolve()
    if is_relative_to(candidate, resolved_root):
        return candidate
    raise FileValidationError(
        f'Path {user_path} is outside of the allowed directory {resolved_root}')
|
Resolve a relative path, raising an error if not within the root directory.
|
_get_json_operator
|
if isinstance(value, str):
return '->>'
else:
return '->'
|
def _get_json_operator(self, value: Any) ->str:
if isinstance(value, str):
return '->>'
else:
return '->'
| null |
output_keys
|
"""Return output key.
:meta private:
"""
_output_keys = [self.answer_key, self.sources_answer_key]
if self.return_source_documents:
_output_keys = _output_keys + ['source_documents']
return _output_keys
|
@property
def output_keys(self) ->List[str]:
    """Return output key.

    :meta private:
    """
    keys = [self.answer_key, self.sources_answer_key]
    # Source documents are only exposed when explicitly requested.
    if self.return_source_documents:
        keys.append('source_documents')
    return keys
|
Return output key.
:meta private:
|
load
|
"""Extract text from Diffbot on all the URLs and return Documents"""
docs: List[Document] = list()
for url in self.urls:
try:
data = self._get_diffbot_data(url)
text = data['objects'][0]['text'] if 'objects' in data else ''
metadata = {'source': url}
docs.append(Document(page_content=text, metadata=metadata))
except Exception as e:
if self.continue_on_failure:
logger.error(f'Error fetching or processing {url}, exception: {e}')
else:
raise e
return docs
|
def load(self) ->List[Document]:
    """Extract text from Diffbot on all the URLs and return Documents"""
    documents: List[Document] = []
    for url in self.urls:
        try:
            payload = self._get_diffbot_data(url)
            content = payload['objects'][0]['text'] if 'objects' in payload else ''
            documents.append(Document(page_content=content, metadata={
                'source': url}))
        except Exception as e:
            # Best-effort mode logs the failure and moves on; otherwise
            # propagate to the caller.
            if not self.continue_on_failure:
                raise e
            logger.error(f'Error fetching or processing {url}, exception: {e}')
    return documents
|
Extract text from Diffbot on all the URLs and return Documents
|
_get_docs
|
"""Get docs."""
docs = self.retriever.get_relevant_documents(question, callbacks=
run_manager.get_child())
return self._reduce_tokens_below_limit(docs)
|
def _get_docs(self, question: str, inputs: Dict[str, Any], *, run_manager:
    CallbackManagerForChainRun) ->List[Document]:
    """Get docs."""
    # Retrieve candidates, then trim the list so it fits the token budget.
    docs = self.retriever.get_relevant_documents(question, callbacks=
        run_manager.get_child())
    return self._reduce_tokens_below_limit(docs)
|
Get docs.
|
save_context
|
"""Pass."""
|
def save_context(self, inputs: Dict[str, Any], outputs: Dict[str, str]) ->None:
    """No-op: this memory implementation does not persist any context."""
|
Pass.
|
__add__
|
if isinstance(other, GenerationChunk):
generation_info = ({**self.generation_info or {}, **other.
generation_info or {}} if self.generation_info is not None or other
.generation_info is not None else None)
return GenerationChunk(text=self.text + other.text, generation_info=
generation_info)
else:
raise TypeError(
f"unsupported operand type(s) for +: '{type(self)}' and '{type(other)}'"
)
|
def __add__(self, other: GenerationChunk) ->GenerationChunk:
    if not isinstance(other, GenerationChunk):
        raise TypeError(
            f"unsupported operand type(s) for +: '{type(self)}' and '{type(other)}'"
            )
    # Merge the two generation_info dicts (right side wins on key clashes);
    # stay None only when neither chunk carries any info.
    if self.generation_info is None and other.generation_info is None:
        merged_info = None
    else:
        merged_info = {**(self.generation_info or {}), **(other.
            generation_info or {})}
    return GenerationChunk(text=self.text + other.text, generation_info=
        merged_info)
| null |
_load_retrieval_qa
|
if 'retriever' in kwargs:
retriever = kwargs.pop('retriever')
else:
raise ValueError('`retriever` must be present.')
if 'combine_documents_chain' in config:
combine_documents_chain_config = config.pop('combine_documents_chain')
combine_documents_chain = load_chain_from_config(
combine_documents_chain_config)
elif 'combine_documents_chain_path' in config:
combine_documents_chain = load_chain(config.pop(
'combine_documents_chain_path'))
else:
raise ValueError(
'One of `combine_documents_chain` or `combine_documents_chain_path` must be present.'
)
return RetrievalQA(combine_documents_chain=combine_documents_chain,
retriever=retriever, **config)
|
def _load_retrieval_qa(config: dict, **kwargs: Any) ->RetrievalQA:
    """Build a RetrievalQA chain from a serialized config.

    Requires ``retriever`` in ``kwargs`` and one of
    ``combine_documents_chain`` / ``combine_documents_chain_path`` in
    ``config``.
    """
    if 'retriever' not in kwargs:
        raise ValueError('`retriever` must be present.')
    retriever = kwargs.pop('retriever')
    if 'combine_documents_chain' in config:
        combine_chain = load_chain_from_config(config.pop(
            'combine_documents_chain'))
    elif 'combine_documents_chain_path' in config:
        combine_chain = load_chain(config.pop('combine_documents_chain_path'))
    else:
        raise ValueError(
            'One of `combine_documents_chain` or `combine_documents_chain_path` must be present.'
            )
    # Remaining config entries pass straight through to the constructor.
    return RetrievalQA(combine_documents_chain=combine_chain, retriever=
        retriever, **config)
| null |
test_initialization
|
"""Test integration vectorstore initialization."""
__ModuleName__VectorStore()
|
def test_initialization() ->None:
    """Test integration vectorstore initialization."""
    # `__ModuleName__` is a template placeholder replaced by the integration
    # scaffolding generator; the test only checks the constructor runs.
    __ModuleName__VectorStore()
|
Test integration vectorstore initialization.
|
_create_api_controller_tool
|
"""Expose controller as a tool.
The tool is invoked with a plan from the planner, and dynamically
creates a controller agent with relevant documentation only to
constrain the context.
"""
base_url = api_spec.servers[0]['url']
def _create_and_run_api_controller_agent(plan_str: str) ->str:
pattern = '\\b(GET|POST|PATCH|DELETE)\\s+(/\\S+)*'
matches = re.findall(pattern, plan_str)
endpoint_names = ['{method} {route}'.format(method=method, route=route.
split('?')[0]) for method, route in matches]
docs_str = ''
for endpoint_name in endpoint_names:
found_match = False
for name, _, docs in api_spec.endpoints:
regex_name = re.compile(re.sub('\\{.*?\\}', '.*', name))
if regex_name.match(endpoint_name):
found_match = True
docs_str += (
f'== Docs for {endpoint_name} == \n{yaml.dump(docs)}\n')
if not found_match:
raise ValueError(f'{endpoint_name} endpoint does not exist.')
agent = _create_api_controller_agent(base_url, docs_str,
requests_wrapper, llm)
return agent.run(plan_str)
return Tool(name=API_CONTROLLER_TOOL_NAME, func=
_create_and_run_api_controller_agent, description=
API_CONTROLLER_TOOL_DESCRIPTION)
|
def _create_api_controller_tool(api_spec: ReducedOpenAPISpec,
    requests_wrapper: RequestsWrapper, llm: BaseLanguageModel) ->Tool:
    """Expose controller as a tool.

    The tool is invoked with a plan from the planner, and dynamically
    creates a controller agent with relevant documentation only to
    constrain the context.
    """
    # NOTE(review): assumes the spec declares at least one server -- verify.
    base_url = api_spec.servers[0]['url']

    def _create_and_run_api_controller_agent(plan_str: str) ->str:
        # Pull "<METHOD> <route>" pairs out of the planner's free-text plan.
        pattern = '\\b(GET|POST|PATCH|DELETE)\\s+(/\\S+)*'
        matches = re.findall(pattern, plan_str)
        # Strip any query string so names line up with spec route templates.
        endpoint_names = ['{method} {route}'.format(method=method, route=
            route.split('?')[0]) for method, route in matches]
        docs_str = ''
        for endpoint_name in endpoint_names:
            found_match = False
            for name, _, docs in api_spec.endpoints:
                # Treat "{param}" path segments as wildcards when matching.
                regex_name = re.compile(re.sub('\\{.*?\\}', '.*', name))
                if regex_name.match(endpoint_name):
                    found_match = True
                    docs_str += (
                        f'== Docs for {endpoint_name} == \n{yaml.dump(docs)}\n'
                        )
            if not found_match:
                raise ValueError(f'{endpoint_name} endpoint does not exist.')
        # Scope the controller agent to only the matched endpoints' docs.
        agent = _create_api_controller_agent(base_url, docs_str,
            requests_wrapper, llm)
        return agent.run(plan_str)
    return Tool(name=API_CONTROLLER_TOOL_NAME, func=
        _create_and_run_api_controller_agent, description=
        API_CONTROLLER_TOOL_DESCRIPTION)
|
Expose controller as a tool.
The tool is invoked with a plan from the planner, and dynamically
creates a controller agent with relevant documentation only to
constrain the context.
|
parse_issue
|
"""Create Document objects from a list of GitHub issues."""
metadata = {'url': issue['html_url'], 'title': issue['title'], 'creator':
issue['user']['login'], 'created_at': issue['created_at'], 'comments':
issue['comments'], 'state': issue['state'], 'labels': [label['name'] for
label in issue['labels']], 'assignee': issue['assignee']['login'] if
issue['assignee'] else None, 'milestone': issue['milestone']['title'] if
issue['milestone'] else None, 'locked': issue['locked'], 'number':
issue['number'], 'is_pull_request': 'pull_request' in issue}
content = issue['body'] if issue['body'] is not None else ''
return Document(page_content=content, metadata=metadata)
|
def parse_issue(self, issue: dict) ->Document:
    """Create Document objects from a list of GitHub issues."""
    # Nested fields may be absent (None), so guard before indexing into them.
    assignee = issue['assignee']['login'] if issue['assignee'] else None
    milestone = issue['milestone']['title'] if issue['milestone'] else None
    metadata = {
        'url': issue['html_url'],
        'title': issue['title'],
        'creator': issue['user']['login'],
        'created_at': issue['created_at'],
        'comments': issue['comments'],
        'state': issue['state'],
        'labels': [label['name'] for label in issue['labels']],
        'assignee': assignee,
        'milestone': milestone,
        'locked': issue['locked'],
        'number': issue['number'],
        'is_pull_request': 'pull_request' in issue,
    }
    body = issue['body'] if issue['body'] is not None else ''
    return Document(page_content=body, metadata=metadata)
|
Create Document objects from a list of GitHub issues.
|
_chain_type
|
return 'llm_math_chain'
|
@property
def _chain_type(self) ->str:
    """Return the string identifier for this chain type."""
    return 'llm_math_chain'
| null |
calculator
|
"""Do math."""
return 'bar'
|
@tool
def calculator(expression: str) ->str:
    """Do math."""
    # Stub implementation: returns a canned value regardless of input.
    return 'bar'
|
Do math.
|
on_chain_error
|
if self.__has_valid_config is False:
return
try:
self.__track_event('chain', 'error', run_id=str(run_id), parent_run_id=
str(parent_run_id) if parent_run_id else None, error={'message':
str(error), 'stack': traceback.format_exc()}, app_id=self.__app_id)
except Exception as e:
logger.error(f'[LLMonitor] An error occurred in on_chain_error: {e}')
|
def on_chain_error(self, error: BaseException, *, run_id: UUID,
    parent_run_id: Union[UUID, None]=None, **kwargs: Any) ->Any:
    """Report a chain error to LLMonitor; failures are logged, never raised."""
    # Skip tracking entirely when the handler's configuration is invalid.
    if self.__has_valid_config is False:
        return
    try:
        self.__track_event('chain', 'error', run_id=str(run_id),
            parent_run_id=str(parent_run_id) if parent_run_id else None,
            error={'message': str(error), 'stack': traceback.format_exc()},
            app_id=self.__app_id)
    except Exception as e:
        # A telemetry failure must not break the chain being observed.
        logger.error(f'[LLMonitor] An error occurred in on_chain_error: {e}')
| null |
_run
|
try:
source_path_ = self.get_relative_path(source_path)
except FileValidationError:
return INVALID_PATH_TEMPLATE.format(arg_name='source_path', value=
source_path)
try:
destination_path_ = self.get_relative_path(destination_path)
except FileValidationError:
return INVALID_PATH_TEMPLATE.format(arg_name='destination_path_', value
=destination_path_)
if not source_path_.exists():
return f'Error: no such file or directory {source_path}'
try:
shutil.move(str(source_path_), destination_path_)
return f'File moved successfully from {source_path} to {destination_path}.'
except Exception as e:
return 'Error: ' + str(e)
|
def _run(self, source_path: str, destination_path: str, run_manager:
    Optional[CallbackManagerForToolRun]=None) ->str:
    """Move ``source_path`` to ``destination_path`` inside the sandbox root.

    Returns a human-readable status string; errors are reported as strings
    rather than raised, matching the file-tool convention used here.
    """
    try:
        source_path_ = self.get_relative_path(source_path)
    except FileValidationError:
        return INVALID_PATH_TEMPLATE.format(arg_name='source_path', value=
            source_path)
    try:
        destination_path_ = self.get_relative_path(destination_path)
    except FileValidationError:
        # BUG FIX: this branch previously referenced the unbound local
        # `destination_path_` (raising NameError instead of returning the
        # message) and used the wrong arg_name 'destination_path_'. Report
        # the raw user-supplied value under the correct argument name.
        return INVALID_PATH_TEMPLATE.format(arg_name='destination_path',
            value=destination_path)
    if not source_path_.exists():
        return f'Error: no such file or directory {source_path}'
    try:
        shutil.move(str(source_path_), destination_path_)
        return (
            f'File moved successfully from {source_path} to {destination_path}.'
            )
    except Exception as e:
        return 'Error: ' + str(e)
| null |
similarity_search_with_score
|
"""Run similarity search with **vector distance**.
The "scores" returned from this function are the raw vector
distances from the query vector. For similarity scores, use
``similarity_search_with_relevance_scores``.
Args:
query (str): The query text for which to find similar documents.
k (int): The number of documents to return. Default is 4.
filter (RedisFilterExpression, optional): Optional metadata filter.
Defaults to None.
return_metadata (bool, optional): Whether to return metadata.
Defaults to True.
Returns:
List[Tuple[Document, float]]: A list of documents that are
most similar to the query with the distance for each document.
"""
try:
import redis
except ImportError as e:
raise ImportError(
'Could not import redis python package. Please install it with `pip install redis`.'
) from e
if 'score_threshold' in kwargs:
logger.warning(
'score_threshold is deprecated. Use distance_threshold instead.' +
'score_threshold should only be used in ' +
'similarity_search_with_relevance_scores.' +
'score_threshold will be removed in a future release.')
query_embedding = self._embeddings.embed_query(query)
redis_query, params_dict = self._prepare_query(query_embedding, k=k, filter
=filter, with_metadata=return_metadata, with_distance=True, **kwargs)
try:
results = self.client.ft(self.index_name).search(redis_query, params_dict)
except redis.exceptions.ResponseError as e:
if str(e).split(' ')[0] == 'Syntax':
raise ValueError('Query failed with syntax error. ' +
'This is likely due to malformation of ' +
'filter, vector, or query argument') from e
raise e
docs_with_scores: List[Tuple[Document, float]] = []
for result in results.docs:
metadata = {}
if return_metadata:
metadata = {'id': result.id}
metadata.update(self._collect_metadata(result))
doc = Document(page_content=result.content, metadata=metadata)
distance = self._calculate_fp_distance(result.distance)
docs_with_scores.append((doc, distance))
return docs_with_scores
|
def similarity_search_with_score(self, query: str, k: int=4, filter:
    Optional[RedisFilterExpression]=None, return_metadata: bool=True,
    **kwargs: Any) ->List[Tuple[Document, float]]:
    """Run similarity search with **vector distance**.

    The "scores" returned from this function are the raw vector
    distances from the query vector. For similarity scores, use
    ``similarity_search_with_relevance_scores``.

    Args:
        query (str): The query text for which to find similar documents.
        k (int): The number of documents to return. Default is 4.
        filter (RedisFilterExpression, optional): Optional metadata filter.
            Defaults to None.
        return_metadata (bool, optional): Whether to return metadata.
            Defaults to True.

    Returns:
        List[Tuple[Document, float]]: A list of documents that are
        most similar to the query with the distance for each document.
    """
    # Imported lazily: redis is an optional dependency of this vectorstore.
    try:
        import redis
    except ImportError as e:
        raise ImportError(
            'Could not import redis python package. Please install it with `pip install redis`.'
            ) from e
    if 'score_threshold' in kwargs:
        logger.warning(
            'score_threshold is deprecated. Use distance_threshold instead.' +
            'score_threshold should only be used in ' +
            'similarity_search_with_relevance_scores.' +
            'score_threshold will be removed in a future release.')
    # Embed the query text, then build the Redis vector query + parameters.
    query_embedding = self._embeddings.embed_query(query)
    redis_query, params_dict = self._prepare_query(query_embedding, k=k,
        filter=filter, with_metadata=return_metadata, with_distance=True,
        **kwargs)
    try:
        results = self.client.ft(self.index_name).search(redis_query,
            params_dict)
    except redis.exceptions.ResponseError as e:
        # Surface query-syntax problems with a more actionable error.
        if str(e).split(' ')[0] == 'Syntax':
            raise ValueError('Query failed with syntax error. ' +
                'This is likely due to malformation of ' +
                'filter, vector, or query argument') from e
        raise e
    docs_with_scores: List[Tuple[Document, float]] = []
    for result in results.docs:
        metadata = {}
        if return_metadata:
            # The document id is always included alongside stored metadata.
            metadata = {'id': result.id}
            metadata.update(self._collect_metadata(result))
        doc = Document(page_content=result.content, metadata=metadata)
        # presumably converts/normalizes the raw distance field to a float --
        # see `_calculate_fp_distance` for the exact semantics.
        distance = self._calculate_fp_distance(result.distance)
        docs_with_scores.append((doc, distance))
    return docs_with_scores
|
Run similarity search with **vector distance**.
The "scores" returned from this function are the raw vector
distances from the query vector. For similarity scores, use
``similarity_search_with_relevance_scores``.
Args:
query (str): The query text for which to find similar documents.
k (int): The number of documents to return. Default is 4.
filter (RedisFilterExpression, optional): Optional metadata filter.
Defaults to None.
return_metadata (bool, optional): Whether to return metadata.
Defaults to True.
Returns:
List[Tuple[Document, float]]: A list of documents that are
most similar to the query with the distance for each document.
|
create_prompt
|
"""Return default prompt."""
return WIKI_PROMPT
|
@classmethod
def create_prompt(cls, tools: Sequence[BaseTool]) ->BasePromptTemplate:
    """Return default prompt."""
    # `tools` is accepted for interface compatibility but is not used here.
    return WIKI_PROMPT
|
Return default prompt.
|
from_texts
|
index = create_index(texts, embeddings)
return cls(embeddings=embeddings, index=index, texts=texts, **kwargs)
|
@classmethod
def from_texts(cls, texts: List[str], embeddings: Embeddings, **kwargs: Any
    ) ->KNNRetriever:
    """Construct a KNNRetriever by building an index over ``texts``."""
    index = create_index(texts, embeddings)
    return cls(embeddings=embeddings, index=index, texts=texts, **kwargs)
| null |
__init__
|
"""Initializes the `deepevalCallbackHandler`.
Args:
implementation_name: Name of the implementation you want.
metrics: What metrics do you want to track?
Raises:
ImportError: if the `deepeval` package is not installed.
ConnectionError: if the connection to deepeval fails.
"""
super().__init__()
try:
import deepeval
except ImportError:
raise ImportError(
"""To use the deepeval callback manager you need to have the
`deepeval` Python package installed. Please install it with
`pip install deepeval`"""
)
if os.path.exists('.deepeval'):
warnings.warn(
"""You are currently not logging anything to the dashboard, we
recommend using `deepeval login`."""
)
self.implementation_name = implementation_name
self.metrics = metrics
warnings.warn(
f'The `DeepEvalCallbackHandler` is currently in beta and is subject to change based on updates to `langchain`. Please report any issues to {self.ISSUES_URL} as an `integration` issue.'
)
|
def __init__(self, metrics: List[Any], implementation_name: Optional[str]=None
    ) ->None:
    """Initializes the `deepevalCallbackHandler`.

    Args:
        implementation_name: Name of the implementation you want.
        metrics: What metrics do you want to track?

    Raises:
        ImportError: if the `deepeval` package is not installed.
        ConnectionError: if the connection to deepeval fails.
            NOTE(review): no connection is made in this constructor, so
            ConnectionError does not appear to be raised here -- verify.
    """
    super().__init__()
    # The import is purely an availability check; the module is unused here.
    try:
        import deepeval
    except ImportError:
        raise ImportError(
            """To use the deepeval callback manager you need to have the
            `deepeval` Python package installed. Please install it with
            `pip install deepeval`"""
            )
    # NOTE(review): warning when the `.deepeval` file EXISTS that nothing is
    # being logged looks inverted (one would expect `not os.path.exists`) --
    # confirm against the deepeval login flow before changing.
    if os.path.exists('.deepeval'):
        warnings.warn(
            """You are currently not logging anything to the dashboard, we
            recommend using `deepeval login`."""
            )
    self.implementation_name = implementation_name
    self.metrics = metrics
    warnings.warn(
        f'The `DeepEvalCallbackHandler` is currently in beta and is subject to change based on updates to `langchain`. Please report any issues to {self.ISSUES_URL} as an `integration` issue.'
        )
|
Initializes the `deepevalCallbackHandler`.
Args:
implementation_name: Name of the implementation you want.
metrics: What metrics do you want to track?
Raises:
ImportError: if the `deepeval` package is not installed.
ConnectionError: if the connection to deepeval fails.
|
_llm_type
|
"""Return the type of llm."""
return 'rwkv'
|
@property
def _llm_type(self) ->str:
    """Return the type of llm."""
    # Static identifier string for this LLM implementation.
    return 'rwkv'
|
Return the type of llm.
|
_identifying_params
|
return {'responses': self.responses}
|
@property
def _identifying_params(self) ->Dict[str, Any]:
    """Return the identifying parameters (the configured responses)."""
    return {'responses': self.responses}
| null |
__init__
|
self.datastore_url = datastore_url
self.api_key = api_key
self.top_k = top_k
|
def __init__(self, datastore_url: str, top_k: Optional[int]=None, api_key:
    Optional[str]=None):
    """Store the datastore connection settings.

    Args:
        datastore_url: URL of the datastore endpoint.
        top_k: Optional result-count setting (semantics defined by callers).
        api_key: Optional API key.
    """
    self.datastore_url = datastore_url
    self.api_key = api_key
    self.top_k = top_k
| null |
test_batch
|
"""Test batch tokens from ChatAnthropicMessages."""
llm = ChatAnthropicMessages(model_name='claude-instant-1.2')
result = llm.batch(["I'm Pickle Rick", "I'm not Pickle Rick"])
for token in result:
assert isinstance(token.content, str)
|
def test_batch() ->None:
    """Test batch tokens from ChatAnthropicMessages."""
    llm = ChatAnthropicMessages(model_name='claude-instant-1.2')
    responses = llm.batch(["I'm Pickle Rick", "I'm not Pickle Rick"])
    # Each batched result must carry string content.
    for response in responses:
        assert isinstance(response.content, str)
|
Test batch tokens from ChatAnthropicMessages.
|
similarity_search_with_score_by_vector
|
"""Perform a similarity search with Yellowbrick with vector
Args:
embedding (List[float]): query embedding
k (int, optional): Top K neighbors to retrieve. Defaults to 4.
NOTE: Please do not let end-user fill this and always be aware
of SQL injection.
Returns:
List[Document, float]: List of Documents and scores
"""
from psycopg2 import sql
cursor = self._connection.cursor()
tmp_table = 'tmp_' + self._table
cursor.execute(sql.SQL(
'CREATE TEMPORARY TABLE {} ( embedding_id INTEGER, embedding FLOAT)'
).format(sql.Identifier(tmp_table)))
self._connection.commit()
data_input = [(embedding_id, embedding) for embedding_id, embedding in zip(
range(len(embedding)), embedding)]
flattened_input = [val for sublist in data_input for val in sublist]
insert_query = sql.SQL(
'INSERT INTO {t} (embedding_id, embedding) VALUES {v}').format(
t=sql.Identifier(tmp_table), v=sql.SQL(',').join([sql.SQL('(%s,%s)') for
_ in range(len(embedding))]))
cursor.execute(insert_query, flattened_input)
self._connection.commit()
sql_query = sql.SQL(
'SELECT text, metadata, sum(v1.embedding * v2.embedding) / ( sqrt(sum(v1.embedding * v1.embedding)) * sqrt(sum(v2.embedding * v2.embedding))) AS score FROM {v1} v1 INNER JOIN {v2} v2 ON v1.embedding_id = v2.embedding_id GROUP BY v2.id, v2.text, v2.metadata ORDER BY score DESC LIMIT %s'
).format(v1=sql.Identifier(tmp_table), v2=sql.Identifier(self._table))
cursor.execute(sql_query, (k,))
results = cursor.fetchall()
self.drop(tmp_table)
documents: List[Tuple[Document, float]] = []
for result in results:
metadata = json.loads(result[1]) or {}
doc = Document(page_content=result[0], metadata=metadata)
documents.append((doc, result[2]))
cursor.close()
return documents
|
def similarity_search_with_score_by_vector(self, embedding: List[float], k:
    int=4, **kwargs: Any) ->List[Tuple[Document, float]]:
    """Perform a similarity search with Yellowbrick with vector

    Args:
        embedding (List[float]): query embedding
        k (int, optional): Top K neighbors to retrieve. Defaults to 4.
        NOTE: Please do not let end-user fill this and always be aware
        of SQL injection.

    Returns:
        List[Document, float]: List of Documents and scores
    """
    from psycopg2 import sql
    cursor = self._connection.cursor()
    # NOTE(review): the temp-table name is fixed per base table; concurrent
    # searches in the same session could collide -- verify.
    tmp_table = 'tmp_' + self._table
    cursor.execute(sql.SQL(
        'CREATE TEMPORARY TABLE {} ( embedding_id INTEGER, embedding FLOAT)'
        ).format(sql.Identifier(tmp_table)))
    self._connection.commit()
    # Pair each embedding component with its index. The comprehension's loop
    # variable deliberately shadows the `embedding` parameter (comprehension
    # scope keeps the parameter itself intact).
    data_input = [(embedding_id, embedding) for embedding_id, embedding in
        zip(range(len(embedding)), embedding)]
    # Flatten to match the repeated (%s,%s) placeholders below.
    flattened_input = [val for sublist in data_input for val in sublist]
    insert_query = sql.SQL(
        'INSERT INTO {t} (embedding_id, embedding) VALUES {v}'
        ).format(t=sql.Identifier(tmp_table), v=sql.SQL(',').join([sql.SQL(
        '(%s,%s)') for _ in range(len(embedding))]))
    cursor.execute(insert_query, flattened_input)
    self._connection.commit()
    # Cosine similarity computed in SQL between the query vector (v1, the
    # temp table) and stored vectors (v2), highest score first.
    sql_query = sql.SQL(
        'SELECT text, metadata, sum(v1.embedding * v2.embedding) / ( sqrt(sum(v1.embedding * v1.embedding)) * sqrt(sum(v2.embedding * v2.embedding))) AS score FROM {v1} v1 INNER JOIN {v2} v2 ON v1.embedding_id = v2.embedding_id GROUP BY v2.id, v2.text, v2.metadata ORDER BY score DESC LIMIT %s'
        ).format(v1=sql.Identifier(tmp_table), v2=sql.Identifier(self._table))
    cursor.execute(sql_query, (k,))
    results = cursor.fetchall()
    # Clean up the temp table before building the return value.
    self.drop(tmp_table)
    documents: List[Tuple[Document, float]] = []
    for result in results:
        metadata = json.loads(result[1]) or {}
        doc = Document(page_content=result[0], metadata=metadata)
        documents.append((doc, result[2]))
    cursor.close()
    return documents
|
Perform a similarity search with Yellowbrick with vector
Args:
embedding (List[float]): query embedding
k (int, optional): Top K neighbors to retrieve. Defaults to 4.
NOTE: Please do not let end-user fill this and always be aware
of SQL injection.
Returns:
List[Document, float]: List of Documents and scores
|
drop
|
"""
Helper function: Drop data
"""
from psycopg2 import sql
cursor = self._connection.cursor()
cursor.execute(sql.SQL('DROP TABLE IF EXISTS {}').format(sql.Identifier(table))
)
self._connection.commit()
cursor.close()
|
def drop(self, table: str) ->None:
    """
    Helper function: Drop data
    """
    from psycopg2 import sql
    cursor = self._connection.cursor()
    # sql.Identifier quotes the table name, preventing SQL injection here.
    cursor.execute(sql.SQL('DROP TABLE IF EXISTS {}').format(sql.Identifier
        (table)))
    self._connection.commit()
    cursor.close()
|
Helper function: Drop data
|
test_tool_lambda_args_schema
|
"""Test args schema inference when the tool argument is a lambda function."""
tool = Tool(name='tool', description='A tool', func=lambda tool_input:
tool_input)
assert tool.args_schema is None
expected_args = {'tool_input': {'type': 'string'}}
assert tool.args == expected_args
|
def test_tool_lambda_args_schema() ->None:
    """Test args schema inference when the tool argument is a lambda function."""
    tool = Tool(name='tool', description='A tool', func=lambda tool_input:
        tool_input)
    # No explicit schema is attached...
    assert tool.args_schema is None
    # ...yet `args` is still inferred from the lambda's single parameter.
    expected_args = {'tool_input': {'type': 'string'}}
    assert tool.args == expected_args
|
Test args schema inference when the tool argument is a lambda function.
|
test_from_documents
|
"""Test end to end construction and search."""
documents = [Document(page_content='Dogs are tough.', metadata={'a': 1}),
Document(page_content='Cats have fluff.', metadata={'b': 1}), Document(
page_content='What is a sandwich?', metadata={'c': 1}), Document(
page_content='That fence is purple.', metadata={'d': 1, 'e': 2})]
vectorstore = MongoDBAtlasVectorSearch.from_documents(documents,
embedding_openai, collection=collection, index_name=INDEX_NAME)
sleep(1)
output = vectorstore.similarity_search('Sandwich', k=1)
assert output[0].page_content == 'What is a sandwich?'
assert output[0].metadata['c'] == 1
|
def test_from_documents(self, embedding_openai: Embeddings, collection: Any
    ) ->None:
    """End-to-end: build a store from Documents, then search it."""
    source_docs = [
        Document(page_content='Dogs are tough.', metadata={'a': 1}),
        Document(page_content='Cats have fluff.', metadata={'b': 1}),
        Document(page_content='What is a sandwich?', metadata={'c': 1}),
        Document(page_content='That fence is purple.', metadata={'d': 1,
            'e': 2}),
    ]
    store = MongoDBAtlasVectorSearch.from_documents(source_docs,
        embedding_openai, collection=collection, index_name=INDEX_NAME)
    sleep(1)
    hits = store.similarity_search('Sandwich', k=1)
    assert hits[0].page_content == 'What is a sandwich?'
    assert hits[0].metadata['c'] == 1
|
Test end to end construction and search.
|
_llm_type
|
"""Return type of llm."""
return 'octoai_endpoint'
|
@property
def _llm_type(self) ->str:
    """Return the type identifier of this LLM ("octoai_endpoint")."""
    return 'octoai_endpoint'
|
Return type of llm.
|
test_visit_structured_query
|
query = 'What is the capital of France?'
structured_query = StructuredQuery(query=query, filter=None)
expected: Tuple[str, Dict] = (query, {})
actual = DEFAULT_TRANSLATOR.visit_structured_query(structured_query)
assert expected == actual
comp = Comparison(comparator=Comparator.LT, attribute='foo', value=['1', '2'])
structured_query = StructuredQuery(query=query, filter=comp)
expected = query, {'where_str': "metadata.foo < ['1', '2']"}
actual = DEFAULT_TRANSLATOR.visit_structured_query(structured_query)
assert expected == actual
op = Operation(operator=Operator.AND, arguments=[Comparison(comparator=
Comparator.LT, attribute='foo', value=2), Comparison(comparator=
Comparator.EQ, attribute='bar', value='baz')])
structured_query = StructuredQuery(query=query, filter=op)
expected = query, {'where_str': "metadata.foo < 2 AND metadata.bar = 'baz'"}
actual = DEFAULT_TRANSLATOR.visit_structured_query(structured_query)
assert expected == actual
|
def test_visit_structured_query() ->None:
    """A StructuredQuery translates to a ``(query, search_kwargs)`` pair."""
    query = 'What is the capital of France?'
    no_filter = StructuredQuery(query=query, filter=None)
    assert DEFAULT_TRANSLATOR.visit_structured_query(no_filter) == (query, {})
    comp = Comparison(comparator=Comparator.LT, attribute='foo', value=[
        '1', '2'])
    with_comparison = StructuredQuery(query=query, filter=comp)
    assert DEFAULT_TRANSLATOR.visit_structured_query(with_comparison) == (
        query, {'where_str': "metadata.foo < ['1', '2']"})
    op = Operation(operator=Operator.AND, arguments=[Comparison(comparator
        =Comparator.LT, attribute='foo', value=2), Comparison(comparator=
        Comparator.EQ, attribute='bar', value='baz')])
    with_operation = StructuredQuery(query=query, filter=op)
    assert DEFAULT_TRANSLATOR.visit_structured_query(with_operation) == (
        query, {'where_str': "metadata.foo < 2 AND metadata.bar = 'baz'"})
| null |
test_final_answer_after_parsable_action
|
llm_output = """
Observation: I can use the `foo` tool to achieve the goal.
Action: foo
Action Input: bar
Final Answer: The best pizza to eat is margaritta
"""
with pytest.raises(OutputParserException) as exception_info:
mrkl_output_parser.parse_folder(llm_output)
assert 'Parsing LLM output produced both a final answer and a parse-able action' in exception_info.value.args[
0]
|
def test_final_answer_after_parsable_action() ->None:
    """Output containing both an action and a final answer must raise."""
    llm_output = """
    Observation: I can use the `foo` tool to achieve the goal.
    Action: foo
    Action Input: bar
    Final Answer: The best pizza to eat is margaritta
    """
    with pytest.raises(OutputParserException) as exception_info:
        # BUG FIX: the MRKL output parser exposes `parse`, not `parse_folder`.
        mrkl_output_parser.parse(llm_output)
    assert 'Parsing LLM output produced both a final answer and a parse-able action' in exception_info.value.args[
        0]
| null |
_create_query
|
hits = k
doc_embedding_field = self._embedding_field
input_embedding_field = self._input_field
ranking_function = kwargs['ranking'] if 'ranking' in kwargs else 'default'
filter = kwargs['filter'] if 'filter' in kwargs else None
approximate = kwargs['approximate'] if 'approximate' in kwargs else False
approximate = 'true' if approximate else 'false'
yql = 'select * from sources * where '
yql += f'{{targetHits: {hits}, approximate: {approximate}}}'
yql += f'nearestNeighbor({doc_embedding_field}, {input_embedding_field})'
if filter is not None:
yql += f' and {filter}'
query = {'yql': yql, f'input.query({input_embedding_field})':
query_embedding, 'ranking': ranking_function, 'hits': hits}
return query
|
def _create_query(self, query_embedding: List[float], k: int=4, **kwargs: Any
    ) ->Dict:
    """Build a Vespa nearest-neighbor query payload.

    Args:
        query_embedding: Embedding vector to search with.
        k: Number of hits to request.
        **kwargs: Optional overrides: ``ranking`` (rank profile name,
            default 'default'), ``filter`` (extra YQL condition appended
            with AND), and ``approximate`` (use ANN search, default False).

    Returns:
        The query dict to send to the Vespa application.
    """
    hits = k
    doc_embedding_field = self._embedding_field
    input_embedding_field = self._input_field
    # Idiom: kwargs.get() replaces the repetitive "'x' in kwargs" ternaries.
    ranking_function = kwargs.get('ranking', 'default')
    filter = kwargs.get('filter')
    approximate = 'true' if kwargs.get('approximate', False) else 'false'
    yql = 'select * from sources * where '
    yql += f'{{targetHits: {hits}, approximate: {approximate}}}'
    yql += f'nearestNeighbor({doc_embedding_field}, {input_embedding_field})'
    if filter is not None:
        yql += f' and {filter}'
    query = {'yql': yql, f'input.query({input_embedding_field})':
        query_embedding, 'ranking': ranking_function, 'hits': hits}
    return query
| null |
test_openai_invalid_model_kwargs
|
with pytest.raises(ValueError):
OpenAI(model_kwargs={'model_name': 'foo'})
|
@pytest.mark.requires('openai')
def test_openai_invalid_model_kwargs() ->None:
    """Reserved keys (e.g. ``model_name``) in ``model_kwargs`` must be rejected."""
    with pytest.raises(ValueError):
        OpenAI(model_kwargs={'model_name': 'foo'})
| null |
_agent_type
|
"""Return Identifier of agent type."""
return AgentType.ZERO_SHOT_REACT_DESCRIPTION
|
@property
def _agent_type(self) ->str:
    """Return Identifier of agent type.

    NOTE(review): returns an ``AgentType`` member under a ``str`` annotation
    -- presumably AgentType is a str-valued enum; confirm.
    """
    return AgentType.ZERO_SHOT_REACT_DESCRIPTION
|
Return Identifier of agent type.
|
test_hallucinating
|
"""
Test CPAL approach does not hallucinate when given
an invalid entity in the question.
    The PAL chain would hallucinate here!
"""
narrative_input = (
'Jan has three times the number of pets as Marcia.Marcia has two more pets than Cindy.If Cindy has ten pets, how many pets does Barak have?'
)
llm = OpenAI(temperature=0, max_tokens=512)
cpal_chain = CPALChain.from_univariate_prompt(llm=llm, verbose=True)
with pytest.raises(Exception) as e_info:
print(e_info)
cpal_chain.run(narrative_input)
|
def test_hallucinating(self) ->None:
    """
    Test CPAL approach does not hallucinate when given
    an invalid entity in the question.
    The PAL chain would hallucinate here!
    """
    narrative_input = (
        'Jan has three times the number of pets as Marcia.Marcia has two more pets than Cindy.If Cindy has ten pets, how many pets does Barak have?'
        )
    llm = OpenAI(temperature=0, max_tokens=512)
    cpal_chain = CPALChain.from_univariate_prompt(llm=llm, verbose=True)
    with pytest.raises(Exception) as e_info:
        # NOTE(review): this print runs BEFORE the chain call, while e_info
        # holds no exception yet -- likely intended after the block; confirm.
        print(e_info)
        cpal_chain.run(narrative_input)
|
Test CPAL approach does not hallucinate when given
an invalid entity in the question.
    The PAL chain would hallucinate here!
|
parse_result
|
text = result[0].text
text = text.strip()
if partial:
try:
return parse_json_markdown(text)
except JSONDecodeError:
return None
else:
try:
return parse_json_markdown(text)
except JSONDecodeError as e:
raise OutputParserException(f'Invalid json output: {text}') from e
|
def parse_result(self, result: List[Generation], *, partial: bool=False) ->Any:
    """Parse the first generation's text as (markdown-fenced) JSON.

    When ``partial`` is true, malformed JSON yields ``None``; otherwise an
    ``OutputParserException`` is raised with the offending text.
    """
    text = result[0].text.strip()
    try:
        return parse_json_markdown(text)
    except JSONDecodeError as e:
        if partial:
            return None
        raise OutputParserException(f'Invalid json output: {text}') from e
| null |
input_keys
|
"""Will be whatever keys the prompt expects.
:meta private:
"""
return [self.input_key]
|
@property
def input_keys(self) ->List[str]:
    """Keys the prompt expects as input.

    :meta private:
    """
    expected = [self.input_key]
    return expected
|
Will be whatever keys the prompt expects.
:meta private:
|
test_create_directory_and_files
|
"""Test creation of a directory and files in a temporary directory."""
session = BashProcess(strip_newlines=True)
temp_dir = tmp_path / 'test_dir'
temp_dir.mkdir()
commands = [f'touch {temp_dir}/file1.txt', f'touch {temp_dir}/file2.txt',
f"echo 'hello world' > {temp_dir}/file2.txt", f'cat {temp_dir}/file2.txt']
output = session.run(commands)
assert output == 'hello world'
output = session.run([f'ls {temp_dir}'])
assert output == """file1.txt
file2.txt"""
|
@pytest.mark.skipif(sys.platform.startswith('win'), reason=
    'Test not supported on Windows')
def test_create_directory_and_files(tmp_path: Path) ->None:
    """Test creation of a directory and files in a temporary directory."""
    bash = BashProcess(strip_newlines=True)
    work_dir = tmp_path / 'test_dir'
    work_dir.mkdir()
    setup_commands = [
        f'touch {work_dir}/file1.txt',
        f'touch {work_dir}/file2.txt',
        f"echo 'hello world' > {work_dir}/file2.txt",
        f'cat {work_dir}/file2.txt',
    ]
    assert bash.run(setup_commands) == 'hello world'
    assert bash.run([f'ls {work_dir}']) == 'file1.txt\nfile2.txt'
|
Test creation of a directory and files in a temporary directory.
|
add_message
|
"""Add a Message object to the history.
Args:
message: A BaseMessage object to store.
"""
if self.sync and 'id' not in message.additional_kwargs:
message.additional_kwargs['id'] = self.message_uuid_method()
self.client.Documents.patch_documents(collection=self.collection, workspace
=self.workspace, data=[self.rockset.model.patch_document.PatchDocument(
id=self.session_id, patch=[self.rockset.model.patch_operation.
PatchOperation(op='ADD', path=f'/{self.messages_key}/-', value=
message_to_dict(message))])])
if self.sync:
self._wait_until_message_added(message.additional_kwargs['id'])
|
def add_message(self, message: BaseMessage) ->None:
    """Append ``message`` to the history document stored in Rockset.

    In sync mode an id is attached to the message and the call blocks until
    the write is visible.
    """
    if self.sync and 'id' not in message.additional_kwargs:
        message.additional_kwargs['id'] = self.message_uuid_method()
    patch_op = self.rockset.model.patch_operation.PatchOperation(op='ADD',
        path=f'/{self.messages_key}/-', value=message_to_dict(message))
    doc_patch = self.rockset.model.patch_document.PatchDocument(id=self.
        session_id, patch=[patch_op])
    self.client.Documents.patch_documents(collection=self.collection,
        workspace=self.workspace, data=[doc_patch])
    if self.sync:
        self._wait_until_message_added(message.additional_kwargs['id'])
|
Add a Message object to the history.
Args:
message: A BaseMessage object to store.
|
__init__
|
"""Building a myscale vector store without metadata column
embedding (Embeddings): embedding model
config (MyScaleSettings): Configuration to MyScale Client
must_have_cols (List[str]): column names to be included in query
Other keyword arguments will pass into
[clickhouse-connect](https://docs.myscale.com/)
"""
super().__init__(embedding, config, **kwargs)
self.must_have_cols: List[str] = must_have_cols
|
def __init__(self, embedding: Embeddings, config: Optional[MyScaleSettings]
    =None, must_have_cols: Optional[List[str]]=None, **kwargs: Any) ->None:
    """Building a myscale vector store without metadata column

    embedding (Embeddings): embedding model
    config (MyScaleSettings): Configuration to MyScale Client
    must_have_cols (List[str]): column names to be included in query
    Other keyword arguments will pass into
    [clickhouse-connect](https://docs.myscale.com/)
    """
    super().__init__(embedding, config, **kwargs)
    # BUG FIX: the default was a shared mutable list (`=[]`); use None so each
    # instance gets its own fresh list. Passing a list explicitly behaves as
    # before (the caller's list is stored as-is).
    self.must_have_cols: List[str] = (must_have_cols if must_have_cols is not
        None else [])
|
Building a myscale vector store without metadata column
embedding (Embeddings): embedding model
config (MyScaleSettings): Configuration to MyScale Client
must_have_cols (List[str]): column names to be included in query
Other keyword arguments will pass into
[clickhouse-connect](https://docs.myscale.com/)
|
is_lc_serializable
|
return True
|
@classmethod
def is_lc_serializable(cls) ->bool:
    """Return True: this class supports LangChain serialization.

    BUG FIX: the first parameter of a classmethod must be ``cls``, not
    ``self`` (it receives the class, not an instance).
    """
    return True
| null |
query
|
return list(self.lazy_query(query))
|
def query(self, query: str) ->List[dict]:
    """Run ``query`` and materialize the lazily-produced rows into a list."""
    rows = self.lazy_query(query)
    return list(rows)
| null |
load
|
"""Loads all cards from the specified Trello board.
You can filter the cards, metadata and text included by using the optional
parameters.
Returns:
A list of documents, one for each card in the board.
"""
try:
from bs4 import BeautifulSoup
except ImportError as ex:
raise ImportError(
'`beautifulsoup4` package not found, please run `pip install beautifulsoup4`'
) from ex
board = self._get_board()
list_dict = {list_item.id: list_item.name for list_item in board.list_lists()}
cards = board.get_cards(card_filter=self.card_filter)
return [self._card_to_doc(card, list_dict) for card in cards]
|
def load(self) ->List[Document]:
    """Loads all cards from the specified Trello board.

    You can filter the cards, metadata and text included by using the
    optional parameters.

    Returns:
        A list of documents, one for each card in the board.
    """
    try:
        from bs4 import BeautifulSoup  # noqa: F401 - availability check only
    except ImportError as ex:
        raise ImportError(
            '`beautifulsoup4` package not found, please run `pip install beautifulsoup4`'
            ) from ex
    board = self._get_board()
    list_names_by_id = {board_list.id: board_list.name for board_list in
        board.list_lists()}
    return [self._card_to_doc(card, list_names_by_id) for card in board.
        get_cards(card_filter=self.card_filter)]
|
Loads all cards from the specified Trello board.
You can filter the cards, metadata and text included by using the optional
parameters.
Returns:
A list of documents, one for each card in the board.
|
test_single_agent_action_observation
|
agent_action = AgentAction(tool='Tool1', tool_input='Input1', log='Log1')
observation = 'Observation1'
intermediate_steps = [(agent_action, observation)]
result = format_xml(intermediate_steps)
expected_result = (
'<tool>Tool1</tool><tool_input>Input1</tool_input><observation>Observation1</observation>'
)
assert result == expected_result
|
def test_single_agent_action_observation() ->None:
    """A single (action, observation) step renders as one XML triple."""
    step = (AgentAction(tool='Tool1', tool_input='Input1', log='Log1'),
        'Observation1')
    rendered = format_xml([step])
    assert rendered == (
        '<tool>Tool1</tool><tool_input>Input1</tool_input><observation>Observation1</observation>'
        )
| null |
buffer
|
"""String buffer of memory."""
return self.buffer_as_messages if self.return_messages else self.buffer_as_str
|
@property
def buffer(self) ->Union[str, List[BaseMessage]]:
    """The memory buffer, as messages or as a string per ``return_messages``."""
    if self.return_messages:
        return self.buffer_as_messages
    return self.buffer_as_str
|
String buffer of memory.
|
test_usearch_add_texts
|
"""Test adding a new document"""
texts = ['foo', 'bar', 'baz']
docsearch = USearch.from_texts(texts, FakeEmbeddings())
docsearch.add_texts(['foo'])
output = docsearch.similarity_search('foo', k=2)
assert output == [Document(page_content='foo'), Document(page_content='foo')]
|
def test_usearch_add_texts() ->None:
    """Test adding a new document"""
    corpus = ['foo', 'bar', 'baz']
    store = USearch.from_texts(corpus, FakeEmbeddings())
    store.add_texts(['foo'])
    matches = store.similarity_search('foo', k=2)
    assert matches == [Document(page_content='foo'), Document(page_content
        ='foo')]
|
Test adding a new document
|
validate_chains
|
"""Validate that chains are all single input/output."""
for chain in values['chains']:
if len(chain.input_keys) != 1:
raise ValueError(
f'Chains used in SimplePipeline should all have one input, got {chain} with {len(chain.input_keys)} inputs.'
)
if len(chain.output_keys) != 1:
raise ValueError(
f'Chains used in SimplePipeline should all have one output, got {chain} with {len(chain.output_keys)} outputs.'
)
return values
|
@root_validator()
def validate_chains(cls, values: Dict) ->Dict:
    """Validate that chains are all single input/output."""
    for chain in values['chains']:
        n_inputs = len(chain.input_keys)
        if n_inputs != 1:
            raise ValueError(
                f'Chains used in SimplePipeline should all have one input, got {chain} with {n_inputs} inputs.'
                )
        n_outputs = len(chain.output_keys)
        if n_outputs != 1:
            raise ValueError(
                f'Chains used in SimplePipeline should all have one output, got {chain} with {n_outputs} outputs.'
                )
    return values
|
Validate that chains are all single input/output.
|
test_prompt_from_jinja2_template
|
"""Test prompts can be constructed from a jinja2 template."""
template = """Hello there
There is no variable here {
Will it get confused{ }?
"""
prompt = PromptTemplate.from_template(template, template_format='jinja2')
expected_prompt = PromptTemplate(template=template, input_variables=[],
template_format='jinja2')
assert prompt == expected_prompt
|
@pytest.mark.requires('jinja2')
def test_prompt_from_jinja2_template() ->None:
    """Test prompts can be constructed from a jinja2 template."""
    source = (
        'Hello there\nThere is no variable here {\nWill it get confused{ }? \n '
        )
    built = PromptTemplate.from_template(source, template_format='jinja2')
    assert built == PromptTemplate(template=source, input_variables=[],
        template_format='jinja2')
|
Test prompts can be constructed from a jinja2 template.
|
visit_operation
|
try:
from qdrant_client.http import models as rest
except ImportError as e:
raise ImportError(
'Cannot import qdrant_client. Please install with `pip install qdrant-client`.'
) from e
args = [arg.accept(self) for arg in operation.arguments]
operator = {Operator.AND: 'must', Operator.OR: 'should', Operator.NOT:
'must_not'}[operation.operator]
return rest.Filter(**{operator: args})
|
def visit_operation(self, operation: Operation) ->rest.Filter:
    """Translate a logical Operation into a qdrant ``Filter``."""
    try:
        from qdrant_client.http import models as rest
    except ImportError as e:
        raise ImportError(
            'Cannot import qdrant_client. Please install with `pip install qdrant-client`.'
            ) from e
    conditions = [argument.accept(self) for argument in operation.arguments]
    filter_field = {Operator.AND: 'must', Operator.OR: 'should', Operator.
        NOT: 'must_not'}[operation.operator]
    return rest.Filter(**{filter_field: conditions})
| null |
validate_client
|
"""Validate that the client is of the correct type."""
from metal_sdk.metal import Metal
if 'client' in values:
client = values['client']
if not isinstance(client, Metal):
raise ValueError(
f'Got unexpected client, should be of type metal_sdk.metal.Metal. Instead, got {type(client)}'
)
values['params'] = values.get('params', {})
return values
|
@root_validator(pre=True)
def validate_client(cls, values: dict) ->dict:
    """Validate that the client is of the correct type."""
    from metal_sdk.metal import Metal
    try:
        client = values['client']
    except KeyError:
        pass
    else:
        if not isinstance(client, Metal):
            raise ValueError(
                f'Got unexpected client, should be of type metal_sdk.metal.Metal. Instead, got {type(client)}'
                )
    values.setdefault('params', {})
    return values
|
Validate that the client is of the correct type.
|
on_retry
|
llm_run = self._get_run(run_id)
retry_d: Dict[str, Any] = {'slept': retry_state.idle_for, 'attempt':
retry_state.attempt_number}
if retry_state.outcome is None:
retry_d['outcome'] = 'N/A'
elif retry_state.outcome.failed:
retry_d['outcome'] = 'failed'
exception = retry_state.outcome.exception()
retry_d['exception'] = str(exception)
retry_d['exception_type'] = exception.__class__.__name__
else:
retry_d['outcome'] = 'success'
retry_d['result'] = str(retry_state.outcome.result())
llm_run.events.append({'name': 'retry', 'time': datetime.now(timezone.utc),
'kwargs': retry_d})
return llm_run
|
def on_retry(self, retry_state: RetryCallState, *, run_id: UUID, **kwargs: Any
    ) ->Run:
    """Record a 'retry' event on the LLM run identified by ``run_id``."""
    llm_run = self._get_run(run_id)
    event_kwargs: Dict[str, Any] = {'slept': retry_state.idle_for,
        'attempt': retry_state.attempt_number}
    outcome = retry_state.outcome
    if outcome is None:
        event_kwargs['outcome'] = 'N/A'
    elif outcome.failed:
        exception = outcome.exception()
        event_kwargs.update(outcome='failed', exception=str(exception),
            exception_type=exception.__class__.__name__)
    else:
        event_kwargs.update(outcome='success', result=str(outcome.result()))
    llm_run.events.append({'name': 'retry', 'time': datetime.now(timezone.
        utc), 'kwargs': event_kwargs})
    return llm_run
| null |
test_api_key_masked_when_passed_from_env
|
mock_response = mock_get.return_value
mock_response.status_code = 200
mock_response.json.return_value = {'model_id': '', 'status':
'training_complete'}
monkeypatch.setenv('ARCEE_API_KEY', 'secret_api_key')
arcee_with_env_var = Arcee(model='DALM-PubMed', arcee_api_url=
'https://localhost', arcee_api_version='version')
print(arcee_with_env_var.arcee_api_key, end='')
captured = capsys.readouterr()
assert '**********' == captured.out
|
@patch('langchain_community.utilities.arcee.requests.get')
def test_api_key_masked_when_passed_from_env(mock_get: MagicMock, capsys:
    CaptureFixture, monkeypatch: MonkeyPatch) ->None:
    """A key read from ARCEE_API_KEY must print masked, never in clear text."""
    response = mock_get.return_value
    response.status_code = 200
    response.json.return_value = {'model_id': '', 'status':
        'training_complete'}
    monkeypatch.setenv('ARCEE_API_KEY', 'secret_api_key')
    arcee = Arcee(model='DALM-PubMed', arcee_api_url='https://localhost',
        arcee_api_version='version')
    print(arcee.arcee_api_key, end='')
    assert capsys.readouterr().out == '**********'
| null |
observation_prefix
|
"""Prefix to append the observation with."""
return 'Observation: '
|
@property
def observation_prefix(self) ->str:
    """Prefix prepended to each observation in the agent scratchpad."""
    return 'Observation: '
|
Prefix to append the observation with.
|
test_konko_model_test
|
"""Check how ChatKonko manages model_name."""
chat_instance = ChatKonko(model='alpha')
assert chat_instance.model == 'alpha'
chat_instance = ChatKonko(model='beta')
assert chat_instance.model == 'beta'
|
def test_konko_model_test() ->None:
    """Check how ChatKonko manages model_name."""
    for model_name in ('alpha', 'beta'):
        chat_instance = ChatKonko(model=model_name)
        assert chat_instance.model == model_name
|
Check how ChatKonko manages model_name.
|
use_simple_prompt
|
"""Decides whether to use the simple prompt"""
if llm._llm_type and 'anthropic' in llm._llm_type:
return True
if hasattr(llm, 'model_id') and 'anthropic' in llm.model_id:
return True
return False
|
def use_simple_prompt(llm: BaseLanguageModel) ->bool:
    """Decide whether the simple (Anthropic-style) prompt should be used."""
    llm_type = llm._llm_type
    if llm_type and 'anthropic' in llm_type:
        return True
    return hasattr(llm, 'model_id') and 'anthropic' in llm.model_id
|
Decides whether to use the simple prompt
|
lazy_parse
|
"""Lazily parse the blob."""
import io
try:
import openai
except ImportError:
raise ImportError(
'openai package not found, please install it with `pip install openai`'
)
try:
from pydub import AudioSegment
except ImportError:
raise ImportError(
'pydub package not found, please install it with `pip install pydub`')
if is_openai_v1():
client = openai.OpenAI(api_key=self.api_key)
elif self.api_key:
openai.api_key = self.api_key
audio = AudioSegment.from_file(blob.path)
chunk_duration = 20
chunk_duration_ms = chunk_duration * 60 * 1000
for split_number, i in enumerate(range(0, len(audio), chunk_duration_ms)):
chunk = audio[i:i + chunk_duration_ms]
file_obj = io.BytesIO(chunk.export(format='mp3').read())
if blob.source is not None:
file_obj.name = blob.source + f'_part_{split_number}.mp3'
else:
file_obj.name = f'part_{split_number}.mp3'
print(f'Transcribing part {split_number + 1}!')
attempts = 0
while attempts < 3:
try:
if is_openai_v1():
transcript = client.audio.transcriptions.create(model=
'whisper-1', file=file_obj)
else:
transcript = openai.Audio.transcribe('whisper-1', file_obj)
break
except Exception as e:
attempts += 1
print(f'Attempt {attempts} failed. Exception: {str(e)}')
time.sleep(5)
else:
print('Failed to transcribe after 3 attempts.')
continue
yield Document(page_content=transcript.text, metadata={'source': blob.
source, 'chunk': split_number})
|
def lazy_parse(self, blob: Blob) ->Iterator[Document]:
    """Lazily parse the blob.

    Splits the source audio into 20-minute chunks and transcribes each
    chunk with OpenAI Whisper, yielding one Document per chunk. Each
    chunk is retried up to 3 times; a chunk that still fails is skipped.
    """
    import io
    try:
        import openai
    except ImportError:
        raise ImportError(
            'openai package not found, please install it with `pip install openai`'
            )
    try:
        from pydub import AudioSegment
    except ImportError:
        raise ImportError(
            'pydub package not found, please install it with `pip install pydub`'
            )
    # openai>=1.0 uses a client object; older versions use module-level state.
    if is_openai_v1():
        client = openai.OpenAI(api_key=self.api_key)
    elif self.api_key:
        openai.api_key = self.api_key
    audio = AudioSegment.from_file(blob.path)
    # 20-minute chunks -- presumably sized to stay under the Whisper API
    # upload limit; confirm against current API constraints.
    chunk_duration = 20
    chunk_duration_ms = chunk_duration * 60 * 1000
    for split_number, i in enumerate(range(0, len(audio), chunk_duration_ms)):
        chunk = audio[i:i + chunk_duration_ms]
        file_obj = io.BytesIO(chunk.export(format='mp3').read())
        # A file name is set so the API can infer the audio format.
        if blob.source is not None:
            file_obj.name = blob.source + f'_part_{split_number}.mp3'
        else:
            file_obj.name = f'part_{split_number}.mp3'
        print(f'Transcribing part {split_number + 1}!')
        attempts = 0
        while attempts < 3:
            try:
                if is_openai_v1():
                    transcript = client.audio.transcriptions.create(model=
                        'whisper-1', file=file_obj)
                else:
                    transcript = openai.Audio.transcribe('whisper-1', file_obj)
                break
            except Exception as e:
                attempts += 1
                print(f'Attempt {attempts} failed. Exception: {str(e)}')
                time.sleep(5)
        else:
            # while/else: all 3 attempts failed -- skip this chunk entirely.
            print('Failed to transcribe after 3 attempts.')
            continue
        yield Document(page_content=transcript.text, metadata={'source':
            blob.source, 'chunk': split_number})
|
Lazily parse the blob.
|
test_similarity_search_with_metadata
|
"""Test end to end construction and search with metadata."""
texts = ['foo', 'bar', 'baz']
metadatas = [{'page': i} for i in range(len(texts))]
docsearch = ElasticVectorSearch.from_texts(texts, FakeEmbeddings(),
metadatas=metadatas, elasticsearch_url=elasticsearch_url)
output = docsearch.similarity_search('foo', k=1)
assert output == [Document(page_content='foo', metadata={'page': 0})]
|
def test_similarity_search_with_metadata(self, elasticsearch_url: str) ->None:
    """Test end to end construction and search with metadata."""
    corpus = ['foo', 'bar', 'baz']
    corpus_metadata = [{'page': idx} for idx in range(len(corpus))]
    store = ElasticVectorSearch.from_texts(corpus, FakeEmbeddings(),
        metadatas=corpus_metadata, elasticsearch_url=elasticsearch_url)
    results = store.similarity_search('foo', k=1)
    assert results == [Document(page_content='foo', metadata={'page': 0})]
|
Test end to end construction and search with metadata.
|
get_test_api_data
|
"""Fake api data to use for testing."""
api_docs = """
This API endpoint will search the notes for a user.
Endpoint: https://thisapidoesntexist.com
GET /api/notes
Query parameters:
q | string | The search term for notes
"""
return {'api_docs': api_docs, 'question':
'Search for notes containing langchain', 'api_url':
'https://thisapidoesntexist.com/api/notes?q=langchain', 'api_response':
json.dumps({'success': True, 'results': [{'id': 1, 'content':
'Langchain is awesome!'}]}), 'api_summary':
'There is 1 note about langchain.'}
|
def get_test_api_data() ->dict:
    """Build a fake API interaction fixture for tests."""
    api_docs = """
    This API endpoint will search the notes for a user.
    Endpoint: https://thisapidoesntexist.com
    GET /api/notes
    Query parameters:
    q | string | The search term for notes
    """
    api_response = json.dumps({'success': True, 'results': [{'id': 1,
        'content': 'Langchain is awesome!'}]})
    return {
        'api_docs': api_docs,
        'question': 'Search for notes containing langchain',
        'api_url': 'https://thisapidoesntexist.com/api/notes?q=langchain',
        'api_response': api_response,
        'api_summary': 'There is 1 note about langchain.',
    }
|
Fake api data to use for testing.
|
test_openai_predict
|
llm = ChatOpenAI()
mock_client = MagicMock()
completed = False
def mock_create(*args: Any, **kwargs: Any) ->Any:
nonlocal completed
completed = True
return mock_completion
mock_client.create = mock_create
with patch.object(llm, 'client', mock_client):
res = llm.predict('bar')
assert res == 'Bar Baz'
assert completed
|
@pytest.mark.requires('openai')
def test_openai_predict(mock_completion: dict) ->None:
    """predict() should route through the patched client and return its text."""
    llm = ChatOpenAI()
    stub_client = MagicMock()
    call_seen = False

    def fake_create(*args: Any, **kwargs: Any) ->Any:
        nonlocal call_seen
        call_seen = True
        return mock_completion
    stub_client.create = fake_create
    with patch.object(llm, 'client', stub_client):
        assert llm.predict('bar') == 'Bar Baz'
    assert call_seen
| null |
preprocess_json_input
|
"""Preprocesses a string to be parsed as json.
Replace single backslashes with double backslashes,
while leaving already escaped ones intact.
Args:
input_str: String to be preprocessed
Returns:
Preprocessed string
"""
corrected_str = re.sub('(?<!\\\\)\\\\(?!["\\\\/bfnrt]|u[0-9a-fA-F]{4})',
'\\\\\\\\', input_str)
return corrected_str
|
def preprocess_json_input(input_str: str) ->str:
    """Escape stray backslashes so the string can be parsed as JSON.

    Doubles any backslash that is not already escaped and does not start a
    valid JSON escape sequence (quote, backslash, slash, b, f, n, r, t,
    or uXXXX).

    Args:
        input_str: String to be preprocessed
    Returns:
        Preprocessed string
    """
    stray_backslash = re.compile(
        '(?<!\\\\)\\\\(?!["\\\\/bfnrt]|u[0-9a-fA-F]{4})')
    return stray_backslash.sub('\\\\\\\\', input_str)
|
Preprocesses a string to be parsed as json.
Replace single backslashes with double backslashes,
while leaving already escaped ones intact.
Args:
input_str: String to be preprocessed
Returns:
Preprocessed string
|
get_format_instructions
|
return """Your response should be a markdown list, eg: `- foo
- bar
- baz`"""
|
def get_format_instructions(self) ->str:
    """Describe the expected output format: a markdown bullet list."""
    return ('Your response should be a markdown list, '
        'eg: `- foo\n- bar\n- baz`')
| null |
embed_documents
|
"""Compute doc embeddings using a HuggingFace transformer model.
Args:
        texts: The list of texts to embed.
Returns:
List of embeddings, one for each text.
"""
texts = list(map(lambda x: x.replace('\n', ' '), texts))
embeddings = self.client(self.pipeline_ref, texts)
if not isinstance(embeddings, list):
return embeddings.tolist()
return embeddings
|
def embed_documents(self, texts: List[str]) ->List[List[float]]:
    """Compute doc embeddings using a HuggingFace transformer model.

    Args:
        texts: The list of texts to embed.

    Returns:
        List of embeddings, one for each text.
    """
    # Newlines can degrade embedding quality, so flatten each text first.
    # (Comprehension replaces the non-idiomatic map(lambda ...).)
    cleaned = [text.replace('\n', ' ') for text in texts]
    embeddings = self.client(self.pipeline_ref, cleaned)
    # The client may return a numpy array; normalize to plain nested lists.
    if not isinstance(embeddings, list):
        return embeddings.tolist()
    return embeddings
|
Compute doc embeddings using a HuggingFace transformer model.
Args:
    texts: The list of texts to embed.
Returns:
List of embeddings, one for each text.
|
test_openai_streaming
|
"""Test streaming tokens from OpenAI."""
llm = ChatOpenAI(max_tokens=10)
for token in llm.stream("I'm Pickle Rick"):
assert isinstance(token.content, str)
|
@pytest.mark.scheduled
def test_openai_streaming() ->None:
    """Test streaming tokens from OpenAI."""
    chat = ChatOpenAI(max_tokens=10)
    for chunk in chat.stream("I'm Pickle Rick"):
        assert isinstance(chunk.content, str)
|
Test streaming tokens from OpenAI.
|
__init__
|
try:
from alibabacloud_ha3engine_vector import client, models
from alibabacloud_tea_util import models as util_models
except ImportError:
raise ImportError(
'Could not import alibaba cloud opensearch python package. Please install it with `pip install alibabacloud-ha3engine-vector`.'
)
self.config = config
self.embedding = embedding
self.runtime = util_models.RuntimeOptions(connect_timeout=5000,
read_timeout=10000, autoretry=False, ignore_ssl=False, max_idle_conns=50)
self.ha3_engine_client = client.Client(models.Config(endpoint=config.
endpoint, instance_id=config.instance_id, protocol=config.protocol,
access_user_name=config.username, access_pass_word=config.password))
self.options_headers: Dict[str, str] = {}
|
def __init__(self, embedding: Embeddings, config:
    AlibabaCloudOpenSearchSettings, **kwargs: Any) ->None:
    """Create an OpenSearch engine client from ``config`` and keep ``embedding``."""
    try:
        from alibabacloud_ha3engine_vector import client, models
        from alibabacloud_tea_util import models as util_models
    except ImportError:
        raise ImportError(
            'Could not import alibaba cloud opensearch python package. Please install it with `pip install alibabacloud-ha3engine-vector`.'
            )
    self.config = config
    self.embedding = embedding
    self.runtime = util_models.RuntimeOptions(connect_timeout=5000,
        read_timeout=10000, autoretry=False, ignore_ssl=False,
        max_idle_conns=50)
    engine_config = models.Config(endpoint=config.endpoint, instance_id=
        config.instance_id, protocol=config.protocol, access_user_name=
        config.username, access_pass_word=config.password)
    self.ha3_engine_client = client.Client(engine_config)
    self.options_headers: Dict[str, str] = {}
| null |
evaluation_name
|
return 'json_edit_distance'
|
@property
def evaluation_name(self) ->str:
    """Name under which this evaluator's results are reported."""
    name = 'json_edit_distance'
    return name
| null |
_create_api_controller_agent
|
from langchain.agents.agent import AgentExecutor
from langchain.agents.mrkl.base import ZeroShotAgent
from langchain.chains.llm import LLMChain
get_llm_chain = LLMChain(llm=llm, prompt=PARSING_GET_PROMPT)
post_llm_chain = LLMChain(llm=llm, prompt=PARSING_POST_PROMPT)
tools: List[BaseTool] = [RequestsGetToolWithParsing(requests_wrapper=
requests_wrapper, llm_chain=get_llm_chain), RequestsPostToolWithParsing
(requests_wrapper=requests_wrapper, llm_chain=post_llm_chain)]
prompt = PromptTemplate(template=API_CONTROLLER_PROMPT, input_variables=[
'input', 'agent_scratchpad'], partial_variables={'api_url': api_url,
'api_docs': api_docs, 'tool_names': ', '.join([tool.name for tool in
tools]), 'tool_descriptions': '\n'.join([
f'{tool.name}: {tool.description}' for tool in tools])})
agent = ZeroShotAgent(llm_chain=LLMChain(llm=llm, prompt=prompt),
allowed_tools=[tool.name for tool in tools])
return AgentExecutor.from_agent_and_tools(agent=agent, tools=tools, verbose
=True)
|
def _create_api_controller_agent(api_url: str, api_docs: str,
    requests_wrapper: RequestsWrapper, llm: BaseLanguageModel) ->Any:
    """Assemble a ZeroShot agent wired with parsing GET and POST tools.

    Args:
        api_url: base URL injected into the controller prompt.
        api_docs: API documentation injected into the controller prompt.
        requests_wrapper: HTTP wrapper shared by both request tools.
        llm: language model driving the tool chains and the agent.

    Returns:
        An AgentExecutor over the two request tools.
    """
    from langchain.agents.agent import AgentExecutor
    from langchain.agents.mrkl.base import ZeroShotAgent
    from langchain.chains.llm import LLMChain
    get_tool = RequestsGetToolWithParsing(requests_wrapper=
        requests_wrapper, llm_chain=LLMChain(llm=llm, prompt=
        PARSING_GET_PROMPT))
    post_tool = RequestsPostToolWithParsing(requests_wrapper=
        requests_wrapper, llm_chain=LLMChain(llm=llm, prompt=
        PARSING_POST_PROMPT))
    tools: List[BaseTool] = [get_tool, post_tool]
    tool_names = ', '.join(tool.name for tool in tools)
    tool_descriptions = '\n'.join(f'{tool.name}: {tool.description}' for
        tool in tools)
    prompt = PromptTemplate(template=API_CONTROLLER_PROMPT, input_variables
        =['input', 'agent_scratchpad'], partial_variables={'api_url':
        api_url, 'api_docs': api_docs, 'tool_names': tool_names,
        'tool_descriptions': tool_descriptions})
    agent = ZeroShotAgent(llm_chain=LLMChain(llm=llm, prompt=prompt),
        allowed_tools=[tool.name for tool in tools])
    return AgentExecutor.from_agent_and_tools(agent=agent, tools=tools,
        verbose=True)
| null |
force_delete_by_path
|
"""Force delete dataset by path.
Args:
path (str): path of the dataset to delete.
Raises:
ValueError: if deeplake is not installed.
"""
try:
import deeplake
except ImportError:
raise ValueError(
'Could not import deeplake python package. Please install it with `pip install deeplake`.'
)
deeplake.delete(path, large_ok=True, force=True)
|
@classmethod
def force_delete_by_path(cls, path: str) ->None:
    """Unconditionally delete the Deep Lake dataset stored at ``path``.

    Args:
        path (str): path of the dataset to delete.

    Raises:
        ValueError: if deeplake is not installed.
    """
    try:
        import deeplake
    except ImportError:
        message = (
            'Could not import deeplake python package. Please install it with `pip install deeplake`.'
            )
        raise ValueError(message)
    deeplake.delete(path, large_ok=True, force=True)
|
Force delete dataset by path.
Args:
path (str): path of the dataset to delete.
Raises:
ValueError: if deeplake is not installed.
|
other
|
try:
import json
except ImportError:
raise ImportError(
'json is not installed. Please install it with `pip install json`')
params = json.loads(query)
jira_function = getattr(self.jira, params['function'])
return jira_function(*params.get('args', []), **params.get('kwargs', {}))
|
def other(self, query: str) ->str:
    """Dispatch an arbitrary Jira API call described by a JSON payload.

    Args:
        query: JSON string with key ``function`` (name of the Jira client
            method to invoke) plus optional ``args`` and ``kwargs``.

    Returns:
        Whatever the invoked Jira client method returns.
    """
    # json is part of the standard library; the previous try/except
    # ImportError (with a bogus `pip install json` hint) could never fire.
    import json
    params = json.loads(query)
    jira_function = getattr(self.jira, params['function'])
    return jira_function(*params.get('args', []), **params.get('kwargs', {}))
| null |
_create_retry_decorator
|
import openai
errors = [openai.error.Timeout, openai.error.APIError, openai.error.
APIConnectionError, openai.error.RateLimitError, openai.error.
ServiceUnavailableError]
return create_base_retry_decorator(error_types=errors, max_retries=llm.
max_retries, run_manager=run_manager)
|
def _create_retry_decorator(llm: Union[BaseOpenAI, OpenAIChat], run_manager:
    Optional[Union[AsyncCallbackManagerForLLMRun, CallbackManagerForLLMRun]
    ]=None) ->Callable[[Any], Any]:
    """Build a retry decorator covering OpenAI's transient error types."""
    import openai
    transient = ('Timeout', 'APIError', 'APIConnectionError',
        'RateLimitError', 'ServiceUnavailableError')
    retryable_errors = [getattr(openai.error, name) for name in transient]
    return create_base_retry_decorator(error_types=retryable_errors,
        max_retries=llm.max_retries, run_manager=run_manager)
| null |
_evaluate_strings
|
parsed = self._canonicalize(self._parse_json(prediction))
label = self._canonicalize(self._parse_json(reference))
distance = self._string_distance(parsed, label)
return {'score': distance}
|
def _evaluate_strings(self, prediction: str, input: Optional[str]=None,
reference: Optional[str]=None, **kwargs: Any) ->dict:
parsed = self._canonicalize(self._parse_json(prediction))
label = self._canonicalize(self._parse_json(reference))
distance = self._string_distance(parsed, label)
return {'score': distance}
| null |
wait_for_all_evaluators
|
"""Wait for all tracers to finish."""
global _TRACERS
for tracer in list(_TRACERS):
if tracer is not None:
tracer.wait_for_futures()
|
def wait_for_all_evaluators() ->None:
    """Wait for all tracers to finish."""
    # Snapshot the registry first so concurrent mutation cannot break
    # iteration; skip slots that were cleared to None.
    pending = [tracer for tracer in _TRACERS if tracer is not None]
    for tracer in pending:
        tracer.wait_for_futures()
|
Wait for all tracers to finish.
|
_get_relevant_documents
|
"""Look up similar documents in Weaviate.
query: The query to search for relevant documents
of using weviate hybrid search.
where_filter: A filter to apply to the query.
https://weaviate.io/developers/weaviate/guides/querying/#filtering
score: Whether to include the score, and score explanation
in the returned Documents meta_data.
hybrid_search_kwargs: Used to pass additional arguments
to the .with_hybrid() method.
The primary uses cases for this are:
1) Search specific properties only -
specify which properties to be used during hybrid search portion.
Note: this is not the same as the (self.attributes) to be returned.
Example - hybrid_search_kwargs={"properties": ["question", "answer"]}
https://weaviate.io/developers/weaviate/search/hybrid#selected-properties-only
2) Weight boosted searched properties -
Boost the weight of certain properties during the hybrid search portion.
Example - hybrid_search_kwargs={"properties": ["question^2", "answer"]}
https://weaviate.io/developers/weaviate/search/hybrid#weight-boost-searched-properties
3) Search with a custom vector - Define a different vector
to be used during the hybrid search portion.
Example - hybrid_search_kwargs={"vector": [0.1, 0.2, 0.3, ...]}
https://weaviate.io/developers/weaviate/search/hybrid#with-a-custom-vector
4) Use Fusion ranking method
Example - from weaviate.gql.get import HybridFusion
hybrid_search_kwargs={"fusion": fusion_type=HybridFusion.RELATIVE_SCORE}
https://weaviate.io/developers/weaviate/search/hybrid#fusion-ranking-method
"""
query_obj = self.client.query.get(self.index_name, self.attributes)
if where_filter:
query_obj = query_obj.with_where(where_filter)
if score:
query_obj = query_obj.with_additional(['score', 'explainScore'])
if hybrid_search_kwargs is None:
hybrid_search_kwargs = {}
result = query_obj.with_hybrid(query, alpha=self.alpha, **hybrid_search_kwargs
).with_limit(self.k).do()
if 'errors' in result:
raise ValueError(f"Error during query: {result['errors']}")
docs = []
for res in result['data']['Get'][self.index_name]:
text = res.pop(self.text_key)
docs.append(Document(page_content=text, metadata=res))
return docs
|
def _get_relevant_documents(self, query: str, *, run_manager:
    CallbackManagerForRetrieverRun, where_filter: Optional[Dict[str, object
    ]]=None, score: bool=False, hybrid_search_kwargs: Optional[Dict[str,
    object]]=None) ->List[Document]:
    """Look up documents in Weaviate relevant to ``query`` via hybrid search.

    Args:
        query: The text to search for with Weaviate hybrid search.
        where_filter: Optional filter applied to the query. See
            https://weaviate.io/developers/weaviate/guides/querying/#filtering
        score: When True, include the score and its explanation in each
            returned Document's metadata.
        hybrid_search_kwargs: Extra keyword arguments forwarded to
            ``.with_hybrid()``. Typical uses:
            1) restrict the hybrid search to specific properties, e.g.
               ``{"properties": ["question", "answer"]}``;
            2) boost property weights, e.g.
               ``{"properties": ["question^2", "answer"]}``;
            3) supply a custom vector, e.g. ``{"vector": [0.1, 0.2, ...]}``;
            4) choose a fusion ranking method, e.g.
               ``{"fusion": HybridFusion.RELATIVE_SCORE}``.
            See https://weaviate.io/developers/weaviate/search/hybrid

    Returns:
        The top ``self.k`` matching Documents.

    Raises:
        ValueError: if the Weaviate response reports errors.
    """
    search = self.client.query.get(self.index_name, self.attributes)
    if where_filter:
        search = search.with_where(where_filter)
    if score:
        search = search.with_additional(['score', 'explainScore'])
    extra_kwargs = hybrid_search_kwargs or {}
    response = search.with_hybrid(query, alpha=self.alpha, **extra_kwargs
        ).with_limit(self.k).do()
    if 'errors' in response:
        raise ValueError(f"Error during query: {response['errors']}")
    documents = []
    for hit in response['data']['Get'][self.index_name]:
        content = hit.pop(self.text_key)
        documents.append(Document(page_content=content, metadata=hit))
    return documents
|
Look up similar documents in Weaviate.
query: The query to search for relevant documents
    using Weaviate hybrid search.
where_filter: A filter to apply to the query.
https://weaviate.io/developers/weaviate/guides/querying/#filtering
score: Whether to include the score, and score explanation
in the returned Documents meta_data.
hybrid_search_kwargs: Used to pass additional arguments
to the .with_hybrid() method.
    The primary use cases for this are:
1) Search specific properties only -
specify which properties to be used during hybrid search portion.
Note: this is not the same as the (self.attributes) to be returned.
Example - hybrid_search_kwargs={"properties": ["question", "answer"]}
https://weaviate.io/developers/weaviate/search/hybrid#selected-properties-only
2) Weight boosted searched properties -
Boost the weight of certain properties during the hybrid search portion.
Example - hybrid_search_kwargs={"properties": ["question^2", "answer"]}
https://weaviate.io/developers/weaviate/search/hybrid#weight-boost-searched-properties
3) Search with a custom vector - Define a different vector
to be used during the hybrid search portion.
Example - hybrid_search_kwargs={"vector": [0.1, 0.2, 0.3, ...]}
https://weaviate.io/developers/weaviate/search/hybrid#with-a-custom-vector
4) Use Fusion ranking method
Example - from weaviate.gql.get import HybridFusion
hybrid_search_kwargs={"fusion": fusion_type=HybridFusion.RELATIVE_SCORE}
https://weaviate.io/developers/weaviate/search/hybrid#fusion-ranking-method
|
test_similarity_search_with_uuids
|
"""Test end to end construction and search with uuids."""
texts = ['foo', 'bar', 'baz']
uuids = [uuid.uuid5(uuid.NAMESPACE_DNS, 'same-name') for text in texts]
metadatas = [{'page': i} for i in range(len(texts))]
docsearch = Weaviate.from_texts(texts, embedding_openai, metadatas=
metadatas, weaviate_url=weaviate_url, uuids=uuids)
output = docsearch.similarity_search('foo', k=2)
assert len(output) == 1
|
@pytest.mark.vcr(ignore_localhost=True)
def test_similarity_search_with_uuids(self, weaviate_url: str,
    embedding_openai: OpenAIEmbeddings) ->None:
    """Duplicate uuids must collapse the inserts into a single document."""
    texts = ['foo', 'bar', 'baz']
    # Every text gets the SAME deterministic uuid on purpose.
    shared_uuids = [uuid.uuid5(uuid.NAMESPACE_DNS, 'same-name') for _ in texts
        ]
    metadatas = [{'page': idx} for idx in range(len(texts))]
    store = Weaviate.from_texts(texts, embedding_openai, metadatas=
        metadatas, weaviate_url=weaviate_url, uuids=shared_uuids)
    results = store.similarity_search('foo', k=2)
    assert len(results) == 1
|
Test end to end construction and search with uuids.
|
_get_relevant_documents
|
"""Get documents relevant for a query."""
from google.cloud.discoveryengine_v1beta import ConverseConversationRequest, TextInput
request = ConverseConversationRequest(name=self._client.conversation_path(
self.project_id, self.location_id, self.data_store_id, self.
conversation_id), serving_config=self._serving_config, query=TextInput(
input=query))
response = self._client.converse_conversation(request)
if self.engine_data_type == 2:
return self._convert_website_search_response(response.search_results,
'extractive_answers')
return self._convert_unstructured_search_response(response.search_results,
'extractive_answers')
|
def _get_relevant_documents(self, query: str, *, run_manager:
    CallbackManagerForRetrieverRun) ->List[Document]:
    """Get documents relevant for a query."""
    from google.cloud.discoveryengine_v1beta import ConverseConversationRequest, TextInput
    conversation_name = self._client.conversation_path(self.project_id,
        self.location_id, self.data_store_id, self.conversation_id)
    request = ConverseConversationRequest(name=conversation_name,
        serving_config=self._serving_config, query=TextInput(input=query))
    response = self._client.converse_conversation(request)
    # engine_data_type == 2 routes to the website-response converter;
    # everything else is treated as unstructured content.
    if self.engine_data_type == 2:
        converter = self._convert_website_search_response
    else:
        converter = self._convert_unstructured_search_response
    return converter(response.search_results, 'extractive_answers')
|
Get documents relevant for a query.
|
run
|
if mode == 'jql':
return self.search(query)
elif mode == 'get_projects':
return self.project()
elif mode == 'create_issue':
return self.issue_create(query)
elif mode == 'other':
return self.other(query)
elif mode == 'create_page':
return self.page_create(query)
else:
raise ValueError(f'Got unexpected mode {mode}')
|
def run(self, mode: str, query: str) ->str:
    """Route ``query`` to the handler registered for ``mode``.

    Raises:
        ValueError: if ``mode`` is not one of the supported modes.
    """
    dispatch = {'jql': lambda: self.search(query), 'get_projects': lambda:
        self.project(), 'create_issue': lambda: self.issue_create(query),
        'other': lambda: self.other(query), 'create_page': lambda: self.
        page_create(query)}
    if mode not in dispatch:
        raise ValueError(f'Got unexpected mode {mode}')
    return dispatch[mode]()
| null |
test_singlestoredb_filter_metadata_6
|
"""Test filtering by other bool"""
table_name = 'test_singlestoredb_filter_metadata_6'
drop(table_name)
docs = [Document(page_content=t, metadata={'index': i, 'category': 'budget',
'is_good': i == 1}) for i, t in enumerate(texts)]
docsearch = SingleStoreDB.from_documents(docs, FakeEmbeddings(),
distance_strategy=DistanceStrategy.EUCLIDEAN_DISTANCE, table_name=
table_name, host=TEST_SINGLESTOREDB_URL)
output = docsearch.similarity_search('foo', k=1, filter={'category':
'budget', 'is_good': True})
assert output == [Document(page_content='bar', metadata={'index': 1,
'category': 'budget', 'is_good': True})]
drop(table_name)
|
@pytest.mark.skipif(not singlestoredb_installed, reason=
    'singlestoredb not installed')
def test_singlestoredb_filter_metadata_6(texts: List[str]) ->None:
    """Test filtering by other bool"""
    table_name = 'test_singlestoredb_filter_metadata_6'
    drop(table_name)
    documents = []
    for idx, text in enumerate(texts):
        meta = {'index': idx, 'category': 'budget', 'is_good': idx == 1}
        documents.append(Document(page_content=text, metadata=meta))
    store = SingleStoreDB.from_documents(documents, FakeEmbeddings(),
        distance_strategy=DistanceStrategy.EUCLIDEAN_DISTANCE, table_name=
        table_name, host=TEST_SINGLESTOREDB_URL)
    hits = store.similarity_search('foo', k=1, filter={'category':
        'budget', 'is_good': True})
    expected = Document(page_content='bar', metadata={'index': 1,
        'category': 'budget', 'is_good': True})
    assert hits == [expected]
    drop(table_name)
|
Test filtering by other bool
|
visit_comparison
|
try:
from qdrant_client.http import models as rest
except ImportError as e:
raise ImportError(
'Cannot import qdrant_client. Please install with `pip install qdrant-client`.'
) from e
self._validate_func(comparison.comparator)
attribute = self.metadata_key + '.' + comparison.attribute
if comparison.comparator == Comparator.EQ:
return rest.FieldCondition(key=attribute, match=rest.MatchValue(value=
comparison.value))
kwargs = {comparison.comparator.value: comparison.value}
return rest.FieldCondition(key=attribute, range=rest.Range(**kwargs))
|
def visit_comparison(self, comparison: Comparison) ->rest.FieldCondition:
    """Translate a Comparison node into a Qdrant ``FieldCondition``."""
    try:
        from qdrant_client.http import models as rest
    except ImportError as e:
        raise ImportError(
            'Cannot import qdrant_client. Please install with `pip install qdrant-client`.'
            ) from e
    self._validate_func(comparison.comparator)
    key = self.metadata_key + '.' + comparison.attribute
    # Equality maps to an exact-match condition; every other supported
    # comparator becomes a range bound named after the comparator.
    if comparison.comparator == Comparator.EQ:
        match_condition = rest.MatchValue(value=comparison.value)
        return rest.FieldCondition(key=key, match=match_condition)
    range_kwargs = {comparison.comparator.value: comparison.value}
    return rest.FieldCondition(key=key, range=rest.Range(**range_kwargs))
| null |
test_mistralai_initialization
|
"""Test ChatMistralAI initialization."""
ChatMistralAI(model='test', mistral_api_key='test')
|
@pytest.mark.requires('mistralai')
def test_mistralai_initialization() ->None:
    """Constructing ChatMistralAI with a model and api key must not raise."""
    model = ChatMistralAI(model='test', mistral_api_key='test')
|
Test ChatMistralAI initialization.
|
_searx_api_query
|
"""Actual request to searx API."""
raw_result = requests.get(self.searx_host, headers=self.headers, params=
params, verify=not self.unsecure)
if not raw_result.ok:
raise ValueError('Searx API returned an error: ', raw_result.text)
res = SearxResults(raw_result.text)
self._result = res
return res
|
def _searx_api_query(self, params: dict) ->SearxResults:
    """Issue a GET request against the configured searx host.

    Args:
        params: query parameters forwarded to the searx search endpoint.

    Returns:
        The parsed ``SearxResults`` (also cached on ``self._result``).

    Raises:
        ValueError: if the searx API responds with an error status.
    """
    raw_result = requests.get(self.searx_host, headers=self.headers,
        params=params, verify=not self.unsecure)
    if not raw_result.ok:
        # One formatted message; the previous comma created a
        # ValueError('Searx API returned an error: ', '<body>') tuple,
        # which rendered as two separate args instead of one message.
        raise ValueError(f'Searx API returned an error: {raw_result.text}')
    res = SearxResults(raw_result.text)
    self._result = res
    return res
|
Actual request to searx API.
|
__getattr__
|
if name == 'MRKLChain':
from langchain.agents import MRKLChain
_warn_on_import(name, replacement='langchain.agents.MRKLChain')
return MRKLChain
elif name == 'ReActChain':
from langchain.agents import ReActChain
_warn_on_import(name, replacement='langchain.agents.ReActChain')
return ReActChain
elif name == 'SelfAskWithSearchChain':
from langchain.agents import SelfAskWithSearchChain
_warn_on_import(name, replacement='langchain.agents.SelfAskWithSearchChain'
)
return SelfAskWithSearchChain
elif name == 'ConversationChain':
from langchain.chains import ConversationChain
_warn_on_import(name, replacement='langchain.chains.ConversationChain')
return ConversationChain
elif name == 'LLMBashChain':
raise ImportError(
'This module has been moved to langchain-experimental. For more details: https://github.com/langchain-ai/langchain/discussions/11352.To access this code, install it with `pip install langchain-experimental`.`from langchain_experimental.llm_bash.base import LLMBashChain`'
)
elif name == 'LLMChain':
from langchain.chains import LLMChain
_warn_on_import(name, replacement='langchain.chains.LLMChain')
return LLMChain
elif name == 'LLMCheckerChain':
from langchain.chains import LLMCheckerChain
_warn_on_import(name, replacement='langchain.chains.LLMCheckerChain')
return LLMCheckerChain
elif name == 'LLMMathChain':
from langchain.chains import LLMMathChain
_warn_on_import(name, replacement='langchain.chains.LLMMathChain')
return LLMMathChain
elif name == 'QAWithSourcesChain':
from langchain.chains import QAWithSourcesChain
_warn_on_import(name, replacement='langchain.chains.QAWithSourcesChain')
return QAWithSourcesChain
elif name == 'VectorDBQA':
from langchain.chains import VectorDBQA
_warn_on_import(name, replacement='langchain.chains.VectorDBQA')
return VectorDBQA
elif name == 'VectorDBQAWithSourcesChain':
from langchain.chains import VectorDBQAWithSourcesChain
_warn_on_import(name, replacement=
'langchain.chains.VectorDBQAWithSourcesChain')
return VectorDBQAWithSourcesChain
elif name == 'InMemoryDocstore':
from langchain.docstore import InMemoryDocstore
_warn_on_import(name, replacement='langchain.docstore.InMemoryDocstore')
return InMemoryDocstore
elif name == 'Wikipedia':
from langchain.docstore import Wikipedia
_warn_on_import(name, replacement='langchain.docstore.Wikipedia')
return Wikipedia
elif name == 'Anthropic':
from langchain_community.llms import Anthropic
_warn_on_import(name, replacement='langchain.llms.Anthropic')
return Anthropic
elif name == 'Banana':
from langchain_community.llms import Banana
_warn_on_import(name, replacement='langchain.llms.Banana')
return Banana
elif name == 'CerebriumAI':
from langchain_community.llms import CerebriumAI
_warn_on_import(name, replacement='langchain.llms.CerebriumAI')
return CerebriumAI
elif name == 'Cohere':
from langchain_community.llms import Cohere
_warn_on_import(name, replacement='langchain.llms.Cohere')
return Cohere
elif name == 'ForefrontAI':
from langchain_community.llms import ForefrontAI
_warn_on_import(name, replacement='langchain.llms.ForefrontAI')
return ForefrontAI
elif name == 'GooseAI':
from langchain_community.llms import GooseAI
_warn_on_import(name, replacement='langchain.llms.GooseAI')
return GooseAI
elif name == 'HuggingFaceHub':
from langchain_community.llms import HuggingFaceHub
_warn_on_import(name, replacement='langchain.llms.HuggingFaceHub')
return HuggingFaceHub
elif name == 'HuggingFaceTextGenInference':
from langchain_community.llms import HuggingFaceTextGenInference
_warn_on_import(name, replacement=
'langchain.llms.HuggingFaceTextGenInference')
return HuggingFaceTextGenInference
elif name == 'LlamaCpp':
from langchain_community.llms import LlamaCpp
_warn_on_import(name, replacement='langchain.llms.LlamaCpp')
return LlamaCpp
elif name == 'Modal':
from langchain_community.llms import Modal
_warn_on_import(name, replacement='langchain.llms.Modal')
return Modal
elif name == 'OpenAI':
from langchain_community.llms import OpenAI
_warn_on_import(name, replacement='langchain.llms.OpenAI')
return OpenAI
elif name == 'Petals':
from langchain_community.llms import Petals
_warn_on_import(name, replacement='langchain.llms.Petals')
return Petals
elif name == 'PipelineAI':
from langchain_community.llms import PipelineAI
_warn_on_import(name, replacement='langchain.llms.PipelineAI')
return PipelineAI
elif name == 'SagemakerEndpoint':
from langchain_community.llms import SagemakerEndpoint
_warn_on_import(name, replacement='langchain.llms.SagemakerEndpoint')
return SagemakerEndpoint
elif name == 'StochasticAI':
from langchain_community.llms import StochasticAI
_warn_on_import(name, replacement='langchain.llms.StochasticAI')
return StochasticAI
elif name == 'Writer':
from langchain_community.llms import Writer
_warn_on_import(name, replacement='langchain.llms.Writer')
return Writer
elif name == 'HuggingFacePipeline':
from langchain_community.llms.huggingface_pipeline import HuggingFacePipeline
_warn_on_import(name, replacement=
'langchain.llms.huggingface_pipeline.HuggingFacePipeline')
return HuggingFacePipeline
elif name == 'FewShotPromptTemplate':
from langchain_core.prompts import FewShotPromptTemplate
_warn_on_import(name, replacement='langchain.prompts.FewShotPromptTemplate'
)
return FewShotPromptTemplate
elif name == 'Prompt':
from langchain.prompts import Prompt
_warn_on_import(name, replacement='langchain.prompts.Prompt')
return Prompt
elif name == 'PromptTemplate':
from langchain_core.prompts import PromptTemplate
_warn_on_import(name, replacement='langchain.prompts.PromptTemplate')
return PromptTemplate
elif name == 'BasePromptTemplate':
from langchain_core.prompts import BasePromptTemplate
_warn_on_import(name, replacement=
'langchain.schema.prompt_template.BasePromptTemplate')
return BasePromptTemplate
elif name == 'ArxivAPIWrapper':
from langchain_community.utilities import ArxivAPIWrapper
_warn_on_import(name, replacement=
'langchain_community.utilities.ArxivAPIWrapper')
return ArxivAPIWrapper
elif name == 'GoldenQueryAPIWrapper':
from langchain_community.utilities import GoldenQueryAPIWrapper
_warn_on_import(name, replacement=
'langchain_community.utilities.GoldenQueryAPIWrapper')
return GoldenQueryAPIWrapper
elif name == 'GoogleSearchAPIWrapper':
from langchain_community.utilities import GoogleSearchAPIWrapper
_warn_on_import(name, replacement=
'langchain_community.utilities.GoogleSearchAPIWrapper')
return GoogleSearchAPIWrapper
elif name == 'GoogleSerperAPIWrapper':
from langchain_community.utilities import GoogleSerperAPIWrapper
_warn_on_import(name, replacement=
'langchain_community.utilities.GoogleSerperAPIWrapper')
return GoogleSerperAPIWrapper
elif name == 'PowerBIDataset':
from langchain_community.utilities import PowerBIDataset
_warn_on_import(name, replacement=
'langchain_community.utilities.PowerBIDataset')
return PowerBIDataset
elif name == 'SearxSearchWrapper':
from langchain_community.utilities import SearxSearchWrapper
_warn_on_import(name, replacement=
'langchain_community.utilities.SearxSearchWrapper')
return SearxSearchWrapper
elif name == 'WikipediaAPIWrapper':
from langchain_community.utilities import WikipediaAPIWrapper
_warn_on_import(name, replacement=
'langchain_community.utilities.WikipediaAPIWrapper')
return WikipediaAPIWrapper
elif name == 'WolframAlphaAPIWrapper':
from langchain_community.utilities import WolframAlphaAPIWrapper
_warn_on_import(name, replacement=
'langchain_community.utilities.WolframAlphaAPIWrapper')
return WolframAlphaAPIWrapper
elif name == 'SQLDatabase':
from langchain_community.utilities import SQLDatabase
_warn_on_import(name, replacement=
'langchain_community.utilities.SQLDatabase')
return SQLDatabase
elif name == 'FAISS':
from langchain_community.vectorstores import FAISS
_warn_on_import(name, replacement='langchain_community.vectorstores.FAISS')
return FAISS
elif name == 'ElasticVectorSearch':
from langchain_community.vectorstores import ElasticVectorSearch
_warn_on_import(name, replacement=
'langchain_community.vectorstores.ElasticVectorSearch')
return ElasticVectorSearch
elif name == 'SerpAPIChain' or name == 'SerpAPIWrapper':
from langchain_community.utilities import SerpAPIWrapper
_warn_on_import(name, replacement=
'langchain_community.utilities.SerpAPIWrapper')
return SerpAPIWrapper
elif name == 'verbose':
from langchain.globals import _verbose
_warn_on_import(name, replacement=
'langchain.globals.set_verbose() / langchain.globals.get_verbose()')
return _verbose
elif name == 'debug':
from langchain.globals import _debug
_warn_on_import(name, replacement=
'langchain.globals.set_debug() / langchain.globals.get_debug()')
return _debug
elif name == 'llm_cache':
from langchain.globals import _llm_cache
_warn_on_import(name, replacement=
'langchain.globals.set_llm_cache() / langchain.globals.get_llm_cache()'
)
return _llm_cache
else:
raise AttributeError(f'Could not find: {name}')
|
def __getattr__(name: str) ->Any:
if name == 'MRKLChain':
from langchain.agents import MRKLChain
_warn_on_import(name, replacement='langchain.agents.MRKLChain')
return MRKLChain
elif name == 'ReActChain':
from langchain.agents import ReActChain
_warn_on_import(name, replacement='langchain.agents.ReActChain')
return ReActChain
elif name == 'SelfAskWithSearchChain':
from langchain.agents import SelfAskWithSearchChain
_warn_on_import(name, replacement=
'langchain.agents.SelfAskWithSearchChain')
return SelfAskWithSearchChain
elif name == 'ConversationChain':
from langchain.chains import ConversationChain
_warn_on_import(name, replacement='langchain.chains.ConversationChain')
return ConversationChain
elif name == 'LLMBashChain':
raise ImportError(
'This module has been moved to langchain-experimental. For more details: https://github.com/langchain-ai/langchain/discussions/11352.To access this code, install it with `pip install langchain-experimental`.`from langchain_experimental.llm_bash.base import LLMBashChain`'
)
elif name == 'LLMChain':
from langchain.chains import LLMChain
_warn_on_import(name, replacement='langchain.chains.LLMChain')
return LLMChain
elif name == 'LLMCheckerChain':
from langchain.chains import LLMCheckerChain
_warn_on_import(name, replacement='langchain.chains.LLMCheckerChain')
return LLMCheckerChain
elif name == 'LLMMathChain':
from langchain.chains import LLMMathChain
_warn_on_import(name, replacement='langchain.chains.LLMMathChain')
return LLMMathChain
elif name == 'QAWithSourcesChain':
from langchain.chains import QAWithSourcesChain
_warn_on_import(name, replacement='langchain.chains.QAWithSourcesChain'
)
return QAWithSourcesChain
elif name == 'VectorDBQA':
from langchain.chains import VectorDBQA
_warn_on_import(name, replacement='langchain.chains.VectorDBQA')
return VectorDBQA
elif name == 'VectorDBQAWithSourcesChain':
from langchain.chains import VectorDBQAWithSourcesChain
_warn_on_import(name, replacement=
'langchain.chains.VectorDBQAWithSourcesChain')
return VectorDBQAWithSourcesChain
elif name == 'InMemoryDocstore':
from langchain.docstore import InMemoryDocstore
_warn_on_import(name, replacement='langchain.docstore.InMemoryDocstore'
)
return InMemoryDocstore
elif name == 'Wikipedia':
from langchain.docstore import Wikipedia
_warn_on_import(name, replacement='langchain.docstore.Wikipedia')
return Wikipedia
elif name == 'Anthropic':
from langchain_community.llms import Anthropic
_warn_on_import(name, replacement='langchain.llms.Anthropic')
return Anthropic
elif name == 'Banana':
from langchain_community.llms import Banana
_warn_on_import(name, replacement='langchain.llms.Banana')
return Banana
elif name == 'CerebriumAI':
from langchain_community.llms import CerebriumAI
_warn_on_import(name, replacement='langchain.llms.CerebriumAI')
return CerebriumAI
elif name == 'Cohere':
from langchain_community.llms import Cohere
_warn_on_import(name, replacement='langchain.llms.Cohere')
return Cohere
elif name == 'ForefrontAI':
from langchain_community.llms import ForefrontAI
_warn_on_import(name, replacement='langchain.llms.ForefrontAI')
return ForefrontAI
elif name == 'GooseAI':
from langchain_community.llms import GooseAI
_warn_on_import(name, replacement='langchain.llms.GooseAI')
return GooseAI
elif name == 'HuggingFaceHub':
from langchain_community.llms import HuggingFaceHub
_warn_on_import(name, replacement='langchain.llms.HuggingFaceHub')
return HuggingFaceHub
elif name == 'HuggingFaceTextGenInference':
from langchain_community.llms import HuggingFaceTextGenInference
_warn_on_import(name, replacement=
'langchain.llms.HuggingFaceTextGenInference')
return HuggingFaceTextGenInference
elif name == 'LlamaCpp':
from langchain_community.llms import LlamaCpp
_warn_on_import(name, replacement='langchain.llms.LlamaCpp')
return LlamaCpp
elif name == 'Modal':
from langchain_community.llms import Modal
_warn_on_import(name, replacement='langchain.llms.Modal')
return Modal
elif name == 'OpenAI':
from langchain_community.llms import OpenAI
_warn_on_import(name, replacement='langchain.llms.OpenAI')
return OpenAI
elif name == 'Petals':
from langchain_community.llms import Petals
_warn_on_import(name, replacement='langchain.llms.Petals')
return Petals
elif name == 'PipelineAI':
from langchain_community.llms import PipelineAI
_warn_on_import(name, replacement='langchain.llms.PipelineAI')
return PipelineAI
elif name == 'SagemakerEndpoint':
from langchain_community.llms import SagemakerEndpoint
_warn_on_import(name, replacement='langchain.llms.SagemakerEndpoint')
return SagemakerEndpoint
elif name == 'StochasticAI':
from langchain_community.llms import StochasticAI
_warn_on_import(name, replacement='langchain.llms.StochasticAI')
return StochasticAI
elif name == 'Writer':
from langchain_community.llms import Writer
_warn_on_import(name, replacement='langchain.llms.Writer')
return Writer
elif name == 'HuggingFacePipeline':
from langchain_community.llms.huggingface_pipeline import HuggingFacePipeline
_warn_on_import(name, replacement=
'langchain.llms.huggingface_pipeline.HuggingFacePipeline')
return HuggingFacePipeline
elif name == 'FewShotPromptTemplate':
from langchain_core.prompts import FewShotPromptTemplate
_warn_on_import(name, replacement=
'langchain.prompts.FewShotPromptTemplate')
return FewShotPromptTemplate
elif name == 'Prompt':
from langchain.prompts import Prompt
_warn_on_import(name, replacement='langchain.prompts.Prompt')
return Prompt
elif name == 'PromptTemplate':
from langchain_core.prompts import PromptTemplate
_warn_on_import(name, replacement='langchain.prompts.PromptTemplate')
return PromptTemplate
elif name == 'BasePromptTemplate':
from langchain_core.prompts import BasePromptTemplate
_warn_on_import(name, replacement=
'langchain.schema.prompt_template.BasePromptTemplate')
return BasePromptTemplate
elif name == 'ArxivAPIWrapper':
from langchain_community.utilities import ArxivAPIWrapper
_warn_on_import(name, replacement=
'langchain_community.utilities.ArxivAPIWrapper')
return ArxivAPIWrapper
elif name == 'GoldenQueryAPIWrapper':
from langchain_community.utilities import GoldenQueryAPIWrapper
_warn_on_import(name, replacement=
'langchain_community.utilities.GoldenQueryAPIWrapper')
return GoldenQueryAPIWrapper
elif name == 'GoogleSearchAPIWrapper':
from langchain_community.utilities import GoogleSearchAPIWrapper
_warn_on_import(name, replacement=
'langchain_community.utilities.GoogleSearchAPIWrapper')
return GoogleSearchAPIWrapper
elif name == 'GoogleSerperAPIWrapper':
from langchain_community.utilities import GoogleSerperAPIWrapper
_warn_on_import(name, replacement=
'langchain_community.utilities.GoogleSerperAPIWrapper')
return GoogleSerperAPIWrapper
elif name == 'PowerBIDataset':
from langchain_community.utilities import PowerBIDataset
_warn_on_import(name, replacement=
'langchain_community.utilities.PowerBIDataset')
return PowerBIDataset
elif name == 'SearxSearchWrapper':
from langchain_community.utilities import SearxSearchWrapper
_warn_on_import(name, replacement=
'langchain_community.utilities.SearxSearchWrapper')
return SearxSearchWrapper
elif name == 'WikipediaAPIWrapper':
from langchain_community.utilities import WikipediaAPIWrapper
_warn_on_import(name, replacement=
'langchain_community.utilities.WikipediaAPIWrapper')
return WikipediaAPIWrapper
elif name == 'WolframAlphaAPIWrapper':
from langchain_community.utilities import WolframAlphaAPIWrapper
_warn_on_import(name, replacement=
'langchain_community.utilities.WolframAlphaAPIWrapper')
return WolframAlphaAPIWrapper
elif name == 'SQLDatabase':
from langchain_community.utilities import SQLDatabase
_warn_on_import(name, replacement=
'langchain_community.utilities.SQLDatabase')
return SQLDatabase
elif name == 'FAISS':
from langchain_community.vectorstores import FAISS
_warn_on_import(name, replacement=
'langchain_community.vectorstores.FAISS')
return FAISS
elif name == 'ElasticVectorSearch':
from langchain_community.vectorstores import ElasticVectorSearch
_warn_on_import(name, replacement=
'langchain_community.vectorstores.ElasticVectorSearch')
return ElasticVectorSearch
elif name == 'SerpAPIChain' or name == 'SerpAPIWrapper':
from langchain_community.utilities import SerpAPIWrapper
_warn_on_import(name, replacement=
'langchain_community.utilities.SerpAPIWrapper')
return SerpAPIWrapper
elif name == 'verbose':
from langchain.globals import _verbose
_warn_on_import(name, replacement=
'langchain.globals.set_verbose() / langchain.globals.get_verbose()'
)
return _verbose
elif name == 'debug':
from langchain.globals import _debug
_warn_on_import(name, replacement=
'langchain.globals.set_debug() / langchain.globals.get_debug()')
return _debug
elif name == 'llm_cache':
from langchain.globals import _llm_cache
_warn_on_import(name, replacement=
'langchain.globals.set_llm_cache() / langchain.globals.get_llm_cache()'
)
return _llm_cache
else:
raise AttributeError(f'Could not find: {name}')
| null |
test_chat_message_partial
|
template = ChatPromptTemplate.from_messages([('system',
'You are an AI assistant named {name}.'), ('human', "Hi I'm {user}"), (
'ai', "Hi there, {user}, I'm {name}."), ('human', '{input}')])
template2 = template.partial(user='Lucy', name='R2D2')
with pytest.raises(KeyError):
template.format_messages(input='hello')
res = template2.format_messages(input='hello')
expected = [SystemMessage(content='You are an AI assistant named R2D2.'),
HumanMessage(content="Hi I'm Lucy"), AIMessage(content=
"Hi there, Lucy, I'm R2D2."), HumanMessage(content='hello')]
assert res == expected
assert template2.format(input='hello') == get_buffer_string(expected)
|
def test_chat_message_partial() ->None:
    """Partial variables fill chat-template slots; missing ones still raise."""
    base = ChatPromptTemplate.from_messages([
        ('system', 'You are an AI assistant named {name}.'),
        ('human', "Hi I'm {user}"),
        ('ai', "Hi there, {user}, I'm {name}."),
        ('human', '{input}'),
    ])
    partial_template = base.partial(user='Lucy', name='R2D2')
    # The un-partialed template still requires every variable.
    with pytest.raises(KeyError):
        base.format_messages(input='hello')
    actual = partial_template.format_messages(input='hello')
    wanted = [
        SystemMessage(content='You are an AI assistant named R2D2.'),
        HumanMessage(content="Hi I'm Lucy"),
        AIMessage(content="Hi there, Lucy, I'm R2D2."),
        HumanMessage(content='hello'),
    ]
    assert actual == wanted
    assert partial_template.format(input='hello') == get_buffer_string(wanted)
| null |
query
|
"""Query Neptune database."""
try:
return self.client.execute_open_cypher_query(openCypherQuery=query)
except Exception as e:
raise NeptuneQueryException({'message':
'An error occurred while executing the query.', 'details': str(e)})
|
def query(self, query: str, params: Optional[dict]=None) ->Dict[str, Any]:
    """Query Neptune database.

    Args:
        query: An openCypher query string to execute.
        params: Query parameters. Currently unused; kept for interface
            compatibility. (Was a mutable default ``{}``, which is a
            Python pitfall — replaced with ``None``.)

    Returns:
        The raw response dict from the Neptune openCypher endpoint.

    Raises:
        NeptuneQueryException: If query execution fails; the original
            exception is chained so the root cause is preserved.
    """
    try:
        return self.client.execute_open_cypher_query(openCypherQuery=query)
    except Exception as e:
        raise NeptuneQueryException({'message':
            'An error occurred while executing the query.', 'details':
            str(e)}) from e
|
Query Neptune database.
|
from_browser
|
"""Instantiate the tool."""
lazy_import_playwright_browsers()
return cls(sync_browser=sync_browser, async_browser=async_browser)
|
@classmethod
def from_browser(cls, sync_browser: Optional[SyncBrowser]=None,
    async_browser: Optional[AsyncBrowser]=None) ->BaseBrowserTool:
    """Instantiate the tool."""
    # Fail fast with a helpful message if playwright is not installed.
    lazy_import_playwright_browsers()
    tool = cls(sync_browser=sync_browser, async_browser=async_browser)
    return tool
|
Instantiate the tool.
|
_process_llm_result
|
run_manager.on_text(llm_output, color='green', verbose=self.verbose)
llm_output = llm_output.strip()
text_match = re.search('^```text(.*?)```', llm_output, re.DOTALL)
if text_match:
expression = text_match.group(1)
output = self._evaluate_expression(expression)
run_manager.on_text('\nAnswer: ', verbose=self.verbose)
run_manager.on_text(output, color='yellow', verbose=self.verbose)
answer = 'Answer: ' + output
elif llm_output.startswith('Answer:'):
answer = llm_output
elif 'Answer:' in llm_output:
answer = 'Answer: ' + llm_output.split('Answer:')[-1]
else:
raise ValueError(f'unknown format from LLM: {llm_output}')
return {self.output_key: answer}
|
def _process_llm_result(self, llm_output: str, run_manager:
    CallbackManagerForChainRun) ->Dict[str, str]:
    """Turn raw LLM output into the chain's answer dict.

    Evaluates a leading ```text``` code fence if present; otherwise
    passes through (or extracts) an "Answer:" line. Raises ValueError
    when the output matches none of the expected shapes.
    """
    run_manager.on_text(llm_output, color='green', verbose=self.verbose)
    stripped = llm_output.strip()
    code_fence = re.search('^```text(.*?)```', stripped, re.DOTALL)
    if code_fence is not None:
        evaluated = self._evaluate_expression(code_fence.group(1))
        run_manager.on_text('\nAnswer: ', verbose=self.verbose)
        run_manager.on_text(evaluated, color='yellow', verbose=self.verbose)
        answer = 'Answer: ' + evaluated
    elif stripped.startswith('Answer:'):
        answer = stripped
    elif 'Answer:' in stripped:
        answer = 'Answer: ' + stripped.split('Answer:')[-1]
    else:
        raise ValueError(f'unknown format from LLM: {stripped}')
    return {self.output_key: answer}
| null |
_get_tables_to_query
|
"""Get the tables names that need to be queried, after checking they exist."""
if table_names is not None:
if isinstance(table_names, list) and len(table_names) > 0 and table_names[0
] != '':
fixed_tables = [fix_table_name(table) for table in table_names]
non_existing_tables = [table for table in fixed_tables if table not in
self.table_names]
if non_existing_tables:
logger.warning('Table(s) %s not found in dataset.', ', '.join(
non_existing_tables))
tables = [table for table in fixed_tables if table not in
non_existing_tables]
return tables if tables else None
if isinstance(table_names, str) and table_names != '':
if table_names not in self.table_names:
logger.warning('Table %s not found in dataset.', table_names)
return None
return [fix_table_name(table_names)]
return self.table_names
|
def _get_tables_to_query(self, table_names: Optional[Union[List[str], str]]
=None) ->Optional[List[str]]:
"""Get the tables names that need to be queried, after checking they exist."""
if table_names is not None:
if isinstance(table_names, list) and len(table_names
) > 0 and table_names[0] != '':
fixed_tables = [fix_table_name(table) for table in table_names]
non_existing_tables = [table for table in fixed_tables if table
not in self.table_names]
if non_existing_tables:
logger.warning('Table(s) %s not found in dataset.', ', '.
join(non_existing_tables))
tables = [table for table in fixed_tables if table not in
non_existing_tables]
return tables if tables else None
if isinstance(table_names, str) and table_names != '':
if table_names not in self.table_names:
logger.warning('Table %s not found in dataset.', table_names)
return None
return [fix_table_name(table_names)]
return self.table_names
|
Get the tables names that need to be queried, after checking they exist.
|
test_openai_batch
|
"""Test streaming tokens from OpenAI."""
llm = OpenAI(max_tokens=10)
result = llm.batch(["I'm Pickle Rick", "I'm not Pickle Rick"])
for token in result:
assert isinstance(token, str)
|
@pytest.mark.scheduled
def test_openai_batch() ->None:
    """Test streaming tokens from OpenAI."""
    model = OpenAI(max_tokens=10)
    outputs = model.batch(["I'm Pickle Rick", "I'm not Pickle Rick"])
    assert all(isinstance(chunk, str) for chunk in outputs)
|
Test streaming tokens from OpenAI.
|
on_chain_error
|
"""Do nothing when LLM chain outputs an error."""
pass
|
def on_chain_error(self, error: BaseException, **kwargs: Any) ->None:
    """No-op handler: chain errors are intentionally ignored."""
    return None
|
Do nothing when LLM chain outputs an error.
|
test_pgvector_with_metadatas
|
"""Test end to end construction and search."""
texts = ['foo', 'bar', 'baz']
metadatas = [{'page': str(i)} for i in range(len(texts))]
docsearch = PGVector.from_texts(texts=texts, collection_name=
'test_collection', embedding=FakeEmbeddingsWithAdaDimension(),
metadatas=metadatas, connection_string=CONNECTION_STRING,
pre_delete_collection=True)
output = docsearch.similarity_search('foo', k=1)
assert output == [Document(page_content='foo', metadata={'page': '0'})]
|
def test_pgvector_with_metadatas() ->None:
    """Test end to end construction and search."""
    corpus = ['foo', 'bar', 'baz']
    metadatas = [{'page': str(index)} for index in range(len(corpus))]
    store = PGVector.from_texts(
        texts=corpus,
        collection_name='test_collection',
        embedding=FakeEmbeddingsWithAdaDimension(),
        metadatas=metadatas,
        connection_string=CONNECTION_STRING,
        pre_delete_collection=True,
    )
    results = store.similarity_search('foo', k=1)
    assert results == [Document(page_content='foo', metadata={'page': '0'})]
|
Test end to end construction and search.
|
from_texts
|
"""Construct Vectara wrapper from raw documents.
This is intended to be a quick way to get started.
Example:
.. code-block:: python
from langchain_community.vectorstores import Vectara
vectara = Vectara.from_texts(
texts,
vectara_customer_id=customer_id,
vectara_corpus_id=corpus_id,
vectara_api_key=api_key,
)
"""
doc_metadata = kwargs.pop('doc_metadata', {})
vectara = cls(**kwargs)
vectara.add_texts(texts, metadatas, doc_metadata=doc_metadata, **kwargs)
return vectara
|
@classmethod
def from_texts(cls: Type[Vectara], texts: List[str], embedding: Optional[
    Embeddings]=None, metadatas: Optional[List[dict]]=None, **kwargs: Any
    ) ->Vectara:
    """Construct Vectara wrapper from raw documents.
    This is intended to be a quick way to get started.
    Example:
        .. code-block:: python
            from langchain_community.vectorstores import Vectara
            vectara = Vectara.from_texts(
                texts,
                vectara_customer_id=customer_id,
                vectara_corpus_id=corpus_id,
                vectara_api_key=api_key,
            )
    """
    # Pull out document-level metadata before forwarding kwargs to __init__.
    per_doc_metadata = kwargs.pop('doc_metadata', {})
    store = cls(**kwargs)
    store.add_texts(texts, metadatas, doc_metadata=per_doc_metadata, **kwargs)
    return store
|
Construct Vectara wrapper from raw documents.
This is intended to be a quick way to get started.
Example:
.. code-block:: python
from langchain_community.vectorstores import Vectara
vectara = Vectara.from_texts(
texts,
vectara_customer_id=customer_id,
vectara_corpus_id=corpus_id,
vectara_api_key=api_key,
)
|
value
|
"""The only defined document attribute value or None.
According to Amazon Kendra, you can only provide one
value for a document attribute.
"""
if self.DateValue:
return self.DateValue
if self.LongValue:
return self.LongValue
if self.StringListValue:
return self.StringListValue
if self.StringValue:
return self.StringValue
return None
|
@property
def value(self) ->DocumentAttributeValueType:
    """The only defined document attribute value or None.

    According to Amazon Kendra, you can only provide one
    value for a document attribute.
    """
    # Compare against None explicitly: truthiness checks would wrongly
    # skip legitimate falsy values such as LongValue == 0 or an empty
    # string, reporting them as None.
    if self.DateValue is not None:
        return self.DateValue
    if self.LongValue is not None:
        return self.LongValue
    if self.StringListValue is not None:
        return self.StringListValue
    if self.StringValue is not None:
        return self.StringValue
    return None
|
The only defined document attribute value or None.
According to Amazon Kendra, you can only provide one
value for a document attribute.
|
similarity_search
|
"""
Returns the most similar indexed documents to the query text.
Args:
query (str): The query text for which to find similar documents.
k (int): The number of documents to return. Default is 4.
Returns:
List[Document]: A list of documents that are most similar to the query text.
"""
embedding = self.embedding_function.embed_query(query)
keys_and_scores = self.client.tvs_knnsearch(self.index_name, k, embedding,
False, None, **kwargs)
pipeline = self.client.pipeline(transaction=False)
for key, _ in keys_and_scores:
pipeline.tvs_hmget(self.index_name, key, self.metadata_key, self.
content_key)
docs = pipeline.execute()
return [Document(page_content=d[1], metadata=json.loads(d[0])) for d in docs]
|
def similarity_search(self, query: str, k: int=4, **kwargs: Any) ->List[
    Document]:
    """
    Returns the most similar indexed documents to the query text.
    Args:
        query (str): The query text for which to find similar documents.
        k (int): The number of documents to return. Default is 4.
    Returns:
        List[Document]: A list of documents that are most similar to the query text.
    """
    query_vector = self.embedding_function.embed_query(query)
    matches = self.client.tvs_knnsearch(self.index_name, k, query_vector,
        False, None, **kwargs)
    # Batch the per-key metadata/content fetches into one pipeline round trip.
    pipe = self.client.pipeline(transaction=False)
    for doc_key, _score in matches:
        pipe.tvs_hmget(self.index_name, doc_key, self.metadata_key, self.
            content_key)
    rows = pipe.execute()
    documents = []
    for row in rows:
        # row[0] holds the JSON-encoded metadata, row[1] the page content.
        documents.append(Document(page_content=row[1], metadata=json.loads(
            row[0])))
    return documents
|
Returns the most similar indexed documents to the query text.
Args:
query (str): The query text for which to find similar documents.
k (int): The number of documents to return. Default is 4.
Returns:
List[Document]: A list of documents that are most similar to the query text.
|
test_elasticsearch_indexing_exception_error
|
"""Test bulk exception logging is giving better hints."""
from elasticsearch.helpers import BulkIndexError
docsearch = ElasticsearchStore(embedding=ConsistentFakeEmbeddings(), **
elasticsearch_connection, index_name=index_name)
docsearch.client.indices.create(index=index_name, mappings={'properties': {
}}, settings={'index': {'default_pipeline': 'not-existing-pipeline'}})
texts = ['foo']
with pytest.raises(BulkIndexError):
docsearch.add_texts(texts)
error_reason = 'pipeline with id [not-existing-pipeline] does not exist'
log_message = f'First error reason: {error_reason}'
assert log_message in caplog.text
|
def test_elasticsearch_indexing_exception_error(self,
    elasticsearch_connection: dict, index_name: str, caplog: pytest.
    LogCaptureFixture) ->None:
    """Test bulk exception logging is giving better hints."""
    from elasticsearch.helpers import BulkIndexError
    store = ElasticsearchStore(embedding=ConsistentFakeEmbeddings(), **
        elasticsearch_connection, index_name=index_name)
    # Point the index at a pipeline that does not exist so indexing fails.
    store.client.indices.create(index=index_name, mappings={'properties': {
        }}, settings={'index': {'default_pipeline': 'not-existing-pipeline'}})
    with pytest.raises(BulkIndexError):
        store.add_texts(['foo'])
    error_reason = 'pipeline with id [not-existing-pipeline] does not exist'
    assert f'First error reason: {error_reason}' in caplog.text
|
Test bulk exception logging is giving better hints.
|
set_ref
|
self.ref = ref
|
def set_ref(self, ref: str) ->None:
    """Store the given reference string on this instance."""
    self.ref = ref
| null |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.