method_name
stringlengths 1
78
| method_body
stringlengths 3
9.66k
| full_code
stringlengths 31
10.7k
| docstring
stringlengths 4
4.74k
⌀ |
|---|---|---|---|
is_running
|
return any(not op.done() for op in operations)
|
def is_running(self, operations: List['Operation']) ->bool:
return any(not op.done() for op in operations)
| null |
get_relevant_documents
|
assert isinstance(self, FakeRetrieverV1)
return [Document(page_content=query, metadata={'uuid': '1234'})]
|
def get_relevant_documents(self, query: str) ->List[Document]:
assert isinstance(self, FakeRetrieverV1)
return [Document(page_content=query, metadata={'uuid': '1234'})]
| null |
test_promptlayer_chat_openai_multiple_completions
|
"""Test PromptLayerChatOpenAI wrapper with multiple completions."""
chat = PromptLayerChatOpenAI(max_tokens=10, n=5)
message = HumanMessage(content='Hello')
response = chat._generate([message])
assert isinstance(response, ChatResult)
assert len(response.generations) == 5
for generation in response.generations:
assert isinstance(generation.message, BaseMessage)
assert isinstance(generation.message.content, str)
|
def test_promptlayer_chat_openai_multiple_completions() ->None:
"""Test PromptLayerChatOpenAI wrapper with multiple completions."""
chat = PromptLayerChatOpenAI(max_tokens=10, n=5)
message = HumanMessage(content='Hello')
response = chat._generate([message])
assert isinstance(response, ChatResult)
assert len(response.generations) == 5
for generation in response.generations:
assert isinstance(generation.message, BaseMessage)
assert isinstance(generation.message.content, str)
|
Test PromptLayerChatOpenAI wrapper with multiple completions.
|
_format_message_as_text
|
if isinstance(message, ChatMessage):
message_text = f'\n\n{message.role.capitalize()}: {message.content}'
elif isinstance(message, HumanMessage):
if message.content[0].get('type') == 'text':
message_text = f"[INST] {message.content[0]['text']} [/INST]"
elif message.content[0].get('type') == 'image_url':
message_text = message.content[0]['image_url']['url']
elif isinstance(message, AIMessage):
message_text = f'{message.content}'
elif isinstance(message, SystemMessage):
message_text = f'<<SYS>> {message.content} <</SYS>>'
else:
raise ValueError(f'Got unknown type {message}')
return message_text
|
@deprecated('0.0.3', alternative='_convert_messages_to_ollama_messages')
def _format_message_as_text(self, message: BaseMessage) ->str:
if isinstance(message, ChatMessage):
message_text = f'\n\n{message.role.capitalize()}: {message.content}'
elif isinstance(message, HumanMessage):
if message.content[0].get('type') == 'text':
message_text = f"[INST] {message.content[0]['text']} [/INST]"
elif message.content[0].get('type') == 'image_url':
message_text = message.content[0]['image_url']['url']
elif isinstance(message, AIMessage):
message_text = f'{message.content}'
elif isinstance(message, SystemMessage):
message_text = f'<<SYS>> {message.content} <</SYS>>'
else:
raise ValueError(f'Got unknown type {message}')
return message_text
| null |
resolve_pairwise_criteria
|
"""Resolve the criteria for the pairwise evaluator.
Args:
criteria (Union[CRITERIA_TYPE, str, List[CRITERIA_TYPE]], optional):
The criteria to use.
Returns:
dict: The resolved criteria.
"""
if criteria is None:
_default_criteria = [Criteria.HELPFULNESS, Criteria.RELEVANCE, Criteria
.CORRECTNESS, Criteria.DEPTH]
return {k.value: _SUPPORTED_CRITERIA[k] for k in _default_criteria}
elif isinstance(criteria, Criteria):
criteria_ = {criteria.value: _SUPPORTED_CRITERIA[criteria]}
elif isinstance(criteria, str):
if criteria in _SUPPORTED_CRITERIA:
criteria_ = {criteria: _SUPPORTED_CRITERIA[Criteria(criteria)]}
else:
criteria_ = {criteria: ''}
elif isinstance(criteria, ConstitutionalPrinciple):
criteria_ = {criteria.name: criteria.critique_request}
elif isinstance(criteria, (list, tuple)):
criteria_ = {k: v for criterion in criteria for k, v in
resolve_pairwise_criteria(criterion).items()}
else:
if not criteria:
raise ValueError(
'Criteria cannot be empty. Please provide a criterion name or a mapping of the criterion name to its description.'
)
criteria_ = dict(criteria)
return criteria_
|
def resolve_pairwise_criteria(criteria: Optional[Union[CRITERIA_TYPE, str,
List[CRITERIA_TYPE]]]) ->dict:
"""Resolve the criteria for the pairwise evaluator.
Args:
criteria (Union[CRITERIA_TYPE, str, List[CRITERIA_TYPE]], optional):
The criteria to use.
Returns:
dict: The resolved criteria.
"""
if criteria is None:
_default_criteria = [Criteria.HELPFULNESS, Criteria.RELEVANCE,
Criteria.CORRECTNESS, Criteria.DEPTH]
return {k.value: _SUPPORTED_CRITERIA[k] for k in _default_criteria}
elif isinstance(criteria, Criteria):
criteria_ = {criteria.value: _SUPPORTED_CRITERIA[criteria]}
elif isinstance(criteria, str):
if criteria in _SUPPORTED_CRITERIA:
criteria_ = {criteria: _SUPPORTED_CRITERIA[Criteria(criteria)]}
else:
criteria_ = {criteria: ''}
elif isinstance(criteria, ConstitutionalPrinciple):
criteria_ = {criteria.name: criteria.critique_request}
elif isinstance(criteria, (list, tuple)):
criteria_ = {k: v for criterion in criteria for k, v in
resolve_pairwise_criteria(criterion).items()}
else:
if not criteria:
raise ValueError(
'Criteria cannot be empty. Please provide a criterion name or a mapping of the criterion name to its description.'
)
criteria_ = dict(criteria)
return criteria_
|
Resolve the criteria for the pairwise evaluator.
Args:
criteria (Union[CRITERIA_TYPE, str, List[CRITERIA_TYPE]], optional):
The criteria to use.
Returns:
dict: The resolved criteria.
|
_validate_tools
|
"""Validate that appropriate tools are passed in."""
pass
|
@classmethod
def _validate_tools(cls, tools: Sequence[BaseTool]) ->None:
"""Validate that appropriate tools are passed in."""
pass
|
Validate that appropriate tools are passed in.
|
__getitem__
|
...
|
@overload
def __getitem__(self, index: int) ->MessageLike:
...
| null |
atransform
|
if not hasattr(self, '_atransform'):
raise NotImplementedError('This runnable does not support async methods.')
return self._atransform_stream_with_config(input, self._atransform, config,
**kwargs)
|
def atransform(self, input: AsyncIterator[Input], config: Optional[
RunnableConfig]=None, **kwargs: Any) ->AsyncIterator[Output]:
if not hasattr(self, '_atransform'):
raise NotImplementedError(
'This runnable does not support async methods.')
return self._atransform_stream_with_config(input, self._atransform,
config, **kwargs)
| null |
create
|
"""
Create a EmbedchainRetriever from a YAML configuration file.
Args:
yaml_path: Path to the YAML configuration file. If not provided,
a default configuration is used.
Returns:
An instance of EmbedchainRetriever.
"""
from embedchain import Pipeline
if yaml_path:
client = Pipeline.from_config(yaml_path=yaml_path)
else:
client = Pipeline()
return cls(client=client)
|
@classmethod
def create(cls, yaml_path: Optional[str]=None) ->EmbedchainRetriever:
"""
Create a EmbedchainRetriever from a YAML configuration file.
Args:
yaml_path: Path to the YAML configuration file. If not provided,
a default configuration is used.
Returns:
An instance of EmbedchainRetriever.
"""
from embedchain import Pipeline
if yaml_path:
client = Pipeline.from_config(yaml_path=yaml_path)
else:
client = Pipeline()
return cls(client=client)
|
Create a EmbedchainRetriever from a YAML configuration file.
Args:
yaml_path: Path to the YAML configuration file. If not provided,
a default configuration is used.
Returns:
An instance of EmbedchainRetriever.
|
lazy_load
|
"""Lazy load from a file path."""
dump = self._load_dump_file()
for page in dump.pages:
if self.skip_redirects and page.redirect:
continue
if self.namespaces and page.namespace not in self.namespaces:
continue
try:
yield self._load_single_page_from_dump(page)
except Exception as e:
logger.error('Parsing error: {}'.format(e))
if self.stop_on_error:
raise e
else:
continue
|
def lazy_load(self) ->Iterator[Document]:
"""Lazy load from a file path."""
dump = self._load_dump_file()
for page in dump.pages:
if self.skip_redirects and page.redirect:
continue
if self.namespaces and page.namespace not in self.namespaces:
continue
try:
yield self._load_single_page_from_dump(page)
except Exception as e:
logger.error('Parsing error: {}'.format(e))
if self.stop_on_error:
raise e
else:
continue
|
Lazy load from a file path.
|
delete
|
"""Delete documents from the index.
Only support direct-access index.
Args:
ids: List of ids of documents to delete.
Returns:
True if successful.
"""
self._op_require_direct_access_index('delete')
if ids is None:
raise ValueError('ids must be provided.')
self.index.delete(ids)
return True
|
def delete(self, ids: Optional[List[Any]]=None, **kwargs: Any) ->Optional[bool
]:
"""Delete documents from the index.
Only support direct-access index.
Args:
ids: List of ids of documents to delete.
Returns:
True if successful.
"""
self._op_require_direct_access_index('delete')
if ids is None:
raise ValueError('ids must be provided.')
self.index.delete(ids)
return True
|
Delete documents from the index.
Only support direct-access index.
Args:
ids: List of ids of documents to delete.
Returns:
True if successful.
|
_tools_description
|
"""Get the description of the agent tools.
Returns:
str: The description of the agent tools.
"""
if self.agent_tools is None:
return ''
return '\n\n'.join([
f"""Tool {i}: {tool.name}
Description: {tool.description}""" for i,
tool in enumerate(self.agent_tools, 1)])
|
@property
def _tools_description(self) ->str:
"""Get the description of the agent tools.
Returns:
str: The description of the agent tools.
"""
if self.agent_tools is None:
return ''
return '\n\n'.join([
f'Tool {i}: {tool.name}\nDescription: {tool.description}' for i,
tool in enumerate(self.agent_tools, 1)])
|
Get the description of the agent tools.
Returns:
str: The description of the agent tools.
|
_import_playwright_CurrentWebPageTool
|
from langchain_community.tools.playwright import CurrentWebPageTool
return CurrentWebPageTool
|
def _import_playwright_CurrentWebPageTool() ->Any:
from langchain_community.tools.playwright import CurrentWebPageTool
return CurrentWebPageTool
| null |
parse
|
lines = re.findall('\\d+\\..*?(?:\\n|$)', text)
return LineList(lines=lines)
|
def parse(self, text: str) ->LineList:
lines = re.findall('\\d+\\..*?(?:\\n|$)', text)
return LineList(lines=lines)
| null |
_make_tool
|
if isinstance(dec_func, Runnable):
runnable = dec_func
if runnable.input_schema.schema().get('type') != 'object':
raise ValueError('Runnable must have an object schema.')
async def ainvoke_wrapper(callbacks: Optional[Callbacks]=None, **kwargs:
Any) ->Any:
return await runnable.ainvoke(kwargs, {'callbacks': callbacks})
def invoke_wrapper(callbacks: Optional[Callbacks]=None, **kwargs: Any
) ->Any:
return runnable.invoke(kwargs, {'callbacks': callbacks})
coroutine = ainvoke_wrapper
func = invoke_wrapper
schema: Optional[Type[BaseModel]] = runnable.input_schema
description = repr(runnable)
elif inspect.iscoroutinefunction(dec_func):
coroutine = dec_func
func = None
schema = args_schema
description = None
else:
coroutine = None
func = dec_func
schema = args_schema
description = None
if infer_schema or args_schema is not None:
return StructuredTool.from_function(func, coroutine, name=tool_name,
description=description, return_direct=return_direct, args_schema=
schema, infer_schema=infer_schema)
if func.__doc__ is None:
raise ValueError(
'Function must have a docstring if description not provided and infer_schema is False.'
)
return Tool(name=tool_name, func=func, description=f'{tool_name} tool',
return_direct=return_direct, coroutine=coroutine)
|
def _make_tool(dec_func: Union[Callable, Runnable]) ->BaseTool:
if isinstance(dec_func, Runnable):
runnable = dec_func
if runnable.input_schema.schema().get('type') != 'object':
raise ValueError('Runnable must have an object schema.')
async def ainvoke_wrapper(callbacks: Optional[Callbacks]=None, **
kwargs: Any) ->Any:
return await runnable.ainvoke(kwargs, {'callbacks': callbacks})
def invoke_wrapper(callbacks: Optional[Callbacks]=None, **kwargs: Any
) ->Any:
return runnable.invoke(kwargs, {'callbacks': callbacks})
coroutine = ainvoke_wrapper
func = invoke_wrapper
schema: Optional[Type[BaseModel]] = runnable.input_schema
description = repr(runnable)
elif inspect.iscoroutinefunction(dec_func):
coroutine = dec_func
func = None
schema = args_schema
description = None
else:
coroutine = None
func = dec_func
schema = args_schema
description = None
if infer_schema or args_schema is not None:
return StructuredTool.from_function(func, coroutine, name=tool_name,
description=description, return_direct=return_direct,
args_schema=schema, infer_schema=infer_schema)
if func.__doc__ is None:
raise ValueError(
'Function must have a docstring if description not provided and infer_schema is False.'
)
return Tool(name=tool_name, func=func, description=f'{tool_name} tool',
return_direct=return_direct, coroutine=coroutine)
| null |
add_node
|
"""Add a node to the graph and return it."""
node = Node(id=self.next_id(), data=data)
self.nodes[node.id] = node
return node
|
def add_node(self, data: Union[Type[BaseModel], RunnableType]) ->Node:
"""Add a node to the graph and return it."""
node = Node(id=self.next_id(), data=data)
self.nodes[node.id] = node
return node
|
Add a node to the graph and return it.
|
test_visit_structured_query_deep_nesting
|
query = 'What is the capital of France?'
op = Operation(operator=Operator.AND, arguments=[Comparison(comparator=
Comparator.EQ, attribute='name', value='foo'), Operation(operator=
Operator.OR, arguments=[Comparison(comparator=Comparator.GT, attribute=
'qty', value=6), Comparison(comparator=Comparator.NIN, attribute='tags',
value=['bar', 'foo'])])])
structured_query = StructuredQuery(query=query, filter=op)
expected = query, {'pre_filter': {'$and': [{'name': {'$eq': 'foo'}}, {'$or':
[{'qty': {'$gt': 6}}, {'tags': {'$nin': ['bar', 'foo']}}]}]}}
actual = DEFAULT_TRANSLATOR.visit_structured_query(structured_query)
assert expected == actual
|
def test_visit_structured_query_deep_nesting() ->None:
query = 'What is the capital of France?'
op = Operation(operator=Operator.AND, arguments=[Comparison(comparator=
Comparator.EQ, attribute='name', value='foo'), Operation(operator=
Operator.OR, arguments=[Comparison(comparator=Comparator.GT,
attribute='qty', value=6), Comparison(comparator=Comparator.NIN,
attribute='tags', value=['bar', 'foo'])])])
structured_query = StructuredQuery(query=query, filter=op)
expected = query, {'pre_filter': {'$and': [{'name': {'$eq': 'foo'}}, {
'$or': [{'qty': {'$gt': 6}}, {'tags': {'$nin': ['bar', 'foo']}}]}]}}
actual = DEFAULT_TRANSLATOR.visit_structured_query(structured_query)
assert expected == actual
| null |
test_embeddings_redundant_filter
|
texts = ['What happened to all of my cookies?',
'Where did all of my cookies go?',
'I wish there were better Italian restaurants in my neighborhood.']
docs = [Document(page_content=t) for t in texts]
embeddings = OpenAIEmbeddings()
redundant_filter = EmbeddingsRedundantFilter(embeddings=embeddings)
actual = redundant_filter.transform_documents(docs)
assert len(actual) == 2
assert set(texts[:2]).intersection([d.page_content for d in actual])
|
def test_embeddings_redundant_filter() ->None:
texts = ['What happened to all of my cookies?',
'Where did all of my cookies go?',
'I wish there were better Italian restaurants in my neighborhood.']
docs = [Document(page_content=t) for t in texts]
embeddings = OpenAIEmbeddings()
redundant_filter = EmbeddingsRedundantFilter(embeddings=embeddings)
actual = redundant_filter.transform_documents(docs)
assert len(actual) == 2
assert set(texts[:2]).intersection([d.page_content for d in actual])
| null |
test_clear
|
"""
Test cleanup of data in the store
"""
self.vectorstore.clear()
assert self.vectorstore.count() == 0
|
def test_clear(self) ->None:
"""
Test cleanup of data in the store
"""
self.vectorstore.clear()
assert self.vectorstore.count() == 0
|
Test cleanup of data in the store
|
get_lc_namespace
|
"""Get the namespace of the langchain object."""
return ['langchain', 'prompts', 'prompt']
|
@classmethod
def get_lc_namespace(cls) ->List[str]:
"""Get the namespace of the langchain object."""
return ['langchain', 'prompts', 'prompt']
|
Get the namespace of the langchain object.
|
_prepare_params
|
stop_sequences = stop or self.stop
params_mapping = {'n': 'candidate_count'}
params = {params_mapping.get(k, k): v for k, v in kwargs.items()}
params = {**self._default_params, 'stop_sequences': stop_sequences, **params}
if stream or self.streaming:
params.pop('candidate_count')
return params
|
def _prepare_params(self, stop: Optional[List[str]]=None, stream: bool=
False, **kwargs: Any) ->dict:
stop_sequences = stop or self.stop
params_mapping = {'n': 'candidate_count'}
params = {params_mapping.get(k, k): v for k, v in kwargs.items()}
params = {**self._default_params, 'stop_sequences': stop_sequences, **
params}
if stream or self.streaming:
params.pop('candidate_count')
return params
| null |
test_api_key_is_string
|
llm = PipelineAI(pipeline_api_key='secret-api-key')
assert isinstance(llm.pipeline_api_key, SecretStr)
|
def test_api_key_is_string() ->None:
llm = PipelineAI(pipeline_api_key='secret-api-key')
assert isinstance(llm.pipeline_api_key, SecretStr)
| null |
embed_documents
|
text_features = []
for text in texts:
tokenized_text = self.tokenizer(text)
embeddings_tensor = self.model.encode_text(tokenized_text)
norm = embeddings_tensor.norm(p=2, dim=1, keepdim=True)
normalized_embeddings_tensor = embeddings_tensor.div(norm)
embeddings_list = normalized_embeddings_tensor.squeeze(0).tolist()
text_features.append(embeddings_list)
return text_features
|
def embed_documents(self, texts: List[str]) ->List[List[float]]:
text_features = []
for text in texts:
tokenized_text = self.tokenizer(text)
embeddings_tensor = self.model.encode_text(tokenized_text)
norm = embeddings_tensor.norm(p=2, dim=1, keepdim=True)
normalized_embeddings_tensor = embeddings_tensor.div(norm)
embeddings_list = normalized_embeddings_tensor.squeeze(0).tolist()
text_features.append(embeddings_list)
return text_features
| null |
test_chroma_update_document
|
"""Test the update_document function in the Chroma class."""
embedding = ConsistentFakeEmbeddings()
initial_content = 'foo'
document_id = 'doc1'
original_doc = Document(page_content=initial_content, metadata={'page': '0'})
docsearch = Chroma.from_documents(collection_name='test_collection',
documents=[original_doc], embedding=embedding, ids=[document_id])
old_embedding = docsearch._collection.peek()['embeddings'][docsearch.
_collection.peek()['ids'].index(document_id)]
updated_content = 'updated foo'
updated_doc = Document(page_content=updated_content, metadata={'page': '0'})
docsearch.update_document(document_id=document_id, document=updated_doc)
output = docsearch.similarity_search(updated_content, k=1)
assert output == [Document(page_content=updated_content, metadata={'page':
'0'})]
new_embedding = docsearch._collection.peek()['embeddings'][docsearch.
_collection.peek()['ids'].index(document_id)]
assert new_embedding == embedding.embed_documents([updated_content])[0]
assert new_embedding != old_embedding
|
def test_chroma_update_document() ->None:
"""Test the update_document function in the Chroma class."""
embedding = ConsistentFakeEmbeddings()
initial_content = 'foo'
document_id = 'doc1'
original_doc = Document(page_content=initial_content, metadata={'page':
'0'})
docsearch = Chroma.from_documents(collection_name='test_collection',
documents=[original_doc], embedding=embedding, ids=[document_id])
old_embedding = docsearch._collection.peek()['embeddings'][docsearch.
_collection.peek()['ids'].index(document_id)]
updated_content = 'updated foo'
updated_doc = Document(page_content=updated_content, metadata={'page': '0'}
)
docsearch.update_document(document_id=document_id, document=updated_doc)
output = docsearch.similarity_search(updated_content, k=1)
assert output == [Document(page_content=updated_content, metadata={
'page': '0'})]
new_embedding = docsearch._collection.peek()['embeddings'][docsearch.
_collection.peek()['ids'].index(document_id)]
assert new_embedding == embedding.embed_documents([updated_content])[0]
assert new_embedding != old_embedding
|
Test the update_document function in the Chroma class.
|
test_minimax_call
|
"""Test valid call to minimax."""
llm = Minimax(max_tokens=10)
output = llm('Hello world!')
assert isinstance(output, str)
|
def test_minimax_call() ->None:
"""Test valid call to minimax."""
llm = Minimax(max_tokens=10)
output = llm('Hello world!')
assert isinstance(output, str)
|
Test valid call to minimax.
|
input_keys
|
"""Input keys for Hyde's LLM chain."""
return self.llm_chain.input_keys
|
@property
def input_keys(self) ->List[str]:
"""Input keys for Hyde's LLM chain."""
return self.llm_chain.input_keys
|
Input keys for Hyde's LLM chain.
|
_import_clarifai
|
from langchain_community.vectorstores.clarifai import Clarifai
return Clarifai
|
def _import_clarifai() ->Any:
from langchain_community.vectorstores.clarifai import Clarifai
return Clarifai
| null |
OutputType
|
for cls in self.__class__.__orig_bases__:
type_args = get_args(cls)
if type_args and len(type_args) == 1:
return type_args[0]
raise TypeError(
f"Runnable {self.__class__.__name__} doesn't have an inferable OutputType. Override the OutputType property to specify the output type."
)
|
@property
def OutputType(self) ->Type[T]:
for cls in self.__class__.__orig_bases__:
type_args = get_args(cls)
if type_args and len(type_args) == 1:
return type_args[0]
raise TypeError(
f"Runnable {self.__class__.__name__} doesn't have an inferable OutputType. Override the OutputType property to specify the output type."
)
| null |
_load_refine_chain
|
initial_chain = LLMChain(llm=llm, prompt=question_prompt, verbose=verbose)
_refine_llm = refine_llm or llm
refine_chain = LLMChain(llm=_refine_llm, prompt=refine_prompt, verbose=verbose)
return RefineDocumentsChain(initial_llm_chain=initial_chain,
refine_llm_chain=refine_chain, document_variable_name=
document_variable_name, initial_response_name=initial_response_name,
document_prompt=document_prompt, verbose=verbose, **kwargs)
|
def _load_refine_chain(llm: BaseLanguageModel, question_prompt:
BasePromptTemplate=refine_prompts.DEFAULT_TEXT_QA_PROMPT, refine_prompt:
BasePromptTemplate=refine_prompts.DEFAULT_REFINE_PROMPT,
document_prompt: BasePromptTemplate=refine_prompts.EXAMPLE_PROMPT,
document_variable_name: str='context_str', initial_response_name: str=
'existing_answer', refine_llm: Optional[BaseLanguageModel]=None,
verbose: Optional[bool]=None, **kwargs: Any) ->RefineDocumentsChain:
initial_chain = LLMChain(llm=llm, prompt=question_prompt, verbose=verbose)
_refine_llm = refine_llm or llm
refine_chain = LLMChain(llm=_refine_llm, prompt=refine_prompt, verbose=
verbose)
return RefineDocumentsChain(initial_llm_chain=initial_chain,
refine_llm_chain=refine_chain, document_variable_name=
document_variable_name, initial_response_name=initial_response_name,
document_prompt=document_prompt, verbose=verbose, **kwargs)
| null |
test_missing_apikey_raises_validation_error
|
with self.assertRaises(ValueError) as cm:
RSpaceLoader(url=TestRSpaceLoader.url, global_id=TestRSpaceLoader.global_id
)
e = cm.exception
self.assertRegex(str(e), 'Did not find api_key')
|
def test_missing_apikey_raises_validation_error(self) ->None:
with self.assertRaises(ValueError) as cm:
RSpaceLoader(url=TestRSpaceLoader.url, global_id=TestRSpaceLoader.
global_id)
e = cm.exception
self.assertRegex(str(e), 'Did not find api_key')
| null |
input_keys
|
"""Return the input keys.
:meta private:
"""
|
@property
@abstractmethod
def input_keys(self) ->List[str]:
"""Return the input keys.
:meta private:
"""
|
Return the input keys.
:meta private:
|
from_browser
|
"""Instantiate the toolkit."""
lazy_import_playwright_browsers()
return cls(sync_browser=sync_browser, async_browser=async_browser)
|
@classmethod
def from_browser(cls, sync_browser: Optional[SyncBrowser]=None,
async_browser: Optional[AsyncBrowser]=None) ->PlayWrightBrowserToolkit:
"""Instantiate the toolkit."""
lazy_import_playwright_browsers()
return cls(sync_browser=sync_browser, async_browser=async_browser)
|
Instantiate the toolkit.
|
test_all_imports
|
assert set(__all__) == set(EXPECTED_ALL)
|
def test_all_imports() ->None:
assert set(__all__) == set(EXPECTED_ALL)
| null |
on_tool_start
|
"""Run when tool starts running."""
self.step += 1
self.tool_starts += 1
self.starts += 1
resp = self._init_resp()
resp.update({'action': 'on_tool_start', 'input_str': input_str})
resp.update(flatten_dict(serialized))
resp.update(self.get_custom_callback_meta())
self.on_tool_start_records.append(resp)
self.action_records.append(resp)
if self.stream_logs:
self.logger.report_text(resp)
|
def on_tool_start(self, serialized: Dict[str, Any], input_str: str, **
kwargs: Any) ->None:
"""Run when tool starts running."""
self.step += 1
self.tool_starts += 1
self.starts += 1
resp = self._init_resp()
resp.update({'action': 'on_tool_start', 'input_str': input_str})
resp.update(flatten_dict(serialized))
resp.update(self.get_custom_callback_meta())
self.on_tool_start_records.append(resp)
self.action_records.append(resp)
if self.stream_logs:
self.logger.report_text(resp)
|
Run when tool starts running.
|
score
|
return self.sum / len(self.queue) if len(self.queue) > 0 else 0
|
@property
def score(self) ->float:
return self.sum / len(self.queue) if len(self.queue) > 0 else 0
| null |
from_texts
|
"""Create a Zilliz collection, indexes it with HNSW, and insert data.
Args:
texts (List[str]): Text data.
embedding (Embeddings): Embedding function.
metadatas (Optional[List[dict]]): Metadata for each text if it exists.
Defaults to None.
collection_name (str, optional): Collection name to use. Defaults to
"LangChainCollection".
connection_args (dict[str, Any], optional): Connection args to use. Defaults
to DEFAULT_MILVUS_CONNECTION.
consistency_level (str, optional): Which consistency level to use. Defaults
to "Session".
index_params (Optional[dict], optional): Which index_params to use.
Defaults to None.
search_params (Optional[dict], optional): Which search params to use.
Defaults to None.
drop_old (Optional[bool], optional): Whether to drop the collection with
that name if it exists. Defaults to False.
Returns:
Zilliz: Zilliz Vector Store
"""
vector_db = cls(embedding_function=embedding, collection_name=
collection_name, connection_args=connection_args or {},
consistency_level=consistency_level, index_params=index_params,
search_params=search_params, drop_old=drop_old, **kwargs)
vector_db.add_texts(texts=texts, metadatas=metadatas)
return vector_db
|
@classmethod
def from_texts(cls, texts: List[str], embedding: Embeddings, metadatas:
Optional[List[dict]]=None, collection_name: str='LangChainCollection',
connection_args: Optional[Dict[str, Any]]=None, consistency_level: str=
'Session', index_params: Optional[dict]=None, search_params: Optional[
dict]=None, drop_old: bool=False, **kwargs: Any) ->Zilliz:
"""Create a Zilliz collection, indexes it with HNSW, and insert data.
Args:
texts (List[str]): Text data.
embedding (Embeddings): Embedding function.
metadatas (Optional[List[dict]]): Metadata for each text if it exists.
Defaults to None.
collection_name (str, optional): Collection name to use. Defaults to
"LangChainCollection".
connection_args (dict[str, Any], optional): Connection args to use. Defaults
to DEFAULT_MILVUS_CONNECTION.
consistency_level (str, optional): Which consistency level to use. Defaults
to "Session".
index_params (Optional[dict], optional): Which index_params to use.
Defaults to None.
search_params (Optional[dict], optional): Which search params to use.
Defaults to None.
drop_old (Optional[bool], optional): Whether to drop the collection with
that name if it exists. Defaults to False.
Returns:
Zilliz: Zilliz Vector Store
"""
vector_db = cls(embedding_function=embedding, collection_name=
collection_name, connection_args=connection_args or {},
consistency_level=consistency_level, index_params=index_params,
search_params=search_params, drop_old=drop_old, **kwargs)
vector_db.add_texts(texts=texts, metadatas=metadatas)
return vector_db
|
Create a Zilliz collection, indexes it with HNSW, and insert data.
Args:
texts (List[str]): Text data.
embedding (Embeddings): Embedding function.
metadatas (Optional[List[dict]]): Metadata for each text if it exists.
Defaults to None.
collection_name (str, optional): Collection name to use. Defaults to
"LangChainCollection".
connection_args (dict[str, Any], optional): Connection args to use. Defaults
to DEFAULT_MILVUS_CONNECTION.
consistency_level (str, optional): Which consistency level to use. Defaults
to "Session".
index_params (Optional[dict], optional): Which index_params to use.
Defaults to None.
search_params (Optional[dict], optional): Which search params to use.
Defaults to None.
drop_old (Optional[bool], optional): Whether to drop the collection with
that name if it exists. Defaults to False.
Returns:
Zilliz: Zilliz Vector Store
|
test_initialization
|
"""Test embedding model initialization."""
TogetherEmbeddings(model='togethercomputer/m2-bert-80M-8k-retrieval')
|
def test_initialization() ->None:
"""Test embedding model initialization."""
TogetherEmbeddings(model='togethercomputer/m2-bert-80M-8k-retrieval')
|
Test embedding model initialization.
|
set_handlers
|
"""Set handlers as the only handlers on the callback manager."""
self.handlers = []
self.inheritable_handlers = []
for handler in handlers:
self.add_handler(handler, inherit=inherit)
|
def set_handlers(self, handlers: List[BaseCallbackHandler], inherit: bool=True
) ->None:
"""Set handlers as the only handlers on the callback manager."""
self.handlers = []
self.inheritable_handlers = []
for handler in handlers:
self.add_handler(handler, inherit=inherit)
|
Set handlers as the only handlers on the callback manager.
|
_get_document_for_channel
|
try:
from youtube_transcript_api import NoTranscriptFound, TranscriptsDisabled
except ImportError:
raise ImportError(
'You must run`pip install --upgrade youtube-transcript-api` to use the youtube loader'
)
channel_id = self._get_channel_id(channel)
request = self.youtube_client.search().list(part='id,snippet', channelId=
channel_id, maxResults=50)
video_ids = []
while request is not None:
response = request.execute()
for item in response['items']:
if not item['id'].get('videoId'):
continue
meta_data = {'videoId': item['id']['videoId']}
if self.add_video_info:
item['snippet'].pop('thumbnails')
meta_data.update(item['snippet'])
try:
page_content = self._get_transcripe_for_video_id(item['id'][
'videoId'])
video_ids.append(Document(page_content=page_content, metadata=
meta_data))
except (TranscriptsDisabled, NoTranscriptFound) as e:
if self.continue_on_failure:
logger.error('Error fetching transscript ' +
f" {item['id']['videoId']}, exception: {e}")
else:
raise e
pass
request = self.youtube_client.search().list_next(request, response)
return video_ids
|
def _get_document_for_channel(self, channel: str, **kwargs: Any) ->List[
Document]:
try:
from youtube_transcript_api import NoTranscriptFound, TranscriptsDisabled
except ImportError:
raise ImportError(
'You must run`pip install --upgrade youtube-transcript-api` to use the youtube loader'
)
channel_id = self._get_channel_id(channel)
request = self.youtube_client.search().list(part='id,snippet',
channelId=channel_id, maxResults=50)
video_ids = []
while request is not None:
response = request.execute()
for item in response['items']:
if not item['id'].get('videoId'):
continue
meta_data = {'videoId': item['id']['videoId']}
if self.add_video_info:
item['snippet'].pop('thumbnails')
meta_data.update(item['snippet'])
try:
page_content = self._get_transcripe_for_video_id(item['id']
['videoId'])
video_ids.append(Document(page_content=page_content,
metadata=meta_data))
except (TranscriptsDisabled, NoTranscriptFound) as e:
if self.continue_on_failure:
logger.error('Error fetching transscript ' +
f" {item['id']['videoId']}, exception: {e}")
else:
raise e
pass
request = self.youtube_client.search().list_next(request, response)
return video_ids
| null |
test_sentence_transformers_split_text
|
splitter = SentenceTransformersTokenTextSplitter(model_name=
'sentence-transformers/paraphrase-albert-small-v2')
text = 'lorem ipsum'
text_chunks = splitter.split_text(text=text)
expected_text_chunks = [text]
assert expected_text_chunks == text_chunks
|
def test_sentence_transformers_split_text() ->None:
splitter = SentenceTransformersTokenTextSplitter(model_name=
'sentence-transformers/paraphrase-albert-small-v2')
text = 'lorem ipsum'
text_chunks = splitter.split_text(text=text)
expected_text_chunks = [text]
assert expected_text_chunks == text_chunks
| null |
similarity_search_with_score
|
"""Return Dingo documents most similar to query, along with scores.
Args:
query: Text to look up documents similar to.
k: Number of Documents to return. Defaults to 4.
search_params: Dictionary of argument(s) to filter on metadata
Returns:
List of Documents most similar to the query and score for each
"""
docs = []
query_obj = self._embedding.embed_query(query)
results = self._client.vector_search(self._index_name, xq=query_obj, top_k=
k, search_params=search_params)
if not results:
return []
for res in results[0]['vectorWithDistances']:
metadatas = res['scalarData']
id = res['id']
score = res['distance']
text = metadatas[self._text_key]['fields'][0]['data']
metadata = {'id': id, 'text': text, 'score': score}
for meta_key in metadatas.keys():
metadata[meta_key] = metadatas[meta_key]['fields'][0]['data']
docs.append((Document(page_content=text, metadata=metadata), score))
return docs
|
def similarity_search_with_score(self, query: str, k: int=4, search_params:
Optional[dict]=None, timeout: Optional[int]=None, **kwargs: Any) ->List[
Tuple[Document, float]]:
"""Return Dingo documents most similar to query, along with scores.
Args:
query: Text to look up documents similar to.
k: Number of Documents to return. Defaults to 4.
search_params: Dictionary of argument(s) to filter on metadata
Returns:
List of Documents most similar to the query and score for each
"""
docs = []
query_obj = self._embedding.embed_query(query)
results = self._client.vector_search(self._index_name, xq=query_obj,
top_k=k, search_params=search_params)
if not results:
return []
for res in results[0]['vectorWithDistances']:
metadatas = res['scalarData']
id = res['id']
score = res['distance']
text = metadatas[self._text_key]['fields'][0]['data']
metadata = {'id': id, 'text': text, 'score': score}
for meta_key in metadatas.keys():
metadata[meta_key] = metadatas[meta_key]['fields'][0]['data']
docs.append((Document(page_content=text, metadata=metadata), score))
return docs
|
Return Dingo documents most similar to query, along with scores.
Args:
query: Text to look up documents similar to.
k: Number of Documents to return. Defaults to 4.
search_params: Dictionary of argument(s) to filter on metadata
Returns:
List of Documents most similar to the query and score for each
|
exists
|
"""Check if the given keys exist in the SQLite database."""
with self._make_session() as session:
records = session.query(UpsertionRecord.key).filter(and_(
UpsertionRecord.key.in_(keys), UpsertionRecord.namespace == self.
namespace)).all()
found_keys = set(r.key for r in records)
return [(k in found_keys) for k in keys]
|
def exists(self, keys: Sequence[str]) ->List[bool]:
"""Check if the given keys exist in the SQLite database."""
with self._make_session() as session:
records = session.query(UpsertionRecord.key).filter(and_(
UpsertionRecord.key.in_(keys), UpsertionRecord.namespace ==
self.namespace)).all()
found_keys = set(r.key for r in records)
return [(k in found_keys) for k in keys]
|
Check if the given keys exist in the SQLite database.
|
_import_fake
|
from langchain_community.llms.fake import FakeListLLM
return FakeListLLM
|
def _import_fake() ->Any:
from langchain_community.llms.fake import FakeListLLM
return FakeListLLM
| null |
parse
|
"""Parse the output text.
Args:
text (str): The output text to parse.
Returns:
Dict: The parsed output.
Raises:
ValueError: If the verdict is invalid.
"""
match = _FIND_DOUBLE_BRACKETS.search(text)
if match:
verdict = match.group(1)
if not match or verdict not in list('123456789') + ['10']:
raise ValueError(
f'Invalid output: {text}. Output must contain a double bracketed string with the verdict between 1 and 10.'
)
return {'reasoning': text, 'score': int(verdict)}
|
def parse(self, text: str) ->Dict[str, Any]:
"""Parse the output text.
Args:
text (str): The output text to parse.
Returns:
Dict: The parsed output.
Raises:
ValueError: If the verdict is invalid.
"""
match = _FIND_DOUBLE_BRACKETS.search(text)
if match:
verdict = match.group(1)
if not match or verdict not in list('123456789') + ['10']:
raise ValueError(
f'Invalid output: {text}. Output must contain a double bracketed string with the verdict between 1 and 10.'
)
return {'reasoning': text, 'score': int(verdict)}
|
Parse the output text.
Args:
text (str): The output text to parse.
Returns:
Dict: The parsed output.
Raises:
ValueError: If the verdict is invalid.
|
__init__
|
super().__init__()
self.flags = flags
|
def __init__(self, *, flags: int=0, **kwargs: Any):
super().__init__()
self.flags = flags
| null |
from_llm
|
question_to_checked_assertions_chain = (
_load_question_to_checked_assertions_chain(llm,
create_draft_answer_prompt, list_assertions_prompt,
check_assertions_prompt, revised_answer_prompt))
return cls(question_to_checked_assertions_chain=
question_to_checked_assertions_chain, **kwargs)
|
@classmethod
def from_llm(cls, llm: BaseLanguageModel, create_draft_answer_prompt:
PromptTemplate=CREATE_DRAFT_ANSWER_PROMPT, list_assertions_prompt:
PromptTemplate=LIST_ASSERTIONS_PROMPT, check_assertions_prompt:
PromptTemplate=CHECK_ASSERTIONS_PROMPT, revised_answer_prompt:
PromptTemplate=REVISED_ANSWER_PROMPT, **kwargs: Any) ->LLMCheckerChain:
question_to_checked_assertions_chain = (
_load_question_to_checked_assertions_chain(llm,
create_draft_answer_prompt, list_assertions_prompt,
check_assertions_prompt, revised_answer_prompt))
return cls(question_to_checked_assertions_chain=
question_to_checked_assertions_chain, **kwargs)
| null |
_get_default_output_parser
|
"""Get default output parser for this class."""
|
@classmethod
@abstractmethod
def _get_default_output_parser(cls, **kwargs: Any) ->AgentOutputParser:
"""Get default output parser for this class."""
|
Get default output parser for this class.
|
fake_retriever_v1_with_kwargs
|
with pytest.warns(DeprecationWarning, match=
'Retrievers must implement abstract `_get_relevant_documents` method instead of `get_relevant_documents`'
):
class FakeRetrieverV1(BaseRetriever):
def get_relevant_documents(self, query: str, where_filter: Optional
[Dict[str, object]]=None) ->List[Document]:
assert isinstance(self, FakeRetrieverV1)
return [Document(page_content=query, metadata=where_filter or {})]
async def aget_relevant_documents(self, query: str, where_filter:
Optional[Dict[str, object]]=None) ->List[Document]:
assert isinstance(self, FakeRetrieverV1)
return [Document(page_content=f'Async query {query}', metadata=
where_filter or {})]
return FakeRetrieverV1()
|
@pytest.fixture
def fake_retriever_v1_with_kwargs() ->BaseRetriever:
with pytest.warns(DeprecationWarning, match=
'Retrievers must implement abstract `_get_relevant_documents` method instead of `get_relevant_documents`'
):
class FakeRetrieverV1(BaseRetriever):
def get_relevant_documents(self, query: str, where_filter:
Optional[Dict[str, object]]=None) ->List[Document]:
assert isinstance(self, FakeRetrieverV1)
return [Document(page_content=query, metadata=where_filter or
{})]
async def aget_relevant_documents(self, query: str,
where_filter: Optional[Dict[str, object]]=None) ->List[Document
]:
assert isinstance(self, FakeRetrieverV1)
return [Document(page_content=f'Async query {query}',
metadata=where_filter or {})]
return FakeRetrieverV1()
| null |
messages
|
"""Return the messages that correspond to this action."""
return _convert_agent_action_to_messages(self)
|
@property
def messages(self) ->Sequence[BaseMessage]:
"""Return the messages that correspond to this action."""
return _convert_agent_action_to_messages(self)
|
Return the messages that correspond to this action.
|
finish
|
"""Waits for all asynchronous processes to finish and data to upload.
Proxy for `wandb.finish()`.
"""
self._wandb.finish()
|
def finish(self) ->None:
"""Waits for all asynchronous processes to finish and data to upload.
Proxy for `wandb.finish()`.
"""
self._wandb.finish()
|
Waits for all asynchronous processes to finish and data to upload.
Proxy for `wandb.finish()`.
|
escape_symbol
|
value = match.group(0)
return f'\\{value}'
|
def escape_symbol(match: re.Match) ->str:
value = match.group(0)
return f'\\{value}'
| null |
_create_session_analysis_df
|
"""Create a dataframe with all the information from the session."""
pd = import_pandas()
on_llm_start_records_df = pd.DataFrame(self.on_llm_start_records)
on_llm_end_records_df = pd.DataFrame(self.on_llm_end_records)
llm_input_prompts_df = on_llm_start_records_df[['step', 'prompts', 'name']
].dropna(axis=1).rename({'step': 'prompt_step'}, axis=1)
complexity_metrics_columns = []
visualizations_columns = []
if self.complexity_metrics:
complexity_metrics_columns = ['flesch_reading_ease',
'flesch_kincaid_grade', 'smog_index', 'coleman_liau_index',
'automated_readability_index', 'dale_chall_readability_score',
'difficult_words', 'linsear_write_formula', 'gunning_fog',
'text_standard', 'fernandez_huerta', 'szigriszt_pazos',
'gutierrez_polini', 'crawford', 'gulpease_index', 'osman']
if self.visualize:
visualizations_columns = ['dependency_tree', 'entities']
llm_outputs_df = on_llm_end_records_df[['step', 'text',
'token_usage_total_tokens', 'token_usage_prompt_tokens',
'token_usage_completion_tokens'] + complexity_metrics_columns +
visualizations_columns].dropna(axis=1).rename({'step': 'output_step',
'text': 'output'}, axis=1)
session_analysis_df = pd.concat([llm_input_prompts_df, llm_outputs_df], axis=1)
session_analysis_df['chat_html'] = session_analysis_df[['prompts', 'output']
].apply(lambda row: construct_html_from_prompt_and_generation(row[
'prompts'], row['output']), axis=1)
return session_analysis_df
|
def _create_session_analysis_df(self) ->Any:
"""Create a dataframe with all the information from the session."""
pd = import_pandas()
on_llm_start_records_df = pd.DataFrame(self.on_llm_start_records)
on_llm_end_records_df = pd.DataFrame(self.on_llm_end_records)
llm_input_prompts_df = on_llm_start_records_df[['step', 'prompts', 'name']
].dropna(axis=1).rename({'step': 'prompt_step'}, axis=1)
complexity_metrics_columns = []
visualizations_columns = []
if self.complexity_metrics:
complexity_metrics_columns = ['flesch_reading_ease',
'flesch_kincaid_grade', 'smog_index', 'coleman_liau_index',
'automated_readability_index', 'dale_chall_readability_score',
'difficult_words', 'linsear_write_formula', 'gunning_fog',
'text_standard', 'fernandez_huerta', 'szigriszt_pazos',
'gutierrez_polini', 'crawford', 'gulpease_index', 'osman']
if self.visualize:
visualizations_columns = ['dependency_tree', 'entities']
llm_outputs_df = on_llm_end_records_df[['step', 'text',
'token_usage_total_tokens', 'token_usage_prompt_tokens',
'token_usage_completion_tokens'] + complexity_metrics_columns +
visualizations_columns].dropna(axis=1).rename({'step':
'output_step', 'text': 'output'}, axis=1)
session_analysis_df = pd.concat([llm_input_prompts_df, llm_outputs_df],
axis=1)
session_analysis_df['chat_html'] = session_analysis_df[['prompts',
'output']].apply(lambda row:
construct_html_from_prompt_and_generation(row['prompts'], row[
'output']), axis=1)
return session_analysis_df
|
Create a dataframe with all the information from the session.
|
_create_subset_model
|
"""Create a pydantic model with only a subset of model's fields."""
fields = {}
for field_name in field_names:
field = model.__fields__[field_name]
fields[field_name] = field.outer_type_, field.field_info
return create_model(name, **fields)
|
def _create_subset_model(name: str, model: BaseModel, field_names: list
) ->Type[BaseModel]:
"""Create a pydantic model with only a subset of model's fields."""
fields = {}
for field_name in field_names:
field = model.__fields__[field_name]
fields[field_name] = field.outer_type_, field.field_info
return create_model(name, **fields)
|
Create a pydantic model with only a subset of model's fields.
|
test_bagel
|
"""Test from_texts"""
texts = ['hello bagel', 'hello langchain']
txt_search = Bagel.from_texts(cluster_name='testing', texts=texts)
output = txt_search.similarity_search('hello bagel', k=1)
assert output == [Document(page_content='hello bagel')]
txt_search.delete_cluster()
|
def test_bagel() ->None:
"""Test from_texts"""
texts = ['hello bagel', 'hello langchain']
txt_search = Bagel.from_texts(cluster_name='testing', texts=texts)
output = txt_search.similarity_search('hello bagel', k=1)
assert output == [Document(page_content='hello bagel')]
txt_search.delete_cluster()
|
Test from_texts
|
_evaluate_strings
|
"""Evaluate Chain or LLM output, based on optional input and label.
Args:
prediction (str): the LLM or chain prediction to evaluate.
reference (Optional[str], optional): the reference label
to evaluate against.
input (Optional[str], optional): the input to consider during evaluation
callbacks (Callbacks, optional): the callbacks to use for tracing.
include_run_info (bool, optional): whether to include run info in the
returned results.
**kwargs: additional keyword arguments, including callbacks, tags, etc.
Returns:
dict: The evaluation results containing the score or value.
"""
result = self({'query': input, 'answer': reference, 'result': prediction},
callbacks=callbacks, include_run_info=include_run_info)
return self._prepare_output(result)
|
def _evaluate_strings(self, *, prediction: str, reference: Optional[str]=
None, input: Optional[str]=None, callbacks: Callbacks=None,
include_run_info: bool=False, **kwargs: Any) ->dict:
"""Evaluate Chain or LLM output, based on optional input and label.
Args:
prediction (str): the LLM or chain prediction to evaluate.
reference (Optional[str], optional): the reference label
to evaluate against.
input (Optional[str], optional): the input to consider during evaluation
callbacks (Callbacks, optional): the callbacks to use for tracing.
include_run_info (bool, optional): whether to include run info in the
returned results.
**kwargs: additional keyword arguments, including callbacks, tags, etc.
Returns:
dict: The evaluation results containing the score or value.
"""
result = self({'query': input, 'answer': reference, 'result':
prediction}, callbacks=callbacks, include_run_info=include_run_info)
return self._prepare_output(result)
|
Evaluate Chain or LLM output, based on optional input and label.
Args:
prediction (str): the LLM or chain prediction to evaluate.
reference (Optional[str], optional): the reference label
to evaluate against.
input (Optional[str], optional): the input to consider during evaluation
callbacks (Callbacks, optional): the callbacks to use for tracing.
include_run_info (bool, optional): whether to include run info in the
returned results.
**kwargs: additional keyword arguments, including callbacks, tags, etc.
Returns:
dict: The evaluation results containing the score or value.
|
load
|
"""Load given path as pages."""
return list(self.lazy_load())
|
def load(self) ->List[Document]:
"""Load given path as pages."""
return list(self.lazy_load())
|
Load given path as pages.
|
dict
|
"""Return dictionary representation of prompt."""
prompt_dict = super().dict(**kwargs)
try:
prompt_dict['_type'] = self._prompt_type
except NotImplementedError:
pass
return prompt_dict
|
def dict(self, **kwargs: Any) ->Dict:
"""Return dictionary representation of prompt."""
prompt_dict = super().dict(**kwargs)
try:
prompt_dict['_type'] = self._prompt_type
except NotImplementedError:
pass
return prompt_dict
|
Return dictionary representation of prompt.
|
_run
|
"""Use the Steam-WebAPI tool."""
return self.api_wrapper.run(self.mode, query)
|
def _run(self, query: str, run_manager: Optional[CallbackManagerForToolRun]
=None) ->str:
"""Use the Steam-WebAPI tool."""
return self.api_wrapper.run(self.mode, query)
|
Use the Steam-WebAPI tool.
|
similarity_search_with_score
|
"""Return Vectara documents most similar to query, along with scores.
Args:
query: Text to look up documents similar to.
k: Number of Documents to return. Defaults to 10.
any other querying variable in VectaraQueryConfig like:
- lambda_val: lexical match parameter for hybrid search.
- filter: filter string
- score_threshold: minimal score threshold for the result.
- n_sentence_context: number of sentences before/after the matching segment
- mmr_config: optional configuration for MMR (see MMRConfig dataclass)
- summary_config: optional configuration for summary
(see SummaryConfig dataclass)
Returns:
List of Documents most similar to the query and score for each.
"""
config = VectaraQueryConfig(**kwargs)
docs = self.vectara_query(query, config)
return docs
|
def similarity_search_with_score(self, query: str, **kwargs: Any) ->List[Tuple
[Document, float]]:
"""Return Vectara documents most similar to query, along with scores.
Args:
query: Text to look up documents similar to.
k: Number of Documents to return. Defaults to 10.
any other querying variable in VectaraQueryConfig like:
- lambda_val: lexical match parameter for hybrid search.
- filter: filter string
- score_threshold: minimal score threshold for the result.
- n_sentence_context: number of sentences before/after the matching segment
- mmr_config: optional configuration for MMR (see MMRConfig dataclass)
- summary_config: optional configuration for summary
(see SummaryConfig dataclass)
Returns:
List of Documents most similar to the query and score for each.
"""
config = VectaraQueryConfig(**kwargs)
docs = self.vectara_query(query, config)
return docs
|
Return Vectara documents most similar to query, along with scores.
Args:
query: Text to look up documents similar to.
k: Number of Documents to return. Defaults to 10.
any other querying variable in VectaraQueryConfig like:
- lambda_val: lexical match parameter for hybrid search.
- filter: filter string
- score_threshold: minimal score threshold for the result.
- n_sentence_context: number of sentences before/after the matching segment
- mmr_config: optional configuration for MMR (see MMRConfig dataclass)
- summary_config: optional configuration for summary
(see SummaryConfig dataclass)
Returns:
List of Documents most similar to the query and score for each.
|
test_load_returns_full_set_of_metadata
|
"""Test that returns several docs"""
api_client = ArxivAPIWrapper(load_max_docs=1, load_all_available_meta=True)
docs = api_client.load('ChatGPT')
assert len(docs) == 1
for doc in docs:
assert doc.page_content
assert doc.metadata
assert set(doc.metadata).issuperset({'Published', 'Title', 'Authors',
'Summary'})
print(doc.metadata)
assert len(set(doc.metadata)) > 4
|
def test_load_returns_full_set_of_metadata() ->None:
"""Test that returns several docs"""
api_client = ArxivAPIWrapper(load_max_docs=1, load_all_available_meta=True)
docs = api_client.load('ChatGPT')
assert len(docs) == 1
for doc in docs:
assert doc.page_content
assert doc.metadata
assert set(doc.metadata).issuperset({'Published', 'Title',
'Authors', 'Summary'})
print(doc.metadata)
assert len(set(doc.metadata)) > 4
|
Test that returns several docs
|
is_lc_serializable
|
"""Return whether this model can be serialized by Langchain."""
return True
|
@classmethod
def is_lc_serializable(cls) ->bool:
"""Return whether this model can be serialized by Langchain."""
return True
|
Return whether this model can be serialized by Langchain.
|
__init__
|
try:
from xinference.client import RESTfulClient
except ImportError as e:
raise ImportError(
'Could not import RESTfulClient from xinference. Please install it with `pip install xinference`.'
) from e
model_kwargs = model_kwargs or {}
super().__init__(**{'server_url': server_url, 'model_uid': model_uid,
'model_kwargs': model_kwargs})
if self.server_url is None:
raise ValueError('Please provide server URL')
if self.model_uid is None:
raise ValueError('Please provide the model UID')
self.client = RESTfulClient(server_url)
|
def __init__(self, server_url: Optional[str]=None, model_uid: Optional[str]
=None, **model_kwargs: Any):
try:
from xinference.client import RESTfulClient
except ImportError as e:
raise ImportError(
'Could not import RESTfulClient from xinference. Please install it with `pip install xinference`.'
) from e
model_kwargs = model_kwargs or {}
super().__init__(**{'server_url': server_url, 'model_uid': model_uid,
'model_kwargs': model_kwargs})
if self.server_url is None:
raise ValueError('Please provide server URL')
if self.model_uid is None:
raise ValueError('Please provide the model UID')
self.client = RESTfulClient(server_url)
| null |
similarity_search_with_score
|
"""
Return Jaguar documents most similar to query, along with scores.
Args:
query: Text to look up documents similar to.
k: Number of Documents to return. Defaults to 3.
lambda_val: lexical match parameter for hybrid search.
where: the where clause in select similarity. For example a
where can be "rating > 3.0 and (state = 'NV' or state = 'CA')"
args: extra options passed to select similarity
kwargs: vector_index=vcol, vector_type=cosine_fraction_float
Returns:
List of Documents most similar to the query and score for each.
List of Tuples of (doc, similarity_score):
[ (doc, score), (doc, score), ...]
"""
vcol = self._vector_index
vtype = self._vector_type
embeddings = self._embedding.embed_query(query)
str_embeddings = [str(f) for f in embeddings]
qv_comma = ','.join(str_embeddings)
podstore = self._pod + '.' + self._store
q = 'select similarity(' + vcol + ",'" + qv_comma + "','topk=" + str(k
) + ',fetch_k=' + str(fetch_k) + ',type=' + vtype
q += ',with_score=yes,with_text=yes'
if args is not None:
q += ',' + args
if metadatas is not None:
meta = '&'.join(metadatas)
q += ',metadata=' + meta
q += "') from " + podstore
if where is not None:
q += ' where ' + where
jarr = self.run(q)
if jarr is None:
return []
docs_with_score = []
for js in jarr:
score = js['score']
text = js['text']
zid = js['zid']
md = {}
md['zid'] = zid
if metadatas is not None:
for m in metadatas:
mv = js[m]
md[m] = mv
doc = Document(page_content=text, metadata=md)
tup = doc, score
docs_with_score.append(tup)
return docs_with_score
|
def similarity_search_with_score(self, query: str, k: int=3, fetch_k: int=-
1, where: Optional[str]=None, args: Optional[str]=None, metadatas:
Optional[List[str]]=None, **kwargs: Any) ->List[Tuple[Document, float]]:
"""
Return Jaguar documents most similar to query, along with scores.
Args:
query: Text to look up documents similar to.
k: Number of Documents to return. Defaults to 3.
lambda_val: lexical match parameter for hybrid search.
where: the where clause in select similarity. For example a
where can be "rating > 3.0 and (state = 'NV' or state = 'CA')"
args: extra options passed to select similarity
kwargs: vector_index=vcol, vector_type=cosine_fraction_float
Returns:
List of Documents most similar to the query and score for each.
List of Tuples of (doc, similarity_score):
[ (doc, score), (doc, score), ...]
"""
vcol = self._vector_index
vtype = self._vector_type
embeddings = self._embedding.embed_query(query)
str_embeddings = [str(f) for f in embeddings]
qv_comma = ','.join(str_embeddings)
podstore = self._pod + '.' + self._store
q = 'select similarity(' + vcol + ",'" + qv_comma + "','topk=" + str(k
) + ',fetch_k=' + str(fetch_k) + ',type=' + vtype
q += ',with_score=yes,with_text=yes'
if args is not None:
q += ',' + args
if metadatas is not None:
meta = '&'.join(metadatas)
q += ',metadata=' + meta
q += "') from " + podstore
if where is not None:
q += ' where ' + where
jarr = self.run(q)
if jarr is None:
return []
docs_with_score = []
for js in jarr:
score = js['score']
text = js['text']
zid = js['zid']
md = {}
md['zid'] = zid
if metadatas is not None:
for m in metadatas:
mv = js[m]
md[m] = mv
doc = Document(page_content=text, metadata=md)
tup = doc, score
docs_with_score.append(tup)
return docs_with_score
|
Return Jaguar documents most similar to query, along with scores.
Args:
query: Text to look up documents similar to.
k: Number of Documents to return. Defaults to 3.
lambda_val: lexical match parameter for hybrid search.
where: the where clause in select similarity. For example a
where can be "rating > 3.0 and (state = 'NV' or state = 'CA')"
args: extra options passed to select similarity
kwargs: vector_index=vcol, vector_type=cosine_fraction_float
Returns:
List of Documents most similar to the query and score for each.
List of Tuples of (doc, similarity_score):
[ (doc, score), (doc, score), ...]
|
add_message
|
"""Append the message to the Zep memory history"""
from zep_python import Memory, Message
zep_message = Message(content=message.content, role=message.type, metadata=
metadata)
zep_memory = Memory(messages=[zep_message])
self.zep_client.memory.add_memory(self.session_id, zep_memory)
|
def add_message(self, message: BaseMessage, metadata: Optional[Dict[str,
Any]]=None) ->None:
"""Append the message to the Zep memory history"""
from zep_python import Memory, Message
zep_message = Message(content=message.content, role=message.type,
metadata=metadata)
zep_memory = Memory(messages=[zep_message])
self.zep_client.memory.add_memory(self.session_id, zep_memory)
|
Append the message to the Zep memory history
|
_get_num_tokens
|
return _get_language_model(self.llm).get_num_tokens(text)
|
def _get_num_tokens(self, text: str) ->int:
return _get_language_model(self.llm).get_num_tokens(text)
| null |
_bing_search_results
|
headers = {'Ocp-Apim-Subscription-Key': self.bing_subscription_key}
params = {'q': search_term, 'count': count, 'textDecorations': True,
'textFormat': 'HTML', **self.search_kwargs}
response = requests.get(self.bing_search_url, headers=headers, params=params)
response.raise_for_status()
search_results = response.json()
if 'webPages' in search_results:
return search_results['webPages']['value']
return []
|
def _bing_search_results(self, search_term: str, count: int) ->List[dict]:
headers = {'Ocp-Apim-Subscription-Key': self.bing_subscription_key}
params = {'q': search_term, 'count': count, 'textDecorations': True,
'textFormat': 'HTML', **self.search_kwargs}
response = requests.get(self.bing_search_url, headers=headers, params=
params)
response.raise_for_status()
search_results = response.json()
if 'webPages' in search_results:
return search_results['webPages']['value']
return []
| null |
test_sitemap_block_does_not_exists
|
"""Test sitemap loader."""
loader = SitemapLoader('https://api.python.langchain.com/sitemap.xml',
blocksize=1000000, blocknum=15)
with pytest.raises(ValueError, match=
'Selected sitemap does not contain enough blocks for given blocknum'):
loader.load()
|
def test_sitemap_block_does_not_exists() ->None:
"""Test sitemap loader."""
loader = SitemapLoader('https://api.python.langchain.com/sitemap.xml',
blocksize=1000000, blocknum=15)
with pytest.raises(ValueError, match=
'Selected sitemap does not contain enough blocks for given blocknum'):
loader.load()
|
Test sitemap loader.
|
predict_and_parse
|
"""Call predict and then parse the results."""
warnings.warn(
'The predict_and_parse method is deprecated, instead pass an output parser directly to LLMChain.'
)
result = self.predict(callbacks=callbacks, **kwargs)
if self.prompt.output_parser is not None:
return self.prompt.output_parser.parse_folder(result)
else:
return result
|
def predict_and_parse(self, callbacks: Callbacks=None, **kwargs: Any) ->Union[
str, List[str], Dict[str, Any]]:
"""Call predict and then parse the results."""
warnings.warn(
'The predict_and_parse method is deprecated, instead pass an output parser directly to LLMChain.'
)
result = self.predict(callbacks=callbacks, **kwargs)
if self.prompt.output_parser is not None:
return self.prompt.output_parser.parse_folder(result)
else:
return result
|
Call predict and then parse the results.
|
__deepcopy__
|
return self
|
def __deepcopy__(self, memo: dict) ->'FakeAsyncCallbackHandler':
return self
| null |
load
|
result = []
current_start_token = self.startToken
start_time = time.time()
while True:
url = (
f'https://{self.blockchainType}.g.alchemy.com/nft/v2/{self.api_key}/getNFTsForCollection?withMetadata=True&contractAddress={self.contract_address}&startToken={current_start_token}'
)
response = requests.get(url)
if response.status_code != 200:
raise ValueError(
f'Request failed with status code {response.status_code}')
items = response.json()['nfts']
if not items:
break
for item in items:
content = str(item)
tokenId = item['id']['tokenId']
metadata = {'source': self.contract_address, 'blockchain': self.
blockchainType, 'tokenId': tokenId}
result.append(Document(page_content=content, metadata=metadata))
if not self.get_all_tokens:
break
current_start_token = self._get_next_tokenId(result[-1].metadata['tokenId']
)
if self.max_execution_time is not None and time.time(
) - start_time > self.max_execution_time:
raise RuntimeError('Execution time exceeded the allowed time limit.')
if not result:
raise ValueError(
f'No NFTs found for contract address {self.contract_address}')
return result
|
def load(self) ->List[Document]:
result = []
current_start_token = self.startToken
start_time = time.time()
while True:
url = (
f'https://{self.blockchainType}.g.alchemy.com/nft/v2/{self.api_key}/getNFTsForCollection?withMetadata=True&contractAddress={self.contract_address}&startToken={current_start_token}'
)
response = requests.get(url)
if response.status_code != 200:
raise ValueError(
f'Request failed with status code {response.status_code}')
items = response.json()['nfts']
if not items:
break
for item in items:
content = str(item)
tokenId = item['id']['tokenId']
metadata = {'source': self.contract_address, 'blockchain': self
.blockchainType, 'tokenId': tokenId}
result.append(Document(page_content=content, metadata=metadata))
if not self.get_all_tokens:
break
current_start_token = self._get_next_tokenId(result[-1].metadata[
'tokenId'])
if self.max_execution_time is not None and time.time(
) - start_time > self.max_execution_time:
raise RuntimeError(
'Execution time exceeded the allowed time limit.')
if not result:
raise ValueError(
f'No NFTs found for contract address {self.contract_address}')
return result
| null |
_get_resource
|
endpoint = MODERN_TREASURY_ENDPOINTS.get(self.resource)
if endpoint is None:
return []
return self._make_request(endpoint)
|
def _get_resource(self) ->List[Document]:
endpoint = MODERN_TREASURY_ENDPOINTS.get(self.resource)
if endpoint is None:
return []
return self._make_request(endpoint)
| null |
_on_llm_new_token
|
"""Process new LLM token."""
|
def _on_llm_new_token(self, run: Run, token: str, chunk: Optional[Union[
GenerationChunk, ChatGenerationChunk]]) ->None:
"""Process new LLM token."""
|
Process new LLM token.
|
add_message
|
"""Add a message to the chat session in Elasticsearch"""
try:
from elasticsearch import ApiError
self.client.index(index=self.index, document={'session_id': self.
session_id, 'created_at': round(time() * 1000), 'history': json.
dumps(message_to_dict(message), ensure_ascii=self.ensure_ascii)},
refresh=True)
except ApiError as err:
logger.error(f'Could not add message to Elasticsearch: {err}')
raise err
|
def add_message(self, message: BaseMessage) ->None:
"""Add a message to the chat session in Elasticsearch"""
try:
from elasticsearch import ApiError
self.client.index(index=self.index, document={'session_id': self.
session_id, 'created_at': round(time() * 1000), 'history': json
.dumps(message_to_dict(message), ensure_ascii=self.ensure_ascii
)}, refresh=True)
except ApiError as err:
logger.error(f'Could not add message to Elasticsearch: {err}')
raise err
|
Add a message to the chat session in Elasticsearch
|
test__filter_similar_embeddings_empty
|
assert len(_filter_similar_embeddings([], cosine_similarity, 0.0)) == 0
|
def test__filter_similar_embeddings_empty() ->None:
assert len(_filter_similar_embeddings([], cosine_similarity, 0.0)) == 0
| null |
_get_tool_return
|
"""Check if the tool is a returning tool."""
agent_action, observation = next_step_output
name_to_tool_map = {tool.name: tool for tool in self.tools}
return_value_key = 'output'
if len(self.agent.return_values) > 0:
return_value_key = self.agent.return_values[0]
if agent_action.tool in name_to_tool_map:
if name_to_tool_map[agent_action.tool].return_direct:
return AgentFinish({return_value_key: observation}, '')
return None
|
def _get_tool_return(self, next_step_output: Tuple[AgentAction, str]
) ->Optional[AgentFinish]:
"""Check if the tool is a returning tool."""
agent_action, observation = next_step_output
name_to_tool_map = {tool.name: tool for tool in self.tools}
return_value_key = 'output'
if len(self.agent.return_values) > 0:
return_value_key = self.agent.return_values[0]
if agent_action.tool in name_to_tool_map:
if name_to_tool_map[agent_action.tool].return_direct:
return AgentFinish({return_value_key: observation}, '')
return None
|
Check if the tool is a returning tool.
|
on_chain_error_common
|
self.errors += 1
|
def on_chain_error_common(self) ->None:
self.errors += 1
| null |
_import_powerbi_tool_QueryPowerBITool
|
from langchain_community.tools.powerbi.tool import QueryPowerBITool
return QueryPowerBITool
|
def _import_powerbi_tool_QueryPowerBITool() ->Any:
from langchain_community.tools.powerbi.tool import QueryPowerBITool
return QueryPowerBITool
| null |
output_keys
|
"""The keys to extract from the run."""
return ['reference']
|
@property
def output_keys(self) ->List[str]:
"""The keys to extract from the run."""
return ['reference']
|
The keys to extract from the run.
|
test_faiss_similarity_search_with_relevance_scores_with_threshold
|
"""Test the similarity search with normalized similarities with score threshold."""
texts = ['foo', 'bar', 'baz']
docsearch = FAISS.from_texts(texts, FakeEmbeddings(), relevance_score_fn=lambda
score: 1.0 - score / math.sqrt(2))
outputs = docsearch.similarity_search_with_relevance_scores('foo', k=2,
score_threshold=0.5)
assert len(outputs) == 1
output, score = outputs[0]
assert output == Document(page_content='foo')
assert score == 1.0
|
@pytest.mark.requires('faiss')
def test_faiss_similarity_search_with_relevance_scores_with_threshold() ->None:
"""Test the similarity search with normalized similarities with score threshold."""
texts = ['foo', 'bar', 'baz']
docsearch = FAISS.from_texts(texts, FakeEmbeddings(),
relevance_score_fn=lambda score: 1.0 - score / math.sqrt(2))
outputs = docsearch.similarity_search_with_relevance_scores('foo', k=2,
score_threshold=0.5)
assert len(outputs) == 1
output, score = outputs[0]
assert output == Document(page_content='foo')
assert score == 1.0
|
Test the similarity search with normalized similarities with score threshold.
|
_construct_result
|
if self.return_intermediate_steps:
extra_return_dict = {'intermediate_steps': refine_steps}
else:
extra_return_dict = {}
return res, extra_return_dict
|
def _construct_result(self, refine_steps: List[str], res: str) ->Tuple[str,
dict]:
if self.return_intermediate_steps:
extra_return_dict = {'intermediate_steps': refine_steps}
else:
extra_return_dict = {}
return res, extra_return_dict
| null |
append_copy
|
"""Append a copy of another MutableExpander's children to this
MutableExpander.
"""
other_records = other._child_records.copy()
for record in other_records:
self._create_child(record.type, record.kwargs)
|
def append_copy(self, other: MutableExpander) ->None:
"""Append a copy of another MutableExpander's children to this
MutableExpander.
"""
other_records = other._child_records.copy()
for record in other_records:
self._create_child(record.type, record.kwargs)
|
Append a copy of another MutableExpander's children to this
MutableExpander.
|
test_singlestoredb_filter_metadata_7
|
"""Test filtering by float"""
table_name = 'test_singlestoredb_filter_metadata_7'
drop(table_name)
docs = [Document(page_content=t, metadata={'index': i, 'category': 'budget',
'score': i + 0.5}) for i, t in enumerate(texts)]
docsearch = SingleStoreDB.from_documents(docs, FakeEmbeddings(),
distance_strategy=DistanceStrategy.EUCLIDEAN_DISTANCE, table_name=
table_name, host=TEST_SINGLESTOREDB_URL)
output = docsearch.similarity_search('bar', k=1, filter={'category':
'budget', 'score': 2.5})
assert output == [Document(page_content='baz', metadata={'index': 2,
'category': 'budget', 'score': 2.5})]
drop(table_name)
|
@pytest.mark.skipif(not singlestoredb_installed, reason=
'singlestoredb not installed')
def test_singlestoredb_filter_metadata_7(texts: List[str]) ->None:
"""Test filtering by float"""
table_name = 'test_singlestoredb_filter_metadata_7'
drop(table_name)
docs = [Document(page_content=t, metadata={'index': i, 'category':
'budget', 'score': i + 0.5}) for i, t in enumerate(texts)]
docsearch = SingleStoreDB.from_documents(docs, FakeEmbeddings(),
distance_strategy=DistanceStrategy.EUCLIDEAN_DISTANCE, table_name=
table_name, host=TEST_SINGLESTOREDB_URL)
output = docsearch.similarity_search('bar', k=1, filter={'category':
'budget', 'score': 2.5})
assert output == [Document(page_content='baz', metadata={'index': 2,
'category': 'budget', 'score': 2.5})]
drop(table_name)
|
Test filtering by float
|
get_value_text
|
return self.Value.TextWithHighlightsValue.Text
|
def get_value_text(self) ->str:
return self.Value.TextWithHighlightsValue.Text
| null |
__init__
|
"""Initialize by creating all tables."""
self.engine = engine
self.cache_schema = cache_schema
self.cache_schema.metadata.create_all(self.engine)
|
def __init__(self, engine: Engine, cache_schema: Type[FullLLMCache]=
FullLLMCache):
"""Initialize by creating all tables."""
self.engine = engine
self.cache_schema = cache_schema
self.cache_schema.metadata.create_all(self.engine)
|
Initialize by creating all tables.
|
test_visit_operation
|
op = Operation(operator=Operator.AND, arguments=[Comparison(comparator=
Comparator.LT, attribute='foo', value=2), Comparison(comparator=
Comparator.EQ, attribute='bar', value='baz'), Comparison(comparator=
Comparator.LT, attribute='abc', value=['1', '2'])])
expected = {'$and': [{'foo': {'$lt': 2}}, {'bar': {'$eq': 'baz'}}, {'abc':
{'$lt': ['1', '2']}}]}
actual = DEFAULT_TRANSLATOR.visit_operation(op)
assert expected == actual
|
def test_visit_operation() ->None:
op = Operation(operator=Operator.AND, arguments=[Comparison(comparator=
Comparator.LT, attribute='foo', value=2), Comparison(comparator=
Comparator.EQ, attribute='bar', value='baz'), Comparison(comparator
=Comparator.LT, attribute='abc', value=['1', '2'])])
expected = {'$and': [{'foo': {'$lt': 2}}, {'bar': {'$eq': 'baz'}}, {
'abc': {'$lt': ['1', '2']}}]}
actual = DEFAULT_TRANSLATOR.visit_operation(op)
assert expected == actual
| null |
test_singlestoredb_add_texts_to_existing
|
"""Test adding a new document"""
table_name = 'test_singlestoredb_add_texts_to_existing'
drop(table_name)
SingleStoreDB.from_texts(texts, NormilizedFakeEmbeddings(), table_name=
table_name, host=TEST_SINGLESTOREDB_URL)
docsearch = SingleStoreDB(NormilizedFakeEmbeddings(), table_name=table_name,
host=TEST_SINGLESTOREDB_URL)
docsearch.add_texts(['foo'])
output = docsearch.similarity_search('foo', k=2)
assert output == TEST_RESULT
drop(table_name)
|
@pytest.mark.skipif(not singlestoredb_installed, reason=
'singlestoredb not installed')
def test_singlestoredb_add_texts_to_existing(texts: List[str]) ->None:
"""Test adding a new document"""
table_name = 'test_singlestoredb_add_texts_to_existing'
drop(table_name)
SingleStoreDB.from_texts(texts, NormilizedFakeEmbeddings(), table_name=
table_name, host=TEST_SINGLESTOREDB_URL)
docsearch = SingleStoreDB(NormilizedFakeEmbeddings(), table_name=
table_name, host=TEST_SINGLESTOREDB_URL)
docsearch.add_texts(['foo'])
output = docsearch.similarity_search('foo', k=2)
assert output == TEST_RESULT
drop(table_name)
|
Test adding a new document
|
_raise_functions_not_supported
|
raise ValueError(
'Function messages are not supported by the Javelin AI Gateway. Please create a feature request at https://docs.getjavelin.io'
)
|
@staticmethod
def _raise_functions_not_supported() ->None:
raise ValueError(
'Function messages are not supported by the Javelin AI Gateway. Please create a feature request at https://docs.getjavelin.io'
)
| null |
test_milvus
|
"""Test end to end construction and search."""
docsearch = _milvus_from_texts()
output = docsearch.similarity_search('foo', k=1)
assert output == [Document(page_content='foo')]
|
def test_milvus() ->None:
"""Test end to end construction and search."""
docsearch = _milvus_from_texts()
output = docsearch.similarity_search('foo', k=1)
assert output == [Document(page_content='foo')]
|
Test end to end construction and search.
|
add_vector_field
|
if self.vector is None:
self.vector = []
if vector_field['algorithm'] == 'FLAT':
self.vector.append(FlatVectorField(**vector_field))
elif vector_field['algorithm'] == 'HNSW':
self.vector.append(HNSWVectorField(**vector_field))
else:
raise ValueError(
f"algorithm must be either FLAT or HNSW. Got {vector_field['algorithm']}"
)
|
def add_vector_field(self, vector_field: Dict[str, Any]) ->None:
if self.vector is None:
self.vector = []
if vector_field['algorithm'] == 'FLAT':
self.vector.append(FlatVectorField(**vector_field))
elif vector_field['algorithm'] == 'HNSW':
self.vector.append(HNSWVectorField(**vector_field))
else:
raise ValueError(
f"algorithm must be either FLAT or HNSW. Got {vector_field['algorithm']}"
)
| null |
_make_iterator
|
"""Create a function that optionally wraps an iterable in tqdm."""
if show_progress:
try:
from tqdm.auto import tqdm
except ImportError:
raise ImportError(
'You must install tqdm to use show_progress=True.You can install tqdm with `pip install tqdm`.'
)
def _with_tqdm(iterable: Iterable[T]) ->Iterator[T]:
"""Wrap an iterable in a tqdm progress bar."""
return tqdm(iterable, total=length_func())
iterator = _with_tqdm
else:
iterator = iter
return iterator
|
def _make_iterator(length_func: Callable[[], int], show_progress: bool=False
) ->Callable[[Iterable[T]], Iterator[T]]:
"""Create a function that optionally wraps an iterable in tqdm."""
if show_progress:
try:
from tqdm.auto import tqdm
except ImportError:
raise ImportError(
'You must install tqdm to use show_progress=True.You can install tqdm with `pip install tqdm`.'
)
def _with_tqdm(iterable: Iterable[T]) ->Iterator[T]:
"""Wrap an iterable in a tqdm progress bar."""
return tqdm(iterable, total=length_func())
iterator = _with_tqdm
else:
iterator = iter
return iterator
|
Create a function that optionally wraps an iterable in tqdm.
|
save_context
|
"""Save context from this conversation to buffer. Pruned."""
super().save_context(inputs, outputs)
buffer = self.chat_memory.messages
curr_buffer_length = self.llm.get_num_tokens_from_messages(buffer)
if curr_buffer_length > self.max_token_limit:
pruned_memory = []
while curr_buffer_length > self.max_token_limit:
pruned_memory.append(buffer.pop(0))
curr_buffer_length = self.llm.get_num_tokens_from_messages(buffer)
|
def save_context(self, inputs: Dict[str, Any], outputs: Dict[str, str]) ->None:
"""Save context from this conversation to buffer. Pruned."""
super().save_context(inputs, outputs)
buffer = self.chat_memory.messages
curr_buffer_length = self.llm.get_num_tokens_from_messages(buffer)
if curr_buffer_length > self.max_token_limit:
pruned_memory = []
while curr_buffer_length > self.max_token_limit:
pruned_memory.append(buffer.pop(0))
curr_buffer_length = self.llm.get_num_tokens_from_messages(buffer)
|
Save context from this conversation to buffer. Pruned.
|
__init__
|
self.query = query
self.page_content_field = page_content_field
self.secret = secret
self.metadata_fields = metadata_fields
|
def __init__(self, query: str, page_content_field: str, secret: str,
metadata_fields: Optional[Sequence[str]]=None):
self.query = query
self.page_content_field = page_content_field
self.secret = secret
self.metadata_fields = metadata_fields
| null |
convert_to_base64
|
"""
Convert PIL images to Base64 encoded strings
:param pil_image: PIL image
:return: Re-sized Base64 string
"""
buffered = BytesIO()
pil_image.save(buffered, format='JPEG')
img_str = base64.b64encode(buffered.getvalue()).decode('utf-8')
return img_str
|
def convert_to_base64(pil_image):
"""
Convert PIL images to Base64 encoded strings
:param pil_image: PIL image
:return: Re-sized Base64 string
"""
buffered = BytesIO()
pil_image.save(buffered, format='JPEG')
img_str = base64.b64encode(buffered.getvalue()).decode('utf-8')
return img_str
|
Convert PIL images to Base64 encoded strings
:param pil_image: PIL image
:return: Re-sized Base64 string
|
output_keys
|
"""The checker output keys.
:meta private:
"""
return [self.output_key]
|
@property
def output_keys(self) ->List[str]:
"""The checker output keys.
:meta private:
"""
return [self.output_key]
|
The checker output keys.
:meta private:
|
test_math_question_1
|
"""Test simple question."""
question = """Olivia has $23. She bought five bagels for $3 each.
How much money does she have left?"""
prompt = MATH_PROMPT.format(question=question)
queries = {prompt: _MATH_SOLUTION_1}
fake_llm = FakeLLM(queries=queries)
fake_pal_chain = PALChain.from_math_prompt(fake_llm, timeout=None)
output = fake_pal_chain.run(question)
assert output == '8'
|
def test_math_question_1() ->None:
"""Test simple question."""
question = """Olivia has $23. She bought five bagels for $3 each.
How much money does she have left?"""
prompt = MATH_PROMPT.format(question=question)
queries = {prompt: _MATH_SOLUTION_1}
fake_llm = FakeLLM(queries=queries)
fake_pal_chain = PALChain.from_math_prompt(fake_llm, timeout=None)
output = fake_pal_chain.run(question)
assert output == '8'
|
Test simple question.
|
_split_text_with_regex
|
if separator:
if keep_separator:
_splits = re.split(f'({separator})', text)
splits = [(_splits[i] + _splits[i + 1]) for i in range(1, len(
_splits), 2)]
if len(_splits) % 2 == 0:
splits += _splits[-1:]
splits = [_splits[0]] + splits
else:
splits = re.split(separator, text)
else:
splits = list(text)
return [s for s in splits if s != '']
|
def _split_text_with_regex(text: str, separator: str, keep_separator: bool
) ->List[str]:
if separator:
if keep_separator:
_splits = re.split(f'({separator})', text)
splits = [(_splits[i] + _splits[i + 1]) for i in range(1, len(
_splits), 2)]
if len(_splits) % 2 == 0:
splits += _splits[-1:]
splits = [_splits[0]] + splits
else:
splits = re.split(separator, text)
else:
splits = list(text)
return [s for s in splits if s != '']
| null |
create_client
|
values['store'] = Zilliz(values['embedding_function'], values[
'collection_name'], values['connection_args'], values['consistency_level'])
values['retriever'] = values['store'].as_retriever(search_kwargs={'param':
values['search_params']})
return values
|
@root_validator(pre=True)
def create_client(cls, values: dict) ->dict:
values['store'] = Zilliz(values['embedding_function'], values[
'collection_name'], values['connection_args'], values[
'consistency_level'])
values['retriever'] = values['store'].as_retriever(search_kwargs={
'param': values['search_params']})
return values
| null |
_post_process_elements
|
"""Applies post processing functions to extracted unstructured elements.
Post processing functions are str -> str callables are passed
in using the post_processors kwarg when the loader is instantiated."""
for element in elements:
for post_processor in self.post_processors:
element.apply(post_processor)
return elements
|
def _post_process_elements(self, elements: list) ->list:
"""Applies post processing functions to extracted unstructured elements.
Post processing functions are str -> str callables are passed
in using the post_processors kwarg when the loader is instantiated."""
for element in elements:
for post_processor in self.post_processors:
element.apply(post_processor)
return elements
|
Applies post processing functions to extracted unstructured elements.
Post processing functions are str -> str callables are passed
in using the post_processors kwarg when the loader is instantiated.
|
_run
|
"""Use the Semantic Scholar tool."""
return self.api_wrapper.run(query)
|
def _run(self, query: str, run_manager: Optional[CallbackManagerForToolRun]
=None) ->str:
"""Use the Semantic Scholar tool."""
return self.api_wrapper.run(query)
|
Use the Semantic Scholar tool.
|
_Constant
|
value = t.value
if isinstance(value, tuple):
self.write('(')
if len(value) == 1:
self._write_constant(value[0])
self.write(',')
else:
interleave(lambda : self.write(', '), self._write_constant, value)
self.write(')')
elif value is ...:
self.write('...')
else:
if t.kind == 'u':
self.write('u')
self._write_constant(t.value)
|
def _Constant(self, t):
value = t.value
if isinstance(value, tuple):
self.write('(')
if len(value) == 1:
self._write_constant(value[0])
self.write(',')
else:
interleave(lambda : self.write(', '), self._write_constant, value)
self.write(')')
elif value is ...:
self.write('...')
else:
if t.kind == 'u':
self.write('u')
self._write_constant(t.value)
| null |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.